repo | file | code | file_length | avg_line_length | max_line_length | extension_type
|---|---|---|---|---|---|---|
NEDMP | NEDMP-main/src/DMP/SIS.py | # -*- encoding: utf-8 -*-
'''
@File : SIS.py
@Time : 2021/05/21 15:57:18
@Author : Fei gao
@Contact : feig@mail.bnu.edu.cn
BNU, Beijing, China
'''
import torch
from torch_scatter import scatter
from copy import deepcopy
from src.DMP.utils import edgeList
class DMP_SIS():
def __init__(self, weight_adj, nodes_p, max_steps=30):
"""
This class implements the rDMP equations for the SIS model on a graph.
The details of this model refer to equations (2) and (3) in the paper:
Shrestha, Munik, Samuel V. Scarpino, and Cristopher Moore.
"Message-passing approach for recurrent-state epidemic models
on networks." Physical Review E 92.2 (2015): 022821.
This implementation supports setting different parameters for edges and nodes.
Parameters:
weight_adj: np.array of size [N, N], storing the edge weight of each edge
nodes_p: np.array of size [N], the probability for each infected node to return to susceptible
"""
src_nodes, tar_nodes, edge_weight, cave_idx = edgeList(weight_adj)
self.src_nodes = torch.LongTensor(src_nodes)
self.tar_nodes = torch.LongTensor(tar_nodes)
self.edge_weight = torch.Tensor(edge_weight)
self.cave_idx = torch.LongTensor(cave_idx)
self.node_prob = torch.Tensor(nodes_p)
self.node_prob_edge = self.node_prob[self.src_nodes]
self.number_of_nodes = weight_adj.shape[0]
self.number_of_edges = self.src_nodes.shape[0]
self.max_steps = max_steps
self.marginal_each_step = []
def _set_seeds(self, seed_list):
"""
setting the initial conditions using seed_list
"""
# The probabilities being infectious and susceptible
self.I = torch.zeros(self.number_of_nodes)
self.S = torch.ones(self.number_of_nodes)
for seed in seed_list:
self.I[seed] = 1
self.S[seed] = 0
self.record()
# self.message[i] is the message for edge [src_node[i], tar_node[i]]
# If src_node[i] is seed node, then self.message[i] = 1, else 0
self.message = torch.zeros(self.number_of_edges)
for i, src in enumerate(self.src_nodes):
if src in seed_list:
self.message[i] = 1
def record(self):
"""
recording a [N, 2] tensor for each step
"""
I = deepcopy(self.I)
S = deepcopy(self.S)
self.marginal_each_step.append(torch.stack((S, I), dim=1))
def iteration(self):
"""
One-step updating the message; Output the new I and S
"""
message = self.message * self.edge_weight * self.S[self.tar_nodes]
message_aggregation = scatter(message, self.tar_nodes, reduce="sum", dim_size=self.number_of_nodes) # dim_size keeps the aggregation aligned with all nodes
# message update
message_aggregation_cave = message_aggregation[self.src_nodes] - message[self.cave_idx]
self.message = self.message - self.node_prob_edge * self.message + message_aggregation_cave
# Nodes update
self.I = self.I - self.node_prob * self.I + message_aggregation
self.S = 1 - self.I
return self.I, self.S
def _stop(self):
if len(self.marginal_each_step) < 2:
return False
else:
former, later = self.marginal_each_step[-2:]
delta = torch.max(torch.abs(former-later))
if delta > 0.0001:
return False
else:
return True
def run(self, seed_list):
assert isinstance(seed_list, list)
seed_list = [int(seed) for seed in seed_list]
self._set_seeds(seed_list)
for step in range(self.max_steps):
self.iteration()
self.record()
if self._stop():
break
# stack marginals for output
marginals = torch.stack(self.marginal_each_step, dim=0) # ==> [T, N, 2]
return marginals
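# --- Usage sketch (added; not part of the original file) ---
# Minimal example of running the rDMP-SIS equations on a tiny chain graph.
# The transmission weights, recovery probability and seed below are illustrative;
# it assumes src.DMP.utils.edgeList accepts a dense [N, N] weight matrix.
if __name__ == "__main__":
    import numpy as np
    # 3-node chain; weight_adj[i, j] is the transmission weight of edge i -> j
    weight_adj = np.array([[0.0, 0.5, 0.0],
                           [0.5, 0.0, 0.5],
                           [0.0, 0.5, 0.0]])
    nodes_p = np.full(3, 0.2)           # recovery (I -> S) probability per node
    dmp = DMP_SIS(weight_adj, nodes_p, max_steps=50)
    marginals = dmp.run(seed_list=[0])  # [T, N, 2] tensor of (S, I) marginals
    print(marginals.shape, marginals[-1])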
| 3,989 | 34.309735 | 105 | py |
NEDMP | NEDMP-main/src/DMP/SIR.py | # -*- encoding: utf-8 -*-
'''
@File : SIR.py
@Time : 2021/04/02 13:26:45
@Author : Fei gao
@Contact : feig@mail.bnu.edu.cn
BNU, Beijing, China
'''
from functools import reduce
import networkx as nx
import torch as T
from torch_scatter import scatter
from torch_geometric.utils import degree
from src.DMP.utils import edgeList
class DMP_SIR():
def __init__(self, weight_adj, nodes_gamma):
self.edge_list = edgeList(weight_adj)
# edge_list with size [4, E]: (src_node, tar_node, weight, cave_index)
self.src_nodes = T.LongTensor(self.edge_list[0])
self.tar_nodes = T.LongTensor(self.edge_list[1])
self.weights = T.FloatTensor(self.edge_list[2])
self.cave_index = T.LongTensor(self.edge_list[3])
self.gamma = T.FloatTensor(nodes_gamma)[self.src_nodes]
self.nodes_gamma = T.FloatTensor(nodes_gamma)
self.N = max([T.max(self.src_nodes), T.max(self.tar_nodes)]).item()+1
self.E = len(self.src_nodes)
self.marginals = []
def mulmul(self, Theta_t):
Theta = scatter(Theta_t, index=self.tar_nodes, reduce="mul", dim_size=self.N) # [N]
Theta = Theta[self.src_nodes] #[E]
Theta_cav = scatter(Theta_t, index=self.cave_index, reduce="mul", dim_size=self.E+1)[:self.E]
mul = Theta / Theta_cav
return mul
def _set_seeds(self, seed_list):
self.seeds = T.zeros(self.N)
self.seeds[seed_list] = 1
# initial
self.Ps_0 = 1 - self.seeds
self.Pi_0 = self.seeds
self.Pr_0 = T.zeros_like(self.seeds)
self.Ps_i_0 = self.Ps_0[self.src_nodes]
self.Pi_i_0 = self.Pi_0[self.src_nodes]
self.Pr_i_0 = self.Pr_0[self.src_nodes]
self.Phi_ij_0 = 1 - self.Ps_i_0
self.Theta_ij_0 = T.ones(self.E)
# first iteration, t=1
self.Theta_ij_t = self.Theta_ij_0 - self.weights * self.Phi_ij_0 + 1E-10 # get rid of NaN
self.Ps_ij_t_1 = self.Ps_i_0 # t-1
self.Ps_ij_t = self.Ps_i_0 * self.mulmul(self.Theta_ij_t) # t
self.Phi_ij_t = (1-self.weights)*(1-self.gamma)*self.Phi_ij_0 - (self.Ps_ij_t-self.Ps_ij_t_1)
# marginals
self.Ps_t = self.Ps_0 * scatter(self.Theta_ij_t, self.tar_nodes, reduce="mul", dim_size=self.N)
self.Pr_t = self.Pr_0 + self.nodes_gamma*self.Pi_0
self.Pi_t = 1 - self.Ps_t - self.Pr_t
self.marginals.append([self.Ps_t, self.Pi_t, self.Pr_t])
# print(T.stack([self.Ps_t, self.Pi_t, self.Pr_t], dim=1))
def iteration(self):
self.Theta_ij_t = self.Theta_ij_t - self.weights * self.Phi_ij_t
new_Ps_ij_t = self.Ps_i_0 * self.mulmul(self.Theta_ij_t)
self.Ps_ij_t_1 = self.Ps_ij_t
self.Ps_ij_t = new_Ps_ij_t
self.Phi_ij_t = (1-self.weights)*(1-self.gamma)*self.Phi_ij_t - (self.Ps_ij_t-self.Ps_ij_t_1)
# marginals
self.Ps_t = self.Ps_0 * scatter(self.Theta_ij_t, self.tar_nodes, reduce="mul", dim_size=self.N)
self.Pr_t = self.Pr_t + self.nodes_gamma*self.Pi_t
self.Pi_t = 1 - self.Ps_t - self.Pr_t
self.marginals.append([self.Ps_t, self.Pi_t, self.Pr_t])
# print(T.stack([self.Ps_t, self.Pi_t, self.Pr_t], dim=1))
def _stop(self):
I_former, R_former = self.marginals[-2][1:]
I_later , R_later = self.marginals[-1][1:]
I_delta = T.sum(T.abs(I_former-I_later))
R_delta = T.sum(T.abs(R_former-R_later))
if I_delta>0.01 or R_delta>0.01:
return False
else:
return True
def output(self):
marginals = [T.stack(m, dim=1) for m in self.marginals]
marginals = T.stack(marginals, dim=0)
return marginals
def run(self, seed_list):
self._set_seeds(seed_list)
while True:
self.iteration()
if self._stop():
break
# Output a size of [T, N, 3] Tensor, T starts from t=1
return self.output()
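# --- Usage sketch (added; not part of the original file) ---
# DMP_SIR follows the same calling pattern as DMP_SIS, but takes per-node
# recovery rates gamma and returns [T, N, 3] marginals (S, I, R). Values are
# illustrative only.
if __name__ == "__main__":
    import numpy as np
    weight_adj = np.array([[0.0, 0.5, 0.0],
                           [0.5, 0.0, 0.5],
                           [0.0, 0.5, 0.0]])
    nodes_gamma = np.full(3, 0.3)       # per-node recovery rates
    dmp = DMP_SIR(weight_adj, nodes_gamma)
    marginals = dmp.run(seed_list=[0])  # [T, N, 3] tensor of (S, I, R) marginals
    print(marginals.shape)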
| 3,995 | 33.747826 | 103 | py |
PoSFeat | PoSFeat-main/networks/PoSFeat_model.py | '''
WSFModel without global header
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from abc import ABC, abstractmethod
from path import Path
import os
import networks
class PoSFeat(ABC):
def __init__(self, configs, device, no_cuda=None):
self.config = configs
self.device = device
self.no_cuda = no_cuda
self.align_local_grad = self.config['align_local_grad']
self.local_input_elements = self.config['local_input_elements']
self.local_with_img = self.config['local_with_img']
self.parameters = []
backbone = getattr(networks, self.config['backbone'])
self.backbone = backbone(**self.config['backbone_config']).to(self.device)
self.parameters += list(self.backbone.parameters())
# self.backbone.eval()
message = "backbone: {}\n".format(self.config['backbone'])
if 'localheader' in list(self.config.keys()) and self.config['localheader'] != 'None':
# if self.config['localheader'] is not None:
localheader = getattr(networks, self.config['localheader'])
self.localheader = localheader(**self.config['localheader_config']).to(self.device)
message += "localheader: {}\n".format(self.config['localheader'])
else:
in_channel = self.backbone.out_channels[0]
# if self.config['backbone'] == 'LiteHRNet':
# in_channel = self.config['backbone_config']['extra']['stages_spec']['num_channels'][-1][0]
# else:
# in_channel = 128
self.localheader = networks.KeypointDet(in_channels=in_channel, out_channels=2).to(self.device)
message += "localheader: KeypointDet\n"
self.parameters += list(self.localheader.parameters())
self.modules = ['localheader', 'backbone']
print(message)
def set_parallel(self, local_rank):
self.backbone = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.backbone)
self.backbone = torch.nn.parallel.DistributedDataParallel(self.backbone,
find_unused_parameters=True,device_ids=[local_rank],output_device=local_rank)
self.localheader = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.localheader)
self.localheader = torch.nn.parallel.DistributedDataParallel(self.localheader,
find_unused_parameters=True,device_ids=[local_rank],output_device=local_rank)
def load_checkpoint(self, load_path):
load_root = Path(load_path)
model_list = ['backbone', 'localheader']
for name in model_list:
model_path = load_root/'{}.pth'.format(name)
if os.path.exists(model_path):
print('load {} from checkpoint'.format(name))
else:
print('{} does not exist, skipping load'.format(name))
continue
model = getattr(self, name)
model_param = torch.load(model_path)
# print('\n\n {}\n'.format(name))
# for key, val in model_param.items():
# print(key)
model.load_state_dict(model_param)
def save_checkpoint(self, save_path):
save_root = Path(save_path)
model_list = ['backbone', 'localheader']
for name in model_list:
model_path = save_root/'{}.pth'.format(name)
model = getattr(self, name)
model_param = model.state_dict()
torch.save(model_param, model_path)
def set_train(self):
self.backbone.train()
self.localheader.train()
def set_eval(self):
self.backbone.eval()
self.localheader.eval()
def extract(self, tensor, postfix=""):
feat_maps = self.backbone(tensor)
# g_map = self.globalheader(feat_maps['global_map'])
b, c, h, w = feat_maps['global_map'].shape
g_map = torch.ones(b,1, h, w).type_as(feat_maps['local_map']).to(feat_maps['local_map'].device)
local_list = []
for name in self.local_input_elements:
local_list.append(feat_maps[name])
local_input = torch.cat(local_list, dim=1)
if not self.align_local_grad:
# l_map = self.localheader(local_input)
local_input = local_input.detach()
# else:
# l_map = self.localheader(local_input.detach())
if self.local_with_img:
local_input = [local_input, tensor]
l_map = self.localheader(local_input)
if l_map.shape[1] == 1:
local_thr = torch.zeros_like(l_map)
elif l_map.shape[1] == 2:
local_thr = l_map[:,1:,:,:]
l_map = l_map[:,:1,:,:]
g_desc = g_map*feat_maps['global_map']
# g_desc = g_desc.sum([2,3])
g_desc = F.normalize(g_desc, p=2, dim=1).mean([2,3])
outputs = {
'local_map': feat_maps['local_map'],
'global_map': feat_maps['global_map'],
'global_feat': g_desc,
'local_point': l_map,
'local_thr': local_thr,
'global_point': g_map
}
# outputs = {
# 'local_feat{}'.format(postfix): feat_maps['fine_map'],
# 'global_feat{}'.format(postfix): g_desc,
# 'local_point{}'.format(postfix): l_map,
# 'global_point{}'.format(postfix): g_map
# }
return outputs
def forward(self, inputs):
for key, val in inputs.items():
if key in self.no_cuda:
continue
inputs[key] = val.to(self.device)
# preds = self.extract(inputs['im1'],1)
# preds.update(self.extract(inputs['im2'],2))
preds1 = self.extract(inputs['im1'],1)
preds2 = self.extract(inputs['im2'],2)
return {'preds1':preds1, 'preds2':preds2}
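# --- Usage sketch (added; not part of the original file) ---
# A hypothetical minimal config containing only the keys read in __init__ above.
# It assumes ResUNet and KeypointDet are exported by the `networks` package (as
# used elsewhere in this repo) and runs with randomly initialized weights.
if __name__ == "__main__":
    configs = {
        'backbone': 'ResUNet',
        'backbone_config': {'encoder': 'resnet18', 'pretrained': False},
        'localheader': 'None',                 # fall back to the default KeypointDet header
        'align_local_grad': False,
        'local_input_elements': ['local_map'],
        'local_with_img': True,
    }
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = PoSFeat(configs, device, no_cuda=[])
    model.set_eval()
    with torch.no_grad():
        out = model.extract(torch.randn(1, 3, 256, 256, device=device))
    print({k: tuple(v.shape) for k, v in out.items()})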
| 5,799 | 37.926174 | 108 | py |
PoSFeat | PoSFeat-main/networks/DescNet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import importlib
def class_for_name(module_name, class_name):
# load the module, will raise ImportError if module cannot be loaded
m = importlib.import_module(module_name)
return getattr(m, class_name)
class ResUNet(nn.Module):
def __init__(self,
encoder='resnet50',
pretrained=True,
coarse_out_ch=128,
fine_out_ch=128
):
super(ResUNet, self).__init__()
assert encoder in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'wide_resnet50_2'], "Incorrect encoder type"
if encoder in ['resnet18', 'resnet34']:
filters = [64, 128, 256, 512]
else:
filters = [256, 512, 1024, 2048]
resnet = class_for_name("torchvision.models", encoder)(pretrained=pretrained)
self.firstconv = resnet.conv1 # H/2
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool # H/4
# encoder
self.layer1 = resnet.layer1 # H/4
self.layer2 = resnet.layer2 # H/8
self.layer3 = resnet.layer3 # H/16
# coarse-level conv
self.conv_coarse = conv(filters[2], coarse_out_ch, 1, 1)
# decoder
self.upconv3 = upconv(filters[2], 512, 3, 2)
self.iconv3 = conv(filters[1] + 512, 512, 3, 1)
self.upconv2 = upconv(512, 256, 3, 2)
self.iconv2 = conv(filters[0] + 256, 256, 3, 1)
# fine-level conv
self.conv_fine = conv(256, fine_out_ch, 1, 1)
self.out_channels = [fine_out_ch, coarse_out_ch]
def skipconnect(self, x1, x2):
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2))
# for padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return x
def forward(self, x):
x = self.firstrelu(self.firstbn(self.firstconv(x)))
x_first = self.firstmaxpool(x)
x1 = self.layer1(x_first)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x_coarse = self.conv_coarse(x3) #H/16
x = self.upconv3(x3)
x = self.skipconnect(x2, x)
x = self.iconv3(x)
x = self.upconv2(x)
x = self.skipconnect(x1, x)
x = self.iconv2(x)
x_fine = self.conv_fine(x) #H/4
return {'global_map':x_coarse, 'local_map':x_fine, 'local_map_small':x_first}
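# Shape reference (added note; not part of the original file): for a 1x3x256x256
# input with encoder='resnet18', the forward pass above returns
#   out['global_map']      -> [1, coarse_out_ch, 16, 16]  (H/16)
#   out['local_map']       -> [1, fine_out_ch, 64, 64]    (H/4)
#   out['local_map_small'] -> [1, 64, 64, 64]             (H/4, raw stem features)
# e.g.  net = ResUNet(encoder='resnet18', pretrained=False)
#       out = net(torch.randn(1, 3, 256, 256))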
class ResUNetHR(nn.Module):
def __init__(self,
encoder='resnet50',
pretrained=True,
coarse_out_ch=128,
fine_out_ch=128
):
super(ResUNetHR, self).__init__()
assert encoder in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'], "Incorrect encoder type"
if encoder in ['resnet18', 'resnet34']:
filters = [64, 128, 256, 512]
else:
filters = [256, 512, 1024, 2048]
resnet = class_for_name("torchvision.models", encoder)(pretrained=pretrained)
self.firstconv = resnet.conv1 # H/2
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool # H/4
# encoder
self.layer1 = resnet.layer1 # H/4
self.layer2 = resnet.layer2 # H/8
self.layer3 = resnet.layer3 # H/16
# coarse-level conv
self.conv_coarse = conv(filters[2], coarse_out_ch, 1, 1)
# decoder
self.upconv3 = upconv(filters[2], 512, 3, 2)
self.iconv3 = conv(filters[1] + 512, 512, 3, 1)
self.upconv2 = upconv(512, 256, 3, 2)
self.iconv2 = conv(filters[0] + 256, 256, 3, 1)
self.upconv1 = upconv(256,192,3,2)
self.iconv1 = conv(64 + 192, 256, 3, 1)
# fine-level conv
self.conv_fine = conv(256, fine_out_ch, 1, 1)
self.out_channels = [fine_out_ch, coarse_out_ch]
def skipconnect(self, x1, x2):
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2))
# for padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return x
def forward(self, x):
x_first1 = self.firstrelu(self.firstbn(self.firstconv(x)))
x_first = self.firstmaxpool(x_first1)
x1 = self.layer1(x_first)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x_coarse = self.conv_coarse(x3) #H/16
x = self.upconv3(x3)
x = self.skipconnect(x2, x)
x = self.iconv3(x)
x = self.upconv2(x)
x = self.skipconnect(x1, x)
x = self.iconv2(x)
x = self.upconv1(x)
x = self.skipconnect(x_first1, x)
x = self.iconv1(x)
x_fine = self.conv_fine(x) #H/2
return {'global_map':x_coarse, 'local_map':x_fine, 'local_map_small':x_first1}
class conv(nn.Module):
def __init__(self, num_in_layers, num_out_layers, kernel_size, stride):
super(conv, self).__init__()
self.kernel_size = kernel_size
self.conv = nn.Conv2d(num_in_layers,
num_out_layers,
kernel_size=kernel_size,
stride=stride,
padding=(self.kernel_size - 1) // 2)
self.bn = nn.BatchNorm2d(num_out_layers)
def forward(self, x):
return F.elu(self.bn(self.conv(x)), inplace=True)
class upconv(nn.Module):
def __init__(self, num_in_layers, num_out_layers, kernel_size, scale):
super(upconv, self).__init__()
self.scale = scale
self.conv = conv(num_in_layers, num_out_layers, kernel_size, 1)
def forward(self, x):
x = nn.functional.interpolate(x, scale_factor=self.scale, align_corners=True, mode='bilinear')
return self.conv(x) | 6,504 | 33.236842 | 133 | py |
PoSFeat | PoSFeat-main/networks/DeteNet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class KeypointDet(nn.Module):
"""
spatial attention header
"""
def __init__(self, in_channels, out_channels=1, prior='SSIM', act='Sigmoid'):
super(KeypointDet, self).__init__()
self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1)
self.norm1 = nn.InstanceNorm2d(in_channels)
self.conv2 = nn.Conv2d(in_channels+64, 128, 3, 1, 1)
self.norm2 = nn.InstanceNorm2d(128)
self.conv3 = nn.Conv2d(128, out_channels, 1, 1, 0)
self.norm3 = nn.InstanceNorm2d(out_channels)
self.relu = nn.PReLU()
self.prior = getattr(self, prior)
self.act = getattr(nn, act)()
self.convimg = nn.Conv2d(3, 64, 3, 1, 1)
self.normimg = nn.InstanceNorm2d(64)
def SSIM(self, x):
C1 = 0.01 ** 2
C2 = 0.03 ** 2
x_pad = F.pad(x.abs(), (0,1,0,1), 'reflect')
x_lu = x_pad[:,:,:-1,:-1]
x_rb = x_pad[:,:,1:,1:]
x_lu = F.pad(x_lu, (1,1,1,1), 'reflect')
x_rb = F.pad(x_rb, (1,1,1,1), 'reflect')
m_x_lu = F.avg_pool2d(x_lu, 3, 1)
m_x_rb = F.avg_pool2d(x_rb, 3, 1)
sigma_x_lu = F.avg_pool2d(x_lu**2, 3, 1) - m_x_lu**2
sigma_x_rb = F.avg_pool2d(x_rb**2, 3, 1) - m_x_rb**2
sigma_x_lu_rb = F.avg_pool2d(x_lu*x_rb, 3, 1) - m_x_lu*m_x_rb
SSIM_n = (2 * m_x_lu * m_x_rb + C1) * (2 * sigma_x_lu_rb + C2)
SSIM_d = (m_x_lu ** 2 + m_x_rb ** 2 + C1) * (sigma_x_lu + sigma_x_rb + C2)
return torch.clamp((1 - SSIM_n / SSIM_d)/2, 0, 1)
def D2(self, x):
b,c,h,w = x.shape
window_size = 3
padding_size = window_size//2
x = F.relu(x)
max_per_sample = torch.max(x.view(b,-1), dim=1)[0]
exp = torch.exp(x/max_per_sample.view(b,1,1,1))
sum_exp = (
window_size**2*
F.avg_pool2d(
F.pad(exp, [padding_size]*4, mode='constant', value=1.),
window_size, stride=1
)
)
local_max_score = exp / sum_exp
depth_wise_max = torch.max(x, dim=1)[0]
depth_wise_max_score = x / depth_wise_max.unsqueeze(1)
all_scores = local_max_score * depth_wise_max_score
score = torch.max(all_scores, dim=1)[0]
# score = score / torch.sum(score.view(b, -1), dim=1).view(b, 1, 1)
return score.unsqueeze(1)
def ASL_Peak(self, x):
b,c,h,w = x.shape
window_size = 3
padding_size = window_size//2
# x = F.relu(x)
max_per_sample = torch.max(x.view(b,-1), dim=1)[0]
x = x/max_per_sample.view(b,1,1,1)
alpha_input = x - F.avg_pool2d(
F.pad(x, [padding_size]*4, mode='reflect'),
window_size, stride=1
)
alpha = F.softplus(alpha_input)
beta_input = x - x.mean(1, True)
beta = F.softplus(beta_input)
all_scores = (alpha*beta).max(1,True)[0]
return all_scores
def identity(self, x):
scores = torch.ones_like(x)
return scores.mean(1,True)
def forward(self, fine_maps):
fine_map = fine_maps[0]
img_tensor = fine_maps[1]
x_pf = self.prior(fine_map)
x_pi = self.prior(img_tensor)
x = self.relu(self.norm1(self.conv1(x_pf*fine_map)))
x = F.interpolate(x, img_tensor.shape[2:], align_corners=False, mode='bilinear')
img_tensor = self.normimg(self.convimg(x_pi*img_tensor))
x = torch.cat([x, img_tensor], dim=1)
x = self.relu(self.norm2(self.conv2(x)))
score = self.act(self.norm3(self.conv3(x)))
# thr = self.act(self.conv_thr(x))
# score = self.relu(score-thr)
score = F.interpolate(x_pf, img_tensor.shape[2:], align_corners=False, mode='bilinear').mean(1,True) * \
x_pi.mean(1,True) * score
return score | 3,955 | 31.694215 | 111 | py |
PoSFeat | PoSFeat-main/managers/extractor.py | import os
import cv2
import datetime
import shutil
import logging
import yaml
import importlib
import numpy as np
import time
import h5py
from path import Path
from abc import ABC, abstractmethod
import torch
import torch.nn.functional as F
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
import networks
import datasets
import losses
import datasets.data_utils as dutils
from losses.preprocess_utils import *
import losses.preprocess_utils as putils
from tqdm import tqdm
import colorlog
from PIL import Image as Im
class TqdmHandler(logging.StreamHandler):
def __init__(self):
logging.StreamHandler.__init__(self)
def emit(self, record):
msg = self.format(record)
tqdm.write(msg)
class Extractor(ABC):
def __init__(self, args):
self.args = args
with open(self.args.config, 'r') as f:
self.config = yaml.load(f, Loader=yaml.FullLoader)
# timestamp = datetime.datetime.now().strftime("%m%d-%H%M")
self.save_root = Path('./ckpts/{}'.format(self.config['output_root']))
self.logfile = self.save_root/'logging_file.txt'
self.desc_root = self.save_root/'desc'
self.img_root = self.save_root/'image'
self.sift_kp = self.config['use_sift']
if 'save_npz' in list(self.config.keys()):
self.save_npz = self.config['save_npz']
else:
self.save_npz = True
if 'save_h5' in list(self.config.keys()):
self.save_h5 = self.config['save_h5']
else:
self.save_h5 = False
ckpt_path = Path(self.config['load_path'])
cfg_path = ckpt_path.dirname()/'config.yaml'
with open(cfg_path, 'r') as f:
pre_conf = yaml.load(f, Loader=yaml.FullLoader)
self.config['model_config'].update(pre_conf['model_config'])
if 'model' in list(pre_conf.keys()):
self.config['model'] = pre_conf['model']
self.set_device()
self.set_folder_and_logger()
## model
if 'model' in list(self.config.keys()):
tmp_model = getattr(networks, self.config['model'])
self.model = tmp_model(self.config['model_config'], self.device)
else:
self.model = networks.WSFNet(self.config['model_config'], self.device)
if self.multi_gpu:
self.model.set_parallel(self.args.local_rank)
# self.model.save_checkpoint(self.save_root)
self.model.load_checkpoint(self.config['load_path'])
self.model.set_eval()
if not self.config['use_sift']:
self.detector = getattr(putils, self.config['detector'])
self.logger.info('use {} to detect keypoints'.format(self.config['detector']))
else:
self.logger.info('use sift keypoints')
## dataloader
dataset = getattr(datasets, self.config['data'])
extract_dataset = dataset(configs=self.config['data_config_extract'])
if self.multi_gpu:
extract_sampler = torch.utils.data.distributed.DistributedSampler(extract_dataset)
else:
extract_sampler = None
self.extract_loader = torch.utils.data.DataLoader(extract_dataset, batch_size=self.config['data_config_extract']['batch_size'],
shuffle=False, num_workers=self.config['data_config_extract']['workers'],
collate_fn=self.my_collate, sampler=extract_sampler)
def my_collate(self, batch):
''' Puts each data field into a tensor with outer dimension batch size '''
batch = list(filter(lambda b: b is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
def set_device(self):
if torch.cuda.device_count() == 0:
self.device = torch.device("cpu")
self.output_flag=True
self.multi_gpu = False
print('use CPU for extraction')
elif torch.cuda.device_count() == 1:
self.device = torch.device("cuda")
self.output_flag=True
self.multi_gpu = False
print('use a single GPU for extraction')
else:
self.device = torch.device("cuda", self.args.local_rank)
self.multi_gpu = True
dist.init_process_group(backend='nccl')
# torch.autograd.set_detect_anomaly(True) # for debug
if self.args.local_rank == 0:
self.output_flag=True
print('use {} GPUs for extraction'.format(torch.cuda.device_count()))
else:
self.output_flag=False
def set_folder_and_logger(self):
if self.output_flag:
if not os.path.exists(self.save_root) :
self.save_root.makedirs_p()
else:
# if the path already exists, quit to make sure that the previous settings would not be overwritten
if self.config['data'] == 'ETH_LFB' or self.config['data'] == 'IMC_eval':
pass
else:
raise "The save path is already exists, please change the output_root in config"
print('=> will save everything to {}'.format(self.save_root))
# shutil.copy(self.args.config, self.save_root/'config.yaml')
with open(self.save_root/'config.yaml', 'w') as fout:
yaml.dump(self.config, fout)
self.logfile.touch()
if not os.path.exists(self.desc_root) :
self.desc_root.makedirs_p()
if not os.path.exists(self.img_root) :
self.img_root.makedirs_p()
while not os.path.exists(self.logfile):
time.sleep(0.5)
continue
self.logger = logging.getLogger()
if self.output_flag:
self.logger.setLevel(logging.INFO)
fh = logging.FileHandler(self.logfile, mode='a')
fh.setLevel(logging.DEBUG)
# ch = logging.StreamHandler()
ch = TqdmHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - gpu {} - %(levelname)s: %(message)s".format(self.args.local_rank))
fh.setFormatter(formatter)
# ch.setFormatter(formatter)
ch.setFormatter(colorlog.ColoredFormatter(
"%(asctime)s - gpu {} - %(levelname)s: %(message)s".format(self.args.local_rank),
log_colors={
'DEBUG': 'cyan',
'INFO': 'white',
'SUCCESS:': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white'},))
self.logger.addHandler(fh)
self.logger.addHandler(ch)
else:
self.logger.setLevel(logging.ERROR)
fh = logging.FileHandler(self.logfile, mode='a')
fh.setLevel(logging.ERROR)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
formatter = logging.Formatter("%(asctime)s - gpu {} - %(levelname)s: %(message)s".format(self.args.local_rank))
fh.setFormatter(formatter)
# ch.setFormatter(formatter)
ch.setFormatter(colorlog.ColoredFormatter(
"%(asctime)s - gpu {} - %(levelname)s: %(message)s".format(self.args.local_rank),
log_colors={
'DEBUG': 'cyan',
'INFO': 'white',
'SUCCESS:': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white'},))
self.logger.addHandler(fh)
self.logger.addHandler(ch)
# logger.info('test logger')
def findthr(self, tensor, thr):
tensor_np = tensor.cpu().numpy().reshape(-1,1)
max_val = np.percentile(tensor_np, thr)
return max_val
def save_imgs(self, inputs, outputs, processed, idx=None):
local_point = outputs['local_point']
global_point = outputs['global_point']
message = "\nlocal_min:{:.3f} max:{:.3f} global_min:{:.3f} max:{:.3f}".format(
local_point.min(), local_point.max(), global_point.min(), global_point.max())
save_path = self.img_root/inputs['name1'][0]
name = save_path.name.split('.')[0]
save_path = save_path.dirname()
if not save_path.exists():
save_path.makedirs_p()
bi, ci, hi, wi = inputs['im1'].shape
bo, co, ho, wo = local_point.shape
if hi != ho or wi != wo:
local_point = F.interpolate(local_point, (hi, wi))
bi, hi, wi, ci = inputs['im1_ori'].shape
pad = inputs['pad1']
if pad[3] != 0:
local_point = local_point[:,:,:-pad[3],:]
if pad[1] != 0:
local_point = local_point[:,:,:,:-pad[1]]
local_point1 = local_point[:,0,:,:]
local_point1 = local_point1/self.findthr(local_point1, 100*self.config['local_thr'])
local_point1 = local_point1.clamp(0,1)
local_point1 = dutils.tensor2array(local_point1.squeeze())[:3,:,:].transpose(1,2,0)
local_point1 = Im.fromarray((255*local_point1).astype(np.uint8))
local_point1.save(save_path/'{}_score_map.jpg'.format(name))
imgs_with_kps = inputs['im1_ori'].squeeze().cpu().numpy().astype(np.uint8)
# imgs_with_kps = cv2.cvtColor(imgs_with_kps, cv2.COLOR_RGB2BGR)
color = (0,255,0)
for kp in processed['kpt']:
kp = (int(kp[0]), int(kp[1])) # cv2.circle expects integer pixel coordinates
cv2.circle(imgs_with_kps, kp, radius=2, color=color, thickness=-1)
imgs_with_kps = cv2.cvtColor(imgs_with_kps, cv2.COLOR_BGR2RGB)
imgs_with_kps = Im.fromarray(imgs_with_kps)
imgs_with_kps.save(save_path/'{}_image_with_kp.jpg'.format(name))
return message
def save_desc(self, inputs, outputs, processed):
kpt = processed['kpt']
feat_f = processed['desc']
kp_score = processed['kp_score']
name = inputs['name1'][0]#.replace('ppm','wsf')
save_path = self.desc_root/name
h5_path = self.desc_root+'h5'
if not save_path.dirname().exists():
save_path.dirname().makedirs_p()
message = "\nkpts: {}".format(kpt.shape[0])
if self.save_npz:
desc = feat_f.squeeze(0).detach().cpu().numpy()
scores = kp_score.squeeze(0).detach().cpu().numpy()
with open(save_path + '.{}'.format(self.config['postfix']), 'wb') as output_file:
np.savez(output_file, keypoints=kpt, scores=scores, descriptors=desc)
if self.save_h5:
# now it is only for image-matching-benchmark, so the name is seq/name.jpg
desc = feat_f.squeeze(0).detach().cpu().numpy() #save as nxc
scores = kp_score.squeeze(0).detach().cpu().numpy()
scales = np.ones_like(scores)
_, _, h, w = inputs['im1'].shape # network input size; used for the 'image_size' entry in the hloc h5 file
h5_name = name.split('.')[0]
h5_seq = h5_name.split('/')[:-1]
h5_seq = '/'.join(h5_seq)
h5_name = h5_name.split('/')[-1]
if not os.path.exists(h5_path/h5_seq):
(h5_path/h5_seq).makedirs_p()
with h5py.File(h5_path/h5_seq+'/keypoints.h5', 'a') as fkp, \
h5py.File(h5_path/h5_seq+'/descriptors.h5', 'a') as fdesc, \
h5py.File(h5_path/h5_seq+'/scores.h5', 'a') as fsco, \
h5py.File(h5_path/h5_seq+'/scales.h5', 'a') as fsca:
try:
fkp[h5_name] = kpt
fdesc[h5_name] = desc
fsco[h5_name] = scores
fsca[h5_name] = scales
except OSError as error:
if 'No space left on device' in error.args[0]:
self.logger.error(
'Out of disk space: storing features on disk can take '
'significant space, did you enable the as_half flag?')
raise error
# for hloc input
with h5py.File(h5_path/'feat.h5', 'a') as fh5:
try:
grp = fh5.create_group(name)
grp.create_dataset('keypoints', data=kpt)
grp.create_dataset('scores', data=scores)
grp.create_dataset('descriptors', data=desc)
grp.create_dataset('image_size', data=np.array([w,h]))
except OSError as error:
if 'No space left on device' in error.args[0]:
self.logger.error(
'Out of disk space: storing features on disk can take '
'significant space, did you enable the as_half flag?')
del grp, fh5[name]
raise error
return message
def process(self, inputs, outputs, remove_pad=False):
desc_f = outputs['local_map']
name = inputs['name1'][0]
if remove_pad:
b,c,h,w = inputs['im1_ori'].shape
pad = inputs['pad1']
desc_f = desc_f[:,:,:-(pad[3]//4),:-(pad[0]//4)]
outputs['local_point'] = outputs['local_point'][:,:,:-(pad[3]//4),:-(pad[0]//4)]
else:
b,c,h,w = inputs['im1'].shape
if self.sift_kp:
coords = inputs['coord1']
coord_n = normalize_coords(coords, h, w)
kp_score = torch.ones_like(coord_n)[:,:,:1]
else:
if self.config['data'] == 'Aachen_Day_Night':
cur_name_split = name.split('/')
if cur_name_split[0] == 'query':
coord_n, kp_score = self.detector(outputs['local_point'], **self.config['detector_config_query'])
else:
coord_n, kp_score = self.detector(outputs['local_point'], **self.config['detector_config'])
else:
coord_n, kp_score = self.detector(outputs['local_point'], **self.config['detector_config'])
coords = denormalize_coords(coord_n, h, w)
feat_f = sample_feat_by_coord(desc_f, coord_n, self.config['loss_distance']=='cos')
kpt = coords.cpu().numpy().squeeze(0)
# scale for inloc
if 'scale' in list(inputs.keys()):
kpt = kpt*inputs['scale'].cpu().numpy()
return {'kpt': kpt,
'desc': feat_f,
'kp_score': kp_score}
@torch.no_grad()
def extract(self):
bar = tqdm(self.extract_loader, total=int(len(self.extract_loader)), ncols=80)
color = np.array(range(256)).astype(float)[None,:].repeat(30, axis=0)
color = np.concatenate([np.zeros((30,20)),255*np.ones((30,20)),color], axis=1)
color = dutils.tensor2array(torch.tensor(color))[:3,:,:].transpose(1,2,0)
color = Im.fromarray((255*color).astype(np.uint8))
color.save(self.img_root/'0_colorbar.jpg')
name_list = ''
for idx, inputs in enumerate(bar):
for key, val in inputs.items():
if key == 'name1' or key == 'pad1':
continue
inputs[key] = val.to(self.device)
message = inputs['name1'][0]
outputs = self.model.extract(inputs['im1'])
processed = self.process(inputs, outputs)
if self.config['output_desc']:
message += self.save_desc(inputs, outputs, processed)
if self.config['output_img']:
message += self.save_imgs(inputs, outputs, processed, idx)
self.logger.info(message)
name_list += '{} {}\n'.format(idx, inputs['name1'][0])
torch.cuda.empty_cache()
with open(self.img_root/'name_list.txt', 'w') as f:
f.write(name_list)
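# --- Driver sketch (added; not part of the original file) ---
# The extractor is normally launched from a separate entry script; the argument
# names below (--config, --local_rank) are assumptions based on how self.args is
# used in __init__, and a valid extraction yaml config is required.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True, help='path to the extraction yaml config')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    extractor = Extractor(args)
    extractor.extract()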
| 15,845 | 40.373368 | 136 | py |
PoSFeat | PoSFeat-main/managers/trainer.py | import os
import datetime
import shutil
import logging
import yaml
import importlib
import time
from path import Path
from abc import ABC, abstractmethod
from PIL import Image as Im
import numpy as np
import torch.nn.functional as F
import torch
from torch.utils.tensorboard import SummaryWriter
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
import networks
import datasets
import losses
import datasets.data_utils as dutils
import losses.preprocess_utils as putils
from tqdm import tqdm
import cv2
import copy
import matplotlib
import matplotlib.pyplot as plt
class TqdmHandler(logging.StreamHandler):
def __init__(self):
logging.StreamHandler.__init__(self)
def emit(self, record):
msg = self.format(record)
tqdm.write(msg)
class Trainer(ABC):
def __init__(self, args):
## read the config file
self.args = args
with open(self.args.config, 'r') as f:
self.config = yaml.load(f, Loader=yaml.FullLoader)
self.save_root = Path('./ckpts/{}'.format(self.config['checkpoint_name']))
self.logfile = self.save_root/'logging_file.txt'
## if there is a checkpoint, update the model config from the checkpoint's saved config so the parameters load correctly
ckpt_path = None
if 'load_path' in list(self.config.keys()):
if self.config['load_path'] is not None:
ckpt_path = Path(self.config['load_path'])
cfg_path = ckpt_path.dirname()/'config.yaml'
with open(cfg_path, 'r') as f:
pre_conf = yaml.load(f, Loader=yaml.FullLoader)
self.config['model_config'].update(pre_conf['model_config'])
if 'model' in list(pre_conf.keys()):
self.config['model'] = pre_conf['model']
## set training device (CPU / single GPU / multi GPU); multi-GPU training is currently slow (unknown reason)
self.set_device()
## set logger, create the training folder and save the config file into it
self.set_folder_and_logger()
## model
if 'model' in list(self.config.keys()):
tmp_model = getattr(networks, self.config['model'])
self.model = tmp_model(self.config['model_config'], self.device, self.config['no_cuda'])
else:
self.model = networks.PoSFeat(self.config['model_config'], self.device, self.config['no_cuda'])
parameters = []
for module_name, module_lr in zip(self.config['optimal_modules'], self.config['optimal_lrs']):
tmp_module = getattr(self.model, module_name)
parameters.append({'params':tmp_module.parameters(), 'lr':module_lr})
self.all_optimized_modules = self.config['optimal_modules']
for module_name in self.model.modules:
if module_name not in self.all_optimized_modules:
tmp_module = getattr(self.model, module_name)
for p in tmp_module.parameters():
p.requires_grad = False
if ckpt_path is not None:
self.logger.info('load checkpoint from {}'.format(ckpt_path))
self.model.load_checkpoint(ckpt_path)
if self.multi_gpu:
self.model.set_parallel(self.args.local_rank)
## losses
if 'preprocess_train' in list(self.config.keys()):
tmp_model = getattr(losses, self.config['preprocess_train'])
self.preprocess = tmp_model(self.config['preprocess_train_config'], self.device).to(self.device)
self.skip_preprocess = False
else:
self.preprocess = losses.Preprocess_Skip().to(self.device)
self.skip_preprocess = True
self.losses = []
self.losses_weight = []
for loss_name, loss_weight in zip(self.config['losses'], self.config['losses_weight']):
loss_module = getattr(losses, loss_name)
self.losses.append(loss_module(self.config['{}_config'.format(loss_name)], self.device).to(self.device))
self.losses_weight.append(float(loss_weight))
if hasattr(self.losses[-1], 'load_checkpoint'):
if ckpt_path is not None:
self.losses[-1].load_checkpoint(ckpt_path)
# parameters += list(self.losses[-1].parameters())
parameters.append({'params':self.losses[-1].parameters()})
## optimizer
self.logger.info(parameters)
self.logger.info(self.all_optimized_modules)
tmp_optimizer = getattr(torch.optim, self.config['optimizer'])
self.optimizer = tmp_optimizer(parameters)
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
step_size=self.config['lr_decay_step'],
gamma=self.config['lr_decay_factor'])
self.logger.info(self.config['optimizer'])
## dataloader
dataset = getattr(datasets, self.config['data'])
train_dataset = dataset(configs=self.config['data_config_train'], is_train=True)
if self.multi_gpu:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
self.train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=self.config['data_config_train']['batch_size'],
shuffle=not self.multi_gpu, num_workers=self.config['data_config_train']['workers'],
collate_fn=self.my_collate, sampler=train_sampler)
val_dataset = dataset(configs=self.config['val_config']['data_config_val'], is_train=False)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=self.config['val_config']['data_config_val']['batch_size'],
shuffle= self.config['val_config']['data_config_val']['shuffle'],
num_workers=self.config['val_config']['data_config_val']['workers'],
collate_fn=self.my_collate)
val_iter = iter(putils.cycle(val_loader))
self.val_data = next(val_iter)
del val_dataset, val_loader, val_iter
with open(self.save_root/'val_data.npz', 'wb') as out_f:
np.savez(out_f, val_data=self.val_data)
def my_collate(self, batch):
''' Puts each data field into a tensor with outer dimension batch size '''
batch = list(filter(lambda b: b is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
def set_device(self):
if torch.cuda.device_count() == 0:
self.device = torch.device("cpu")
self.output_flag=True
self.multi_gpu = False
print('use CPU for training')
elif torch.cuda.device_count() == 1:
self.device = torch.device("cuda")
self.output_flag=True
self.multi_gpu = False
self.args.local_rank = 0
print('use a single GPU for training')
else:
self.device = torch.device("cuda", self.args.local_rank)
self.multi_gpu = True
dist.init_process_group(backend='nccl')
# torch.autograd.set_detect_anomaly(True) # for debug
if self.args.local_rank == 0:
self.output_flag=True
print('use {} GPUs for training'.format(torch.cuda.device_count()))
else:
self.output_flag=False
def set_folder_and_logger(self):
if self.output_flag:
if not os.path.exists(self.save_root) :
self.save_root.makedirs_p()
else:
# if the path already exists, quit to make sure that the previous config would not be overwritten
raise RuntimeError("The save path already exists, please update the folder name")
print('=> will save everything to {}'.format(self.save_root))
with open(self.save_root/'config.yaml', 'w') as fout:
yaml.dump(self.config, fout)
self.logfile.touch()
self.writer = SummaryWriter(self.save_root)
self.logger = logging.getLogger()
# color settings
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def formatter_message(message, use_color = True):
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
COLORS = {
'WARNING': YELLOW,
'INFO': CYAN,
'DEBUG': BLUE,
'CRITICAL': YELLOW,
'ERROR': RED
}
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color = True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
levelname = record.levelname
if self.use_color and levelname in COLORS:
levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ
record.levelname = levelname_color
return logging.Formatter.format(self, record)
msg = "%(asctime)s-gpu {}-%(levelname)s: %(message)s".format(self.args.local_rank)
formatter = logging.Formatter(msg)
color_formatter = ColoredFormatter(formatter_message(msg, True))
if self.output_flag:
self.logger.setLevel(logging.INFO)
fh = logging.FileHandler(self.logfile, mode='a')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
ch = TqdmHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(color_formatter)
else:
self.logger.setLevel(logging.ERROR)
fh = logging.FileHandler(self.logfile, mode='a')
fh.setLevel(logging.ERROR)
fh.setFormatter(formatter)
ch = TqdmHandler()
ch.setLevel(logging.ERROR)
ch.setFormatter(color_formatter)
self.logger.addHandler(fh)
self.logger.addHandler(ch)
def save_errors(self, inputs, outputs, losses, loss_items):
if not os.path.exists(self.save_root/"error.pt"):
save_dict = {"inputs":inputs, "outputs": outputs,
"losses":losses, "loss_items":loss_items}
torch.save(save_dict, self.save_root/"error.pt")
def save_loss(self, save_path):
save_path = Path(save_path)
for idx in range(len(self.config['losses'])):
if hasattr(self.losses[idx], 'save_checkpoint'):
self.losses[idx].save_checkpoint(save_path)
def train(self):
batch_size_val = self.val_data['im1'].shape[0]
epoch_path = self.save_root/'{:>03d}'.format(0)
epoch_path.makedirs_p()
self.model.save_checkpoint(epoch_path)
self.save_loss(epoch_path)
for epoch in range(self.config['epoch']):
epoch += 1
epoch_path = self.save_root/'{:>03d}'.format(epoch)
epoch_path.makedirs_p()
batch_path_list = []
for i in range(batch_size_val):
batch_path = epoch_path/'{}'.format(i)
batch_path.makedirs_p()
batch_path_list.append(batch_path)
if self.config['epoch_step'] > 0:
total_steps = self.config['epoch_step']
else:
total_steps = len(self.train_loader)
bar = tqdm(self.train_loader, total=int(total_steps), ncols=80)
bar.set_description('{}/{} {}/{}'.format(self.config['checkpoint_name'], self.save_root.name, epoch, self.config['epoch']))
self.model.set_train()
for idx, inputs in enumerate(bar):
# val and vis
self.model.set_eval()
if self.output_flag and idx % self.config['log_freq'] == 0:
self.val_and_vis(batch_path_list, idx)
torch.cuda.empty_cache()
# train
self.model.set_eval()
for module in self.config['optimal_modules']:
tmp_module = getattr(self.model, module)
tmp_module.train()
outputs = self.model.forward(inputs)
outputs['epoch'] = epoch
outputs['iterations'] = int((epoch-1)*total_steps+idx)
processed = self.preprocess(inputs, outputs)
if self.skip_preprocess:
message = "epoch {} batch {}".format(epoch, idx)
else:
message = "epoch {} batch {} temperature {}".format(epoch, idx, processed['temperature'])
total_loss = 0
loss_items = []
temp_log = {}
for loss_name, loss_module, loss_weight in zip(self.config['losses'], self.losses, self.losses_weight):
tmp_loss, tmp_items = loss_module(inputs, outputs, processed)
total_loss += loss_weight*tmp_loss.mean()
temp_log[loss_name] = tmp_loss.detach().mean().item()
message += "\n {}:{:.5f}[{:.2f}] (total: {:.5f} ".format(loss_name, loss_weight*tmp_loss.detach().mean().item(), loss_weight,
tmp_loss.detach().mean().item())
for key, val in tmp_items.items():
message += "{}[{:.5f}] ".format(key, val.detach().mean().item())
message += ")"
loss_items.append(tmp_items)
message += '\n'
# if the loss is nan, skip this batch
if total_loss.isnan():
self.logger.info(message)
self.logger.error("loss is nan in {}, check the error.pt".format(idx))
self.save_errors(inputs, outputs, total_loss, loss_items)
total_loss.backward()
self.optimizer.zero_grad()
continue
self.optimizer.zero_grad()
total_loss.backward()
if 'localheader' in self.all_optimized_modules:
grad_message = 'grad localheader conv1 mean {:.6f} max{:.6f}'.format(self.model.localheader.conv1.weight.grad.mean().item(),
self.model.localheader.conv1.weight.grad.max().item())
self.logger.info(grad_message)
if 'backbone' in self.all_optimized_modules:
grad_message = 'grad backbone conv_fine mean {:.6f} max{:.6f}'.format(self.model.backbone.conv_fine.conv.weight.grad.mean().item(),
self.model.backbone.conv_fine.conv.weight.grad.max().item())
self.logger.info(grad_message)
grad_message = 'grad backbone firstconv mean {:.6f} max{:.6f}'.format(self.model.backbone.firstconv.weight.grad.mean().item(),
self.model.backbone.firstconv.weight.grad.max().item())
self.logger.info(grad_message)
if self.config['grad_clip']:
for module_name in self.all_optimized_modules:
tmp_module = getattr(self.model, module_name)
torch.nn.utils.clip_grad_norm_(tmp_module.parameters(), self.config['clip_norm'])
if 'localheader' in self.all_optimized_modules:
grad_message = 'grad clipped localheader conv1 mean {:.6f} max{:.6f}'.format(self.model.localheader.conv1.weight.grad.mean().item(),
self.model.localheader.conv1.weight.grad.max().item())
self.logger.info(grad_message)
if 'backbone' in self.all_optimized_modules:
grad_message = 'grad clipped backbone firstconv mean {:.6f} max{:.6f}'.format(self.model.backbone.firstconv.weight.grad.mean().item(),
self.model.backbone.firstconv.weight.grad.max().item())
self.logger.info(grad_message)
self.optimizer.step()
self.logger.info(message)
if self.output_flag and idx%self.config['log_freq'] == 0:
self.writer.add_scalar('losses', total_loss.item(), int((epoch-1)*total_steps+idx))
for loss_name in self.config['losses']:
self.writer.add_scalar(loss_name, temp_log[loss_name], int((epoch-1)*total_steps+idx))
for components in loss_items:
for component_name in list(components.keys()):
if component_name in self.config['tb_component']:
self.writer.add_scalar(component_name, components[component_name], int((epoch-1)*total_steps+idx))
if self.output_flag and idx%100 == 0:
self.model.save_checkpoint(epoch_path)
torch.cuda.empty_cache()
if self.config['epoch_step'] > 0 and idx >= self.config['epoch_step']:
break
self.model.save_checkpoint(epoch_path)
self.save_loss(epoch_path)
self.scheduler.step()
@torch.no_grad()
def val_and_vis(self, batch_path_list, idx):
val_config = self.config['val_config']
self.model.set_eval()
outputs = self.model.forward(self.val_data)
mid_pad = 20
preds1 = outputs['preds1']
preds2 = outputs['preds2']
b,c,h,w = self.val_data['im1'].shape
all_images = ['0_original_images', '1_score_maps', '2_all_keypoints',
'3_matched_keypoints', '4_matches_less', '5_matches_all']
# if val_config['detector'] == 'sift':
# coord1 = self.val_data['coord1']
# coord2 = self.val_data['coord2']
# coord1_n = putils.normalize_coords(coord1, h, w)
# coord2_n = putils.normalize_coords(coord2, h, w)
# else:
# detector = getattr(putils, val_config['detector'])
# coord1_n = detector(preds1['local_point'], **val_config['detector_config'])
# coord2_n = detector(preds2['local_point'], **val_config['detector_config'])
# coord1 = putils.denormalize_coords(coord1_n, h, w)
# coord2 = putils.denormalize_coords(coord2_n, h, w)
# desc1 = putils.sample_feat_by_coord(preds1['local_map'], coord1_n, val_config['loss_distance']=='cos')
# desc2 = putils.sample_feat_by_coord(preds2['local_map'], coord1_n, val_config['loss_distance']=='cos')
for i, cur_path in enumerate(batch_path_list):
for image_folder in all_images:
tmp_path = cur_path/image_folder
if not tmp_path.exists():
tmp_path.makedirs_p()
cur_img1 = self.val_data['im1_ori'][i,...]
cur_img2 = self.val_data['im2_ori'][i,...]
cur_F12 = self.val_data['F1'][i,...]
score_map1 = preds1['local_point'][i,...]
score_map2 = preds2['local_point'][i,...]
comb_img = torch.cat((cur_img1, torch.zeros_like(cur_img1)[:,:mid_pad,:], cur_img2), dim=1)
comb_score = torch.cat((score_map1, torch.zeros_like(score_map1)[:,:,:mid_pad], score_map2), dim=2)
if val_config['detector'] == 'sift':
cur_kps1 = self.val_data['coord1'][i,:,:2]
cur_kps2 = self.val_data['coord2'][i,:,:2]
cur_score1 = torch.ones_like(cur_kps1)[...,0:1]
cur_score2 = torch.ones_like(cur_kps2)[...,0:1]
cur_kps1_n = putils.normalize_coords(cur_kps1, h, w).unsqueeze(0)
cur_kps2_n = putils.normalize_coords(cur_kps2, h, w).unsqueeze(0)
else:
detector = getattr(putils, val_config['detector'])
cur_kps1_n, cur_score1 = detector(preds1['local_point'][i:i+1,...],
**val_config['detector_config'])
cur_kps2_n, cur_score2 = detector(preds2['local_point'][i:i+1,...],
**val_config['detector_config'])
cur_kps1 = putils.denormalize_coords(cur_kps1_n, h, w).squeeze(0)
cur_kps2 = putils.denormalize_coords(cur_kps2_n, h, w).squeeze(0)
cur_score1 = cur_score1.squeeze(0)
cur_score2 = cur_score2.squeeze(0)
cur_desc1 = putils.sample_feat_by_coord(preds1['local_map'][i:i+1,...],
cur_kps1_n, val_config['loss_distance']=='cos').squeeze(0)
cur_desc2 = putils.sample_feat_by_coord(preds2['local_map'][i:i+1,...],
cur_kps2_n, val_config['loss_distance']=='cos').squeeze(0)
cur_matches = putils.mnn_matcher(cur_desc1, cur_desc2)
cur_matchkp1 = cur_kps1[cur_matches[:,0],:2]
cur_matchkp2 = cur_kps2[cur_matches[:,1],:2]
cur_kpscore_m1 = cur_score1[cur_matches[:,0],:1]
cur_kpscore_m2 = cur_score2[cur_matches[:,1],:1]
cur_kpscore = cur_kpscore_m1 + cur_kpscore_m2.to(cur_score1)
# cur_kpscore = cur_kpscore_m1 + cur_kpscore_m2
_, topk_idx = cur_kpscore.topk(min(val_config['vis_topk'], cur_kpscore.shape[0]), dim=0)
cur_matchkp1_h = putils.homogenize(cur_matchkp1).transpose(0, 1)
cur_matchkp2_h = putils.homogenize(cur_matchkp2).transpose(0, 1)
cur_epi_line1 = cur_F12@cur_matchkp1_h
cur_epi_line1 = cur_epi_line1 / torch.clamp(
torch.norm(cur_epi_line1[:2, :], dim=0, keepdim=True), min=1e-8)
epi_dist = torch.abs(torch.sum(cur_matchkp2_h * cur_epi_line1, dim=0)).unsqueeze(1)
epi_dist = epi_dist.clamp(min=0, max=val_config['vis_err_thr']).repeat(1,2)
match_color = dutils.tensor2array(val_config['vis_err_thr'] - epi_dist,
max_value=val_config['vis_err_thr'], colormap='RdYlGn')[:3,:,:1].transpose(1,2,0)
match_color = (255*match_color).astype(np.uint8)
match_color = cv2.cvtColor(match_color, cv2.COLOR_RGB2BGR).squeeze(1)
cur_matchkp1_less = cur_matchkp1[topk_idx, :2]
cur_matchkp2_less = cur_matchkp2[topk_idx, :2]
match_color_less = match_color[topk_idx.cpu().numpy()[:,0], :3]
cur_kps1 = list(map(tuple,cur_kps1.reshape(-1, 2).cpu().numpy()))
cur_kps2 = list(map(tuple,cur_kps2.reshape(-1, 2).cpu().numpy()))
cur_matchkp1 = list(map(tuple,cur_matchkp1.reshape(-1, 2).cpu().numpy()))
cur_matchkp2 = list(map(tuple,cur_matchkp2.reshape(-1, 2).cpu().numpy()))
match_color = list(map(tuple,match_color))
cur_matchkp1_less = list(map(tuple,cur_matchkp1_less.reshape(-1, 2).cpu().numpy()))
cur_matchkp2_less = list(map(tuple,cur_matchkp2_less.reshape(-1, 2).cpu().numpy()))
match_color_less = list(map(tuple,match_color_less))
comb_img = comb_img.cpu().numpy()
save_img = comb_img
save_img = Im.fromarray(save_img.astype(np.uint8))
save_img.save(cur_path/'0_original_images/{}.jpg'.format(idx))
comb_score = dutils.tensor2array(comb_score.squeeze())[:3,:,:].transpose(1,2,0)
save_img = 255*comb_score
save_img = Im.fromarray(save_img.astype(np.uint8))
save_img.save(cur_path/'1_score_maps/{}.jpg'.format(idx))
comb_img_kps = cv2.cvtColor(comb_img,cv2.COLOR_RGB2BGR)
color = (0,255,0)
for kp1 in cur_kps1:
cv2.circle(comb_img_kps, kp1, radius=2, color=color, thickness=-1)
for kp2 in cur_kps2:
kp2_comb = (int(kp2[0]+w+mid_pad), int(kp2[1]))
cv2.circle(comb_img_kps, kp2_comb, radius=2, color=color, thickness=-1)
comb_img_kps = cv2.cvtColor(comb_img_kps, cv2.COLOR_BGR2RGB)
save_img = comb_img_kps
save_img = Im.fromarray(save_img.astype(np.uint8))
save_img.save(cur_path/'2_all_keypoints/{}.jpg'.format(idx))
comb_img_kps_m = cv2.cvtColor(comb_img,cv2.COLOR_RGB2BGR)
color = (0,255,0)
for kp1, kp2 in zip(cur_matchkp1, cur_matchkp2):
cv2.circle(comb_img_kps_m, kp1, radius=2, color=color, thickness=-1)
# kp2_comb = kp2 + torch.tensor([w, 0]).reshape(1,2).to(kp2)
kp2_comb = (int(kp2[0]+w+mid_pad), int(kp2[1]))
cv2.circle(comb_img_kps_m, kp2_comb, radius=2, color=color, thickness=-1)
comb_img_kps_m = cv2.cvtColor(comb_img_kps_m, cv2.COLOR_BGR2RGB)
save_img = comb_img_kps_m
save_img = Im.fromarray(save_img.astype(np.uint8))
save_img.save(cur_path/'3_matched_keypoints/{}.jpg'.format(idx))
comb_img_m_less = cv2.cvtColor(comb_img,cv2.COLOR_RGB2BGR)
for kp1, kp2, color in zip(cur_matchkp1_less, cur_matchkp2_less, match_color_less):
# kp2_comb = kp2 + torch.tensor([w, 0]).reshape(1,2).to(kp2)
kp2_comb = (int(kp2[0]+w+mid_pad), int(kp2[1]))
color = (int(color[0]), int(color[1]), int(color[2]))
cv2.line(comb_img_m_less, kp1, kp2_comb, color, thickness=2)
cv2.circle(comb_img_m_less, kp1, radius=2, color=(0,255,0), thickness=-1)
cv2.circle(comb_img_m_less, kp2_comb, radius=2, color=(0,255,0), thickness=-1)
comb_img_m_less = cv2.cvtColor(comb_img_m_less, cv2.COLOR_BGR2RGB)
save_img = comb_img_m_less
save_img = Im.fromarray(save_img.astype(np.uint8))
save_img.save(cur_path/'4_matches_less/{}.jpg'.format(idx))
comb_img_m = cv2.cvtColor(comb_img,cv2.COLOR_RGB2BGR)
for kp1, kp2, color in zip(cur_matchkp1, cur_matchkp2, match_color):
# kp2_comb = kp2 + torch.tensor([w, 0]).reshape(1,2).to(kp2)
kp2_comb = (int(kp2[0]+w+mid_pad), int(kp2[1]))
color = (int(color[0]), int(color[1]), int(color[2]))
cv2.line(comb_img_m, kp1, kp2_comb, color, thickness=2)
cv2.circle(comb_img_m, kp1, radius=2, color=(0,255,0), thickness=-1)
cv2.circle(comb_img_m, kp2_comb, radius=2, color=(0,255,0), thickness=-1)
comb_img_m = cv2.cvtColor(comb_img_m, cv2.COLOR_BGR2RGB)
save_img = comb_img_m
save_img = Im.fromarray(save_img.astype(np.uint8))
save_img.save(cur_path/'5_matches_all/{}.jpg'.format(idx)) | 27,149 | 48.908088 | 159 | py |
PoSFeat | PoSFeat-main/evaluations/aachen/reconstruct_pipeline_v1_1.py | import argparse
import numpy as np
import os
import shutil
import subprocess
import sqlite3
import torch
import types
from tqdm import tqdm
from matchers import mutual_nn_matcher
from camera import Camera
from utils import quaternion_to_rotation_matrix, camera_center_to_translation
from path import Path
import sys
IS_PYTHON3 = sys.version_info[0] >= 3
def array_to_blob(array):
if IS_PYTHON3:
return array.tobytes()
else:
return np.getbuffer(array)
def recover_database_images_and_ids(paths, args):
# Connect to the database.
connection = sqlite3.connect(paths.database_path)
cursor = connection.cursor()
# Recover database images and ids.
images = {}
cameras = {}
cursor.execute("SELECT name, image_id, camera_id FROM images;")
for row in cursor:
images[row[0]] = row[1]
cameras[row[0]] = row[2]
# Close the connection to the database.
cursor.close()
connection.close()
return images, cameras
def preprocess_reference_model(paths, args):
print('Preprocessing the reference model...')
# Recover intrinsics.
with open(os.path.join(paths.reference_model_path, 'database_intrinsics_v1_1.txt')) as f:
raw_intrinsics = f.readlines()
camera_parameters = {}
for intrinsics in raw_intrinsics:
intrinsics = intrinsics.strip('\n').split(' ')
image_name = intrinsics[0]
camera_model = intrinsics[1]
intrinsics = [float(param) for param in intrinsics[2 :]]
camera = Camera()
camera.set_intrinsics(camera_model=camera_model, intrinsics=intrinsics)
camera_parameters[image_name] = camera
# Recover poses.
with open(os.path.join(paths.reference_model_path, 'aachen_v_1_1.nvm')) as f:
raw_extrinsics = f.readlines()
# Skip the header.
n_cameras = int(raw_extrinsics[2])
raw_extrinsics = raw_extrinsics[3 : 3 + n_cameras]
for extrinsics in raw_extrinsics:
extrinsics = extrinsics.strip('\n').split(' ')
image_name = extrinsics[0]
# Skip the focal length. Skip the distortion and terminal 0.
qw, qx, qy, qz, cx, cy, cz = [float(param) for param in extrinsics[2 : -2]]
qvec = np.array([qw, qx, qy, qz])
c = np.array([cx, cy, cz])
# NVM -> COLMAP.
t = camera_center_to_translation(c, qvec)
camera_parameters[image_name].set_pose(qvec=qvec, t=t)
return camera_parameters
def generate_empty_reconstruction(images, cameras, camera_parameters, paths, args):
print('Generating the empty reconstruction...')
if not os.path.exists(paths.empty_model_path):
os.mkdir(paths.empty_model_path)
with open(os.path.join(paths.empty_model_path, 'cameras.txt'), 'w') as f:
for image_name in images:
image_id = images[image_name]
camera_id = cameras[image_name]
try:
camera = camera_parameters[image_name]
except:
continue
f.write('%d %s %s\n' % (
camera_id,
camera.camera_model,
' '.join(map(str, camera.intrinsics))
))
with open(os.path.join(paths.empty_model_path, 'images.txt'), 'w') as f:
for image_name in images:
image_id = images[image_name]
camera_id = cameras[image_name]
try:
camera = camera_parameters[image_name]
except:
continue
f.write('%d %s %s %d %s\n\n' % (
image_id,
' '.join(map(str, camera.qvec)),
' '.join(map(str, camera.t)),
camera_id,
image_name
))
with open(os.path.join(paths.empty_model_path, 'points3D.txt'), 'w') as f:
pass
def import_features(images, paths, args):
# Connect to the database.
connection = sqlite3.connect(paths.database_path)
cursor = connection.cursor()
# Import the features.
print('Importing features...')
for image_name, image_id in tqdm(images.items(), total=len(images.items())):
features_path = os.path.join(paths.features_path, '%s.%s' % (image_name, args.method_name))
keypoints = np.load(features_path)['keypoints']
n_keypoints = keypoints.shape[0]
# Keep only x, y coordinates.
keypoints = keypoints[:, : 2]
# Add placeholder scale, orientation.
keypoints = np.concatenate([keypoints, np.ones((n_keypoints, 1)), np.zeros((n_keypoints, 1))], axis=1).astype(np.float32)
        keypoints_str = keypoints.tobytes()  # .tostring() was deprecated and removed in NumPy 2.0
cursor.execute("INSERT INTO keypoints(image_id, rows, cols, data) VALUES(?, ?, ?, ?);",
(image_id, keypoints.shape[0], keypoints.shape[1], keypoints_str))
connection.commit()
# Close the connection to the database.
cursor.close()
connection.close()
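# COLMAP keys each image pair by a single integer:
# pair_id = MAX_IMAGE_ID * min(id1, id2) + max(id1, id2), with MAX_IMAGE_ID = 2147483647.
# For example, images (7, 3) map to 2147483647 * 3 + 7 = 6442450948.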
def image_ids_to_pair_id(image_id1, image_id2):
if image_id1 > image_id2:
return 2147483647 * image_id2 + image_id1
else:
return 2147483647 * image_id1 + image_id2
def match_features(images, paths, args):
# Connect to the database.
connection = sqlite3.connect(paths.database_path)
cursor = connection.cursor()
# Match the features and insert the matches in the database.
print('Matching...')
with open(paths.match_list_path, 'r') as f:
raw_pairs = f.readlines()
image_pair_ids = set()
for raw_pair in tqdm(raw_pairs, total=len(raw_pairs)):
image_name1, image_name2 = raw_pair.strip('\n').split(' ')
features_path1 = os.path.join(paths.features_path, '%s.%s' % (image_name1, args.method_name))
features_path2 = os.path.join(paths.features_path, '%s.%s' % (image_name2, args.method_name))
descriptors1 = torch.from_numpy(np.load(features_path1)['descriptors']).to(device)
descriptors2 = torch.from_numpy(np.load(features_path2)['descriptors']).to(device)
matches = mutual_nn_matcher(descriptors1, descriptors2).astype(np.uint32)
image_id1, image_id2 = images[image_name1], images[image_name2]
image_pair_id = image_ids_to_pair_id(image_id1, image_id2)
if image_pair_id in image_pair_ids:
continue
image_pair_ids.add(image_pair_id)
if image_id1 > image_id2:
matches = matches[:, [1, 0]]
        matches_str = matches.tobytes()
cursor.execute("INSERT INTO matches(pair_id, rows, cols, data) VALUES(?, ?, ?, ?);",
(image_pair_id, matches.shape[0], matches.shape[1], matches_str))
connection.commit()
# Close the connection to the database.
cursor.close()
connection.close()
def geometric_verification(paths, args):
print('Running geometric verification...')
subprocess.call([os.path.join(args.colmap_path, 'colmap'), 'matches_importer',
'--database_path', paths.database_path,
'--match_list_path', paths.match_list_path,
'--match_type', 'pairs'])
def reconstruct(paths, args):
if not os.path.isdir(paths.database_model_path):
os.mkdir(paths.database_model_path)
# Reconstruct the database model.
subprocess.call([os.path.join(args.colmap_path, 'colmap'), 'point_triangulator',
'--database_path', paths.database_path,
'--image_path', paths.image_path,
'--input_path', paths.empty_model_path,
'--output_path', paths.database_model_path,
'--Mapper.ba_refine_focal_length', '0',
'--Mapper.ba_refine_principal_point', '0',
'--Mapper.ba_refine_extra_params', '0'])
def register_queries(paths, args):
if not os.path.isdir(paths.final_model_path):
os.mkdir(paths.final_model_path)
# Register the query images.
subprocess.call([os.path.join(args.colmap_path, 'colmap'), 'image_registrator',
'--database_path', paths.database_path,
'--input_path', paths.database_model_path,
'--output_path', paths.final_model_path,
'--Mapper.ba_refine_focal_length', '0',
'--Mapper.ba_refine_principal_point', '0',
'--Mapper.ba_refine_extra_params', '0'])
def recover_query_poses(paths, args):
print('Recovering query poses...')
if not os.path.isdir(paths.final_txt_model_path):
os.mkdir(paths.final_txt_model_path)
# Convert the model to TXT.
subprocess.call([os.path.join(args.colmap_path, 'colmap'), 'model_converter',
'--input_path', paths.final_model_path,
'--output_path', paths.final_txt_model_path,
'--output_type', 'TXT'])
# Recover query names.
query_image_list_path = os.path.join(args.dataset_path, 'queries/night_time_queries_with_intrinsics_v1_1.txt')
with open(query_image_list_path) as f:
raw_queries = f.readlines()
query_names = set()
for raw_query in raw_queries:
raw_query = raw_query.strip('\n').split(' ')
query_name = raw_query[0]
query_names.add(query_name)
with open(os.path.join(paths.final_txt_model_path, 'images.txt')) as f:
raw_extrinsics = f.readlines()
if not paths.prediction_path.parent.exists():
paths.prediction_path.parent.makedirs_p()
f = open(paths.prediction_path, 'w')
# Skip the header.
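    # COLMAP's images.txt has a four-line comment header followed by two lines per image:
    # "IMAGE_ID QW QX QY QZ TX TY TZ CAMERA_ID NAME" and then the 2D observations,
    # hence the stride-2 slice over raw_extrinsics starting at index 4 below.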
for extrinsics in raw_extrinsics[4 :: 2]:
extrinsics = extrinsics.strip('\n').split(' ')
image_name = extrinsics[-1]
if image_name in query_names:
# Skip the IMAGE_ID ([0]), CAMERA_ID ([-2]), and IMAGE_NAME ([-1]).
f.write('%s %s\n' % (image_name.split('/')[-1], ' '.join(extrinsics[1 : -2])))
f.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', required=True, help='Path to the dataset')
parser.add_argument('--feature_path', required=True, help='Path to the features')
parser.add_argument('--colmap_path', required=True, help='Path to the COLMAP executable folder')
parser.add_argument('--method_name', required=True, help='Name of the method')
parser.add_argument('--match_list_path', type=str, default='image_pairs_to_match.txt', help='config of reconstruct and register')
args = parser.parse_args()
# Torch settings for the matcher.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# Create the extra paths.
paths = types.SimpleNamespace()
paths.dummy_database_path = Path(args.dataset_path)/'others/database_v1_1.db'
paths.database_path = Path(args.dataset_path)/'intermedia/{}/{}_v1_1.db'.format(args.method_name,args.method_name)
paths.image_path = Path(args.dataset_path)/'images/images_upright'
paths.features_path = Path(args.feature_path)
paths.reference_model_path = Path(args.dataset_path)/'3D-models/aachen_v_1_1'
paths.match_list_path = Path(args.dataset_path)/'others/{}'.format(args.match_list_path)
paths.empty_model_path = Path(args.dataset_path)/'intermedia/{}/sparse-{}-empty-v_1_1'.format(args.method_name, args.method_name)
paths.database_model_path = Path(args.dataset_path)/'intermedia/{}/sparse-{}-database-v_1_1'.format(args.method_name, args.method_name)
paths.final_model_path = Path(args.dataset_path)/'intermedia/{}/sparse-{}-final-v_1_1'.format(args.method_name, args.method_name)
paths.final_txt_model_path = Path(args.dataset_path)/'intermedia/{}/sparse-{}-final-txt-v_1_1'.format(args.method_name, args.method_name)
paths.prediction_path = Path(args.dataset_path)/'results/Aachen_v1_1_eval_[{}].txt'.format(args.method_name)
# Create a copy of the dummy database.
if os.path.exists(paths.database_path):
raise FileExistsError('The database file already exists for method %s.' % args.method_name)
if not paths.database_path.parent.exists():
paths.database_path.parent.makedirs_p()
shutil.copyfile(paths.dummy_database_path, paths.database_path)
# Reconstruction pipeline.
camera_parameters = preprocess_reference_model(paths, args)
images, cameras = recover_database_images_and_ids(paths, args)
generate_empty_reconstruction(images, cameras, camera_parameters, paths, args)
import_features(images, paths, args)
match_features(images, paths, args)
geometric_verification(paths, args)
reconstruct(paths, args)
register_queries(paths, args)
recover_query_poses(paths, args) | 12,821 | 35.844828 | 141 | py |
PoSFeat | PoSFeat-main/evaluations/aachen/reconstruct_pipeline.py | import argparse
import numpy as np
from path import Path
import os
import shutil
import subprocess
import sqlite3
import torch
import types
from tqdm import tqdm
from matchers import mutual_nn_matcher
from camera import Camera
from utils import quaternion_to_rotation_matrix, camera_center_to_translation
import sys
IS_PYTHON3 = sys.version_info[0] >= 3
def array_to_blob(array):
if IS_PYTHON3:
return array.tostring()
else:
return np.getbuffer(array)
def recover_database_images_and_ids(paths, args):
# Connect to the database.
connection = sqlite3.connect(paths.database_path)
cursor = connection.cursor()
# Recover database images and ids.
images = {}
cameras = {}
cursor.execute("SELECT name, image_id, camera_id FROM images;")
for row in cursor:
images[row[0]] = row[1]
cameras[row[0]] = row[2]
# Close the connection to the database.
cursor.close()
connection.close()
return images, cameras
def preprocess_reference_model(paths, args):
print('Preprocessing the reference model...')
# Recover intrinsics.
with open(paths.reference_model_path/'database_intrinsics.txt') as f:
raw_intrinsics = f.readlines()
camera_parameters = {}
for intrinsics in raw_intrinsics:
intrinsics = intrinsics.strip('\n').split(' ')
image_name = intrinsics[0]
camera_model = intrinsics[1]
intrinsics = [float(param) for param in intrinsics[2 :]]
camera = Camera()
camera.set_intrinsics(camera_model=camera_model, intrinsics=intrinsics)
camera_parameters[image_name] = camera
# Recover poses.
with open(paths.reference_model_path/'aachen_cvpr2018_db.nvm') as f:
raw_extrinsics = f.readlines()
# Skip the header.
n_cameras = int(raw_extrinsics[2])
raw_extrinsics = raw_extrinsics[3 : 3 + n_cameras]
for extrinsics in raw_extrinsics:
extrinsics = extrinsics.strip('\n').split(' ')
image_name = extrinsics[0]
# Skip the focal length. Skip the distortion and terminal 0.
qw, qx, qy, qz, cx, cy, cz = [float(param) for param in extrinsics[2 : -2]]
qvec = np.array([qw, qx, qy, qz])
c = np.array([cx, cy, cz])
# NVM -> COLMAP.
t = camera_center_to_translation(c, qvec)
camera_parameters[image_name].set_pose(qvec=qvec, t=t)
return camera_parameters
def generate_empty_reconstruction(images, cameras, camera_parameters, paths, args):
print('Generating the empty reconstruction...')
if not paths.empty_model_path.exists():
paths.empty_model_path.makedirs_p()
with open(paths.empty_model_path/'cameras.txt', 'w') as f:
for image_name in images:
image_id = images[image_name]
camera_id = cameras[image_name]
try:
camera = camera_parameters[image_name]
            except KeyError:  # this image (e.g. a query) has no entry in the reference model
continue
f.write('%d %s %s\n' % (
camera_id,
camera.camera_model,
' '.join(map(str, camera.intrinsics))
))
with open(paths.empty_model_path/'images.txt', 'w') as f:
for image_name in images:
image_id = images[image_name]
camera_id = cameras[image_name]
try:
camera = camera_parameters[image_name]
            except KeyError:  # this image (e.g. a query) has no entry in the reference model
continue
f.write('%d %s %s %d %s\n\n' % (
image_id,
' '.join(map(str, camera.qvec)),
' '.join(map(str, camera.t)),
camera_id,
image_name
))
with open(paths.empty_model_path/'points3D.txt', 'w') as f:
pass
def import_features(images, paths, args):
# Connect to the database.
connection = sqlite3.connect(paths.database_path)
cursor = connection.cursor()
# Import the features.
print('Importing features...')
for image_name, image_id in tqdm(images.items(), total=len(images.items())):
features_path = paths.features_path/'{}.{}'.format(image_name, args.method_name)
keypoints = np.load(features_path)['keypoints']
n_keypoints = keypoints.shape[0]
# Keep only x, y coordinates.
keypoints = keypoints[:, : 2]
# Add placeholder scale, orientation.
keypoints = np.concatenate([keypoints, np.ones((n_keypoints, 1)), np.zeros((n_keypoints, 1))], axis=1).astype(np.float32)
        keypoints_str = keypoints.tobytes()  # .tostring() was deprecated and removed in NumPy 2.0
cursor.execute("INSERT INTO keypoints(image_id, rows, cols, data) VALUES(?, ?, ?, ?);",
(image_id, keypoints.shape[0], keypoints.shape[1], keypoints_str))
connection.commit()
# Close the connection to the database.
cursor.close()
connection.close()
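# COLMAP pair key: MAX_IMAGE_ID (2147483647) * smaller image id + larger image id.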
def image_ids_to_pair_id(image_id1, image_id2):
if image_id1 > image_id2:
return 2147483647 * image_id2 + image_id1
else:
return 2147483647 * image_id1 + image_id2
def match_features(images, paths, args):
# Connect to the database.
connection = sqlite3.connect(paths.database_path)
cursor = connection.cursor()
# Match the features and insert the matches in the database.
print('Matching...')
with open(paths.match_list_path, 'r') as f:
raw_pairs = f.readlines()
image_pair_ids = set()
bar = tqdm(raw_pairs, total=len(raw_pairs))
for raw_pair in bar:
image_name1, image_name2 = raw_pair.strip('\n').split(' ')
features_path1 = paths.features_path/'{}.{}'.format(image_name1, args.method_name)
features_path2 = paths.features_path/'{}.{}'.format(image_name2, args.method_name)
descriptors1 = torch.from_numpy(np.load(features_path1)['descriptors']).to(device)
descriptors2 = torch.from_numpy(np.load(features_path2)['descriptors']).to(device)
matches = mutual_nn_matcher(descriptors1, descriptors2).astype(np.uint32)
image_id1, image_id2 = images[image_name1], images[image_name2]
image_pair_id = image_ids_to_pair_id(image_id1, image_id2)
if image_pair_id in image_pair_ids:
continue
image_pair_ids.add(image_pair_id)
if image_id1 > image_id2:
matches = matches[:, [1, 0]]
        matches_str = np.int32(matches).tobytes()
cursor.execute("INSERT INTO matches(pair_id, rows, cols, data) VALUES(?, ?, ?, ?);",
(image_pair_id, matches.shape[0], matches.shape[1], matches_str))
connection.commit()
# Close the connection to the database.
cursor.close()
connection.close()
def geometric_verification(paths, args):
print('Running geometric verification...')
subprocess.call([Path(args.colmap_path)/'colmap', 'matches_importer',
'--database_path', paths.database_path,
'--match_list_path', paths.match_list_path,
'--match_type', 'pairs'])
def reconstruct(paths, args):
if not paths.database_model_path.isdir():
paths.database_model_path.makedirs_p()
# Reconstruct the database model.
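    # COLMAP's point_triangulator only triangulates 3D points against the fixed poses of the
    # empty reference model; the Mapper.ba_refine_* flags additionally freeze the intrinsics,
    # so the resulting statistics reflect the quality of the features and matches rather than
    # any re-calibration.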
subprocess.call([Path(args.colmap_path)/'colmap', 'point_triangulator',
'--database_path', paths.database_path,
'--image_path', paths.image_path,
'--input_path', paths.empty_model_path,
'--output_path', paths.database_model_path,
'--Mapper.ba_refine_focal_length', '0',
'--Mapper.ba_refine_principal_point', '0',
'--Mapper.ba_refine_extra_params', '0',])
def register_queries(paths, args):
if not paths.final_model_path.isdir():
paths.final_model_path.makedirs_p()
# Register the query images.
subprocess.call([Path(args.colmap_path)/'colmap', 'image_registrator',
'--database_path', paths.database_path,
'--input_path', paths.database_model_path,
'--output_path', paths.final_model_path,
'--Mapper.ba_refine_focal_length', '0',
'--Mapper.ba_refine_principal_point', '0',
'--Mapper.ba_refine_extra_params', '0'])
def recover_query_poses(paths, args):
print('Recovering query poses...')
if not paths.final_txt_model_path.isdir():
paths.final_txt_model_path.makedirs_p()
# Convert the model to TXT.
subprocess.call([Path(args.colmap_path)/'colmap', 'model_converter',
'--input_path', paths.final_model_path,
'--output_path', paths.final_txt_model_path,
'--output_type', 'TXT'])
# Recover query names.
if args.match_list_path == 'image_pairs_to_match.txt':
query_image_list_path = Path(args.dataset_path)/'queries/night_time_queries_with_intrinsics.txt'
with open(query_image_list_path) as f:
raw_queries = f.readlines()
else:
query_image_list_path = Path(args.dataset_path)/'queries/night_time_queries_with_intrinsics.txt'
with open(query_image_list_path) as f:
raw_queries = f.readlines()
query_image_list_path = Path(args.dataset_path)/'queries/day_time_queries_with_intrinsics.txt'
with open(query_image_list_path) as f:
tmp = f.readlines()
raw_queries.extend(tmp)
query_names = set()
for raw_query in raw_queries:
raw_query = raw_query.strip('\n').split(' ')
query_name = raw_query[0]
query_names.add(query_name)
with open(os.path.join(paths.final_txt_model_path, 'images.txt')) as f:
raw_extrinsics = f.readlines()
if not paths.prediction_path.parent.exists():
paths.prediction_path.parent.makedirs_p()
f = open(paths.prediction_path, 'w')
# Skip the header.
for extrinsics in raw_extrinsics[4 :: 2]:
extrinsics = extrinsics.strip('\n').split(' ')
image_name = extrinsics[-1]
if image_name in query_names:
# Skip the IMAGE_ID ([0]), CAMERA_ID ([-2]), and IMAGE_NAME ([-1]).
f.write('%s %s\n' % (image_name.split('/')[-1], ' '.join(extrinsics[1 : -2])))
f.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', required=True, help='Path to the dataset')
parser.add_argument('--feature_path', required=True, help='Path to the features')
parser.add_argument('--colmap_path', required=True, help='Path to the COLMAP executable folder')
parser.add_argument('--method_name', required=True, help='Name of the method')
parser.add_argument('--match_list_path', type=str, default='image_pairs_to_match.txt', help='config of reconstruct and register')
args = parser.parse_args()
# Torch settings for the matcher.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# Create the extra paths.
paths = types.SimpleNamespace()
paths.dummy_database_path = Path(args.dataset_path)/'others/database.db'
paths.database_path = Path(args.dataset_path)/'intermedia/{}/{}.db'.format(args.method_name,args.method_name)
paths.image_path = Path(args.dataset_path)/'images/images_upright'
paths.features_path = Path(args.feature_path)
paths.reference_model_path = Path(args.dataset_path)/'3D-models/aachen_v_1'
paths.match_list_path = Path(args.dataset_path)/'others/{}'.format(args.match_list_path)
paths.empty_model_path = Path(args.dataset_path)/'intermedia/{}/sparse-{}-empty'.format(args.method_name, args.method_name)
paths.database_model_path = Path(args.dataset_path)/'intermedia/{}/sparse-{}-database'.format(args.method_name, args.method_name)
paths.final_model_path = Path(args.dataset_path)/'intermedia/{}/sparse-{}-final'.format(args.method_name, args.method_name)
paths.final_txt_model_path = Path(args.dataset_path)/'intermedia/{}/sparse-{}-final-txt'.format(args.method_name, args.method_name)
paths.prediction_path = Path(args.dataset_path)/'results/Aachen_eval_[{}].txt'.format(args.method_name)
# Create a copy of the dummy database.
if paths.database_path.exists():
raise FileExistsError('The database file already exists for method %s.' % args.method_name)
if not paths.database_path.parent.exists():
paths.database_path.parent.makedirs_p()
shutil.copyfile(paths.dummy_database_path, paths.database_path)
# Reconstruction pipeline.
camera_parameters = preprocess_reference_model(paths, args)
images, cameras = recover_database_images_and_ids(paths, args)
generate_empty_reconstruction(images, cameras, camera_parameters, paths, args)
import_features(images, paths, args)
match_features(images, paths, args)
geometric_verification(paths, args)
reconstruct(paths, args)
register_queries(paths, args)
recover_query_poses(paths, args) | 13,096 | 35.686275 | 135 | py |
PoSFeat | PoSFeat-main/evaluations/aachen/matchers.py | import torch
# Mutual nearest neighbors matcher for L2 normalized descriptors.
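# Descriptors are assumed to be L2-normalized, so the dot-product similarity is the cosine
# similarity and ranks candidates identically to Euclidean distance (||a - b||^2 = 2 - 2 * a.b).
# A pair (i, j) is kept only if j is the nearest neighbour of i AND i is the nearest neighbour of j.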
def mutual_nn_matcher(descriptors1, descriptors2):
device = descriptors1.device
sim = descriptors1 @ descriptors2.t()
nn12 = torch.max(sim, dim=1)[1]
nn21 = torch.max(sim, dim=0)[1]
ids1 = torch.arange(0, sim.shape[0], device=device)
mask = ids1 == nn21[nn12]
matches = torch.stack([ids1[mask], nn12[mask]]).t()
return matches.data.cpu().numpy()
# Symmetric Lowe's ratio test matcher for L2 normalized descriptors.
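# For unit-length descriptors the Euclidean distance is recovered from the similarity as
# d = sqrt(2 - 2 * sim); Lowe's ratio test keeps a match only when the closest candidate is
# sufficiently better than the second closest (d1 / d2 <= ratio), here enforced in both directions.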
def ratio_matcher(descriptors1, descriptors2, ratio=0.95):
device = descriptors1.device
sim = descriptors1 @ descriptors2.t()
# Retrieve top 2 nearest neighbors 1->2.
nns_sim, nns = torch.topk(sim, 2, dim=1)
nns_dist = torch.sqrt(2 - 2 * nns_sim)
# Compute Lowe's ratio.
ratios12 = nns_dist[:, 0] / (nns_dist[:, 1] + 1e-8)
# Save first NN.
nn12 = nns[:, 0]
    # Retrieve top 2 nearest neighbors 2->1.
nns_sim, nns = torch.topk(sim.t(), 2, dim=1)
nns_dist = torch.sqrt(2 - 2 * nns_sim)
# Compute Lowe's ratio.
ratios21 = nns_dist[:, 0] / (nns_dist[:, 1] + 1e-8)
# Save first NN.
nn21 = nns[:, 0]
# Symmetric ratio test.
ids1 = torch.arange(0, sim.shape[0], device=device)
mask = torch.min(ratios12 <= ratio, ratios21[nn12] <= ratio)
# Final matches.
matches = torch.stack([ids1[mask], nn12[mask]], dim=-1)
return matches.data.cpu().numpy()
# Mutual NN + symmetric Lowe's ratio test matcher for L2 normalized descriptors.
def mutual_nn_ratio_matcher(descriptors1, descriptors2, ratio=0.95):
device = descriptors1.device
sim = descriptors1 @ descriptors2.t()
# Retrieve top 2 nearest neighbors 1->2.
nns_sim, nns = torch.topk(sim, 2, dim=1)
nns_dist = torch.sqrt(2 - 2 * nns_sim)
# Compute Lowe's ratio.
ratios12 = nns_dist[:, 0] / (nns_dist[:, 1] + 1e-8)
# Save first NN and match similarity.
nn12 = nns[:, 0]
    # Retrieve top 2 nearest neighbors 2->1.
nns_sim, nns = torch.topk(sim.t(), 2, dim=1)
nns_dist = torch.sqrt(2 - 2 * nns_sim)
# Compute Lowe's ratio.
ratios21 = nns_dist[:, 0] / (nns_dist[:, 1] + 1e-8)
# Save first NN.
nn21 = nns[:, 0]
# Mutual NN + symmetric ratio test.
ids1 = torch.arange(0, sim.shape[0], device=device)
mask = torch.min(ids1 == nn21[nn12], torch.min(ratios12 <= ratio, ratios21[nn12] <= ratio))
# Final matches.
matches = torch.stack([ids1[mask], nn12[mask]], dim=-1)
return matches.data.cpu().numpy()
| 2,562 | 32.285714 | 95 | py |
PoSFeat | PoSFeat-main/evaluations/hpatches/evaluation.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import torch
from scipy.io import loadmat
from tqdm import tqdm
from path import Path
use_cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if use_cuda else 'cpu')
methods = ['hesaff', 'hesaffnet', 'contextdesc', 'd2-net', 'r2d2', 'aslfeat', 'disk-d-8k-official', 'delf-new', 'superpoint', 'caps', 'disk-epipolar', 'PoSFeat_CVPR']
names = ['Hes. Aff. + Root-SIFT', 'HAN + HN++', 'SIFT + ContextDesc', 'D2-Net', 'R2D2', 'ASLFeat', 'DISK', 'DELF', 'SuperPoint', 'SIFT + CAPS', 'DISK-W', 'PoSFeat']
colors = ['tan', 'orange', 'peru', 'skyblue', 'purple', 'tomato', 'yellowgreen', 'gray', 'darkcyan', 'slateblue', 'yellowgreen', 'red']
linestyles = ['--','--','--','--','--','--','--','-', '-', '-', '-', '-']
top_k = None
n_i = 52
n_v = 56
dataset_path = Path('/data/kunb/hpatches/hpatches-sequences-release')
features_path = Path('../../ckpts/hpatches/PoSFeat_mytrain/desc')
lim = [1, 15]
rng = np.arange(lim[0], lim[1] + 1)
@torch.no_grad()
def mnn_matcher(descriptors_a, descriptors_b):
device = descriptors_a.device
sim = descriptors_a @ descriptors_b.t()
# print(sim)
# print(sim.max(), sim.min())
nn12 = torch.max(sim, dim=1)[1]
nn21 = torch.max(sim, dim=0)[1]
ids1 = torch.arange(0, sim.shape[0], device=device)
mask = (ids1 == nn21[nn12])
matches = torch.stack([ids1[mask], nn12[mask]])
return matches.t().data.cpu().numpy()
def benchmark_features(read_feats):
seq_names = sorted(dataset_path.listdir())
n_feats = []
n_matches = []
seq_type = []
i_err = {thr: 0 for thr in rng}
v_err = {thr: 0 for thr in rng}
for seq_idx, seq_name in tqdm(enumerate(seq_names), total=len(seq_names)):
seq_name = seq_name.name
keypoints_a, descriptors_a = read_feats(seq_name, 1)
if keypoints_a.shape[0] > 60000:
keypoints_a = keypoints_a[:60000,:]
descriptors_a = descriptors_a[:60000, :]
n_feats.append(keypoints_a.shape[0])
for im_idx in range(2, 7):
keypoints_b, descriptors_b = read_feats(seq_name, im_idx)
if keypoints_b.shape[0] > 60000:
keypoints_b = keypoints_b[:60000,:]
descriptors_b = descriptors_b[:60000, :]
n_feats.append(keypoints_b.shape[0])
matches = mnn_matcher(
torch.from_numpy(descriptors_a).to(device=device),
torch.from_numpy(descriptors_b).to(device=device)
)
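            # Warp the matched keypoints of image 1 into image 2 with the ground-truth homography
            # (homogeneous coordinates) and measure the reprojection error in pixels.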
homography = np.loadtxt(dataset_path/"{}/H_1_{}".format(seq_name, im_idx))
pos_a = keypoints_a[matches[:, 0], : 2]
pos_a_h = np.concatenate([pos_a, np.ones([matches.shape[0], 1])], axis=1)
pos_b_proj_h = np.transpose(np.dot(homography, np.transpose(pos_a_h)))
pos_b_proj = pos_b_proj_h[:, : 2] / pos_b_proj_h[:, 2 :]
pos_b = keypoints_b[matches[:, 1], : 2]
dist = np.sqrt(np.sum((pos_b - pos_b_proj) ** 2, axis=1))
n_matches.append(matches.shape[0])
seq_type.append(seq_name[0])
if dist.shape[0] == 0:
dist = np.array([float("inf")])
for thr in rng:
if seq_name[0] == 'i':
i_err[thr] += np.mean(dist <= thr)
else:
v_err[thr] += np.mean(dist <= thr)
seq_type = np.array(seq_type)
n_feats = np.array(n_feats)
n_matches = np.array(n_matches)
return i_err, v_err, [seq_type, n_feats, n_matches]
def summary(stats):
seq_type, n_feats, n_matches = stats
print('# Features: {:f} - [{:d}, {:d}]'.format(np.mean(n_feats), np.min(n_feats), np.max(n_feats)))
print('# Matches: Overall {:f}, Illumination {:f}, Viewpoint {:f}'.format(
np.sum(n_matches) / ((n_i + n_v) * 5),
np.sum(n_matches[seq_type == 'i']) / (n_i * 5),
np.sum(n_matches[seq_type == 'v']) / (n_v * 5))
)
def generate_read_function(method, extension='ppm'):
def read_function(seq_name, im_idx):
aux = np.load(features_path/"{}/{}.{}.{}".format(seq_name, im_idx, extension, method))
if top_k is None:
return aux['keypoints'], aux['descriptors']
else:
assert('scores' in aux)
ids = np.argsort(aux['scores'])[-top_k :]
return aux['keypoints'][ids, :], aux['descriptors'][ids, :]
return read_function
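# RootSIFT (Arandjelovic & Zisserman, CVPR 2012): L1-normalise the SIFT descriptor and take the
# element-wise square root, so that Euclidean distance on the result approximates the Hellinger
# kernel on the original histograms.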
def sift_to_rootsift(descriptors):
return np.sqrt(descriptors / np.expand_dims(np.sum(np.abs(descriptors), axis=1), axis=1) + 1e-16)
def parse_mat(mat):
keypoints = mat['keypoints'][:, : 2]
raw_descriptors = mat['descriptors']
l2_norm_descriptors = raw_descriptors / np.expand_dims(np.sum(raw_descriptors ** 2, axis=1), axis=1)
descriptors = sift_to_rootsift(l2_norm_descriptors)
if top_k is None:
return keypoints, descriptors
else:
assert('scores' in mat)
ids = np.argsort(mat['scores'][0])[-top_k :]
return keypoints[ids, :], descriptors[ids, :]
if top_k is None:
cache_dir = 'cache'
else:
cache_dir = 'cache-top'
if not os.path.isdir(cache_dir):
os.mkdir(cache_dir)
errors = {}
for method in methods:
output_file = os.path.join(cache_dir, method + '.npy')
print(method)
if method == 'hesaff':
read_function = lambda seq_name, im_idx: parse_mat(loadmat(os.path.join(dataset_path, seq_name, '%d.ppm.hesaff' % im_idx), appendmat=False))
else:
if method == 'delf' or method == 'delf-new':
read_function = generate_read_function(method, extension='png')
else:
read_function = generate_read_function(method)
if os.path.exists(output_file):
print('Loading precomputed errors...')
errors[method] = np.load(output_file, allow_pickle=True)
else:
errors[method] = benchmark_features(read_function)
saved = np.array(errors[method], dtype=object)
np.save(output_file, saved)
summary(errors[method][-1])
# evaluate MMA score
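# The score below is a weighted mean of MMA over the thresholds 1..10 px, where threshold t
# receives weight (2 - t/10) so that accuracy at tight thresholds counts more; dividing by the
# sum of weights (upper_bound) normalises a perfect method to 1.0.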
MMAscore = {}
for method in methods:
i_err, v_err, _ = errors[method]
tmp_a = []
tmp_i = []
tmp_v = []
for thr in range(1,11):
tmp_a.append((i_err[thr] + v_err[thr]) / ((n_i + n_v) * 5))
tmp_i.append(i_err[thr] / (n_i * 5))
tmp_v.append(v_err[thr] / (n_v * 5))
cur_a = 0
cur_i = 0
cur_v = 0
upper_bound = 0
for idx, (mma_a, mma_i, mma_v) in enumerate(zip(tmp_a, tmp_i, tmp_v)):
cur_a += (2-(idx+1)/10.)*mma_a
cur_i += (2-(idx+1)/10.)*mma_i
cur_v += (2-(idx+1)/10.)*mma_v
upper_bound += (2-(idx+1)/10.)*1
MMAscore[method] = (cur_a/upper_bound, cur_i/upper_bound, cur_v/upper_bound)
# plot
plt_lim = [1, 10]
plt_rng = np.arange(plt_lim[0], plt_lim[1] + 1)
plt_ylim = [0, 1]
plt.rc('axes', titlesize=25)
plt.rc('axes', labelsize=25)
labelsize = 20
plt.figure(figsize=(15, 5))
plt.subplot(1, 3, 1)
for method, name, color, ls in zip(methods, names, colors, linestyles):
i_err, v_err, _ = errors[method]
plt.plot(plt_rng, [(i_err[thr] + v_err[thr]) / ((n_i + n_v) * 5) for thr in plt_rng], color=color, ls=ls, linewidth=3, label=name)
plt.title('Overall')
plt.xlim(plt_lim)
plt.xticks(plt_rng)
plt.ylabel('MMA')
plt.ylim(plt_ylim)
plt.grid()
plt.tick_params(axis='both', which='major', labelsize=labelsize)
# plt.legend()
plt.subplot(1, 3, 2)
for method, name, color, ls in zip(methods, names, colors, linestyles):
i_err, v_err, _ = errors[method]
plt.plot(plt_rng, [i_err[thr] / (n_i * 5) for thr in plt_rng], color=color, ls=ls, linewidth=3, label=name)
plt.title('Illumination')
plt.xlabel('threshold [px]')
plt.xlim(plt_lim)
plt.xticks(plt_rng)
plt.ylim(plt_ylim)
plt.gca().axes.set_yticklabels([])
plt.grid()
plt.tick_params(axis='both', which='major', labelsize=labelsize)
plt.subplot(1, 3, 3)
for method, name, color, ls in zip(methods, names, colors, linestyles):
i_err, v_err, _ = errors[method]
plt.plot(plt_rng, [v_err[thr] / (n_v * 5) for thr in plt_rng], color=color, ls=ls, linewidth=3, label=name)
plt.title('Viewpoint')
plt.xlim(plt_lim)
plt.xticks(plt_rng)
plt.ylim(plt_ylim)
plt.gca().axes.set_yticklabels([])
plt.grid()
plt.tick_params(axis='both', which='major', labelsize=labelsize)
import datetime
timestamp = datetime.datetime.now().strftime("%m-%d-%H:%M")
if top_k is None:
plt.savefig('hseq{}.pdf'.format(timestamp), bbox_inches='tight', dpi=300)
plt.savefig('hseq{}.eps'.format(timestamp), bbox_inches='tight', dpi=300)
else:
plt.savefig('hseq-top.pdf', bbox_inches='tight', dpi=300)
plt.legend()
if top_k is None:
plt.savefig('hseq{}_label.pdf'.format(timestamp), bbox_inches='tight', dpi=300)
else:
plt.savefig('hseq-top_label.pdf', bbox_inches='tight', dpi=300)
with open('hseq{}.txt'.format(timestamp), 'w') as f:
lines = ''
for name, method in zip(names, methods):
name = name.ljust(25, ' ')
tmp_stat = errors[method][-1]
seq_type, n_feats, n_matches = tmp_stat
num_feat = np.mean(n_feats)
num_match = np.sum(n_matches) / ((n_i + n_v) * 5)
mmascore = MMAscore[method]
lines += '{} & {:.1f} & {:.1f} & {:.3f} & {:.3f} & {:.3f}\n'.format(
name, num_feat, num_match, mmascore[0], mmascore[1], mmascore[2])
f.write(lines) | 9,402 | 35.587549 | 166 | py |
PoSFeat | PoSFeat-main/evaluations/ETH_local_feature/reconstruction_pipeline.py | # Import the features and matches into a COLMAP database.
#
# Copyright 2017: Johannes L. Schoenberger <jsch at inf.ethz.ch>
from __future__ import print_function, division
import os
import sys
import glob
import yaml
import types
import torch
import shutil
import sqlite3
import argparse
import subprocess
import multiprocessing
import numpy as np
from path import Path
from tqdm import tqdm
import custom_matcher as cms
IS_PYTHON3 = sys.version_info[0] >= 3
def parse_args():
parser = argparse.ArgumentParser()
# parser.add_argument("--dataset_path", required=True,
# help="Path to the dataset, e.g., path/to/Fountain")
# parser.add_argument("--colmap_path", required=True,
# help="Path to the COLMAP executable folder, e.g., "
# "path/to/colmap/build/src/exe")
# parser.add_argument("--features_path", required=True,
# help="Path to the features folder, e.g., "
# "path/to/feature")
# parser.add_argument("--method_postfix", required=True,
# help="the postfix of the method")
# parser.add_argument("--matcher", required=True,
# help="the matcher")
parser.add_argument("--config", required=True,
help="Path to the configs, e.g., path/to/Fountain")
args = parser.parse_args()
return args
def image_ids_to_pair_id(image_id1, image_id2):
if image_id1 > image_id2:
return 2147483647 * image_id2 + image_id1
else:
return 2147483647 * image_id1 + image_id2
def import_features_and_match(configs, paths):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
connection = sqlite3.connect(paths.database_path)
cursor = connection.cursor()
cursor.execute("SELECT name FROM sqlite_master "
"WHERE type='table' AND name='inlier_matches';")
try:
inlier_matches_table_exists = bool(next(cursor)[0])
except StopIteration:
inlier_matches_table_exists = False
cursor.execute("DELETE FROM keypoints;")
cursor.execute("DELETE FROM descriptors;")
cursor.execute("DELETE FROM matches;")
if inlier_matches_table_exists:
cursor.execute("DELETE FROM inlier_matches;")
else:
cursor.execute("DELETE FROM two_view_geometries;")
connection.commit()
images = {}
cursor.execute("SELECT name, image_id FROM images;")
for row in cursor:
images[row[0]] = row[1]
for image_name, image_id in tqdm(images.items(), total=len(images.items())):
feature_path = paths.features_path/'{}.{}'.format(image_name, configs['method_postfix'])
feature_file = np.load(feature_path)
keypoints = feature_file['keypoints'][:,:2]
descriptors = feature_file['descriptors']
assert keypoints.shape[1] == 2
assert keypoints.shape[0] == descriptors.shape[0]
        keypoints_str = keypoints.tobytes()  # older NumPy code used the now-removed .tostring()
cursor.execute("INSERT INTO keypoints(image_id, rows, cols, data) "
"VALUES(?, ?, ?, ?);",
(image_id, keypoints.shape[0], keypoints.shape[1],
keypoints_str))
connection.commit()
# custom match
matcher = getattr(cms, configs['matcher'])
image_names = list(images.keys())
image_pairs = []
image_pair_ids = set()
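    # Exhaustive matching over all unordered image pairs, i.e. N * (N - 1) / 2 pairs for N images
    # (e.g. 100 images -> 4950 pairs); descriptors of the first image are loaded once per outer loop.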
for idx_total, image_name1 in enumerate(tqdm(image_names[:-1])):
feature_path1 = paths.features_path/'{}.{}'.format(image_name1, configs['method_postfix'])
descriptors1 = np.load(feature_path1)['descriptors']
descriptors1 = torch.from_numpy(descriptors1).to(device)
bar = tqdm(image_names[idx_total+1:])
for idx_sub, image_name2 in enumerate(bar):
image_pairs.append((image_name1, image_name2))
image_id1, image_id2 = images[image_name1], images[image_name2]
image_pair_id = image_ids_to_pair_id(image_id1, image_id2)
if image_pair_id in image_pair_ids:
continue
feature_path2 = paths.features_path/'{}.{}'.format(image_name2, configs['method_postfix'])
descriptors2 = np.load(feature_path2)['descriptors']
descriptors2 = torch.from_numpy(descriptors2).to(device)
matches = matcher(descriptors1, descriptors2, **configs['matcher_config'])
assert matches.shape[1] == 2
# bar.write("matches: {}".format(matches.shape[0]))
image_pair_ids.add(image_pair_id)
if image_id1 > image_id2:
matches = matches[:, [1, 0]]
            matches_str = np.int32(matches).tobytes()
cursor.execute("INSERT INTO matches(pair_id, rows, cols, data) "
"VALUES(?, ?, ?, ?);",
(image_pair_id, matches.shape[0], matches.shape[1],
matches_str))
connection.commit()
torch.cuda.empty_cache()
with open(paths.match_list_path, 'w') as fid:
for image_name1, image_name2 in image_pairs:
fid.write("{} {}\n".format(image_name1, image_name2))
cursor.close()
connection.close()
subprocess.call([paths.colmap_path,
"matches_importer",
"--database_path",
paths.database_path,
"--match_list_path",
paths.match_list_path,
"--match_type", "pairs"])
# connection = sqlite3.connect(os.path.join(args.dataset_path, "database.db"))
connection = sqlite3.connect(paths.database_path)
cursor = connection.cursor()
cursor.execute("SELECT count(*) FROM images;")
num_images = next(cursor)[0]
cursor.execute("SELECT count(*) FROM two_view_geometries WHERE rows > 0;")
num_inlier_pairs = next(cursor)[0]
cursor.execute("SELECT sum(rows) FROM two_view_geometries WHERE rows > 0;")
num_inlier_matches = next(cursor)[0]
cursor.close()
connection.close()
return dict(num_images=num_images,
num_inlier_pairs=num_inlier_pairs,
num_inlier_matches=num_inlier_matches)
def reconstruct(configs, paths):
database_path = paths.database_path
image_path = paths.image_path
sparse_path = paths.features_path.parent/"{}_sparse".format(configs['subfolder'])
dense_path = paths.features_path.parent/"{}_dense".format(configs['subfolder'])
if not sparse_path.exists():
sparse_path.makedirs_p()
if not dense_path.exists():
dense_path.makedirs_p()
# Run the sparse reconstruction.
subprocess.call([paths.colmap_path,
"mapper",
"--database_path", database_path,
"--image_path", image_path,
"--output_path", sparse_path,
"--Mapper.num_threads",
str(min(multiprocessing.cpu_count(), 16))])
# Find the largest reconstructed sparse model.
models = sparse_path.listdir()
if len(models) == 0:
print("Warning: Could not reconstruct any model")
return
largest_model = None
largest_model_num_images = 0
for model in models:
subprocess.call([paths.colmap_path,
"model_converter",
"--input_path", model,
"--output_path", model,
"--output_type", "TXT"])
with open("{}/cameras.txt".format(model), 'r') as fid:
for line in fid:
if line.startswith("# Number of cameras"):
num_images = int(line.split()[-1])
if num_images > largest_model_num_images:
largest_model = model
largest_model_num_images = num_images
break
assert largest_model_num_images > 0
# Run the dense reconstruction.
largest_model_path = largest_model
### the codes for dense reconstruction
# workspace_path = dense_path/largest_model.name
# if not workspace_path.exists():
# workspace_path.makedirs_p()
# subprocess.call([paths.colmap_path,
# "image_undistorter",
# "--image_path", image_path,
# "--input_path", largest_model_path,
# "--output_path", workspace_path,
# "--max_image_size", "1200"])
# subprocess.call([paths.colmap_path,
# "patch_match_stereo",
# "--workspace_path", workspace_path,
# "--PatchMatchStereo.geom_consistency", "false"])
# subprocess.call([paths.colmap_path,
# "stereo_fusion",
# "--workspace_path", workspace_path,
# "--input_type", "photometric",
# "--output_path", os.path.join(workspace_path, "fused.ply"),
# "--StereoFusion.min_num_pixels", "5"])
stats = subprocess.check_output(
[paths.colmap_path, "model_analyzer",
"--path", largest_model_path])
stats = stats.decode().split("\n")
for stat in stats:
if stat.startswith("Registered images"):
num_reg_images = int(stat.split()[-1])
elif stat.startswith("Points"):
num_sparse_points = int(stat.split()[-1])
elif stat.startswith("Observations"):
num_observations = int(stat.split()[-1])
elif stat.startswith("Mean track length"):
mean_track_length = float(stat.split()[-1])
elif stat.startswith("Mean observations per image"):
num_observations_per_image = float(stat.split()[-1])
elif stat.startswith("Mean reprojection error"):
mean_reproj_error = float(stat.split()[-1][:-2])
# returns with dense results
# with open(os.path.join(workspace_path, "fused.ply"), "rb") as fid:
# line = fid.readline().decode()
# while line:
# if line.startswith("element vertex"):
# num_dense_points = int(line.split()[-1])
# break
# line = fid.readline().decode()
# return dict(num_reg_images=num_reg_images,
# num_sparse_points=num_sparse_points,
# num_observations=num_observations,
# mean_track_length=mean_track_length,
# num_observations_per_image=num_observations_per_image,
# mean_reproj_error=mean_reproj_error,
# num_dense_points=num_dense_points)
## returns without dense results
return dict(num_reg_images=num_reg_images,
num_sparse_points=num_sparse_points,
num_observations=num_observations,
mean_track_length=mean_track_length,
num_observations_per_image=num_observations_per_image,
mean_reproj_error=mean_reproj_error)
def main():
args = parse_args()
with open(args.config, 'r') as f:
configs = yaml.load(f, Loader=yaml.FullLoader)
configs['method_postfix'] = configs['postfix']
configs['features_path'] = '../../ckpts/{}/desc'.format(configs['output_root'])
configs['dataset_path'] = configs['data_config_extract']['data_path']
configs['subfolder'] = configs['data_config_extract']['subfolder']
paths = types.SimpleNamespace()
paths.colmap_path = Path(configs['colmap_path'])/'colmap'
paths.dataset_path = Path(configs['dataset_path'])/'{}'.format(
configs['subfolder'])
paths.image_path = paths.dataset_path/"images"
paths.features_path = Path(configs['features_path'])/'{}'.format(
configs['subfolder'])
paths.database_path = paths.features_path.parent/'{}_{}.db'.format(
configs['subfolder'], configs['method_postfix'])
paths.match_list_path = paths.features_path/'image_pairs_{}.txt'.format(
configs['method_postfix'])
paths.result_path = Path(configs['features_path'])/'res_{}_{}.txt'.format(
configs['subfolder'], configs['method_postfix'])
# print(paths.match_list_path)
if paths.database_path.exists():
        raise FileExistsError('The {} database already exists for method {}.'.format(
            configs['subfolder'], configs['method_postfix']))
shutil.copyfile(paths.dataset_path/'database.db', paths.database_path)
matching_stats = import_features_and_match(configs, paths)
reconstruction_stats = reconstruct(configs, paths)
print()
print(78 * "=")
print("Raw statistics")
print(78 * "=")
print(matching_stats)
print(reconstruction_stats)
print()
print(78 * "=")
print("Formatted statistics")
print(78 * "=")
# strings = "| " + " | ".join(
# map(str, [paths.dataset_path.basename(),
# "METHOD",
# matching_stats["num_images"],
# reconstruction_stats["num_reg_images"],
# reconstruction_stats["num_sparse_points"],
# reconstruction_stats["num_observations"],
# reconstruction_stats["mean_track_length"],
# reconstruction_stats["num_observations_per_image"],
# reconstruction_stats["mean_reproj_error"],
# reconstruction_stats["num_dense_points"],
# "",
# "",
# "",
# "",
# matching_stats["num_inlier_pairs"],
# matching_stats["num_inlier_matches"]])) + " |"
strings_key = '{}|'.format(paths.dataset_path.basename())
strings_val = '{}|'.format(paths.dataset_path.basename())
for key, val in reconstruction_stats.items():
strings_key += '{}|'.format(key)
tmp_str = '{}'.format(val)
tmp_str = tmp_str.rjust(len(key), ' ')
tmp_str = tmp_str +'|'
strings_val += tmp_str
strings_key += '\n'
strings_val += '\n'
print(strings_key+strings_val)
with open(paths.result_path, 'w') as fid:
fid.write(strings_key+strings_val)
if __name__ == "__main__":
main()
| 14,280 | 38.233516 | 102 | py |
PoSFeat | PoSFeat-main/evaluations/ETH_local_feature/custom_matcher.py | import torch
# Mutual nearest neighbors matcher for L2 normalized descriptors.
def mutual_nn_matcher(descriptors1, descriptors2, **args):
device = descriptors1.device
sim = descriptors1 @ descriptors2.t()
nn12 = torch.max(sim, dim=1)[1]
nn21 = torch.max(sim, dim=0)[1]
ids1 = torch.arange(0, sim.shape[0], device=device)
mask = ids1 == nn21[nn12]
matches = torch.stack([ids1[mask], nn12[mask]]).t()
return matches.data.cpu().numpy()
# Symmetric Lowe's ratio test matcher for L2 normalized descriptors.
def ratio_matcher(descriptors1, descriptors2, ratio=0.95):
device = descriptors1.device
sim = descriptors1 @ descriptors2.t()
# Retrieve top 2 nearest neighbors 1->2.
nns_sim, nns = torch.topk(sim, 2, dim=1)
nns_dist = torch.sqrt(2 - 2 * nns_sim)
# Compute Lowe's ratio.
ratios12 = nns_dist[:, 0] / (nns_dist[:, 1] + 1e-8)
# Save first NN.
nn12 = nns[:, 0]
    # Retrieve top 2 nearest neighbors 2->1.
nns_sim, nns = torch.topk(sim.t(), 2, dim=1)
nns_dist = torch.sqrt(2 - 2 * nns_sim)
# Compute Lowe's ratio.
ratios21 = nns_dist[:, 0] / (nns_dist[:, 1] + 1e-8)
# Save first NN.
nn21 = nns[:, 0]
# Symmetric ratio test.
ids1 = torch.arange(0, sim.shape[0], device=device)
mask = torch.min(ratios12 <= ratio, ratios21[nn12] <= ratio)
# Final matches.
matches = torch.stack([ids1[mask], nn12[mask]], dim=-1)
return matches.data.cpu().numpy()
# Mutual NN + symmetric Lowe's ratio test matcher for L2 normalized descriptors.
def mutual_nn_ratio_matcher(descriptors1, descriptors2, ratio=0.95):
device = descriptors1.device
sim = descriptors1 @ descriptors2.t()
# Retrieve top 2 nearest neighbors 1->2.
nns_sim, nns = torch.topk(sim, 2, dim=1)
nns_dist = torch.sqrt(2 - 2 * nns_sim)
# Compute Lowe's ratio.
ratios12 = nns_dist[:, 0] / (nns_dist[:, 1] + 1e-8)
# Save first NN and match similarity.
nn12 = nns[:, 0]
    # Retrieve top 2 nearest neighbors 2->1.
nns_sim, nns = torch.topk(sim.t(), 2, dim=1)
nns_dist = torch.sqrt(2 - 2 * nns_sim)
# Compute Lowe's ratio.
ratios21 = nns_dist[:, 0] / (nns_dist[:, 1] + 1e-8)
# Save first NN.
nn21 = nns[:, 0]
# Mutual NN + symmetric ratio test.
ids1 = torch.arange(0, sim.shape[0], device=device)
mask = torch.min(ids1 == nn21[nn12], torch.min(ratios12 <= ratio, ratios21[nn12] <= ratio))
# Final matches.
matches = torch.stack([ids1[mask], nn12[mask]], dim=-1)
return matches.data.cpu().numpy() | 2,567 | 33.702703 | 95 | py |
PoSFeat | PoSFeat-main/datasets/aachen.py | import torch
import numpy as np
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import skimage.io as io
from path import Path
import cv2
import torch.nn.functional as F
class Aachen_Day_Night(Dataset):
def __init__(self, configs):
super(Aachen_Day_Night, self).__init__()
self.configs = configs
self.transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
# self.imfs = []
self.sift = cv2.SIFT_create()
imdir = Path(self.configs['data_path'])
dbimgs = imdir.glob('db/*.jpg')
queryimgs = imdir.glob('query/*/*/*.jpg')
sequences1 = imdir.glob('sequences/gopro3_undistorted/*.png')
sequences2 = imdir.glob('sequences/nexus4_sequences/*/*.png')
self.imfs = dbimgs
self.imfs.extend(queryimgs)
self.imfs.extend(sequences1)
self.imfs.extend(sequences2)
self.imfs.sort()
def __getitem__(self, item):
imf = self.imfs[item]
im = io.imread(imf)
imf_split = imf.split('/')
if 'db' in imf_split:
name = imf_split[-2:]
name = '/'.join(name)
elif 'query' in imf_split:
name = imf_split[-4:]
name = '/'.join(name)
elif 'gopro3_undistorted' in imf_split:
name = imf_split[-3:]
name = '/'.join(name)
elif 'nexus4_sequences' in imf_split:
name = imf_split[-4:]
name = '/'.join(name)
im_tensor = self.transform(im)
c, h, w = im_tensor.shape
pad=(0,0,0,0)
# now use crop to get suitable size
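        # height and width are cropped to multiples of 16 (presumably the stride of the backbone),
        # so that the dense feature maps align exactly with the cropped image.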
crop_r = w%16
crop_b = h%16
im_tensor = im_tensor[:,:h-crop_b,:w-crop_r]
im = im[:h-crop_b,:w-crop_r,:]
gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
kpts = self.sift.detect(gray)
kpts = np.array([[kp.pt[0], kp.pt[1]] for kp in kpts])
coord = torch.from_numpy(kpts).float()
out = {'im1': im_tensor, 'im1_ori':im, 'coord1': coord, 'name1': name, 'pad1':pad}
return out
def __len__(self):
return len(self.imfs) | 2,354 | 35.230769 | 93 | py |
PoSFeat | PoSFeat-main/datasets/data_utils.py | import numpy as np
import cv2
import torch
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
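# skew(x) is the 3x3 cross-product matrix [x]_x, i.e. skew(x) @ v == np.cross(x, v); the dataset
# code uses it to form the essential matrix E = [t]_x R from a relative pose.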
def skew(x):
return np.array([[0, -x[2], x[1]],
[x[2], 0, -x[0]],
[-x[1], x[0], 0]])
def rotateImage(image, angle):
h, w = image.shape[:2]
angle_radius = np.abs(angle / 180. * np.pi)
cos = np.cos(angle_radius)
sin = np.sin(angle_radius)
tan = np.tan(angle_radius)
scale_h = (h / cos + (w - h * tan) * sin) / h
scale_w = (h / sin + (w - h / tan) * cos) / w
scale = max(scale_h, scale_w)
image_center = tuple(np.array(image.shape[1::-1]) / 2.)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, scale)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
rotation = np.eye(4)
rotation[:2, :2] = rot_mat[:2, :2]
return result, rotation
def perspective_transform(img, param=0.001):
h, w = img.shape[:2]
random_state = np.random.RandomState(None)
M = np.array([[1 - param + 2 * param * random_state.rand(),
-param + 2 * param * random_state.rand(),
-param + 2 * param * random_state.rand()],
[-param + 2 * param * random_state.rand(),
1 - param + 2 * param * random_state.rand(),
-param + 2 * param * random_state.rand()],
[-param + 2 * param * random_state.rand(),
-param + 2 * param * random_state.rand(),
1 - param + 2 * param * random_state.rand()]])
dst = cv2.warpPerspective(img, M, (w, h))
return dst, M
def generate_query_kpts(img, mode, num_pts, h, w):
# generate candidate query points
if mode == 'random':
kp1_x = np.random.rand(num_pts) * (w - 1)
kp1_y = np.random.rand(num_pts) * (h - 1)
coord = np.stack((kp1_x, kp1_y)).T
elif mode == 'sift':
gray1 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
sift = cv2.xfeatures2d.SIFT_create(nfeatures=num_pts)
kp1 = sift.detect(gray1)
coord = np.array([[kp.pt[0], kp.pt[1]] for kp in kp1])
elif mode == 'mixed':
kp1_x = np.random.rand(1 * int(0.1 * num_pts)) * (w - 1)
kp1_y = np.random.rand(1 * int(0.1 * num_pts)) * (h - 1)
kp1_rand = np.stack((kp1_x, kp1_y)).T
sift = cv2.xfeatures2d.SIFT_create(nfeatures=int(0.9 * num_pts))
gray1 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
kp1_sift = sift.detect(gray1)
kp1_sift = np.array([[kp.pt[0], kp.pt[1]] for kp in kp1_sift])
if len(kp1_sift) == 0:
coord = kp1_rand
else:
coord = np.concatenate((kp1_rand, kp1_sift), 0)
else:
raise Exception('unknown type of keypoints')
return coord
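# prune_kpts discards query keypoints that cannot have a correspondence in the second image:
# (1) the epipolar line l = F x1 (normalised so |l[:2]| = 1, making |corner . l| a point-line
# distance) provably misses image 2 when some corner lies farther from it than the image diagonal;
# (2) projecting the keypoint at the rough depth bounds d_min and d_max lands outside image 2 on
# the same side for both depths.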
def prune_kpts(coord1, F_gt, im2_size, intrinsic1, intrinsic2, pose, d_min, d_max):
# compute the epipolar lines corresponding to coord1
coord1_h = np.concatenate([coord1, np.ones_like(coord1[:, [0]])], axis=1).T # 3xn
epipolar_line = F_gt.dot(coord1_h) # 3xn
epipolar_line /= np.clip(np.linalg.norm(epipolar_line[:2], axis=0), a_min=1e-10, a_max=None) # 3xn
# determine whether the epipolar lines intersect with the second image
h2, w2 = im2_size
corners = np.array([[0, 0, 1], [0, h2 - 1, 1], [w2 - 1, 0, 1], [w2 - 1, h2 - 1, 1]]) # 4x3
dists = np.abs(corners.dot(epipolar_line))
# if the epipolar line is far away from any image corners than sqrt(h^2+w^2)
# it doesn't intersect with the image
non_intersect = (dists > np.sqrt(w2 ** 2 + h2 ** 2)).any(axis=0)
# determine if points in coord1 is likely to have correspondence in the other image by the rough depth range
intrinsic1_4x4 = np.eye(4)
intrinsic1_4x4[:3, :3] = intrinsic1
intrinsic2_4x4 = np.eye(4)
intrinsic2_4x4[:3, :3] = intrinsic2
coord1_h_min = np.concatenate([d_min * coord1,
d_min * np.ones_like(coord1[:, [0]]),
np.ones_like(coord1[:, [0]])], axis=1).T
coord1_h_max = np.concatenate([d_max * coord1,
d_max * np.ones_like(coord1[:, [0]]),
np.ones_like(coord1[:, [0]])], axis=1).T
coord2_h_min = intrinsic2_4x4.dot(pose).dot(np.linalg.inv(intrinsic1_4x4)).dot(coord1_h_min)
coord2_h_max = intrinsic2_4x4.dot(pose).dot(np.linalg.inv(intrinsic1_4x4)).dot(coord1_h_max)
coord2_min = coord2_h_min[:2] / (coord1_h_min[2] + 1e-10)
coord2_max = coord2_h_max[:2] / (coord1_h_max[2] + 1e-10)
out_range = ((coord2_min[0] < 0) & (coord2_max[0] < 0)) | \
((coord2_min[1] < 0) & (coord2_max[1] < 0)) | \
((coord2_min[0] > w2 - 1) & (coord2_max[0] > w2 - 1)) | \
((coord2_min[1] > h2 - 1) & (coord2_max[1] > h2 - 1))
ind_intersect = ~(non_intersect | out_range)
return ind_intersect
def random_choice(array, size):
rand = np.random.RandomState(1234)
num_data = len(array)
if num_data > size:
idx = rand.choice(num_data, size, replace=False)
else:
idx = rand.choice(num_data, size, replace=True)
return array[idx]
def tensor2array(tensor, max_value=None, colormap='coolwarm'):
def high_res_colormap(low_res_cmap, resolution=1000, max_value=1):
        # Construct the list colormap, with interpolated values for higher resolution
        # For a linear segmented colormap, you can just specify the number of points in
        # cm.get_cmap(name, lutsize) with the parameter lutsize
x = np.linspace(0, 1, low_res_cmap.N)
low_res = low_res_cmap(x)
new_x = np.linspace(0, max_value, resolution)
high_res = np.stack([np.interp(new_x, x, low_res[:, i])
for i in range(low_res.shape[1])], axis=1)
return ListedColormap(high_res)
def opencv_rainbow(resolution=1000):
# Construct the opencv equivalent of Rainbow
opencv_rainbow_data = (
(0.000, (1.00, 0.00, 0.00)),
(0.400, (1.00, 1.00, 0.00)),
(0.600, (0.00, 1.00, 0.00)),
(0.800, (0.00, 0.00, 1.00)),
(1.000, (0.60, 0.00, 1.00))
)
return LinearSegmentedColormap.from_list('opencv_rainbow', opencv_rainbow_data, resolution)
COLORMAPS = {'rainbow': opencv_rainbow(),
'magma': high_res_colormap(cm.get_cmap('magma')),
'bone': cm.get_cmap('bone', 10000),
'seismic':high_res_colormap(cm.get_cmap('seismic')),
'coolwarm':high_res_colormap(cm.get_cmap('coolwarm'))}
tensor = tensor.detach().cpu()
if max_value is None:
max_value = tensor.max().item()
if tensor.ndimension() == 2 or tensor.size(0) == 1:
norm_array = tensor.squeeze().numpy()/max_value
if colormap in list(COLORMAPS.keys()):
map_func = COLORMAPS[colormap]
else:
map_func = high_res_colormap(cm.get_cmap(colormap))
array = map_func(norm_array).astype(np.float32)
array = array.transpose(2, 0, 1)
elif tensor.ndimension() == 3:
# assert(tensor.size(0) == 3)
array = tensor.numpy()
return array | 7,228 | 41.274854 | 112 | py |
PoSFeat | PoSFeat-main/datasets/ETH_local_feature.py | import torch
import numpy as np
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import skimage.io as io
from path import Path
import cv2
import torch.nn.functional as F
class ETH_LFB(Dataset):
def __init__(self, configs):
"""
dataset for eth local feature benchmark
"""
super(ETH_LFB, self).__init__()
self.configs = configs
self.transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
# self.imfs = []
self.sift = cv2.SIFT_create()
imdir = Path(self.configs['data_path'])
folder_dir = imdir/self.configs['subfolder']
images_dir = folder_dir/'images'
imgs = images_dir.glob('*')
self.imfs = imgs
self.imfs.sort()
def __getitem__(self, item):
imf = self.imfs[item]
im = io.imread(imf)
name = imf.name
name = '{}/{}'.format(self.configs['subfolder'], name)
if len(im.shape) != 3: #gray images
im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)
im = im.copy()
im_tensor = self.transform(im) #
c, h, w = im_tensor.shape
# pad_b = 16 - h%16
# pad_r = 16 - w%16
# pad = (0,pad_r,0,pad_b)
# im_tensor = F.pad(im_tensor.unsqueeze(0), pad, mode='replicate').squeeze(0)
pad=(0,0,0,0)
# now use crop to get suitable size
crop_r = w%16
crop_b = h%16
im_tensor = im_tensor[:,:h-crop_b,:w-crop_r]
im = im[:h-crop_b,:w-crop_r,:]
# using sift keypoints
gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
kpts = self.sift.detect(gray)
kpts = np.array([[kp.pt[0], kp.pt[1]] for kp in kpts])
coord = torch.from_numpy(kpts).float()
out = {'im1': im_tensor, 'im1_ori':im, 'coord1': coord, 'name1': name, 'pad1':pad}
return out
def __len__(self):
return len(self.imfs) | 2,147 | 34.8 | 93 | py |
PoSFeat | PoSFeat-main/datasets/hpatches.py | import torch
import numpy as np
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import skimage.io as io
from path import Path
import cv2
import torch.nn.functional as F
class HPatch_SIFT(Dataset):
def __init__(self, configs):
super(HPatch_SIFT, self).__init__()
self.configs = configs
self.transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
# self.imfs = []
self.sift = cv2.SIFT_create()
imdir = Path(self.configs['data_path'])
self.imfs = imdir.glob('*/*.ppm')
self.imfs.sort()
def __getitem__(self, item):
imf = self.imfs[item]
im = io.imread(imf)
name = imf.split('/')[-2:]
name = '/'.join(name)
im_tensor = self.transform(im)
c, h, w = im_tensor.shape
pad=(0,0,0,0)
# now use crop to get suitable size
crop_r = w%16
crop_b = h%16
im_tensor = im_tensor[:,:h-crop_b,:w-crop_r]
im = im[:h-crop_b,:w-crop_r,:]
gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
kpts = self.sift.detect(gray)
kpts = np.array([[kp.pt[0], kp.pt[1]] for kp in kpts])
coord = torch.from_numpy(kpts).float()
out = {'im1': im_tensor, 'im1_ori':im, 'coord1': coord, 'name1': name, 'pad1':pad}
return out
def __len__(self):
return len(self.imfs) | 1,618 | 33.446809 | 93 | py |
PoSFeat | PoSFeat-main/datasets/megadepth.py | import torch
from torch.utils.data import Dataset
import os
import numpy as np
import cv2
import skimage.io as io
import torchvision.transforms as transforms
# import utils
import collections
from tqdm import tqdm
from path import Path
import datasets.data_utils as data_utils
rand = np.random.RandomState(234)
class MegaDepth_superpoint(Dataset):
def __init__(self, configs, is_train=True):
super(MegaDepth_superpoint, self).__init__()
if is_train:
self.transform = transforms.Compose([transforms.ToPILImage(),
transforms.ColorJitter
(brightness=1, contrast=1, saturation=1, hue=0.4),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
else:
self.transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
self.is_train = is_train
self.configs = configs
self.root = Path(self.configs['data_path'])
self.images = self.read_img_cam()
self.imf1s, self.imf2s = self.read_pairs()
print('total number of image pairs loaded: {}'.format(len(self.imf1s)))
# shuffle data
index = np.arange(len(self.imf1s))
rand.shuffle(index)
self.imf1s = list(np.array(self.imf1s)[index])
self.imf2s = list(np.array(self.imf2s)[index])
def read_img_cam(self):
images = {}
Image = collections.namedtuple(
"Image", ["name", "w", "h", "fx", "fy", "cx", "cy", "rvec", "tvec"])
for scene_id in os.listdir(self.root):
densefs = [f for f in os.listdir(os.path.join(self.root, scene_id))
if 'dense' in f and os.path.isdir(os.path.join(self.root, scene_id, f))]
for densef in densefs:
folder = self.root/'{}/{}/aligned'.format(scene_id, densef) #os.path.join(self.root, scene_id, densef, 'aligned')
img_cam_txt_path = folder/'img_cam.txt' #os.path.join(folder, 'img_cam.txt')
with open(img_cam_txt_path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_name = elems[0]
img_path = folder/'images/'+image_name #os.path.join(folder, 'images', image_name)
w, h = int(elems[1]), int(elems[2])
fx, fy = float(elems[3]), float(elems[4])
cx, cy = float(elems[5]), float(elems[6])
R = np.array(elems[7:16])
T = np.array(elems[16:19])
                            images[img_path] = Image(
                                name=image_name, w=w, h=h, fx=fx, fy=fy, cx=cx, cy=cy, rvec=R, tvec=T
                            )
return images
def read_pairs(self):
imf1s, imf2s = [], []
print('reading image pairs from {}...'.format(self.root))
for scene_id in tqdm(os.listdir(self.root), desc='# loading data from scene folders'):
densefs = [f for f in os.listdir(os.path.join(self.root, scene_id))
if 'dense' in f and os.path.isdir(os.path.join(self.root, scene_id, f))]
for densef in densefs:
imf1s_ = []
imf2s_ = []
folder = self.root/'{}/{}/aligned'.format(scene_id, densef)
pairf = folder/'pairs.txt' #os.path.join(folder, 'pairs.txt')
if os.path.exists(pairf):
f = open(pairf, 'r')
for line in f:
imf1, imf2 = line.strip().split(' ')
imf1s_.append(folder/'images/'+imf1)
imf2s_.append(folder/'images/'+imf2)
# imf1s_.append(os.path.join(folder, 'images', imf1))
# imf2s_.append(os.path.join(folder, 'images', imf2))
# make # image pairs per scene more balanced
if len(imf1s_) > 5000:
index = np.arange(len(imf1s_))
rand.shuffle(index)
imf1s_ = list(np.array(imf1s_)[index[:5000]])
imf2s_ = list(np.array(imf2s_)[index[:5000]])
imf1s.extend(imf1s_)
imf2s.extend(imf2s_)
return imf1s, imf2s
@staticmethod
def get_intrinsics(im_meta):
return np.array([[im_meta.fx, 0, im_meta.cx],
[0, im_meta.fy, im_meta.cy],
[0, 0, 1]])
@staticmethod
def get_point_labels(file_path):
label_root = file_path.dirname().dirname()
name = file_path.name().replace('jpg','npz')
label_file = np.load(label_root/name)['pts']
label_file = label_file[:,:2]
return label_file
@staticmethod
def get_extrinsics(im_meta):
R = im_meta.rvec.reshape(3, 3)
t = im_meta.tvec
extrinsic = np.eye(4)
extrinsic[:3, :3] = R
extrinsic[:3, 3] = t
return extrinsic
def __getitem__(self, item):
imf1 = self.imf1s[item]
imf2 = self.imf2s[item]
im1_meta = self.images[imf1]
im2_meta = self.images[imf2]
im1 = io.imread(imf1)
im2 = io.imread(imf2)
h, w = im1.shape[:2]
intrinsic1 = self.get_intrinsics(im1_meta)
intrinsic2 = self.get_intrinsics(im2_meta)
extrinsic1 = self.get_extrinsics(im1_meta)
extrinsic2 = self.get_extrinsics(im2_meta)
relative = extrinsic2.dot(np.linalg.inv(extrinsic1))
R = relative[:3, :3]
# remove pairs that have a relative rotation angle larger than 80 degrees
theta = np.arccos(np.clip((np.trace(R) - 1) / 2, -1, 1)) * 180 / np.pi
if theta > self.configs['rot_thr'] and self.is_train:
item += 1
if item >= self.__len__():
item =0
return self.__getitem__(item)
T = relative[:3, 3]
tx = data_utils.skew(T)
E_gt = np.dot(tx, R)
F_gt = np.linalg.inv(intrinsic2).T.dot(E_gt).dot(np.linalg.inv(intrinsic1))
relative2 = extrinsic1.dot(np.linalg.inv(extrinsic2))
R2 = relative2[:3, :3]
# remove pairs that have a relative rotation angle larger than 80 degrees
theta2 = np.arccos(np.clip((np.trace(R2) - 1) / 2, -1, 1)) * 180 / np.pi
if theta2 > self.configs['rot_thr'] and self.is_train:
item += 1
if item >= self.__len__():
item =0
return self.__getitem__(item)
T2 = relative2[:3, 3]
tx2 = data_utils.skew(T2)
E_gt2 = np.dot(tx2, R2)
F_gt2 = np.linalg.inv(intrinsic1).T.dot(E_gt2).dot(np.linalg.inv(intrinsic2))
# generate candidate query points
# coord1 = data_utils.generate_query_kpts(im1, self.args.train_kp, 10*self.args.num_pts, h, w)
coord1 = self.get_point_labels(imf1)
coord2 = self.get_point_labels(imf2)
# if no keypoints are detected
if len(coord1) == 0 or len(coord2) == 0:
item += 1
if item >= self.__len__():
item =0
return self.__getitem__(item)
# prune query keypoints that are not likely to have correspondence in the other image
if self.configs['prune_kp']:
ind_intersect = data_utils.prune_kpts(coord1[:,:2], F_gt, im2.shape[:2], intrinsic1, intrinsic2,
relative, d_min=4, d_max=400)
if np.sum(ind_intersect) == 0:
item += 1
if item >= self.__len__():
item =0
return self.__getitem__(item)
coord1 = coord1[ind_intersect]
ind_intersect2 = data_utils.prune_kpts(coord2[:,:2], F_gt2, im1.shape[:2], intrinsic2, intrinsic1,
relative2, d_min=4, d_max=400)
if np.sum(ind_intersect2) == 0:
item += 1
if item >= self.__len__():
item =0
return self.__getitem__(item)
coord2 = coord2[ind_intersect2]
if len(coord1) < self.configs['num_pts'] or len(coord2) < self.configs['num_pts']:
item += 1
if item >= self.__len__():
item =0
return self.__getitem__(item)
coord1 = data_utils.random_choice(coord1, self.configs['num_pts'])
coord1 = torch.from_numpy(coord1).float()
coord2 = data_utils.random_choice(coord2, self.configs['num_pts'])
coord2 = torch.from_numpy(coord2).float()
im1_ori, im2_ori = torch.from_numpy(im1), torch.from_numpy(im2)
F_gt = torch.from_numpy(F_gt).float() / (F_gt[-1, -1] + 1e-10)
F_gt2 = torch.from_numpy(F_gt2).float() / (F_gt2[-1, -1] + 1e-10)
intrinsic1 = torch.from_numpy(intrinsic1).float()
intrinsic2 = torch.from_numpy(intrinsic2).float()
pose = torch.from_numpy(relative[:3, :]).float()
pose2 = torch.from_numpy(relative2[:3, :]).float()
im1_tensor = self.transform(im1)
im2_tensor = self.transform(im2)
out = {'im1': im1_tensor,
'im2': im2_tensor,
'im1_ori': im1_ori,
'im2_ori': im2_ori,
'pose1': pose,
'pose2': pose2,
'F1': F_gt,
'F2': F_gt2,
'intrinsic1': intrinsic1,
'intrinsic2': intrinsic2,
'coord1': coord1,
'coord2': coord2}
return out
def __len__(self):
return len(self.imf1s)
class MegaDepth_SIFT(Dataset):
def __init__(self, configs, is_train=True):
super(MegaDepth_SIFT, self).__init__()
if is_train:
self.transform = transforms.Compose([transforms.ToPILImage(),
transforms.ColorJitter
(brightness=1, contrast=1, saturation=1, hue=0.4),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
else:
self.transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
self.is_train = is_train
self.configs = configs
self.root = Path(self.configs['data_path'])
self.images = self.read_img_cam()
self.imf1s, self.imf2s = self.read_pairs()
print('total number of image pairs loaded: {}'.format(len(self.imf1s)))
# shuffle data
index = np.arange(len(self.imf1s))
rand.shuffle(index)
self.imf1s = list(np.array(self.imf1s)[index])
self.imf2s = list(np.array(self.imf2s)[index])
def read_img_cam(self):
images = {}
Image = collections.namedtuple(
"Image", ["name", "w", "h", "fx", "fy", "cx", "cy", "rvec", "tvec"])
for scene_id in self.root.listdir():
if not scene_id.isdir():
continue
densefs = [f.name for f in scene_id.listdir()
if 'dense' in f.name and f.isdir()]
for densef in densefs:
folder = scene_id/'{}/aligned'.format(densef) #os.path.join(self.root, scene_id, densef, 'aligned')
img_cam_txt_path = folder/'img_cam.txt' #os.path.join(folder, 'img_cam.txt')
with open(img_cam_txt_path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_name = elems[0]
img_path = folder/'images/'+image_name #os.path.join(folder, 'images', image_name)
w, h = int(elems[1]), int(elems[2])
fx, fy = float(elems[3]), float(elems[4])
cx, cy = float(elems[5]), float(elems[6])
R = np.array(elems[7:16])
T = np.array(elems[16:19])
images[img_path] = Image(
name=image_name, w=w, h=h, fx=fx, fy=fy, cx=cx, cy=cy, rvec=R, tvec=T
)
return images
def read_pairs(self):
imf1s, imf2s = [], []
print('reading image pairs from {}...'.format(self.root))
for scene_id in tqdm(self.root.listdir(), desc='# loading data from scene folders'):
if not scene_id.isdir():
continue
densefs = [f.name for f in scene_id.listdir()
if 'dense' in f.name and f.isdir()]
for densef in densefs:
imf1s_ = []
imf2s_ = []
folder = scene_id/'{}/aligned'.format(densef)
pairf = folder/'pairs.txt' #os.path.join(folder, 'pairs.txt')
if os.path.exists(pairf):
f = open(pairf, 'r')
for line in f:
imf1, imf2 = line.strip().split(' ')
imf1s_.append(folder/'images/'+imf1)
imf2s_.append(folder/'images/'+imf2)
# imf1s_.append(os.path.join(folder, 'images', imf1))
# imf2s_.append(os.path.join(folder, 'images', imf2))
# make # image pairs per scene more balanced
if len(imf1s_) > 5000:
index = np.arange(len(imf1s_))
rand.shuffle(index)
imf1s_ = list(np.array(imf1s_)[index[:5000]])
imf2s_ = list(np.array(imf2s_)[index[:5000]])
imf1s.extend(imf1s_)
imf2s.extend(imf2s_)
return imf1s, imf2s
@staticmethod
def get_intrinsics(im_meta):
return np.array([[im_meta.fx, 0, im_meta.cx],
[0, im_meta.fy, im_meta.cy],
[0, 0, 1]])
# @staticmethod
def generate_query_kpts(self, img, num_pts, h, w, mode='mixed'):
# generate candidate query points
if mode == 'random':
kp1_x = np.random.rand(num_pts) * (w - 1)
kp1_y = np.random.rand(num_pts) * (h - 1)
coord = np.stack((kp1_x, kp1_y, np.zeros(kp1_x.shape))).T
elif mode == 'sift':
gray1 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# sift = cv2.xfeatures2d.SIFT_create(nfeatures=num_pts)
sift = cv2.SIFT_create(nfeatures=num_pts)
kp1 = sift.detect(gray1)
coord = np.array([[kp.pt[0], kp.pt[1], 1] for kp in kp1])
elif mode == 'mixed':
kp1_x = np.random.rand(1 * int(self.configs['random_percent'] * num_pts)) * (w - 1)
kp1_y = np.random.rand(1 * int(self.configs['random_percent'] * num_pts)) * (h - 1)
kp1_rand = np.stack((kp1_x, kp1_y, np.zeros(kp1_x.shape))).T
# sift = cv2.xfeatures2d.SIFT_create(nfeatures=int(0.5 * num_pts))
sift = cv2.SIFT_create(nfeatures=int((1-self.configs['random_percent']) * num_pts))
# sift = cv2.SIFT_create()
gray1 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
kp1_sift = sift.detect(gray1)
kp1_sift = np.array([[kp.pt[0], kp.pt[1], 1] for kp in kp1_sift])
if len(kp1_sift) == 0:
coord = kp1_rand
else:
coord = np.concatenate((kp1_rand, kp1_sift), 0)
else:
raise Exception('unknown type of keypoints')
return coord
@staticmethod
def get_extrinsics(im_meta):
R = im_meta.rvec.reshape(3, 3)
t = im_meta.tvec
extrinsic = np.eye(4)
extrinsic[:3, :3] = R
extrinsic[:3, 3] = t
return extrinsic
def get_data_aug(self, item):
if torch.rand(1) < 0.5:
imf1 = self.imf1s[item]
else:
imf1 = self.imf2s[item]
im1_meta = self.images[imf1]
im1 = io.imread(imf1)
def __getitem__(self, item):
imf1 = self.imf1s[item]
imf2 = self.imf2s[item]
im1_meta = self.images[imf1]
im2_meta = self.images[imf2]
im1 = io.imread(imf1)
im2 = io.imread(imf2)
h1, w1 = im1.shape[:2]
h2, w2 = im2.shape[:2]
intrinsic1 = self.get_intrinsics(im1_meta)
intrinsic2 = self.get_intrinsics(im2_meta)
extrinsic1 = self.get_extrinsics(im1_meta)
extrinsic2 = self.get_extrinsics(im2_meta)
relative = extrinsic2.dot(np.linalg.inv(extrinsic1))
R = relative[:3, :3]
# remove pairs that have a relative rotation angle larger than 80 degrees
theta = np.arccos(np.clip((np.trace(R) - 1) / 2, -1, 1)) * 180 / np.pi
if theta > self.configs['rot_thr'] and self.is_train:
return None
T = relative[:3, 3]
tx = data_utils.skew(T)
E_gt = np.dot(tx, R)
F_gt = np.linalg.inv(intrinsic2).T.dot(E_gt).dot(np.linalg.inv(intrinsic1))
relative2 = extrinsic1.dot(np.linalg.inv(extrinsic2))
R2 = relative2[:3, :3]
# remove pairs that have a relative rotation angle larger than 80 degrees
theta2 = np.arccos(np.clip((np.trace(R2) - 1) / 2, -1, 1)) * 180 / np.pi
if theta2 > self.configs['rot_thr'] and self.is_train:
return None
T2 = relative2[:3, 3]
tx2 = data_utils.skew(T2)
E_gt2 = np.dot(tx2, R2)
F_gt2 = np.linalg.inv(intrinsic1).T.dot(E_gt2).dot(np.linalg.inv(intrinsic2))
# generate candidate query points
# coord1 = data_utils.generate_query_kpts(im1, self.args.train_kp, 10*self.args.num_pts, h, w)
coord1 = self.generate_query_kpts(im1, 10*self.configs['num_pts'], h1, w1)
coord2 = self.generate_query_kpts(im2, 10*self.configs['num_pts'], h2, w2)
# if no keypoints are detected
if len(coord1) == 0 or len(coord2) == 0:
return None
# prune query keypoints that are not likely to have correspondence in the other image
if self.configs['prune_kp']:
ind_intersect = data_utils.prune_kpts(coord1[:,:2], F_gt, im2.shape[:2], intrinsic1, intrinsic2,
relative, d_min=4, d_max=400)
if np.sum(ind_intersect) == 0:
return None
coord1 = coord1[ind_intersect]
ind_intersect2 = data_utils.prune_kpts(coord2[:,:2], F_gt2, im1.shape[:2], intrinsic2, intrinsic1,
relative2, d_min=4, d_max=400)
if np.sum(ind_intersect2) == 0:
return None
coord2 = coord2[ind_intersect2]
if len(coord1) < self.configs['num_pts'] or len(coord2) < self.configs['num_pts']:
return None
coord1 = data_utils.random_choice(coord1, self.configs['num_pts'])
coord1 = torch.from_numpy(coord1).float()
coord2 = data_utils.random_choice(coord2, self.configs['num_pts'])
coord2 = torch.from_numpy(coord2).float()
im1_ori, im2_ori = torch.from_numpy(im1), torch.from_numpy(im2)
F_gt = torch.from_numpy(F_gt).float() / (F_gt[-1, -1] + 1e-10)
F_gt2 = torch.from_numpy(F_gt2).float() / (F_gt2[-1, -1] + 1e-10)
intrinsic1 = torch.from_numpy(intrinsic1).float()
intrinsic2 = torch.from_numpy(intrinsic2).float()
pose = torch.from_numpy(relative[:3, :]).float()
pose2 = torch.from_numpy(relative2[:3, :]).float()
im1_tensor = self.transform(im1)
im2_tensor = self.transform(im2)
no_cuda = ('name1', 'name2')
out = {'im1': im1_tensor,
'im2': im2_tensor,
'im1_ori': im1_ori,
'im2_ori': im2_ori,
'pose1': pose,
'pose2': pose2,
'F1': F_gt,
'F2': F_gt2,
'intrinsic1': intrinsic1,
'intrinsic2': intrinsic2,
'coord1': coord1,
'coord2': coord2,
'name1':im1_meta.name,
'name2':im2_meta.name}
return out
def __len__(self):
return len(self.imf1s)
class MegaDepth_Depth(Dataset):
def __init__(self, configs, is_train=True):
super(MegaDepth_Depth, self).__init__()
if is_train:
self.transform = transforms.Compose([transforms.ToPILImage(),
transforms.ColorJitter
(brightness=1, contrast=1, saturation=1, hue=0.4),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
else:
self.transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
self.is_train = is_train
self.configs = configs
self.root = Path(self.configs['data_path'])
self.images = self.read_img_cam()
self.imf1s, self.imf2s = self.read_pairs()
print('total number of image pairs loaded: {}'.format(len(self.imf1s)))
# shuffle data
index = np.arange(len(self.imf1s))
rand.shuffle(index)
self.imf1s = list(np.array(self.imf1s)[index])
self.imf2s = list(np.array(self.imf2s)[index])
def read_img_cam(self):
images = {}
Image = collections.namedtuple(
"Image", ["name", "w", "h", "fx", "fy", "cx", "cy", "rvec", "tvec", "depth"])
for scene_id in self.root.listdir():
if not scene_id.isdir():
continue
densefs = [f.name for f in scene_id.listdir()
if 'dense' in f.name and f.isdir()]
for densef in densefs:
folder = scene_id/'{}/aligned'.format(densef) #os.path.join(self.root, scene_id, densef, 'aligned')
img_cam_txt_path = folder/'img_cam.txt' #os.path.join(folder, 'img_cam.txt')
with open(img_cam_txt_path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_name = elems[0]
img_path = folder/'images/'+image_name #os.path.join(folder, 'images', image_name)
                            depth_path = folder/'depths/'+image_name.replace('.jpg','.h5')
w, h = int(elems[1]), int(elems[2])
fx, fy = float(elems[3]), float(elems[4])
cx, cy = float(elems[5]), float(elems[6])
R = np.array(elems[7:16])
T = np.array(elems[16:19])
images[img_path] = Image(
name=image_name, w=w, h=h, fx=fx, fy=fy, cx=cx, cy=cy, rvec=R, tvec=T, depth=depth_path
)
return images
def read_pairs(self):
imf1s, imf2s = [], []
print('reading image pairs from {}...'.format(self.root))
for scene_id in tqdm(self.root.listdir(), desc='# loading data from scene folders'):
if not scene_id.isdir():
continue
densefs = [f.name for f in scene_id.listdir()
if 'dense' in f.name and f.isdir()]
for densef in densefs:
imf1s_ = []
imf2s_ = []
folder = scene_id/'{}/aligned'.format(densef)
pairf = folder/'pairs.txt' #os.path.join(folder, 'pairs.txt')
if os.path.exists(pairf):
f = open(pairf, 'r')
for line in f:
imf1, imf2 = line.strip().split(' ')
imf1s_.append(folder/'images/'+imf1)
imf2s_.append(folder/'images/'+imf2)
# imf1s_.append(os.path.join(folder, 'images', imf1))
# imf2s_.append(os.path.join(folder, 'images', imf2))
# make # image pairs per scene more balanced
if len(imf1s_) > 5000:
index = np.arange(len(imf1s_))
rand.shuffle(index)
imf1s_ = list(np.array(imf1s_)[index[:5000]])
imf2s_ = list(np.array(imf2s_)[index[:5000]])
imf1s.extend(imf1s_)
imf2s.extend(imf2s_)
return imf1s, imf2s
@staticmethod
def get_intrinsics(im_meta):
return np.array([[im_meta.fx, 0, im_meta.cx],
[0, im_meta.fy, im_meta.cy],
[0, 0, 1]])
# @staticmethod
def generate_query_kpts(self, img, num_pts, h, w, mode='mixed'):
"""
Although we define this function, the key points here are not used. Actually, the keypoints used during training
are generated in the preprocess step.
我们参照caps的代码定义了这个函数,以方便进行ablation,但我们的方法并不会用到这里的关键点,训练中的关键点是在
preprocess步骤中生成的
"""
# generate candidate query points
if mode == 'random':
kp1_x = np.random.rand(num_pts) * (w - 1)
kp1_y = np.random.rand(num_pts) * (h - 1)
coord = np.stack((kp1_x, kp1_y, np.zeros(kp1_x.shape))).T
elif mode == 'sift':
gray1 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# sift = cv2.xfeatures2d.SIFT_create(nfeatures=num_pts)
sift = cv2.SIFT_create(nfeatures=num_pts)
kp1 = sift.detect(gray1)
coord = np.array([[kp.pt[0], kp.pt[1], 1] for kp in kp1])
elif mode == 'mixed':
kp1_x = np.random.rand(1 * int(self.configs['random_percent'] * num_pts)) * (w - 1)
kp1_y = np.random.rand(1 * int(self.configs['random_percent'] * num_pts)) * (h - 1)
kp1_rand = np.stack((kp1_x, kp1_y, np.zeros(kp1_x.shape))).T
# sift = cv2.xfeatures2d.SIFT_create(nfeatures=int(0.5 * num_pts))
sift = cv2.SIFT_create(nfeatures=int((1-self.configs['random_percent']) * num_pts))
# sift = cv2.SIFT_create()
gray1 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
kp1_sift = sift.detect(gray1)
kp1_sift = np.array([[kp.pt[0], kp.pt[1], 1] for kp in kp1_sift])
if len(kp1_sift) == 0:
coord = kp1_rand
else:
coord = np.concatenate((kp1_rand, kp1_sift), 0)
else:
raise Exception('unknown type of keypoints')
return coord
@staticmethod
def get_extrinsics(im_meta):
R = im_meta.rvec.reshape(3, 3)
t = im_meta.tvec
extrinsic = np.eye(4)
extrinsic[:3, :3] = R
extrinsic[:3, 3] = t
return extrinsic
def get_data_aug(self, item):
if torch.rand(1) < 0.5:
imf1 = self.imf1s[item]
else:
imf1 = self.imf2s[item]
im1_meta = self.images[imf1]
im1 = io.imread(imf1)
def __getitem__(self, item):
imf1 = self.imf1s[item]
imf2 = self.imf2s[item]
im1_meta = self.images[imf1]
im2_meta = self.images[imf2]
im1 = io.imread(imf1)
im2 = io.imread(imf2)
h1, w1 = im1.shape[:2]
h2, w2 = im2.shape[:2]
intrinsic1 = self.get_intrinsics(im1_meta)
intrinsic2 = self.get_intrinsics(im2_meta)
extrinsic1 = self.get_extrinsics(im1_meta)
extrinsic2 = self.get_extrinsics(im2_meta)
relative = extrinsic2.dot(np.linalg.inv(extrinsic1))
R = relative[:3, :3]
# remove pairs that have a relative rotation angle larger than 80 degrees
theta = np.arccos(np.clip((np.trace(R) - 1) / 2, -1, 1)) * 180 / np.pi
if theta > self.configs['rot_thr'] and self.is_train:
item += 1
if item >= self.__len__():
item =0
return self.__getitem__(item)
T = relative[:3, 3]
tx = data_utils.skew(T)
E_gt = np.dot(tx, R)
F_gt = np.linalg.inv(intrinsic2).T.dot(E_gt).dot(np.linalg.inv(intrinsic1))
relative2 = extrinsic1.dot(np.linalg.inv(extrinsic2))
R2 = relative2[:3, :3]
# remove pairs that have a relative rotation angle larger than 80 degrees
theta2 = np.arccos(np.clip((np.trace(R2) - 1) / 2, -1, 1)) * 180 / np.pi
if theta2 > self.configs['rot_thr'] and self.is_train:
return None
T2 = relative2[:3, 3]
tx2 = data_utils.skew(T2)
E_gt2 = np.dot(tx2, R2)
F_gt2 = np.linalg.inv(intrinsic1).T.dot(E_gt2).dot(np.linalg.inv(intrinsic2))
# generate candidate query points
# coord1 = data_utils.generate_query_kpts(im1, self.args.train_kp, 10*self.args.num_pts, h, w)
coord1 = self.generate_query_kpts(im1, 10*self.configs['num_pts'], h1, w1)
coord2 = self.generate_query_kpts(im2, 10*self.configs['num_pts'], h2, w2)
# if no keypoints are detected
if len(coord1) == 0 or len(coord2) == 0:
item += 1
if item >= self.__len__():
item =0
return self.__getitem__(item)
# prune query keypoints that are not likely to have correspondence in the other image
if self.configs['prune_kp']:
ind_intersect = data_utils.prune_kpts(coord1[:,:2], F_gt, im2.shape[:2], intrinsic1, intrinsic2,
relative, d_min=4, d_max=400)
if np.sum(ind_intersect) == 0:
item += 1
if item >= self.__len__():
item =0
return self.__getitem__(item)
coord1 = coord1[ind_intersect]
ind_intersect2 = data_utils.prune_kpts(coord2[:,:2], F_gt2, im1.shape[:2], intrinsic2, intrinsic1,
relative2, d_min=4, d_max=400)
if np.sum(ind_intersect2) == 0:
item += 1
if item >= self.__len__():
item =0
return self.__getitem__(item)
coord2 = coord2[ind_intersect2]
if len(coord1) < self.configs['num_pts'] or len(coord2) < self.configs['num_pts']:
item += 1
if item >= self.__len__():
item =0
return self.__getitem__(item)
coord1 = data_utils.random_choice(coord1, self.configs['num_pts'])
coord1 = torch.from_numpy(coord1).float()
coord2 = data_utils.random_choice(coord2, self.configs['num_pts'])
coord2 = torch.from_numpy(coord2).float()
im1_ori, im2_ori = torch.from_numpy(im1), torch.from_numpy(im2)
F_gt = torch.from_numpy(F_gt).float() / (F_gt[-1, -1] + 1e-10)
F_gt2 = torch.from_numpy(F_gt2).float() / (F_gt2[-1, -1] + 1e-10)
intrinsic1 = torch.from_numpy(intrinsic1).float()
intrinsic2 = torch.from_numpy(intrinsic2).float()
pose = torch.from_numpy(relative[:3, :]).float()
pose2 = torch.from_numpy(relative2[:3, :]).float()
im1_tensor = self.transform(im1)
im2_tensor = self.transform(im2)
no_cuda = ('name1', 'name2')
depth = h5py.File(im1_meta.depth, 'r')['depth'][:]
depth = cv2.resize(depth, (640,480))
depth = torch.from_numpy(depth).float()
out = {'im1': im1_tensor,
'im2': im2_tensor,
'im1_ori': im1_ori,
'im2_ori': im2_ori,
'pose1': pose,
'pose2': pose2,
'F1': F_gt,
'F2': F_gt2,
'intrinsic1': intrinsic1,
'intrinsic2': intrinsic2,
'coord1': coord1,
'coord2': coord2,
'name1':im1_meta.name,
'name2':im2_meta.name,
'depth': depth}
return out
def __len__(self):
return len(self.imf1s) | 34,226 | 41.837297 | 129 | py |
PoSFeat | PoSFeat-main/losses/epipolarloss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .preprocess_utils import *
class EpipolarLoss_full(nn.Module):
def __init__(self, configs, device=None):
super(EpipolarLoss_full, self).__init__()
self.__lossname__ = 'EpipolarLoss_fullinfo'
self.config = configs
self.w_g = self.config['weight_grid']
self.w_w = self.config['weight_window']
def epipolar_cost(self, coord1, coord2, fmatrix, im_size):
coord1_h = homogenize(coord1).transpose(1, 2)
coord2_h = homogenize(coord2).transpose(1, 2)
epipolar_line = fmatrix.bmm(coord1_h) # Bx3xn
epipolar_line_ = epipolar_line / torch.clamp(torch.norm(epipolar_line[:, :2, :], dim=1, keepdim=True), min=1e-8)
essential_cost = torch.abs(torch.sum(coord2_h * epipolar_line_, dim=1)) # Bxn
return essential_cost
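    # Note on the geometry above: for a query point x1 in image 1, epipolar_line = F @ [x1; 1]
    # gives the coefficients (a, b, c) of its epipolar line in image 2. After dividing by
    # sqrt(a^2 + b^2), |line . [x2; 1]| is the perpendicular distance in pixels from a
    # predicted correspondence x2 to that line, which is the cost returned here.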
def set_weight(self, inverse_std, mask=None, regularizer=0.0):
if self.config['use_std_as_weight']:
# inverse_std = 1. / torch.clamp(std+regularizer, min=1e-10)
weight = inverse_std / torch.mean(inverse_std)
weight = weight.detach() # Bxn
else:
            weight = torch.ones_like(inverse_std)
if mask is not None:
weight *= mask.float()
weight /= (torch.mean(weight) + 1e-8)
return weight
def forward(self, inputs, outputs, processed):
coord1 = processed['coord1']
coord2 = processed['coord2']
temperature = processed['temperature']
feat1g_corloc = processed['feat1g_corloc']
feat2g_corloc = processed['feat2g_corloc']
feat1w_corloc = processed['feat1w_corloc']
feat2w_corloc = processed['feat2w_corloc']
feat1g_std = processed['feat1g_std']
feat2g_std = processed['feat2g_std']
feat1w_std = processed['feat1w_std']
feat2w_std = processed['feat2w_std']
Fmat1 = inputs['F1']
Fmat2 = inputs['F2']
im_size1 = inputs['im1'].size()[2:]
im_size2 = inputs['im2'].size()[2:]
shorter_edge, longer_edge = min(im_size1), max(im_size1)
cost_g1 = self.epipolar_cost(coord1, feat1g_corloc, Fmat1, im_size1)
cost_w1 = self.epipolar_cost(coord1, feat1w_corloc, Fmat1, im_size1)
cost_g2 = self.epipolar_cost(coord2, feat2g_corloc, Fmat2, im_size2)
cost_w2 = self.epipolar_cost(coord2, feat2w_corloc, Fmat2, im_size2)
        # filter out abnormally large epipolar costs (outliers), following CAPS
mask_g1 = cost_g1 < (shorter_edge*self.config['grid_cost_thr'])
mask_w1 = cost_w1 < (shorter_edge*self.config['win_cost_thr'])
mask_g2 = cost_g2 < (shorter_edge*self.config['grid_cost_thr'])
mask_w2 = cost_w2 < (shorter_edge*self.config['win_cost_thr'])
if 'valid_epi1' in list(processed.keys()):
mask_g1 = mask_g1 & processed['valid_epi1']
mask_w1 = mask_w1 & processed['valid_epi1']
mask_g2 = mask_g2 & processed['valid_epi2']
mask_w2 = mask_w2 & processed['valid_epi2']
weight_w1 = 1
weight_w2 = 1
weight_g1 = self.set_weight(1/feat1g_std.clamp(min=1e-10), mask_g1)
weight_w1 = self.set_weight(weight_w1/feat1w_std.clamp(min=1e-10), mask_w1)
weight_g2 = self.set_weight(1/feat2g_std.clamp(min=1e-10), mask_g2)
weight_w2 = self.set_weight(weight_w2/feat2w_std.clamp(min=1e-10), mask_w2)
loss_g1 = (weight_g1*cost_g1).mean()
loss_w1 = (weight_w1*cost_w1).mean()
loss_g2 = (weight_g2*cost_g2).mean()
loss_w2 = (weight_w2*cost_w2).mean()
loss = self.w_g*(loss_g1+loss_g2)+self.w_w*(loss_w1+loss_w2)
percent_g = (mask_g1.sum()/(mask_g1.shape[0]*mask_g1.shape[1]) + mask_g2.sum()/(mask_g2.shape[0]*mask_g2.shape[1]))/2
percent_w = (mask_w1.sum()/(mask_w1.shape[0]*mask_w1.shape[1]) + mask_w2.sum()/(mask_w2.shape[0]*mask_w2.shape[1]))/2
components = {
'loss_g1': loss_g1, 'loss_w1':loss_w1,
'loss_g2':loss_g2, 'loss_w2':loss_w2,
'percent_g':percent_g, 'percent_w':percent_w
}
return loss, components | 4,185 | 40.445545 | 126 | py |
PoSFeat | PoSFeat-main/losses/kploss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .preprocess_utils import *
from torch.distributions import Categorical, Bernoulli
class DiskLoss(nn.Module):
def __init__(self, configs, device=None):
super(DiskLoss, self).__init__()
self.__lossname__ = 'DiskLoss'
self.config = configs
self.unfold_size = self.config['grid_size']
self.t_base = self.config['temperature_base']
self.t_max = self.config['temperature_max']
self.reward = getattr(self, self.config['epipolar_reward'])
self.good_reward = self.config['good_reward']
self.bad_reward = self.config['bad_reward']
self.kp_penalty = self.config['kp_penalty']
def point_distribution(self, logits):
proposal_dist = Categorical(logits=logits) # bx1x(h//g)x(w//g)x(g*g)
proposals = proposal_dist.sample() # bx1x(h//g)x(w//g)
proposal_logp = proposal_dist.log_prob(proposals) # bx1x(h//g)x(w//g)
# accept_logits = select_on_last(logits, proposals).squeeze(-1)
accept_logits = torch.gather(logits, dim=-1, index=proposals[..., None]).squeeze(-1) # bx1x(h//g)x(w//g)
accept_dist = Bernoulli(logits=accept_logits)
accept_samples = accept_dist.sample() # bx1x(h//g)x(w//g)
accept_logp = accept_dist.log_prob(accept_samples) # for accepted points, equals to sigmoid() then log(); for denied, (1-sigmoid).log
accept_mask = accept_samples == 1.
logp = proposal_logp + accept_logp
return proposals, accept_mask, logp
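    # The two-stage sampling above follows the DISK-style keypoint formulation: a Categorical
    # distribution picks one candidate pixel inside every grid cell, a Bernoulli distribution
    # then accepts or rejects that candidate, and logp is the joint log-probability of the
    # sampled decision, which later weights the REINFORCE-style reward in forward().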
def point_sample(self, kp_map):
kpmap_unfold = unfold(kp_map, self.unfold_size)
proposals, accept_mask, logp = self.point_distribution(kpmap_unfold)
b, _, h, w = kp_map.shape
grids_org = gen_grid(h_min=0, h_max=h-1, w_min=0, w_max=w-1, len_h=h, len_w=w)
grids_org = grids_org.reshape(h, w, 2)[None, :, :, :].repeat(b, 1, 1, 1).to(kp_map)
grids_org = grids_org.permute(0,3,1,2) # bx2xhxw
grids_unfold = unfold(grids_org, self.unfold_size) # bx2x(h//g)x(w//g)x(g*g)
kps = grids_unfold.gather(dim=4, index=proposals.unsqueeze(-1).repeat(1,2,1,1,1))
return kps.squeeze(4).permute(0,2,3,1), logp, accept_mask
@ torch.no_grad()
def constant_reward(self, inputs, outputs, coord1, coord2, reward_thr, rescale_thr):
coord1_h = homogenize(coord1).transpose(1, 2) #bx3xm
coord2_h = homogenize(coord2).transpose(1, 2) #bx3xn
fmatrix = inputs['F1']
fmatrix2 = inputs['F2']
# compute the distance of the points in the second image
epipolar_line = fmatrix.bmm(coord1_h)
epipolar_line_ = epipolar_line / torch.clamp(
torch.norm(epipolar_line[:, :2, :], p=2, dim=1, keepdim=True), min=1e-8)
epipolar_dist = torch.abs(epipolar_line_.transpose(1, 2)@coord2_h) #bxmxn
# compute the distance of the points in the first image
epipolar_line2 = fmatrix2.bmm(coord2_h)
epipolar_line2_ = epipolar_line2 / torch.clamp(
torch.norm(epipolar_line2[:, :2, :], p=2, dim=1, keepdim=True), min=1e-8)
epipolar_dist2 = torch.abs(epipolar_line2_.transpose(1, 2)@coord1_h) #bxnxm
epipolar_dist2 = epipolar_dist2.transpose(1,2) #bxmxn
if rescale_thr:
b, _, _ = epipolar_dist.shape
dist1 = epipolar_dist.detach().reshape(b, -1).mean(1,True)
dist2 = epipolar_dist2.detach().reshape(b,-1).mean(1,True)
dist_ = torch.cat([dist1, dist2], dim=1)
scale1 = dist1/dist_.min(1,True)[0].clamp(1e-6)
scale2 = dist2/dist_.min(1,True)[0].clamp(1e-6)
thr1 = reward_thr*scale1
thr2 = reward_thr*scale2
thr1 = thr1.reshape(b,1,1)
thr2 = thr2.reshape(b,1,1)
else:
thr1 = reward_thr
thr2 = reward_thr
scale1 = epipolar_dist2.new_tensor(1.)
scale2 = epipolar_dist2.new_tensor(1.)
good = (epipolar_dist<thr1) & (epipolar_dist2<thr2)
reward = self.good_reward*good + self.bad_reward*(~good)
return reward, scale1, scale2
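    # Reward summary: a sampled pair (p1, p2) receives good_reward only if p2 lies within the
    # threshold of the epipolar line of p1 AND p1 lies within the threshold of the epipolar
    # line of p2; every other pair receives bad_reward. With rescale_thr=True each direction's
    # threshold is scaled by the ratio of its mean epipolar distance to the smaller of the two.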
@ torch.no_grad()
def dynamic_reward(self, inputs, outputs, coord1, coord2, reward_thr, rescale_thr):
coord1_h = homogenize(coord1).transpose(1, 2) #bx3xm
coord2_h = homogenize(coord2).transpose(1, 2) #bx3xn
fmatrix = inputs['F1']
fmatrix2 = inputs['F2']
# compute the distance of the points in the second image
epipolar_line = fmatrix.bmm(coord1_h)
epipolar_line_ = epipolar_line / torch.clamp(
torch.norm(epipolar_line[:, :2, :], p=2, dim=1, keepdim=True), min=1e-8)
epipolar_dist = torch.abs(epipolar_line_.transpose(1, 2)@coord2_h) #bxmxn
# compute the distance of the points in the first image
epipolar_line2 = fmatrix2.bmm(coord2_h)
epipolar_line2_ = epipolar_line2 / torch.clamp(
torch.norm(epipolar_line2[:, :2, :], p=2, dim=1, keepdim=True), min=1e-8)
epipolar_dist2 = torch.abs(epipolar_line2_.transpose(1, 2)@coord1_h) #bxnxm
epipolar_dist2 = epipolar_dist2.transpose(1,2) #bxmxn
if rescale_thr:
b, _, _ = epipolar_dist.shape
dist1 = epipolar_dist.detach().reshape(b, -1).mean(1,True)
dist2 = epipolar_dist2.detach().reshape(b,-1).mean(1,True)
dist_ = torch.cat([dist1, dist2], dim=1)
scale1 = dist1/dist_.min(1,True)[0].clamp(1e-6)
scale2 = dist2/dist_.min(1,True)[0].clamp(1e-6)
thr1 = reward_thr*scale1
thr2 = reward_thr*scale2
thr1 = thr1.reshape(b,1,1)
thr2 = thr2.reshape(b,1,1)
else:
thr1 = reward_thr
thr2 = reward_thr
scale1 = epipolar_dist2.new_tensor(1.)
scale2 = epipolar_dist2.new_tensor(1.)
reward = torch.exp(-epipolar_dist/thr1) + torch.exp(-epipolar_dist2/thr2) - 2/torch.exp(torch.ones_like(epipolar_dist)).to(epipolar_dist)
reward = reward.clamp(min=self.bad_reward)
return reward, scale1, scale2
def forward(self, inputs, outputs, processed):
preds1 = outputs['preds1']
preds2 = outputs['preds2']
kp_map1, kp_map2 = preds1['local_point'], preds2['local_point']
xf1, xf2 = preds1['local_map'], preds2['local_map']
b,c,h4,w4 = xf1.shape
_, _, h, w = kp_map1.shape
temperature = min(self.t_base + outputs['epoch'], self.t_max)
coord1, logp1, accept_mask1 = self.point_sample(kp_map1) # bx(h//g)x(w//g)x2 bx1x(h//g)x(w//g) bx1x(h//g)x(w//g)
coord2, logp2, accept_mask2 = self.point_sample(kp_map2)
coord1 = coord1.reshape(b,-1,2)
coord2 = coord2.reshape(b,-1,2)
coord1_n = normalize_coords(coord1, h, w) # bx((h//g)*(w//g))x2
coord2_n = normalize_coords(coord2, h, w)
# feat1 = F.grid_sample(xf1, coord1_n, align_corners=False).reshape(b,c,-1) # bxcx((h//g)*(w//g))
# feat2 = F.grid_sample(xf2, coord2_n, align_corners=False).reshape(b,c,-1)
feat1 = sample_feat_by_coord(xf1, coord1_n, self.config['loss_distance']=='cos') #bxmxc
feat2 = sample_feat_by_coord(xf2, coord2_n, self.config['loss_distance']=='cos') #bxnxc
# matching
if self.config['match_grad']:
costs = 1-feat1@feat2.transpose(1,2) # bxmxn 0-2
else:
with torch.no_grad():
costs = 1-feat1@feat2.transpose(1,2) # bxmxn 0-2
affinity = -temperature * costs
cat_I = Categorical(logits=affinity)
cat_T = Categorical(logits=affinity.transpose(1,2))
dense_p = cat_I.probs * cat_T.probs.transpose(1,2)
dense_logp = cat_I.logits + cat_T.logits.transpose(1,2)
if self.config['cor_detach']:
sample_p = dense_p.detach()
else:
sample_p = dense_p
reward, scale1, scale2 = self.reward(inputs, outputs, coord1, coord2, **self.config['reward_config'])
kps_logp = logp1.reshape(b,1,-1).transpose(1,2) + logp2.reshape(b,1,-1) # bxmxn
sample_plogp = sample_p * (dense_logp + kps_logp)
accept_mask = accept_mask1.reshape(b,1,-1).transpose(1,2) * accept_mask2.reshape(b,1,-1) # bxmxn
reinforce = (reward[accept_mask] * sample_plogp[accept_mask]).sum()
kp_penalty = self.kp_penalty * (logp1[accept_mask1].sum()+logp2[accept_mask2].sum())
loss = -reinforce - kp_penalty
sample_p_detach = sample_p.detach()
components = {'reinforce':reinforce.detach(), 'kp_penalty': kp_penalty.detach(),
'scale1': scale1, 'scale2':scale2,
'cor minmax': sample_p_detach.view(b,-1).max(-1)[0].min(),
'cor minmean': sample_p_detach.view(b,-1).mean(-1).min(),
'cor max': sample_p_detach.max(),
'cor mean': sample_p_detach.mean(),
'cor summin': torch.min(sample_p_detach.sum(1).min(), sample_p_detach.sum(2).min()),
'cor summax': torch.max(sample_p_detach.sum(1).max(), sample_p_detach.sum(2).max()),
'n_kps': (accept_mask1.detach().reshape(b,1,-1).sum(-1) + accept_mask2.detach().reshape(b,1,-1).sum(-1)).float().mean(),
'n_pairs': sample_p.detach().sum(-1).sum(-1).mean(),
'temperature': sample_p_detach.new_tensor(temperature)
}
return loss, components | 9,442 | 46.93401 | 145 | py |
PoSFeat | PoSFeat-main/losses/preprocess_utils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Bernoulli
import math
import numpy as np
def homogenize(coord):
# coord = torch.cat((coord, torch.ones_like(coord[:, :, [0]])), -1)
coord = torch.cat((coord, torch.ones_like(coord[..., [0]])), -1)
return coord
def normalize_coords(coord, h, w):
'''
turn the coordinates from pixel indices to the range of [-1, 1]
:param coord: [..., 2]
:param h: the image height
:param w: the image width
:return: the normalized coordinates [..., 2]
'''
c = torch.Tensor([(w - 1) / 2., (h - 1) / 2.]).to(coord.device).float()
# print(coord[:,:,0].max(), coord[:,:,1].max(), w, h)
coord_norm = (coord - c) / c
# print(coord_norm[:,:,0].max(), coord_norm[:,:,1].max(), coord_norm[:,:,0].min(), coord_norm[:,:,1].min())
return coord_norm
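# Small illustrative example (hypothetical values): with h=480 and w=640, the pixel coordinate
# (0, 0) maps to (-1, -1) and (639, 479) maps to (1, 1), i.e. pixel indices are rescaled onto
# the [-1, 1] x [-1, 1] square.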
def denormalize_coords(coord_norm, h, w):
'''
turn the coordinates from normalized value ([-1, 1]) to actual pixel indices
:param coord_norm: [..., 2]
:param h: the image height
:param w: the image width
:return: actual pixel coordinates
'''
c = torch.Tensor([(w - 1) / 2., (h - 1) / 2.]).to(coord_norm.device)
coord = coord_norm * c + c
return coord
def sample_feat_by_coord(x, coord_n, norm=False):
'''
sample from normalized coordinates
:param x: feature map [batch_size, n_dim, h, w]
:param coord_n: normalized coordinates, [batch_size, n_pts, 2]
:param norm: if l2 normalize features
:return: the extracted features, [batch_size, n_pts, n_dim]
'''
feat = F.grid_sample(x, coord_n.unsqueeze(2), padding_mode='zeros', align_corners=False).squeeze(-1)
# print(feat.shape)
if norm:
feat = F.normalize(feat, p=2, dim=1)
feat = feat.transpose(1, 2)
return feat
def get_expected_correspondence_locs(feat1, featmap2, with_std=False):
'''
compute the expected correspondence locations
:param feat1: the feature vectors of query points [batch_size, n_pts, n_dim]
:param featmap2: the feature maps of the reference image [batch_size, n_dim, h, w]
:param with_std: if return the standard deviation
:return: the normalized expected correspondence locations [batch_size, n_pts, 2]
'''
B, d, h2, w2 = featmap2.size()
grid_n = gen_grid(-1, 1, -1, 1, h2, w2).to(featmap2.device)
featmap2_flatten = featmap2.reshape(B, d, h2*w2).transpose(1, 2) # BX(hw)xd
prob = compute_prob(feat1, featmap2_flatten) # Bxnx(hw)
grid_n = grid_n.unsqueeze(0).unsqueeze(0) # 1x1x(hw)x2
expected_coord_n = torch.sum(grid_n * prob.unsqueeze(-1), dim=2) # Bxnx2
if with_std:
# convert to normalized scale [-1, 1]
var = torch.sum(grid_n**2 * prob.unsqueeze(-1), dim=2) - expected_coord_n**2 # Bxnx2
std = torch.sum(torch.sqrt(torch.clamp(var, min=1e-10)), -1) # Bxn
# var_prob = (prob-prob.mean(-1,True)).square().sum(-1,True)/prob.shape[-1] # Bxnx1
# kurtosis = torch.pow(prob-prob.mean(-1,True),4).sum(-1,True)/(prob.shape[2]*var_prob**2)
kurtosis = torch.pow(grid_n-expected_coord_n.unsqueeze(-2), 4).mean(-2)/torch.pow(var, 2)
kurtosis = (kurtosis/10.).clamp(0,1)
# kurtosis = var
        return expected_coord_n, std, kurtosis.mean(-1), prob  # , var_prob
else:
return expected_coord_n
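# In other words, get_expected_correspondence_locs performs a soft-argmax: the returned location
# is the probability-weighted average of all normalized grid positions in image 2, std measures
# how spread out that distribution is, and the clamped kurtosis term is a rough peakedness
# statistic of the match distribution.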
def gen_grid(h_min, h_max, w_min, w_max, len_h, len_w):
x, y = torch.meshgrid([torch.linspace(w_min, w_max, len_w), torch.linspace(h_min, h_max, len_h)])
grid = torch.stack((x, y), -1).transpose(0, 1).reshape(-1, 2).float()
return grid
def compute_prob(feat1, feat2, loss_distance='cos', with_scale=False, return_sim=False):
'''
compute probability
:param feat1: query features, [batch_size, m, n_dim]
:param feat2: reference features, [batch_size, n, n_dim]
:return: probability, [batch_size, m, n]
'''
assert loss_distance in ['cos', 'euc']
if return_sim:
assert loss_distance=='cos'
if loss_distance == 'cos':
sim = feat1.bmm(feat2.transpose(1, 2))
if with_scale:
scale = sim.new_tensor(feat2.shape[1])
scale = scale.sqrt()
else:
scale = 1
prob = F.softmax(scale*sim, dim=-1) # Bxmxn
else:
dist = torch.sum(feat1**2, dim=-1, keepdim=True) + \
torch.sum(feat2**2, dim=-1, keepdim=True).transpose(1, 2) - \
2 * feat1.bmm(feat2.transpose(1, 2))
prob = F.softmax(-dist, dim=-1) # Bxmxn
if return_sim:
return prob, sim
else:
return prob
def OT_sinkhorn_log(costs, iters=20, temperature=None):
'''
    find the correspondence with the Sinkhorn algorithm (log-domain implementation)
:param costs: [b, m, n]
:param iters: the number of iterations
:return: the optimized scores [b,m,n]
'''
b, m, n = costs.shape
one = costs.new_tensor(1)
ms, ns = (m*one).to(costs), (n*one).to(costs)
norm = - (ms + ns).log()
P = -temperature*costs
log_m = norm*torch.ones(b,m,1).to(costs)
log_n = norm*torch.ones(b,1,n).to(costs)
u,v = torch.zeros_like(log_m), torch.zeros_like(log_n)
for _ in range(iters):
u = log_m - torch.logsumexp(P + v, dim=2, keepdim=True)
v = log_n - torch.logsumexp(P + u, dim=1, keepdim=True)
P = P + u + v
P = P - norm
optimal = P.exp()
return optimal, None
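# Minimal usage sketch for the log-domain Sinkhorn above. The tensor sizes and the temperature
# value here are illustrative assumptions, not values used elsewhere in this repository.
def _sinkhorn_usage_example():
    example_costs = torch.rand(2, 5, 7)                       # b x m x n matching costs
    scores, _ = OT_sinkhorn_log(example_costs, iters=20, temperature=10.)
    # `scores` has shape (2, 5, 7); after the iterations its row and column sums are close to
    # the uniform marginals 1/(m+n) encoded in log_m and log_n above.
    return scores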
def OT_sinkhorn_log_unmatch(costs, iters=20, temperature=None):
'''
    find the correspondence with the Sinkhorn algorithm, with an extra bin for unmatched points
:param costs: [b, m, n]
:param iters: the number of iterations
:return: the optimized scores [b,m,n]
'''
b, m, n = costs.shape
one = costs.new_tensor(1)
ms, ns = (m*one).to(costs), (n*one).to(costs)
bins1 = 1-costs.min(2, True)[0] #bxmx1
bins2 = 1-costs.min(1, True)[0] #bx1xn
corner = (bins1.mean(1,True)+bins2.mean(2,True))/2
costs = torch.cat([torch.cat([costs, bins1], -1),
torch.cat([bins2, corner], -1)], 1) #bx(m+1)x(n+1)
norm = - (ms + ns).log()
P = -temperature*costs
log_m = norm*torch.ones(b,m+1,1).to(costs)
log_n = norm*torch.ones(b,1,n+1).to(costs)
log_m[:,-1,:] = ns.log()+norm
log_n[:,:,-1] = ms.log()+norm
u,v = torch.zeros_like(log_m), torch.zeros_like(log_n)
for _ in range(iters):
u = log_m - torch.logsumexp(P + v, dim=2, keepdim=True)
v = log_n - torch.logsumexp(P + u, dim=1, keepdim=True)
P = P + u + v
P = P - norm
optimal = P.exp()
return optimal[:, :-1, :-1], optimal
def Dual_Softmax(costs, iters=None, temperature=None):
'''
    find the correspondence with the dual-softmax operator
    :param costs: [b, m, n]
    :param iters: unused; kept for interface compatibility with the Sinkhorn variants
:return: the optimized scores [b,m,n]
'''
b, m, n = costs.shape
# scale = max(m,n)
scale = 1
if temperature is None:
costs_input = - 15 * scale * costs
else:
costs_input = - temperature * scale * costs
prob_col = F.softmax(costs_input, dim=2)
prob_row = F.softmax(costs_input, dim=1)
    prob = prob_col*prob_row
return prob, None
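# Dual_Softmax is the dual-softmax matcher: the cost matrix is turned into probabilities by a
# softmax over rows and a softmax over columns, and their product is high only for pairs that
# are (close to) mutual nearest neighbours. Unlike the Sinkhorn variants above, it needs no
# iterations, which is why `iters` is ignored.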
def generate_kpts(inputs, outputs, nms_radius, num_pts=False, stable_prob=0.9, use_nms=True, stride=1):
"""
generate keypoints on the entire image
"""
preds1 = outputs['preds1']
preds2 = outputs['preds2']
kp_map1, kp_map2 = preds1['local_point'], preds2['local_point']
if torch.rand(1)<stable_prob: # stable select
kps1, kp_score1 = generate_kpts_single(kp_map1, nms_radius, num_pts, scale=4, stride=stride, use_nms=use_nms)
kps2, kp_score2 = generate_kpts_single(kp_map2, nms_radius, num_pts, scale=4, stride=stride, use_nms=use_nms)
else: # random select
temperature = 0.01/(outputs['epoch']+1)
kps1, kp_score1 = generate_kpts_single(kp_map1, nms_radius, num_pts, scale=4,
stable=False, temperature=temperature, stride=stride, use_nms=use_nms)
kps2, kp_score2 = generate_kpts_single(kp_map2, nms_radius, num_pts, scale=4,
stable=False, temperature=temperature, stride=stride, use_nms=use_nms)
return kps1, kps2, kp_score1, kp_score2
def generate_kpts_single(kp_map, nms_radius, num_pts=False, scale=4, stable=True, temperature=1, stride=1, use_nms=True, thr=False, thr_mod='mean'):
b, _, h, w = kp_map.shape
grids_org = gen_grid(h_min=-1, h_max=1, w_min=-1, w_max=1, len_h=h, len_w=w)
# h, w = scale*h, scale*w
# grids_org = gen_grid(h_min=0, h_max=h-1, w_min=0, w_max=w-1, len_h=h, len_w=w)
grids_org = grids_org.reshape(h, w, 2)[None, :, :, :].repeat(b, 1, 1, 1).to(kp_map)
grids_org = grids_org.permute(0,3,1,2) # bx2xhxw
# nms omits the boarder pixels of the original score map
# so that the mask size will be the same as processed score map
if use_nms == 'softnms': # softnms for softnms
nms_mask = soft_nms(kp_map[:,:,1:-1,1:-1], nms_radius)
elif use_nms: # True for hard nms
nms_mask = nms(kp_map[:,:,1:-1,1:-1], nms_radius)
elif not use_nms: # False for no nms
nms_mask = torch.ones((b,1,h-2,w-2)).to(kp_map)
if thr :
if thr_mod == 'max':
kp_thr = (kp_map[:,:,1:-1,1:-1]).reshape(b,1,-1).max(2)[0]
elif thr_mod == 'mean':
kp_thr = (kp_map[:,:,1:-1,1:-1]).reshape(b,1,-1).mean(2)
elif thr_mod == 'abs':
kp_thr = torch.tensor(1.).to(kp_map).repeat(b)
thr_mask = kp_map[:,:,1:-1,1:-1]>thr*kp_thr.view(b,1,1,1)
nms_mask = thr_mask*nms_mask
# process the score map and grids
grids = kp_map*grids_org
grids = F.avg_pool2d(grids, 3, stride=stride, padding=0)
kp_weight = F.avg_pool2d(kp_map, 3, stride=stride, padding=0)
grids = grids/kp_weight
kp_score_map = F.max_pool2d(kp_map, 3, stride=stride, padding=0)
if not num_pts:
if use_nms != 'softnms':
num_pts = (nms_mask.view(b,-1).sum(1).min()).int()
else:
# num_pts = (((nms_mask*kp_map[:,:,1:-1,1:-1]).view(b,-1)>thr*((nms_mask*kp_map[:,:,1:-1,1:-1]).view(b,-1).mean(1, True))).sum(1).min()).int()
num_pts = (thr_mask.view(b,-1).sum(1).min()).int()
else:
if use_nms != 'softnms' and num_pts>nms_mask.view(b,-1).sum(1).min():
num_pts = (nms_mask.view(b,-1).sum(1).min()).int()
if use_nms == 'softnms' and num_pts>thr_mask.view(b,-1).sum(1).min():
num_pts = (thr_mask.view(b,-1).sum(1).min()).int()
if num_pts < 128:
num_pts = 128
if stable:
_, idx = (nms_mask*kp_map[:,:,1:-1,1:-1]).permute(0,2,3,1).contiguous().view(b,-1).topk(num_pts)
kps = grids.permute(0,2,3,1).view(b,-1,2).gather(dim=1,index=idx.unsqueeze(-1).repeat(1,1,2))
kp_score = kp_score_map.permute(0,2,3,1).view(b,-1,1).gather(dim=1,index=idx.unsqueeze(-1))
else:
# select = gumbel_softmax(kp_map, num_pts, temperature) # bxnxhw
# kps = select@grids_org.permute(0,2,3,1).view(b,h*w,2)
# kp_score = select@kp_map.permute(0,2,3,1).view(b,h*w,1)
select = gumbel_softmax(nms_mask*kp_map[:,:,1:-1,1:-1], num_pts, temperature) # bxnxhw
kps = select@grids.permute(0,2,3,1).reshape(b,(h-2)*(w-2),2)
kp_score = select@kp_map[:,:,1:-1,1:-1].permute(0,2,3,1).reshape(b,(h-2)*(w-2),1)
return kps, kp_score
def generate_kpts_single_noavg(kp_map, nms_radius, num_pts=False, scale=4, stable=True, temperature=1, stride=1, use_nms=True, thr=False, thr_mod='mean'):
b, _, h, w = kp_map.shape
grids_org = gen_grid(h_min=-1, h_max=1, w_min=-1, w_max=1, len_h=h, len_w=w)
# h, w = scale*h, scale*w
# grids_org = gen_grid(h_min=0, h_max=h-1, w_min=0, w_max=w-1, len_h=h, len_w=w)
grids_org = grids_org.reshape(h, w, 2)[None, :, :, :].repeat(b, 1, 1, 1).to(kp_map)
grids_org = grids_org.permute(0,3,1,2) # bx2xhxw
# nms omits the boarder pixels of the original score map
# so that the mask size will be the same as processed score map
if use_nms == 'softnms': # softnms for softnms
nms_mask = soft_nms(kp_map, nms_radius)
elif use_nms: # True for hard nms
nms_mask = nms(kp_map, nms_radius)
elif not use_nms: # False for no nms
nms_mask = torch.ones((b,1,h,w)).to(kp_map)
if thr :
if thr_mod == 'max':
kp_thr = (kp_map).reshape(b,1,-1).max(2)[0]
elif thr_mod == 'mean':
kp_thr = (kp_map).reshape(b,1,-1).mean(2)
thr_mask = kp_map>thr*kp_thr.view(b,1,1,1)
nms_mask = thr_mask*nms_mask
grids = grids_org
if not num_pts:
if use_nms != 'softnms':
num_pts = (nms_mask.view(b,-1).sum(1).min()).int()
else:
# num_pts = (((nms_mask*kp_map[:,:,1:-1,1:-1]).view(b,-1)>thr*((nms_mask*kp_map[:,:,1:-1,1:-1]).view(b,-1).mean(1, True))).sum(1).min()).int()
num_pts = (thr_mask.view(b,-1).sum(1).min()).int()
else:
if use_nms != 'softnms' and num_pts>nms_mask.view(b,-1).sum(1).min():
num_pts = (nms_mask.view(b,-1).sum(1).min()).int()
if use_nms == 'softnms' and num_pts>thr_mask.view(b,-1).sum(1).min():
num_pts = (thr_mask.view(b,-1).sum(1).min()).int()
if num_pts < 128:
num_pts = 128
if stable:
_, idx = (nms_mask*kp_map).permute(0,2,3,1).contiguous().view(b,-1).topk(num_pts)
kps = grids.permute(0,2,3,1).view(b,-1,2).gather(dim=1,index=idx.unsqueeze(-1).repeat(1,1,2))
kp_score = kp_map.permute(0,2,3,1).view(b,-1,1).gather(dim=1,index=idx.unsqueeze(-1))
else:
# select = gumbel_softmax(kp_map, num_pts, temperature) # bxnxhw
# kps = select@grids_org.permute(0,2,3,1).view(b,h*w,2)
# kp_score = select@kp_map.permute(0,2,3,1).view(b,h*w,1)
select = gumbel_softmax(nms_mask*kp_map, num_pts, temperature) # bxnxhw
        kps = select@grids.permute(0,2,3,1).reshape(b,h*w,2)
        kp_score = select@kp_map.permute(0,2,3,1).reshape(b,h*w,1)
return kps, kp_score
# def unfold(tensor, grid_size):
# b,c,h,w = tensor.shape
# unfold_tensor = tensor.unfold(2, grid_size, grid_size).unfold(3, grid_size, grid_size) \
# .reshape(b, c, h//grid_size, w//grid_size, grid_size*grid_size)
# return unfold_tensor
def unfold(tensor, grid_size, stride=None):
if stride is None:
stride = grid_size
unfold_tensor = tensor.unfold(2, grid_size, stride).unfold(3, grid_size, stride)
b,c,h,w,g1,g2 = unfold_tensor.shape
unfold_tensor = unfold_tensor.reshape(b,c,h,w,g1*g2)
return unfold_tensor
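# Shape example: unfold(x, grid_size=4) turns a (b, c, 16, 16) map into a (b, c, 4, 4, 16)
# tensor, i.e. one length-16 vector of pixel values per non-overlapping 4x4 grid cell.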
def regular_sample(tensor):
b,c,h,w,g = tensor.shape
idx = torch.multinomial(tensor.reshape(-1,g), 1)
idx = idx.reshape(b,c,h,w,1)
return idx
def generate_kpts_regular_grid(inputs, outputs, grid_size, num_pts=False, stable_prob=0.9, use_nms=True, nms_radius=None):
preds1 = outputs['preds1']
preds2 = outputs['preds2']
kp_map1, kp_map2 = preds1['local_point'], preds2['local_point']
if torch.rand(1)<stable_prob: # stable select
kps1, kp_score1 = generate_kpts_regular_grid_single(kp_map1, grid_size, num_pts, scale=4, stable=True, use_nms=use_nms,
nms_radius=nms_radius)
kps2, kp_score2 = generate_kpts_regular_grid_single(kp_map2, grid_size, num_pts, scale=4, stable=True, use_nms=use_nms,
nms_radius=nms_radius)
else: # random select
kps1, kp_score1 = generate_kpts_regular_grid_single(kp_map1, grid_size, num_pts, scale=4, stable=False, use_nms=use_nms,
nms_radius=nms_radius)
kps2, kp_score2 = generate_kpts_regular_grid_single(kp_map2, grid_size, num_pts, scale=4, stable=False, use_nms=use_nms,
nms_radius=nms_radius)
return kps1, kps2, kp_score1, kp_score2
def generate_kpts_regular_grid_single(kp_map, grid_size, num_pts=False, scale=4, stable=True, use_nms=True, nms_radius=None, thr=None, thr_mod='mean'):
b, _, h, w = kp_map.shape
grids_org = gen_grid(h_min=-1, h_max=1, w_min=-1, w_max=1, len_h=h, len_w=w)
# h, w = scale*h, scale*w
# grids_org = gen_grid(h_min=0, h_max=h-1, w_min=0, w_max=w-1, len_h=h, len_w=w)
grids_org = grids_org.reshape(h, w, 2)[None, :, :, :].repeat(b, 1, 1, 1).to(kp_map)
grids_org = grids_org.permute(0,3,1,2) # bx2xhxw
if use_nms == 'softnms':
soft_mask = soft_nms(kp_map, nms_radius)
kp_map = soft_mask*kp_map
nms_mask = torch.ones_like(soft_mask).bool()
elif use_nms:
nms_mask = nms(kp_map, nms_radius)
else:
nms_mask = torch.ones_like(kp_map).bool()
if thr is not None:
if thr_mod == 'max':
kp_thr = kp_map.view(b,1,-1).max(2)[0]
elif thr_mod == 'mean':
kp_thr = kp_map.view(b,1,-1).mean(2)
thr_mask = kp_map>thr*kp_thr.view(b,1,1,1)
nms_mask = thr_mask&nms_mask
grids_unfold = unfold(grids_org, grid_size)
kpmap_unfold = unfold(kp_map, grid_size)
nms_unfold = unfold(nms_mask, grid_size)
kpmap_unfold_n = F.softmax(kpmap_unfold, dim=4)
if stable:
idx = kpmap_unfold_n.argmax(-1,True)
else:
idx = regular_sample(kpmap_unfold_n)
kps = grids_unfold.gather(dim=4, index=idx.repeat(1,2,1,1,1)) # bx2x(h//g)x(w//g)x1
kp_score = kpmap_unfold.gather(dim=4, index=idx) # bx1x(h//g)x(w//g)x1
mask = nms_unfold.gather(dim=4, index=idx) # bx1x(h//g)x(w//g)x1
kps = kps.reshape(b,2,-1).transpose(1,2) # bxnx2
kp_score = kp_score.reshape(b,1,-1).transpose(1,2) # bxnx1
mask = mask.reshape(b,1,-1).transpose(1,2) # bxnx1
if num_pts:
if num_pts > mask.sum(1).min():
num_pts=mask.sum(1).min()
kp_score, top_idx = (mask*kp_score).topk(num_pts, dim=1)
        kps = kps.gather(dim=1, index=top_idx.repeat(1,1,2))
else:
if use_nms :
num_pts=mask.sum(1).min()
if num_pts < 128:
num_pts = 128
kp_score, top_idx = (mask*kp_score).topk(num_pts, dim=1)
kps = kps.gather(dim=1, index=top_idx.repeat(1,1,2))
return kps, kp_score
def soft_nms(score, patch_radius):
b,c,h,w = score.shape
window_size = 2*patch_radius + 1
padding_size = patch_radius
score = score.detach().contiguous()
# max_per_sample = torch.max(score.view(b,-1), dim=1)[0]
# score = score/max_per_sample.view(b,1,1,1)
# score = score.detach()
alpha_input = score - F.avg_pool2d(
F.pad(score, [padding_size]*4, mode='reflect'),
window_size, stride=1
)
alpha = F.softplus(alpha_input)
return alpha
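# soft_nms returns a dense weight map rather than a hard mask: each pixel is scored by
# softplus(score - local mean over the (2*patch_radius+1)^2 window), so responses that stand
# out from their neighbourhood receive weights noticeably above softplus(0) ~= 0.69.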
def nms(score, patch_radius):
patch_size = 2*patch_radius+1
score_pad = F.pad(score.detach(), (patch_radius, patch_radius, patch_radius, patch_radius), mode='reflect')
# max_score = F.max_pool2d(score_pad, patch_size, stride=1, padding=0)
# mask = score==max_score
_, idx = F.max_pool2d(score_pad, patch_size, stride=1, padding=0, return_indices=True)
# if len(idx.shape) == 4:
# assert idx.shape[0] == 1
# idx = idx.squeeze(0)
b,_, h, w = score.shape
coords = torch.arange((h+2*patch_radius) * (w+2*patch_radius), device=score.device)\
.reshape(1, 1, h+2*patch_radius, w+2*patch_radius).repeat(b,1,1,1)
coords = coords[:,:,patch_radius:-patch_radius,patch_radius:-patch_radius]
mask = idx == coords
return mask
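# nms keeps a pixel only if max-pooling over its (2*patch_radius+1)^2 neighbourhood selects
# that pixel's own index, i.e. only local maxima of the reflect-padded score map survive;
# the returned boolean mask has the same spatial size as `score`.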
def gumbel_noise(shape, eps=1e-20):
U = torch.rand(shape)
U = U.cuda()
return -torch.log(-torch.log(U + eps) + eps)
def gumbel_softmax_sample(prob, num_points, temperature=1):
b, one, h, w = prob.shape
y = prob.view(b,1,h*w).repeat(1, num_points, 1) + gumbel_noise((b, num_points, h*w))
one_hot_soft = F.softmax(y/temperature, dim=2)
return one_hot_soft
def gumbel_softmax(prob, num_points, temperature=1, hard=False):
one_hot_soft = gumbel_softmax_sample(prob, num_points, temperature) # bx1xhw
if not hard:
return one_hot_soft
b, num, hw = one_hot_soft.shape
_, idx = one_hot_soft.max(dim=2)
one_hot = torch.zeros_like(one_hot_soft).view(-1, hw)
    one_hot.scatter_(1, idx.view(-1, 1), 1.)
    one_hot = one_hot.view(b, num, hw)
    # straight-through estimator: hard one-hot in the forward pass, gradients flow through the soft sample
    one_hot = (one_hot - one_hot_soft).detach() + one_hot_soft
return one_hot
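# Hedged usage sketch for the Gumbel sampler above; the sizes below are illustrative only, and
# the call needs a CUDA device because gumbel_noise() allocates its noise on the GPU.
def _gumbel_softmax_example():
    example_scores = torch.rand(2, 1, 16, 16).cuda()           # b x 1 x h x w keypoint scores
    selection = gumbel_softmax(example_scores, num_points=32, temperature=0.5)
    # `selection` has shape (2, 32, 256): one soft one-hot row over the h*w locations for each
    # of the 32 sampled points; with hard=True the rows become exact one-hot vectors.
    return selection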
@torch.no_grad()
def valid_points(epipolar_line, im_size, linelen_thr):
    '''
    similar in spirit to get_endpoints, but only returns the validity mask
    :return: valid, a bool tensor [b, n] marking epipolar lines that cross the image and whose
             in-image segment is longer than linelen_thr (in normalized coordinates)
    '''
batch_size, _, n_pts = epipolar_line.shape
h, w = im_size
a = epipolar_line[:,0,:] #Bxn
b = epipolar_line[:,1,:]
c = epipolar_line[:,2,:]
point_l = torch.stack([torch.zeros_like(a), -c/b], -1) #Bxnx2
point_r = torch.stack([(w-1)*torch.ones_like(a), -(a*(w-1)+c)/b], -1)
point_u = torch.stack([-(b*(h-1)+c)/a, (h-1)*torch.ones_like(a)], -1)
point_b = torch.stack([-c/a, torch.zeros_like(a)], -1)
points = torch.stack([point_l, point_r, point_u, point_b], -1).transpose(2,3) #Bxnx4x2
mask = (points[:,:,:,0]>=0) & (points[:,:,:,0]<=w-1) & (points[:,:,:,1]>=0) & (points[:,:,:,1]<=h-1) #Bxnx4
valid = mask.sum(-1) == 2 #Bxn
mask[~valid] = torch.tensor([True, True, False, False]).to(mask.device)
points = points[mask].reshape(batch_size, n_pts, 2, 2)
points1 = points[:,:,0,:]
points2 = points[:,:,1,:]
endpoints_1_n = normalize_coords(points1, h, w)
endpoints_2_n = normalize_coords(points2, h, w)
line_len = endpoints_2_n - endpoints_1_n
len_mask = (line_len**2).sum(-1).sqrt()>linelen_thr
valid = valid&len_mask
return valid
@torch.no_grad()
def SSIM(x):
C1 = 0.01 ** 2
C2 = 0.03 ** 2
x_pad = F.pad(x.abs(), (0,1,0,1), 'reflect')
x_lu = x_pad[:,:,:-1,:-1]
x_rb = x_pad[:,:,1:,1:]
x_lu = F.pad(x_lu, (1,1,1,1), 'reflect')
x_rb = F.pad(x_rb, (1,1,1,1), 'reflect')
m_x_lu = F.avg_pool2d(x_lu, 3, 1)
m_x_rb = F.avg_pool2d(x_rb, 3, 1)
sigma_x_lu = F.avg_pool2d(x_lu**2, 3, 1) - m_x_lu**2
sigma_x_rb = F.avg_pool2d(x_rb**2, 3, 1) - m_x_rb**2
sigma_x_lu_rb = F.avg_pool2d(x_lu*x_rb, 3, 1) - m_x_lu*m_x_rb
SSIM_n = (2 * m_x_lu * m_x_rb + C1) * (2 * sigma_x_lu_rb + C2)
SSIM_d = (m_x_lu ** 2 + m_x_rb ** 2 + C1) * (sigma_x_lu + sigma_x_rb + C2)
return torch.clamp((1 - SSIM_n / SSIM_d)/2, 0, 1).mean(1,True)
@torch.no_grad()
def D2(x):
b,c,h,w = x.shape
window_size = 3
padding_size = window_size//2
x = F.relu(x)
max_per_sample = torch.max(x.view(b,-1), dim=1)[0]
exp = torch.exp(x/max_per_sample.view(b,1,1,1))
sum_exp = (
window_size**2*
F.avg_pool2d(
F.pad(exp, [padding_size]*4, mode='constant', value=1.),
window_size, stride=1
)
)
local_max_score = exp / sum_exp
depth_wise_max = torch.max(x, dim=1)[0]
depth_wise_max_score = x / depth_wise_max.unsqueeze(1)
all_scores = local_max_score * depth_wise_max_score
score = torch.max(all_scores, dim=1)[0]
# score = score / torch.sum(score.view(b, -1), dim=1).view(b, 1, 1)
return score.unsqueeze(1)
@torch.no_grad()
def ASL_Peak(x):
b,c,h,w = x.shape
window_size = 3
padding_size = window_size//2
# x = F.relu(x)
max_per_sample = torch.max(x.view(b,-1), dim=1)[0]
x = x/max_per_sample.view(b,1,1,1)
alpha_input = x - F.avg_pool2d(
F.pad(x, [padding_size]*4, mode='reflect'),
window_size, stride=1
)
alpha = F.softplus(alpha_input)
beta_input = x - x.mean(1, True)
beta = F.softplus(beta_input)
all_scores = (alpha*beta).max(1,True)[0]
return all_scores
@torch.no_grad()
def generate_kpts_regular_grid_random(inputs, outputs, grid_size, map_init='identity', keep_spatial=False, random_select='random'):
"""
this is the function used to generate key points within regualr grid in descriptor initialization stage
"""
preds1 = outputs['preds1']
preds2 = outputs['preds2']
if map_init == 'identity':
kp_map1, kp_map2 = torch.ones_like(preds1['local_point']), torch.ones_like(preds2['local_point'])
elif map_init in ['SSIM', 'D2', 'ASL_Peak']:
func = eval(map_init)
kp_map1 = func(F.interpolate(preds1['local_map'], inputs['im1'].shape[2:], mode='bilinear'))
kp_map2 = func(F.interpolate(preds2['local_map'], inputs['im2'].shape[2:], mode='bilinear'))
kps1, kp_score1 = generate_kpts_regular_grid_random_single(kp_map1, grid_size, random_select)
kps2, kp_score2 = generate_kpts_regular_grid_random_single(kp_map2, grid_size, random_select)
if not keep_spatial:
b = kps1.shape[0]
kps1, kps2 = kps1.reshape(b,2,-1).transpose(1,2), kps2.reshape(b,2,-1).transpose(1,2)
kp_score1, kp_score2 = kp_score1.reshape(b,1,-1).transpose(1,2), kp_score2.reshape(b,1,-1).transpose(1,2)
else:
kps1, kps2 = kps1.squeeze(-1).permute(0,2,3,1), kps2.squeeze(-1).permute(0,2,3,1)
kp_score1, kp_score2 = kp_score1.permute(0,2,3,1), kp_score2.permute(0,2,3,1)
return kps1, kps2, kp_score1, kp_score2
def generate_kpts_regular_grid_random_single(kp_map, grid_size, random_select):
"""
note that the score returned by this function is the logp within the grid_size window
"""
b, _, h, w = kp_map.shape
if random_select == 'random':
grids_org = gen_grid(h_min=-1, h_max=1, w_min=-1, w_max=1, len_h=h, len_w=w)
grids_org = grids_org.reshape(h, w, 2)[None, :, :, :].repeat(b, 1, 1, 1).to(kp_map)
grids_org = grids_org.permute(0,3,1,2) # bx2xhxw
kpmap_unfold = unfold(kp_map, grid_size) # bx1x(h//g)x(w//g)x(g*g)
proposal_dist = Categorical(logits=kpmap_unfold)
proposals = proposal_dist.sample() # bx1x(h//g)x(w//g)
proposal_logp = proposal_dist.log_prob(proposals) # bx1x(h//g)x(w//g)
kp_score = torch.gather(kpmap_unfold, dim=-1, index=proposals[..., None]).squeeze(-1) # bx1x(h//g)x(w//g)
grids_unfold = unfold(grids_org, grid_size) # bx2x(h//g)x(w//g)x(g*g)
kps = grids_unfold.gather(dim=4, index=proposals.unsqueeze(-1).repeat(1,2,1,1,1))
elif random_select == 'regular_random':
start = 0.5*grid_size/h
num_w = w//grid_size
num_h = h//grid_size
kps = gen_grid(h_min=-1+start, h_max=1-start, w_min=-1+start, w_max=1-start, len_h=num_h, len_w=num_w)
regular_rand = start*(2*torch.rand(b,1,1,2)-1).to(kp_map)
kps = kps.reshape(num_h, num_w, 2)[None, :, :, :].repeat(b, 1, 1, 1).to(kp_map) + regular_rand
kp_score = F.grid_sample(kp_map, kps, padding_mode='zeros', align_corners=False) # bx1x(h//g)x(w//g)
kps = kps.permute(0,3,1,2)
else:
start = 0.5*grid_size/h
num_w = w//grid_size
num_h = h//grid_size
kps = gen_grid(h_min=-1+start, h_max=1-start, w_min=-1+start, w_max=1-start, len_h=num_h, len_w=num_w)
        kps = kps.reshape(num_h, num_w, 2)[None, :, :, :].repeat(b, 1, 1, 1).to(kp_map)
kp_score = F.grid_sample(kp_map, kps, padding_mode='zeros', align_corners=False) # bx1x(h//g)x(w//g)
kps = kps.permute(0,3,1,2) # bx2x(h//g)x(w//g)
return kps, kp_score
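# Usage sketch (hypothetical sizes): for a bx1xhxw keypoint map and grid_size g,
#   kps, score = generate_kpts_regular_grid_random_single(kp_map, grid_size=8, random_select='random')
# picks one candidate per non-overlapping 8x8 cell (sampled from a Categorical over the cell for
# 'random', a jittered cell centre for 'regular_random', or the fixed cell centre otherwise) and
# returns normalized [-1, 1] coordinates plus the keypoint-map response at each pick,
# both with spatial shape (h//g)x(w//g).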
@torch.no_grad()
def epipolar_line_search(coord, Fmat, feat1, featmap2, h, w, line_step=100, use_nn=True, loc_rand=True, window_size=0.125, visualize=False):
batch_size, n_dim, h2, w2 = featmap2.shape
n_pts = coord.shape[1]
endpoints_1_n, endpoints_2_n, valid=get_endpoints(coord, Fmat, h, w)
sample_grids = torch.stack([torch.linspace(0., 1., line_step), torch.linspace(0., 1., line_step)], -1).to(coord.device) # stepx2
line_len = endpoints_2_n - endpoints_1_n #bxnx2
# weight_len = (line_len[:,:,0]**2+line_len[:,:,1]**2).sqrt() #bxn decide the weight according to the epipolar line length, which belongs to [0, 2*sqrt(2)]
sample_grids = line_len[:,:,None,:]*sample_grids[None,None,:,:] #bxnxstepx2
sample_grids = sample_grids+endpoints_1_n[:,:,None,:]
sample_points = F.grid_sample(featmap2, sample_grids, padding_mode='border', align_corners=False).permute(0, 2, 3, 1) # Bxnxstepxd
prob = compute_prob(feat1.reshape(batch_size*n_pts, 1, n_dim),
sample_points.reshape(batch_size*n_pts, line_step, n_dim)).reshape(batch_size, n_pts, line_step)
# expected_coord = torch.sum(sample_grids * prob.unsqueeze(-1), dim=2) # Bxnx2
if use_nn:
mask = prob==prob.max(-1,True)[0]
expected_coord = (mask.unsqueeze(-1)*sample_grids).sum(2) # bxnx2
else:
expected_coord = (prob.unsqueeze(-1)*sample_grids).sum(2) # Bxnx2
    expected_coord_org = expected_coord
    if loc_rand:
        expected_coord = expected_coord + 0.707*window_size*(2*torch.rand(expected_coord.shape).type_as(expected_coord)-1)
    border_mask = (expected_coord[:,:,0]>=-1) & (expected_coord[:,:,0]<=1) & (expected_coord[:,:,1]>=-1) & (expected_coord[:,:,1]<=1)
    valid = valid & border_mask
var = torch.sum(sample_grids**2 * prob.unsqueeze(-1), dim=2) - expected_coord**2 # Bxnx2
std = torch.sum(torch.sqrt(torch.clamp(var, min=1e-10)), -1)
if visualize:
return expected_coord, expected_coord_org, valid, std, prob
else:
return expected_coord, expected_coord_org, valid, std
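# Explanatory note: for every query point the function samples `line_step` evenly spaced
# locations along its epipolar line in image 2 (clipped to the image border by get_endpoints),
# scores them against the query descriptor via compute_prob, and takes either the best-scoring
# sample (use_nn=True) or the probability-weighted expectation as the coarse correspondence.
# With loc_rand=True the result is jittered by up to 0.707*window_size, presumably so the true
# match still falls inside the subsequent window refinement while adding randomness.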
@torch.no_grad()
def get_endpoints(coords, Fmat, h, w):
'''
return endpoints1 endpoints2 bxnx2
return valid bxn
'''
batch_size, n_pts, _ = coords.shape
coord_h = homogenize(coords).transpose(1, 2)
epipolar_line = Fmat.bmm(coord_h)
a = epipolar_line[:,0,:] #Bxn
b = epipolar_line[:,1,:]
c = epipolar_line[:,2,:]
point_l = torch.stack([torch.zeros_like(a), -c/b], -1) #Bxnx2
point_r = torch.stack([(w-1)*torch.ones_like(a), -(a*(w-1)+c)/b], -1)
point_u = torch.stack([-(b*(h-1)+c)/a, (h-1)*torch.ones_like(a)], -1)
point_b = torch.stack([-c/a, torch.zeros_like(a)], -1)
points = torch.stack([point_l, point_r, point_u, point_b], -1).transpose(2,3) #Bxnx4x2
mask = (points[:,:,:,0]>=0) & (points[:,:,:,0]<=w-1) & (points[:,:,:,1]>=0) & (points[:,:,:,1]<=h-1) #Bxnx4
valid = mask.sum(-1) == 2 #Bxn
mask[~valid] = torch.tensor([True, True, False, False]).to(mask.device)
points = points[mask].reshape(batch_size, n_pts, 2, 2)
points1 = points[:,:,0,:]
points2 = points[:,:,1,:]
return normalize_coords(points1,h,w), normalize_coords(points2,h,w), valid
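# Worked relation behind the code above: the epipolar line of x is l = F x = (a, b, c)^T and
# pixels (u, v) on it satisfy a*u + b*v + c = 0. Its candidate intersections with the image
# border are u=0 -> v=-c/b, u=w-1 -> v=-(a*(w-1)+c)/b, v=h-1 -> u=-(b*(h-1)+c)/a and
# v=0 -> u=-c/a; a line that actually crosses the image hits exactly two of them (valid),
# otherwise a placeholder pair is returned and the point is flagged invalid.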
def get_expected_correspondence_within_window(feat1, featmap2, coord2_n, window_size, with_std=False, with_sim=False):
'''
:param feat1: the feature vectors of query points [batch_size, n_pts, n_dim]
:param featmap2: the feature maps of the reference image [batch_size, n_dim, h, w]
:param coord2_n: normalized center locations [batch_size, n_pts, 2]
:param with_std: if True, return the standard deviation
:return: the normalized expected correspondence locations, [batch_size, n_pts, 2], optionally with std
'''
batch_size, n_dim, h2, w2 = featmap2.shape
n_pts = coord2_n.shape[1]
grid_n = gen_grid(h_min=-window_size, h_max=window_size,
w_min=-window_size, w_max=window_size,
len_h=int(window_size*h2), len_w=int(window_size*w2))
grid_n_ = grid_n.repeat(batch_size, 1, 1, 1).to(coord2_n) # Bx1xhwx2
coord2_n_grid = coord2_n.unsqueeze(-2) + grid_n_ # Bxnxhwx2
feat2_win = F.grid_sample(featmap2, coord2_n_grid, padding_mode='zeros', align_corners=False).permute(0, 2, 3, 1) # Bxnxhwxd
feat1 = feat1.unsqueeze(-2)
prob, sim = compute_prob(feat1.reshape(batch_size*n_pts, -1, n_dim),
feat2_win.reshape(batch_size*n_pts, -1, n_dim), return_sim=True)#.reshape(batch_size, n_pts, -1)
prob = prob.reshape(batch_size, n_pts, -1)
expected_coord2_n = torch.sum(coord2_n_grid * prob.unsqueeze(-1), dim=2) # Bxnx2
re_list = [expected_coord2_n, coord2_n_grid]
if with_std:
var = torch.sum(coord2_n_grid**2 * prob.unsqueeze(-1), dim=2) - expected_coord2_n**2 # Bxnx2
std = torch.sum(torch.sqrt(torch.clamp(var, min=1e-10)), -1) # Bxn
# return expected_coord2_n, coord2_n_grid, std, prob
re_list.append(std)
re_list.append(prob)
# else:
# return expected_coord2_n, coord2_n_grid
if with_sim:
re_list.append(sim.reshape(batch_size, n_pts, int(window_size*h2), int(window_size*w2)))
return tuple(re_list)
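# Usage sketch (hypothetical shapes, CAPS-style local refinement): given per-point query
# descriptors feat1 [B, n, d], a dense feature map featmap2 [B, d, h, w] and window centres
# coord2_n [B, n, 2] in [-1, 1] coordinates,
#   exp_xy, grid, std, prob = get_expected_correspondence_within_window(
#       feat1, featmap2, coord2_n, window_size=0.125, with_std=True)
# returns the probability-weighted expected match location inside each window together with
# the per-point standard deviation of that matching distribution.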
@torch.no_grad()
def generate_kpts_disk(inputs, outputs, grid_size, keep_spatial=False):
preds1 = outputs['preds1']
preds2 = outputs['preds2']
kp_map1, kp_map2 = preds1['local_point'], preds2['local_point']
kps1, logp1, accept_mask1 = generate_kpts_disk_single(kp_map1, grid_size)
kps2, logp2, accept_mask2 = generate_kpts_disk_single(kp_map2, grid_size)
return kps1, kps2, logp1, logp2
def generate_kpts_disk_single(kp_map, grid_size):
b,_,h,w = kp_map.shape
grids_org = gen_grid(h_min=-1, h_max=1, w_min=-1, w_max=1, len_h=h, len_w=w)
grids_org = grids_org.reshape(h, w, 2)[None, :, :, :].repeat(b, 1, 1, 1).to(kp_map)
grids_org = grids_org.permute(0,3,1,2)
grids_unfold = unfold(grids_org, grid_size) # bx2x(h//g)x(w//g)x(g*g)
kpmap_unfold = unfold(kp_map, grid_size)
proposal_dist = Categorical(logits=kpmap_unfold)
proposals = proposal_dist.sample() # bx1x(h//g)x(w//g)
proposal_logp = proposal_dist.log_prob(proposals)
    accept_logits = torch.gather(kpmap_unfold, dim=-1, index=proposals[..., None]).squeeze(-1) # bx1x(h//g)x(w//g)
accept_dist = Bernoulli(logits=accept_logits)
accept_samples = accept_dist.sample() # bx1x(h//g)x(w//g)
    accept_logp = accept_dist.log_prob(accept_samples) # log(sigmoid(logit)) for accepted points, log(1 - sigmoid(logit)) for rejected ones
accept_mask = accept_samples == 1.
logp = proposal_logp + accept_logp
kps = grids_unfold.gather(dim=4, index=proposals.unsqueeze(-1).repeat(1,2,1,1,1))
return kps, logp, accept_mask
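# Explanatory note (DISK-style sampling): each grid cell proposes one pixel via a Categorical
# over the cell's logits, and a Bernoulli on that pixel's logit decides whether to keep it;
# `logp` (proposal log-prob plus accept log-prob) is the quantity a policy-gradient style
# objective would differentiate, while `accept_mask` marks the keypoints that survived.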
def mnn_matcher(descriptors_a, descriptors_b):
device = descriptors_a.device
sim = descriptors_a @ descriptors_b.t()
nn12 = torch.max(sim, dim=1)[1]
nn21 = torch.max(sim, dim=0)[1]
ids1 = torch.arange(0, sim.shape[0], device=device)
mask = (ids1 == nn21[nn12])
matches = torch.stack([ids1[mask], nn12[mask]])
return matches.t().data.cpu().numpy()
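# Usage sketch (hypothetical descriptors, assumed L2-normalized so the dot product acts as a
# cosine similarity):
#   da = F.normalize(torch.randn(500, 128), dim=1)
#   db = F.normalize(torch.randn(600, 128), dim=1)
#   matches = mnn_matcher(da, db)   # -> np.ndarray [K, 2] of mutually-nearest index pairs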
def cycle(iterable):
while True:
for x in iterable:
yield x | 34,450 | 41.637376 | 159 | py |
PoSFeat | PoSFeat-main/losses/preprocess.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from . import preprocess_utils as putils
from .preprocess_utils import *
class Preprocess_Line2Window(nn.Module):
'''
the preprocess class for grid-with-line pipeline
'''
def __init__(self, configs, device=None, vis=False):
super(Preprocess_Line2Window, self).__init__()
self.__lossname__ = 'Preprocess_Line2Window'
self.config = configs
self.kps_generator = getattr(putils, self.config['kps_generator'])
self.t_base = self.config['temperature_base']
self.t_max = self.config['temperature_max']
if device is not None:
self.device = device
def name(self):
return self.__lossname__
def forward(self, inputs, outputs):
preds1 = outputs['preds1']
preds2 = outputs['preds2']
xc1, xf1 = preds1['global_map'], preds1['local_map']
xc2, xf2 = preds2['global_map'], preds2['local_map']
h1i, w1i = inputs['im1'].size()[2:]
h2i, w2i = inputs['im2'].size()[2:]
b, _, hf, wf = xf1.shape
temperature = min(self.t_base + outputs['epoch'], self.t_max)
"""
firstly, we search locate the correspondence with grid points
with keep_spatial==True, coord (score) is with bxhxwx2 (bxhxwx1)
with keep_spatial==False, coord (score) is with bx(h*w)x2 (bx(h*w)x1)
the keep_spatial is defined in self.config['kps_generator_config']
首先,我们随所有的抽样点进行匹配搜索
在配置文件中有一个选项 keep_spatial 可以控制输出的抽样点的shape
This is a coarse search with grid points matching,which is similar to the coarse search in caps
in fact this coarse matching is just for ablation, and the results are not used in the final loss
you can comment out this search
这里包含了一部分粗略匹配的代码,类似于CAPS中的粗略匹配
粗匹配的结果是最开始实验时进行的探索,实际上并没有用于最后的损失函数计算
可以注释掉粗略匹配的代码
"""
coord1_n, coord2_n, score1, score2 = self.kps_generator(inputs, outputs, **self.config['kps_generator_config'])
_, hkps, wkps, _ = coord1_n.shape
coord1 = denormalize_coords(coord1_n.reshape(b,-1,2), h1i, w1i)
coord2 = denormalize_coords(coord2_n.reshape(b,-1,2), h2i, w2i)
feat1_fine = sample_feat_by_coord(xf1, coord1_n.reshape(b,-1,2), self.config['loss_distance']=='cos')
feat2_fine = sample_feat_by_coord(xf2, coord2_n.reshape(b,-1,2), self.config['loss_distance']=='cos')
cos_sim = feat1_fine @ feat2_fine.transpose(1,2) # bxmxn
feat1g_corloc = (F.softmax(temperature*cos_sim, dim=2)).unsqueeze(-1)*coord2.reshape(b,-1,2).unsqueeze(1) #bxmxnx2
feat1g_corloc = feat1g_corloc.sum(2) #bxmx2
feat2g_corloc = (F.softmax(temperature*cos_sim, dim=1)).unsqueeze(-1)*coord1.reshape(b,-1,2).unsqueeze(2) #bxmxnx2
feat2g_corloc = feat2g_corloc.sum(1) #bxnx2
with torch.no_grad():
if self.config['use_nn_grid']:
                _, max_idx1 = cos_sim.max(2)
feat1g_corloc_n = coord2_n.reshape(b,-1,2).gather(dim=1, index=max_idx1[:,:,None].repeat(1,1,2))
                _, max_idx2 = cos_sim.max(1)
feat2g_corloc_n = coord1_n.reshape(b,-1,2).gather(dim=1, index=max_idx2[:,:,None].repeat(1,1,2))
else:
feat1g_corloc_n = normalize_coords(feat1g_corloc, h2i, w2i)
feat2g_corloc_n = normalize_coords(feat2g_corloc, h1i, w1i)
feat1g_std = (F.softmax(temperature*cos_sim, dim=2)).unsqueeze(-1)*(coord2_n.reshape(b,1,-1,2)**2)
feat1g_std = feat1g_std.sum(2) - (feat1g_corloc_n**2)
feat1g_std = feat1g_std.clamp(min=1e-6).sqrt().sum(-1) #bxn
feat2g_std = (F.softmax(temperature*cos_sim, dim=1)).unsqueeze(-1)*(coord1_n.reshape(b,-1,1,2)**2)
feat2g_std = feat2g_std.sum(1) - (feat2g_corloc_n**2)
feat2g_std = feat2g_std.clamp(min=1e-6).sqrt().sum(-1) #bxn
if self.config['use_line_search']:
feat1_c_corloc_n_, feat1_c_corloc_n_org, valid1, epi_std1 = epipolar_line_search(coord1, inputs['F1'], feat1_fine,
temperature*F.normalize(xf2,p=2.0,dim=1), h2i, w2i, window_size=self.config['window_size'], **self.config['line_search_config'])
feat2_c_corloc_n_, feat2_c_corloc_n_org, valid2, epi_std2 = epipolar_line_search(coord2, inputs['F2'], feat2_fine,
temperature*F.normalize(xf1,p=2.0,dim=1), h1i, w1i, window_size=self.config['window_size'], **self.config['line_search_config'])
feat1c_corloc_org = denormalize_coords(feat1_c_corloc_n_org, h2i, w2i)
feat2c_corloc_org = denormalize_coords(feat2_c_corloc_n_org, h1i, w1i)
else:
feat1_c_corloc_n_ = feat1g_corloc_n.detach()
feat2_c_corloc_n_ = feat2g_corloc_n.detach()
feat1c_corloc_org = feat1_c_corloc_n_
feat2c_corloc_org = feat2_c_corloc_n_
valid1 = torch.ones_like(feat1g_std).bool()
valid2 = torch.ones_like(feat2g_std).bool()
feat1w_corloc_n, window_coords_n_1in2, feat1w_std, _ = get_expected_correspondence_within_window(
feat1_fine, temperature*F.normalize(xf2,p=2.0,dim=1), feat1_c_corloc_n_, self.config['window_size'], with_std=True)
feat2w_corloc_n, window_coords_n_2in1, feat2w_std, _ = get_expected_correspondence_within_window(
feat2_fine, temperature*F.normalize(xf1,p=2.0,dim=1), feat2_c_corloc_n_, self.config['window_size'], with_std=True)
feat1w_corloc = denormalize_coords(feat1w_corloc_n, h2i, w2i)
feat2w_corloc = denormalize_coords(feat2w_corloc_n, h1i, w1i)
return {
'coord1':coord1, 'coord2':coord2,
'feat1g_corloc':feat1g_corloc,
'feat2g_corloc':feat2g_corloc,
'feat1w_corloc':feat1w_corloc,
'feat2w_corloc':feat2w_corloc,
'feat1c_corloc_org':feat1c_corloc_org,
            'feat2c_corloc_org':feat2c_corloc_org,
'feat1g_std':feat1g_std, 'feat2g_std':feat2g_std,
'feat1w_std':feat1w_std, 'feat2w_std':feat2w_std,
'temperature':temperature,
'valid_epi1':valid1, 'valid_epi2':valid2
}
class Preprocess_Skip(nn.Module):
'''
the preprocess class for keypoint detection net training
'''
def __init__(self, **kargs):
super(Preprocess_Skip, self).__init__()
self.__lossname__ = 'Preprocess_Skip'
def forward(self, inputs, outputs):
return None
| 6,528 | 49.223077 | 144 | py |
Inductive-representation-learning-on-temporal-graphs | Inductive-representation-learning-on-temporal-graphs-master/module.py | import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class MergeLayer(torch.nn.Module):
def __init__(self, dim1, dim2, dim3, dim4):
super().__init__()
#self.layer_norm = torch.nn.LayerNorm(dim1 + dim2)
self.fc1 = torch.nn.Linear(dim1 + dim2, dim3)
self.fc2 = torch.nn.Linear(dim3, dim4)
self.act = torch.nn.ReLU()
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc2.weight)
def forward(self, x1, x2):
x = torch.cat([x1, x2], dim=1)
#x = self.layer_norm(x)
h = self.act(self.fc1(x))
return self.fc2(h)
class ScaledDotProductAttention(torch.nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = torch.nn.Dropout(attn_dropout)
self.softmax = torch.nn.Softmax(dim=2)
def forward(self, q, k, v, mask=None):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -1e10)
attn = self.softmax(attn) # [n * b, l_q, l_k]
attn = self.dropout(attn) # [n * b, l_v, d]
output = torch.bmm(attn, v)
return output, attn
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5), attn_dropout=dropout)
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
output, attn = self.attention(q, k, v, mask=mask)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
#output = self.layer_norm(output)
return output, attn
class MapBasedMultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.wq_node_transform = nn.Linear(d_model, n_head * d_k, bias=False)
self.wk_node_transform = nn.Linear(d_model, n_head * d_k, bias=False)
self.wv_node_transform = nn.Linear(d_model, n_head * d_k, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
self.act = nn.LeakyReLU(negative_slope=0.2)
self.weight_map = nn.Linear(2 * d_k, 1, bias=False)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = torch.nn.Dropout(dropout)
self.softmax = torch.nn.Softmax(dim=2)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
residual = q
q = self.wq_node_transform(q).view(sz_b, len_q, n_head, d_k)
k = self.wk_node_transform(k).view(sz_b, len_k, n_head, d_k)
v = self.wv_node_transform(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
q = torch.unsqueeze(q, dim=2) # [(n*b), lq, 1, dk]
q = q.expand(q.shape[0], q.shape[1], len_k, q.shape[3]) # [(n*b), lq, lk, dk]
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
k = torch.unsqueeze(k, dim=1) # [(n*b), 1, lk, dk]
k = k.expand(k.shape[0], len_q, k.shape[2], k.shape[3]) # [(n*b), lq, lk, dk]
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
mask = mask.repeat(n_head, 1, 1) # (n*b) x lq x lk
## Map based Attention
#output, attn = self.attention(q, k, v, mask=mask)
q_k = torch.cat([q, k], dim=3) # [(n*b), lq, lk, dk * 2]
attn = self.weight_map(q_k).squeeze(dim=3) # [(n*b), lq, lk]
if mask is not None:
attn = attn.masked_fill(mask, -1e10)
attn = self.softmax(attn) # [n * b, l_q, l_k]
attn = self.dropout(attn) # [n * b, l_q, l_k]
# [n * b, l_q, l_k] * [n * b, l_v, d_v] >> [n * b, l_q, d_v]
output = torch.bmm(attn, v)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
output = self.dropout(self.act(self.fc(output)))
output = self.layer_norm(output + residual)
return output, attn
def expand_last_dim(x, num):
view_size = list(x.size()) + [1]
expand_size = list(x.size()) + [num]
return x.view(view_size).expand(expand_size)
class TimeEncode(torch.nn.Module):
def __init__(self, expand_dim, factor=5):
super(TimeEncode, self).__init__()
#init_len = np.array([1e8**(i/(time_dim-1)) for i in range(time_dim)])
time_dim = expand_dim
self.factor = factor
self.basis_freq = torch.nn.Parameter((torch.from_numpy(1 / 10 ** np.linspace(0, 9, time_dim))).float())
self.phase = torch.nn.Parameter(torch.zeros(time_dim).float())
#self.dense = torch.nn.Linear(time_dim, expand_dim, bias=False)
#torch.nn.init.xavier_normal_(self.dense.weight)
def forward(self, ts):
# ts: [N, L]
batch_size = ts.size(0)
seq_len = ts.size(1)
ts = ts.view(batch_size, seq_len, 1)# [N, L, 1]
map_ts = ts * self.basis_freq.view(1, 1, -1) # [N, L, time_dim]
map_ts += self.phase.view(1, 1, -1)
harmonic = torch.cos(map_ts)
return harmonic #self.dense(harmonic)
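# Explanatory note: this is the functional time encoding Phi(t) = cos(t * omega + phi) with
# learnable frequencies omega initialized to 1 / 10^linspace(0, 9, d), so both short and very
# long time gaps map to distinguishable harmonics. Illustrative call (hypothetical sizes):
#   enc = TimeEncode(expand_dim=16)
#   harm = enc(torch.rand(8, 20))   # time deltas [N, L] -> encodings [8, 20, 16]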
class PosEncode(torch.nn.Module):
def __init__(self, expand_dim, seq_len):
super().__init__()
self.pos_embeddings = nn.Embedding(num_embeddings=seq_len, embedding_dim=expand_dim)
def forward(self, ts):
# ts: [N, L]
order = ts.argsort()
ts_emb = self.pos_embeddings(order)
return ts_emb
class EmptyEncode(torch.nn.Module):
def __init__(self, expand_dim):
super().__init__()
self.expand_dim = expand_dim
def forward(self, ts):
out = torch.zeros_like(ts).float()
out = torch.unsqueeze(out, dim=-1)
out = out.expand(out.shape[0], out.shape[1], self.expand_dim)
return out
class LSTMPool(torch.nn.Module):
def __init__(self, feat_dim, edge_dim, time_dim):
super(LSTMPool, self).__init__()
self.feat_dim = feat_dim
self.time_dim = time_dim
self.edge_dim = edge_dim
self.att_dim = feat_dim + edge_dim + time_dim
self.act = torch.nn.ReLU()
self.lstm = torch.nn.LSTM(input_size=self.att_dim,
hidden_size=self.feat_dim,
num_layers=1,
batch_first=True)
self.merger = MergeLayer(feat_dim, feat_dim, feat_dim, feat_dim)
def forward(self, src, src_t, seq, seq_t, seq_e, mask):
# seq [B, N, D]
# mask [B, N]
seq_x = torch.cat([seq, seq_e, seq_t], dim=2)
_, (hn, _) = self.lstm(seq_x)
hn = hn[-1, :, :] #hn.squeeze(dim=0)
out = self.merger.forward(hn, src)
return out, None
class MeanPool(torch.nn.Module):
def __init__(self, feat_dim, edge_dim):
super(MeanPool, self).__init__()
self.edge_dim = edge_dim
self.feat_dim = feat_dim
self.act = torch.nn.ReLU()
self.merger = MergeLayer(edge_dim + feat_dim, feat_dim, feat_dim, feat_dim)
def forward(self, src, src_t, seq, seq_t, seq_e, mask):
# seq [B, N, D]
# mask [B, N]
src_x = src
seq_x = torch.cat([seq, seq_e], dim=2) #[B, N, De + D]
hn = seq_x.mean(dim=1) #[B, De + D]
output = self.merger(hn, src_x)
return output, None
class AttnModel(torch.nn.Module):
"""Attention based temporal layers
"""
def __init__(self, feat_dim, edge_dim, time_dim,
attn_mode='prod', n_head=2, drop_out=0.1):
"""
args:
feat_dim: dim for the node features
edge_dim: dim for the temporal edge features
time_dim: dim for the time encoding
attn_mode: choose from 'prod' and 'map'
n_head: number of heads in attention
            drop_out: dropout rate, i.e. the probability of dropping a neuron
"""
super(AttnModel, self).__init__()
self.feat_dim = feat_dim
self.time_dim = time_dim
self.edge_in_dim = (feat_dim + edge_dim + time_dim)
self.model_dim = self.edge_in_dim
#self.edge_fc = torch.nn.Linear(self.edge_in_dim, self.feat_dim, bias=False)
self.merger = MergeLayer(self.model_dim, feat_dim, feat_dim, feat_dim)
#self.act = torch.nn.ReLU()
assert(self.model_dim % n_head == 0)
self.logger = logging.getLogger(__name__)
self.attn_mode = attn_mode
if attn_mode == 'prod':
self.multi_head_target = MultiHeadAttention(n_head,
d_model=self.model_dim,
d_k=self.model_dim // n_head,
d_v=self.model_dim // n_head,
dropout=drop_out)
self.logger.info('Using scaled prod attention')
elif attn_mode == 'map':
self.multi_head_target = MapBasedMultiHeadAttention(n_head,
d_model=self.model_dim,
d_k=self.model_dim // n_head,
d_v=self.model_dim // n_head,
dropout=drop_out)
self.logger.info('Using map based attention')
else:
raise ValueError('attn_mode can only be prod or map')
def forward(self, src, src_t, seq, seq_t, seq_e, mask):
""""Attention based temporal attention forward pass
args:
src: float Tensor of shape [B, D]
src_t: float Tensor of shape [B, Dt], Dt == D
seq: float Tensor of shape [B, N, D]
seq_t: float Tensor of shape [B, N, Dt]
seq_e: float Tensor of shape [B, N, De], De == D
mask: boolean Tensor of shape [B, N], where the true value indicate a null value in the sequence.
returns:
output, weight
output: float Tensor of shape [B, D]
weight: float Tensor of shape [B, N]
"""
src_ext = torch.unsqueeze(src, dim=1) # src [B, 1, D]
src_e_ph = torch.zeros_like(src_ext)
q = torch.cat([src_ext, src_e_ph, src_t], dim=2) # [B, 1, D + De + Dt] -> [B, 1, D]
k = torch.cat([seq, seq_e, seq_t], dim=2) # [B, 1, D + De + Dt] -> [B, 1, D]
mask = torch.unsqueeze(mask, dim=2) # mask [B, N, 1]
mask = mask.permute([0, 2, 1]) #mask [B, 1, N]
# # target-attention
output, attn = self.multi_head_target(q=q, k=k, v=k, mask=mask) # output: [B, 1, D + Dt], attn: [B, 1, N]
output = output.squeeze()
attn = attn.squeeze()
output = self.merger(output, src)
return output, attn
class TGAN(torch.nn.Module):
def __init__(self, ngh_finder, n_feat, e_feat,
attn_mode='prod', use_time='time', agg_method='attn', node_dim=None, time_dim=None,
num_layers=3, n_head=4, null_idx=0, num_heads=1, drop_out=0.1, seq_len=None):
super(TGAN, self).__init__()
self.num_layers = num_layers
self.ngh_finder = ngh_finder
self.null_idx = null_idx
self.logger = logging.getLogger(__name__)
self.n_feat_th = torch.nn.Parameter(torch.from_numpy(n_feat.astype(np.float32)))
self.e_feat_th = torch.nn.Parameter(torch.from_numpy(e_feat.astype(np.float32)))
self.edge_raw_embed = torch.nn.Embedding.from_pretrained(self.e_feat_th, padding_idx=0, freeze=True)
self.node_raw_embed = torch.nn.Embedding.from_pretrained(self.n_feat_th, padding_idx=0, freeze=True)
self.feat_dim = self.n_feat_th.shape[1]
self.n_feat_dim = self.feat_dim
self.e_feat_dim = self.feat_dim
self.model_dim = self.feat_dim
self.use_time = use_time
self.merge_layer = MergeLayer(self.feat_dim, self.feat_dim, self.feat_dim, self.feat_dim)
if agg_method == 'attn':
self.logger.info('Aggregation uses attention model')
self.attn_model_list = torch.nn.ModuleList([AttnModel(self.feat_dim,
self.feat_dim,
self.feat_dim,
attn_mode=attn_mode,
n_head=n_head,
drop_out=drop_out) for _ in range(num_layers)])
elif agg_method == 'lstm':
self.logger.info('Aggregation uses LSTM model')
self.attn_model_list = torch.nn.ModuleList([LSTMPool(self.feat_dim,
self.feat_dim,
self.feat_dim) for _ in range(num_layers)])
elif agg_method == 'mean':
self.logger.info('Aggregation uses constant mean model')
self.attn_model_list = torch.nn.ModuleList([MeanPool(self.feat_dim,
self.feat_dim) for _ in range(num_layers)])
else:
            raise ValueError('invalid agg_method value, use attn, lstm or mean')
if use_time == 'time':
self.logger.info('Using time encoding')
self.time_encoder = TimeEncode(expand_dim=self.n_feat_th.shape[1])
elif use_time == 'pos':
assert(seq_len is not None)
self.logger.info('Using positional encoding')
self.time_encoder = PosEncode(expand_dim=self.n_feat_th.shape[1], seq_len=seq_len)
elif use_time == 'empty':
self.logger.info('Using empty encoding')
self.time_encoder = EmptyEncode(expand_dim=self.n_feat_th.shape[1])
else:
raise ValueError('invalid time option!')
self.affinity_score = MergeLayer(self.feat_dim, self.feat_dim, self.feat_dim, 1) #torch.nn.Bilinear(self.feat_dim, self.feat_dim, 1, bias=True)
def forward(self, src_idx_l, target_idx_l, cut_time_l, num_neighbors=20):
src_embed = self.tem_conv(src_idx_l, cut_time_l, self.num_layers, num_neighbors)
target_embed = self.tem_conv(target_idx_l, cut_time_l, self.num_layers, num_neighbors)
score = self.affinity_score(src_embed, target_embed).squeeze(dim=-1)
return score
def contrast(self, src_idx_l, target_idx_l, background_idx_l, cut_time_l, num_neighbors=20):
src_embed = self.tem_conv(src_idx_l, cut_time_l, self.num_layers, num_neighbors)
target_embed = self.tem_conv(target_idx_l, cut_time_l, self.num_layers, num_neighbors)
background_embed = self.tem_conv(background_idx_l, cut_time_l, self.num_layers, num_neighbors)
pos_score = self.affinity_score(src_embed, target_embed).squeeze(dim=-1)
neg_score = self.affinity_score(src_embed, background_embed).squeeze(dim=-1)
return pos_score.sigmoid(), neg_score.sigmoid()
def tem_conv(self, src_idx_l, cut_time_l, curr_layers, num_neighbors=20):
assert(curr_layers >= 0)
device = self.n_feat_th.device
batch_size = len(src_idx_l)
src_node_batch_th = torch.from_numpy(src_idx_l).long().to(device)
cut_time_l_th = torch.from_numpy(cut_time_l).float().to(device)
cut_time_l_th = torch.unsqueeze(cut_time_l_th, dim=1)
# query node always has the start time -> time span == 0
src_node_t_embed = self.time_encoder(torch.zeros_like(cut_time_l_th))
src_node_feat = self.node_raw_embed(src_node_batch_th)
if curr_layers == 0:
return src_node_feat
else:
src_node_conv_feat = self.tem_conv(src_idx_l,
cut_time_l,
curr_layers=curr_layers - 1,
num_neighbors=num_neighbors)
src_ngh_node_batch, src_ngh_eidx_batch, src_ngh_t_batch = self.ngh_finder.get_temporal_neighbor(
src_idx_l,
cut_time_l,
num_neighbors=num_neighbors)
src_ngh_node_batch_th = torch.from_numpy(src_ngh_node_batch).long().to(device)
src_ngh_eidx_batch = torch.from_numpy(src_ngh_eidx_batch).long().to(device)
src_ngh_t_batch_delta = cut_time_l[:, np.newaxis] - src_ngh_t_batch
src_ngh_t_batch_th = torch.from_numpy(src_ngh_t_batch_delta).float().to(device)
# get previous layer's node features
src_ngh_node_batch_flat = src_ngh_node_batch.flatten() #reshape(batch_size, -1)
src_ngh_t_batch_flat = src_ngh_t_batch.flatten() #reshape(batch_size, -1)
src_ngh_node_conv_feat = self.tem_conv(src_ngh_node_batch_flat,
src_ngh_t_batch_flat,
curr_layers=curr_layers - 1,
num_neighbors=num_neighbors)
src_ngh_feat = src_ngh_node_conv_feat.view(batch_size, num_neighbors, -1)
# get edge time features and node features
src_ngh_t_embed = self.time_encoder(src_ngh_t_batch_th)
src_ngn_edge_feat = self.edge_raw_embed(src_ngh_eidx_batch)
# attention aggregation
mask = src_ngh_node_batch_th == 0
attn_m = self.attn_model_list[curr_layers - 1]
local, weight = attn_m(src_node_conv_feat,
src_node_t_embed,
src_ngh_feat,
src_ngh_t_embed,
src_ngn_edge_feat,
mask)
return local | 20,815 | 39.030769 | 151 | py |
Inductive-representation-learning-on-temporal-graphs | Inductive-representation-learning-on-temporal-graphs-master/graph.py | import numpy as np
import torch
class NeighborFinder:
def __init__(self, adj_list, uniform=False):
"""
Params
------
node_idx_l: List[int]
node_ts_l: List[int]
off_set_l: List[int], such that node_idx_l[off_set_l[i]:off_set_l[i + 1]] = adjacent_list[i]
"""
node_idx_l, node_ts_l, edge_idx_l, off_set_l = self.init_off_set(adj_list)
self.node_idx_l = node_idx_l
self.node_ts_l = node_ts_l
self.edge_idx_l = edge_idx_l
self.off_set_l = off_set_l
self.uniform = uniform
def init_off_set(self, adj_list):
"""
Params
------
adj_list: List[List[int]]
"""
n_idx_l = []
n_ts_l = []
e_idx_l = []
off_set_l = [0]
for i in range(len(adj_list)):
curr = adj_list[i]
curr = sorted(curr, key=lambda x: x[1])
n_idx_l.extend([x[0] for x in curr])
e_idx_l.extend([x[1] for x in curr])
n_ts_l.extend([x[2] for x in curr])
off_set_l.append(len(n_idx_l))
n_idx_l = np.array(n_idx_l)
n_ts_l = np.array(n_ts_l)
e_idx_l = np.array(e_idx_l)
off_set_l = np.array(off_set_l)
assert(len(n_idx_l) == len(n_ts_l))
assert(off_set_l[-1] == len(n_ts_l))
return n_idx_l, n_ts_l, e_idx_l, off_set_l
def find_before(self, src_idx, cut_time):
"""
Params
------
src_idx: int
cut_time: float
"""
node_idx_l = self.node_idx_l
node_ts_l = self.node_ts_l
edge_idx_l = self.edge_idx_l
off_set_l = self.off_set_l
neighbors_idx = node_idx_l[off_set_l[src_idx]:off_set_l[src_idx + 1]]
neighbors_ts = node_ts_l[off_set_l[src_idx]:off_set_l[src_idx + 1]]
neighbors_e_idx = edge_idx_l[off_set_l[src_idx]:off_set_l[src_idx + 1]]
if len(neighbors_idx) == 0 or len(neighbors_ts) == 0:
            return neighbors_idx, neighbors_e_idx, neighbors_ts  # keep the (idx, eidx, ts) order of the non-empty returns
left = 0
right = len(neighbors_idx) - 1
while left + 1 < right:
mid = (left + right) // 2
curr_t = neighbors_ts[mid]
if curr_t < cut_time:
left = mid
else:
right = mid
if neighbors_ts[right] < cut_time:
return neighbors_idx[:right], neighbors_e_idx[:right], neighbors_ts[:right]
else:
return neighbors_idx[:left], neighbors_e_idx[:left], neighbors_ts[:left]
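    # Explanatory note: each node's neighbor list is sorted by timestamp in init_off_set, so the
    # binary search above locates the split point and everything that happened strictly before
    # cut_time is returned; the three arrays are aligned index-for-index as
    # (neighbor id, edge id, interaction time).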
def get_temporal_neighbor(self, src_idx_l, cut_time_l, num_neighbors=20):
"""
Params
------
src_idx_l: List[int]
cut_time_l: List[float],
num_neighbors: int
"""
assert(len(src_idx_l) == len(cut_time_l))
out_ngh_node_batch = np.zeros((len(src_idx_l), num_neighbors)).astype(np.int32)
out_ngh_t_batch = np.zeros((len(src_idx_l), num_neighbors)).astype(np.float32)
out_ngh_eidx_batch = np.zeros((len(src_idx_l), num_neighbors)).astype(np.int32)
for i, (src_idx, cut_time) in enumerate(zip(src_idx_l, cut_time_l)):
ngh_idx, ngh_eidx, ngh_ts = self.find_before(src_idx, cut_time)
if len(ngh_idx) > 0:
if self.uniform:
sampled_idx = np.random.randint(0, len(ngh_idx), num_neighbors)
out_ngh_node_batch[i, :] = ngh_idx[sampled_idx]
out_ngh_t_batch[i, :] = ngh_ts[sampled_idx]
out_ngh_eidx_batch[i, :] = ngh_eidx[sampled_idx]
# resort based on time
pos = out_ngh_t_batch[i, :].argsort()
out_ngh_node_batch[i, :] = out_ngh_node_batch[i, :][pos]
out_ngh_t_batch[i, :] = out_ngh_t_batch[i, :][pos]
out_ngh_eidx_batch[i, :] = out_ngh_eidx_batch[i, :][pos]
else:
ngh_ts = ngh_ts[:num_neighbors]
ngh_idx = ngh_idx[:num_neighbors]
ngh_eidx = ngh_eidx[:num_neighbors]
assert(len(ngh_idx) <= num_neighbors)
assert(len(ngh_ts) <= num_neighbors)
assert(len(ngh_eidx) <= num_neighbors)
out_ngh_node_batch[i, num_neighbors - len(ngh_idx):] = ngh_idx
out_ngh_t_batch[i, num_neighbors - len(ngh_ts):] = ngh_ts
out_ngh_eidx_batch[i, num_neighbors - len(ngh_eidx):] = ngh_eidx
return out_ngh_node_batch, out_ngh_eidx_batch, out_ngh_t_batch
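    # Usage sketch (hypothetical ids and times):
    #   ngh, eidx, ts = finder.get_temporal_neighbor(np.array([3, 7]), np.array([10.0, 25.0]), num_neighbors=20)
    # yields three [2, 20] arrays. With uniform=True the historical neighbors are sampled with
    # replacement and re-sorted by time; otherwise the time-sorted list is truncated. Results are
    # right-aligned into zero-padded arrays, and node id 0 later serves as the padding entry that
    # the attention layers mask out.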
def find_k_hop(self, k, src_idx_l, cut_time_l, num_neighbors=20):
"""Sampling the k-hop sub graph
"""
x, y, z = self.get_temporal_neighbor(src_idx_l, cut_time_l, num_neighbors)
node_records = [x]
eidx_records = [y]
t_records = [z]
for _ in range(k -1):
ngn_node_est, ngh_t_est = node_records[-1], t_records[-1] # [N, *([num_neighbors] * (k - 1))]
orig_shape = ngn_node_est.shape
ngn_node_est = ngn_node_est.flatten()
ngn_t_est = ngh_t_est.flatten()
out_ngh_node_batch, out_ngh_eidx_batch, out_ngh_t_batch = self.get_temporal_neighbor(ngn_node_est, ngn_t_est, num_neighbors)
out_ngh_node_batch = out_ngh_node_batch.reshape(*orig_shape, num_neighbors) # [N, *([num_neighbors] * k)]
out_ngh_eidx_batch = out_ngh_eidx_batch.reshape(*orig_shape, num_neighbors)
out_ngh_t_batch = out_ngh_t_batch.reshape(*orig_shape, num_neighbors)
node_records.append(out_ngh_node_batch)
eidx_records.append(out_ngh_eidx_batch)
t_records.append(out_ngh_t_batch)
return node_records, eidx_records, t_records
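    # Shape note: the i-th entry of each returned list covers hop i+1 and has shape
    # [N, *([num_neighbors] * (i + 1))], i.e. every sampled neighbor is expanded again at the
    # next hop using its own sampling timestamp.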
| 6,042 | 37.246835 | 136 | py |
Inductive-representation-learning-on-temporal-graphs | Inductive-representation-learning-on-temporal-graphs-master/learn_edge.py | """Unified interface to all dynamic graph model experiments"""
import math
import logging
import time
import random
import sys
import argparse
import torch
import pandas as pd
import numpy as np
#import numba
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from module import TGAN
from graph import NeighborFinder
from utils import EarlyStopMonitor, RandEdgeSampler
### Argument and global variables
parser = argparse.ArgumentParser('Interface for TGAT experiments on link predictions')
parser.add_argument('-d', '--data', type=str, help='data sources to use, try wikipedia or reddit', default='wikipedia')
parser.add_argument('--bs', type=int, default=200, help='batch_size')
parser.add_argument('--prefix', type=str, default='', help='prefix to name the checkpoints')
parser.add_argument('--n_degree', type=int, default=20, help='number of neighbors to sample')
parser.add_argument('--n_head', type=int, default=2, help='number of heads used in attention layer')
parser.add_argument('--n_epoch', type=int, default=50, help='number of epochs')
parser.add_argument('--n_layer', type=int, default=2, help='number of network layers')
parser.add_argument('--lr', type=float, default=0.0001, help='learning rate')
parser.add_argument('--drop_out', type=float, default=0.1, help='dropout probability')
parser.add_argument('--gpu', type=int, default=0, help='idx for the gpu to use')
parser.add_argument('--node_dim', type=int, default=100, help='Dimensions of the node embedding')
parser.add_argument('--time_dim', type=int, default=100, help='Dimensions of the time embedding')
parser.add_argument('--agg_method', type=str, choices=['attn', 'lstm', 'mean'], help='local aggregation method', default='attn')
parser.add_argument('--attn_mode', type=str, choices=['prod', 'map'], default='prod', help='use dot product attention or mapping based')
parser.add_argument('--time', type=str, choices=['time', 'pos', 'empty'], help='how to use time information', default='time')
parser.add_argument('--uniform', action='store_true', help='take uniform sampling from temporal neighbors')
parser.add_argument('--new_node', action='store_true', help='model new node')
try:
args = parser.parse_args()
except:
parser.print_help()
sys.exit(0)
BATCH_SIZE = args.bs
NUM_NEIGHBORS = args.n_degree
NUM_NEG = 1
NUM_EPOCH = args.n_epoch
NUM_HEADS = args.n_head
DROP_OUT = args.drop_out
GPU = args.gpu
UNIFORM = args.uniform
NEW_NODE = args.new_node
USE_TIME = args.time
AGG_METHOD = args.agg_method
ATTN_MODE = args.attn_mode
SEQ_LEN = NUM_NEIGHBORS
DATA = args.data
NUM_LAYER = args.n_layer
LEARNING_RATE = args.lr
NODE_DIM = args.node_dim
TIME_DIM = args.time_dim
MODEL_SAVE_PATH = f'./saved_models/{args.prefix}-{args.agg_method}-{args.attn_mode}-{args.data}.pth'
get_checkpoint_path = lambda epoch: f'./saved_checkpoints/{args.prefix}-{args.agg_method}-{args.attn_mode}-{args.data}-{epoch}.pth'
### set up logger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('log/{}.log'.format(str(time.time())))
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.WARN)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
logger.info(args)
def eval_one_epoch(hint, tgan, sampler, src, dst, ts, label):
val_acc, val_ap, val_f1, val_auc = [], [], [], []
with torch.no_grad():
tgan = tgan.eval()
TEST_BATCH_SIZE=30
num_test_instance = len(src)
num_test_batch = math.ceil(num_test_instance / TEST_BATCH_SIZE)
for k in range(num_test_batch):
# percent = 100 * k / num_test_batch
# if k % int(0.2 * num_test_batch) == 0:
# logger.info('{0} progress: {1:10.4f}'.format(hint, percent))
s_idx = k * TEST_BATCH_SIZE
e_idx = min(num_test_instance - 1, s_idx + TEST_BATCH_SIZE)
src_l_cut = src[s_idx:e_idx]
dst_l_cut = dst[s_idx:e_idx]
ts_l_cut = ts[s_idx:e_idx]
# label_l_cut = label[s_idx:e_idx]
size = len(src_l_cut)
src_l_fake, dst_l_fake = sampler.sample(size)
pos_prob, neg_prob = tgan.contrast(src_l_cut, dst_l_cut, dst_l_fake, ts_l_cut, NUM_NEIGHBORS)
pred_score = np.concatenate([(pos_prob).cpu().numpy(), (neg_prob).cpu().numpy()])
pred_label = pred_score > 0.5
true_label = np.concatenate([np.ones(size), np.zeros(size)])
val_acc.append((pred_label == true_label).mean())
val_ap.append(average_precision_score(true_label, pred_score))
# val_f1.append(f1_score(true_label, pred_label))
val_auc.append(roc_auc_score(true_label, pred_score))
return np.mean(val_acc), np.mean(val_ap), np.mean(val_f1), np.mean(val_auc)
### Load data and train val test split
g_df = pd.read_csv('./processed/ml_{}.csv'.format(DATA))
e_feat = np.load('./processed/ml_{}.npy'.format(DATA))
n_feat = np.load('./processed/ml_{}_node.npy'.format(DATA))
val_time, test_time = list(np.quantile(g_df.ts, [0.70, 0.85]))
src_l = g_df.u.values
dst_l = g_df.i.values
e_idx_l = g_df.idx.values
label_l = g_df.label.values
ts_l = g_df.ts.values
max_src_index = src_l.max()
max_idx = max(src_l.max(), dst_l.max())
random.seed(2020)
total_node_set = set(np.unique(np.hstack([g_df.u.values, g_df.i.values])))
num_total_unique_nodes = len(total_node_set)
mask_node_set = set(random.sample(list(set(src_l[ts_l > val_time]).union(set(dst_l[ts_l > val_time]))), int(0.1 * num_total_unique_nodes)))
mask_src_flag = g_df.u.map(lambda x: x in mask_node_set).values
mask_dst_flag = g_df.i.map(lambda x: x in mask_node_set).values
none_node_flag = (1 - mask_src_flag) * (1 - mask_dst_flag)
valid_train_flag = (ts_l <= val_time) * (none_node_flag > 0)
train_src_l = src_l[valid_train_flag]
train_dst_l = dst_l[valid_train_flag]
train_ts_l = ts_l[valid_train_flag]
train_e_idx_l = e_idx_l[valid_train_flag]
train_label_l = label_l[valid_train_flag]
# define the new nodes sets for testing inductiveness of the model
train_node_set = set(train_src_l).union(train_dst_l)
assert(len(train_node_set - mask_node_set) == len(train_node_set))
new_node_set = total_node_set - train_node_set
# select validation and test dataset
valid_val_flag = (ts_l <= test_time) * (ts_l > val_time)
valid_test_flag = ts_l > test_time
is_new_node_edge = np.array([(a in new_node_set or b in new_node_set) for a, b in zip(src_l, dst_l)])
nn_val_flag = valid_val_flag * is_new_node_edge
nn_test_flag = valid_test_flag * is_new_node_edge
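# Summary of the inductive split built above: val_time / test_time are the 70% / 85% timestamp
# quantiles; 10% of all nodes (drawn from those active after val_time) are masked out of training;
# training keeps edges with ts <= val_time whose endpoints are both unmasked; validation / test
# keep edges in (val_time, test_time] and after test_time; the nn_* variants additionally require
# at least one endpoint never seen in training, which is what the "new node" evaluation uses.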
# validation and test with all edges
val_src_l = src_l[valid_val_flag]
val_dst_l = dst_l[valid_val_flag]
val_ts_l = ts_l[valid_val_flag]
val_e_idx_l = e_idx_l[valid_val_flag]
val_label_l = label_l[valid_val_flag]
test_src_l = src_l[valid_test_flag]
test_dst_l = dst_l[valid_test_flag]
test_ts_l = ts_l[valid_test_flag]
test_e_idx_l = e_idx_l[valid_test_flag]
test_label_l = label_l[valid_test_flag]
# validation and test with edges that at least has one new node (not in training set)
nn_val_src_l = src_l[nn_val_flag]
nn_val_dst_l = dst_l[nn_val_flag]
nn_val_ts_l = ts_l[nn_val_flag]
nn_val_e_idx_l = e_idx_l[nn_val_flag]
nn_val_label_l = label_l[nn_val_flag]
nn_test_src_l = src_l[nn_test_flag]
nn_test_dst_l = dst_l[nn_test_flag]
nn_test_ts_l = ts_l[nn_test_flag]
nn_test_e_idx_l = e_idx_l[nn_test_flag]
nn_test_label_l = label_l[nn_test_flag]
### Initialize the data structure for graph and edge sampling
# build the graph for fast query
# graph only contains the training data (with 10% nodes removal)
adj_list = [[] for _ in range(max_idx + 1)]
for src, dst, eidx, ts in zip(train_src_l, train_dst_l, train_e_idx_l, train_ts_l):
adj_list[src].append((dst, eidx, ts))
adj_list[dst].append((src, eidx, ts))
train_ngh_finder = NeighborFinder(adj_list, uniform=UNIFORM)
# full graph with all the data for the test and validation purpose
full_adj_list = [[] for _ in range(max_idx + 1)]
for src, dst, eidx, ts in zip(src_l, dst_l, e_idx_l, ts_l):
full_adj_list[src].append((dst, eidx, ts))
full_adj_list[dst].append((src, eidx, ts))
full_ngh_finder = NeighborFinder(full_adj_list, uniform=UNIFORM)
train_rand_sampler = RandEdgeSampler(train_src_l, train_dst_l)
val_rand_sampler = RandEdgeSampler(src_l, dst_l)
nn_val_rand_sampler = RandEdgeSampler(nn_val_src_l, nn_val_dst_l)
test_rand_sampler = RandEdgeSampler(src_l, dst_l)
nn_test_rand_sampler = RandEdgeSampler(nn_test_src_l, nn_test_dst_l)
### Model initialize
device = torch.device('cuda:{}'.format(GPU))
tgan = TGAN(train_ngh_finder, n_feat, e_feat,
num_layers=NUM_LAYER, use_time=USE_TIME, agg_method=AGG_METHOD, attn_mode=ATTN_MODE,
seq_len=SEQ_LEN, n_head=NUM_HEADS, drop_out=DROP_OUT, node_dim=NODE_DIM, time_dim=TIME_DIM)
optimizer = torch.optim.Adam(tgan.parameters(), lr=LEARNING_RATE)
criterion = torch.nn.BCELoss()
tgan = tgan.to(device)
num_instance = len(train_src_l)
num_batch = math.ceil(num_instance / BATCH_SIZE)
logger.info('num of training instances: {}'.format(num_instance))
logger.info('num of batches per epoch: {}'.format(num_batch))
idx_list = np.arange(num_instance)
np.random.shuffle(idx_list)
early_stopper = EarlyStopMonitor()
for epoch in range(NUM_EPOCH):
# Training
# training use only training graph
tgan.ngh_finder = train_ngh_finder
acc, ap, f1, auc, m_loss = [], [], [], [], []
np.random.shuffle(idx_list)
logger.info('start {} epoch'.format(epoch))
for k in range(num_batch):
# percent = 100 * k / num_batch
# if k % int(0.2 * num_batch) == 0:
# logger.info('progress: {0:10.4f}'.format(percent))
s_idx = k * BATCH_SIZE
e_idx = min(num_instance - 1, s_idx + BATCH_SIZE)
src_l_cut, dst_l_cut = train_src_l[s_idx:e_idx], train_dst_l[s_idx:e_idx]
ts_l_cut = train_ts_l[s_idx:e_idx]
label_l_cut = train_label_l[s_idx:e_idx]
size = len(src_l_cut)
src_l_fake, dst_l_fake = train_rand_sampler.sample(size)
with torch.no_grad():
pos_label = torch.ones(size, dtype=torch.float, device=device)
neg_label = torch.zeros(size, dtype=torch.float, device=device)
optimizer.zero_grad()
tgan = tgan.train()
pos_prob, neg_prob = tgan.contrast(src_l_cut, dst_l_cut, dst_l_fake, ts_l_cut, NUM_NEIGHBORS)
loss = criterion(pos_prob, pos_label)
loss += criterion(neg_prob, neg_label)
loss.backward()
optimizer.step()
# get training results
with torch.no_grad():
tgan = tgan.eval()
pred_score = np.concatenate([(pos_prob).cpu().detach().numpy(), (neg_prob).cpu().detach().numpy()])
pred_label = pred_score > 0.5
true_label = np.concatenate([np.ones(size), np.zeros(size)])
acc.append((pred_label == true_label).mean())
ap.append(average_precision_score(true_label, pred_score))
# f1.append(f1_score(true_label, pred_label))
m_loss.append(loss.item())
auc.append(roc_auc_score(true_label, pred_score))
# validation phase use all information
tgan.ngh_finder = full_ngh_finder
val_acc, val_ap, val_f1, val_auc = eval_one_epoch('val for old nodes', tgan, val_rand_sampler, val_src_l,
val_dst_l, val_ts_l, val_label_l)
nn_val_acc, nn_val_ap, nn_val_f1, nn_val_auc = eval_one_epoch('val for new nodes', tgan, val_rand_sampler, nn_val_src_l,
nn_val_dst_l, nn_val_ts_l, nn_val_label_l)
logger.info('epoch: {}:'.format(epoch))
logger.info('Epoch mean loss: {}'.format(np.mean(m_loss)))
logger.info('train acc: {}, val acc: {}, new node val acc: {}'.format(np.mean(acc), val_acc, nn_val_acc))
logger.info('train auc: {}, val auc: {}, new node val auc: {}'.format(np.mean(auc), val_auc, nn_val_auc))
logger.info('train ap: {}, val ap: {}, new node val ap: {}'.format(np.mean(ap), val_ap, nn_val_ap))
# logger.info('train f1: {}, val f1: {}, new node val f1: {}'.format(np.mean(f1), val_f1, nn_val_f1))
if early_stopper.early_stop_check(val_ap):
        logger.info('No improvement over {} epochs, stop training'.format(early_stopper.max_round))
logger.info(f'Loading the best model at epoch {early_stopper.best_epoch}')
best_model_path = get_checkpoint_path(early_stopper.best_epoch)
tgan.load_state_dict(torch.load(best_model_path))
logger.info(f'Loaded the best model at epoch {early_stopper.best_epoch} for inference')
tgan.eval()
break
else:
torch.save(tgan.state_dict(), get_checkpoint_path(epoch))
# testing phase use all information
tgan.ngh_finder = full_ngh_finder
test_acc, test_ap, test_f1, test_auc = eval_one_epoch('test for old nodes', tgan, test_rand_sampler, test_src_l,
test_dst_l, test_ts_l, test_label_l)
nn_test_acc, nn_test_ap, nn_test_f1, nn_test_auc = eval_one_epoch('test for new nodes', tgan, nn_test_rand_sampler, nn_test_src_l,
nn_test_dst_l, nn_test_ts_l, nn_test_label_l)
logger.info('Test statistics: Old nodes -- acc: {}, auc: {}, ap: {}'.format(test_acc, test_auc, test_ap))
logger.info('Test statistics: New nodes -- acc: {}, auc: {}, ap: {}'.format(nn_test_acc, nn_test_auc, nn_test_ap))
logger.info('Saving TGAN model')
torch.save(tgan.state_dict(), MODEL_SAVE_PATH)
logger.info('TGAN models saved')
| 13,549 | 40.820988 | 136 | py |
Inductive-representation-learning-on-temporal-graphs | Inductive-representation-learning-on-temporal-graphs-master/learn_node.py | """Unified interface to all dynamic graph model experiments"""
import math
import logging
import time
import sys
import random
import argparse
from tqdm import tqdm
import torch
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
from module import TGAN
from graph import NeighborFinder
class LR(torch.nn.Module):
def __init__(self, dim, drop=0.3):
super().__init__()
self.fc_1 = torch.nn.Linear(dim, 80)
self.fc_2 = torch.nn.Linear(80, 10)
self.fc_3 = torch.nn.Linear(10, 1)
self.act = torch.nn.ReLU()
self.dropout = torch.nn.Dropout(p=drop, inplace=True)
def forward(self, x):
x = self.act(self.fc_1(x))
x = self.dropout(x)
x = self.act(self.fc_2(x))
x = self.dropout(x)
return self.fc_3(x).squeeze(dim=1)
random.seed(222)
np.random.seed(222)
torch.manual_seed(222)
### Argument and global variables
parser = argparse.ArgumentParser('Interface for TGAT experiments on node classification')
parser.add_argument('-d', '--data', type=str, help='data sources to use, try wikipedia or reddit', default='wikipedia')
parser.add_argument('--bs', type=int, default=30, help='batch_size')
parser.add_argument('--prefix', type=str, default='')
parser.add_argument('--n_degree', type=int, default=50, help='number of neighbors to sample')
parser.add_argument('--n_neg', type=int, default=1)
parser.add_argument('--n_head', type=int, default=2)
parser.add_argument('--n_epoch', type=int, default=15, help='number of epochs')
parser.add_argument('--n_layer', type=int, default=2)
parser.add_argument('--lr', type=float, default=3e-4)
parser.add_argument('--tune', action='store_true', help='parameter tuning mode, use train-test split on training data only.')
parser.add_argument('--drop_out', type=float, default=0.1, help='dropout probability')
parser.add_argument('--gpu', type=int, default=0, help='idx for the gpu to use')
parser.add_argument('--node_dim', type=int, default=None, help='Dimensions of the node embedding')
parser.add_argument('--time_dim', type=int, default=None, help='Dimensions of the time embedding')
parser.add_argument('--agg_method', type=str, choices=['attn', 'lstm', 'mean'], help='local aggregation method', default='attn')
parser.add_argument('--attn_mode', type=str, choices=['prod', 'map'], default='prod')
parser.add_argument('--time', type=str, choices=['time', 'pos', 'empty'], help='how to use time information', default='time')
parser.add_argument('--new_node', action='store_true', help='model new node')
parser.add_argument('--uniform', action='store_true', help='take uniform sampling from temporal neighbors')
try:
args = parser.parse_args()
except:
parser.print_help()
sys.exit(0)
BATCH_SIZE = args.bs
NUM_NEIGHBORS = args.n_degree
NUM_NEG = 1
NUM_EPOCH = args.n_epoch
NUM_HEADS = args.n_head
DROP_OUT = args.drop_out
GPU = args.gpu
UNIFORM = args.uniform
NEW_NODE = args.new_node
USE_TIME = args.time
AGG_METHOD = args.agg_method
ATTN_MODE = args.attn_mode
SEQ_LEN = NUM_NEIGHBORS
DATA = args.data
NUM_LAYER = args.n_layer
LEARNING_RATE = args.lr
NODE_LAYER = 1
NODE_DIM = args.node_dim
TIME_DIM = args.time_dim
### set up logger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('log/{}.log'.format(str(time.time())))
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.WARN)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
logger.info(args)
### Load data and train val test split
g_df = pd.read_csv('./processed/ml_{}.csv'.format(DATA))
e_feat = np.load('./processed/ml_{}.npy'.format(DATA))
n_feat = np.load('./processed/ml_{}_node.npy'.format(DATA))
val_time, test_time = list(np.quantile(g_df.ts, [0.70, 0.85]))
src_l = g_df.u.values
dst_l = g_df.i.values
e_idx_l = g_df.idx.values
label_l = g_df.label.values
ts_l = g_df.ts.values
max_src_index = src_l.max()
max_idx = max(src_l.max(), dst_l.max())
total_node_set = set(np.unique(np.hstack([g_df.u.values, g_df.i.values])))
valid_train_flag = (ts_l <= test_time)
valid_val_flag = (ts_l <= test_time)
assignment = np.random.randint(0, 10, len(valid_train_flag))
valid_train_flag *= (assignment >= 2)
valid_val_flag *= (assignment < 2)
valid_test_flag = ts_l > test_time
if args.tune:
train_src_l = src_l[valid_train_flag]
train_dst_l = dst_l[valid_train_flag]
train_ts_l = ts_l[valid_train_flag]
train_e_idx_l = e_idx_l[valid_train_flag]
train_label_l = label_l[valid_train_flag]
# use the validation as test dataset
test_src_l = src_l[valid_val_flag]
test_dst_l = dst_l[valid_val_flag]
test_ts_l = ts_l[valid_val_flag]
test_e_idx_l = e_idx_l[valid_val_flag]
test_label_l = label_l[valid_val_flag]
else:
logger.info('Training use all train data')
valid_train_flag = (ts_l <= test_time)
train_src_l = src_l[valid_train_flag]
train_dst_l = dst_l[valid_train_flag]
train_ts_l = ts_l[valid_train_flag]
train_e_idx_l = e_idx_l[valid_train_flag]
train_label_l = label_l[valid_train_flag]
# use the true test dataset
test_src_l = src_l[valid_test_flag]
test_dst_l = dst_l[valid_test_flag]
test_ts_l = ts_l[valid_test_flag]
test_e_idx_l = e_idx_l[valid_test_flag]
test_label_l = label_l[valid_test_flag]
### Initialize the data structure for graph and edge sampling
adj_list = [[] for _ in range(max_idx + 1)]
for src, dst, eidx, ts in zip(train_src_l, train_dst_l, train_e_idx_l, train_ts_l):
adj_list[src].append((dst, eidx, ts))
adj_list[dst].append((src, eidx, ts))
train_ngh_finder = NeighborFinder(adj_list, uniform=UNIFORM)
# full graph with all the data for the test and validation purpose
full_adj_list = [[] for _ in range(max_idx + 1)]
for src, dst, eidx, ts in zip(src_l, dst_l, e_idx_l, ts_l):
full_adj_list[src].append((dst, eidx, ts))
full_adj_list[dst].append((src, eidx, ts))
full_ngh_finder = NeighborFinder(full_adj_list, uniform=UNIFORM)
### Model initialize
device = torch.device('cuda:{}'.format(GPU))
tgan = TGAN(train_ngh_finder, n_feat, e_feat,
num_layers=NUM_LAYER, use_time=USE_TIME, agg_method=AGG_METHOD, attn_mode=ATTN_MODE,
seq_len=SEQ_LEN, n_head=NUM_HEADS, drop_out=DROP_OUT, node_dim=NODE_DIM, time_dim=TIME_DIM)
# optimizer = torch.optim.Adam(tgan.parameters(), lr=LEARNING_RATE)
# criterion = torch.nn.BCELoss()
tgan = tgan.to(device)
num_instance = len(train_src_l)
num_batch = math.ceil(num_instance / BATCH_SIZE)
logger.debug('num of training instances: {}'.format(num_instance))
logger.debug('num of batches per epoch: {}'.format(num_batch))
idx_list = np.arange(num_instance)
np.random.shuffle(idx_list)
logger.info('loading saved TGAN model')
model_path = f'./saved_models/{args.prefix}-{args.agg_method}-{args.attn_mode}-{DATA}.pth'
tgan.load_state_dict(torch.load(model_path))
tgan.eval()
logger.info('TGAN models loaded')
logger.info('Start training node classification task')
lr_model = LR(n_feat.shape[1])
lr_optimizer = torch.optim.Adam(lr_model.parameters(), lr=args.lr)
lr_model = lr_model.to(device)
tgan.ngh_finder = full_ngh_finder
idx_list = np.arange(len(train_src_l))
lr_criterion = torch.nn.BCELoss()
lr_criterion_eval = torch.nn.BCELoss()
def eval_epoch(src_l, dst_l, ts_l, label_l, batch_size, lr_model, tgan, num_layer=NODE_LAYER):
pred_prob = np.zeros(len(src_l))
loss = 0
num_instance = len(src_l)
num_batch = math.ceil(num_instance / batch_size)
with torch.no_grad():
lr_model.eval()
tgan.eval()
for k in range(num_batch):
s_idx = k * batch_size
e_idx = min(num_instance - 1, s_idx + batch_size)
src_l_cut = src_l[s_idx:e_idx]
dst_l_cut = dst_l[s_idx:e_idx]
ts_l_cut = ts_l[s_idx:e_idx]
label_l_cut = label_l[s_idx:e_idx]
size = len(src_l_cut)
src_embed = tgan.tem_conv(src_l_cut, ts_l_cut, num_layer)
src_label = torch.from_numpy(label_l_cut).float().to(device)
lr_prob = lr_model(src_embed).sigmoid()
loss += lr_criterion_eval(lr_prob, src_label).item()
pred_prob[s_idx:e_idx] = lr_prob.cpu().numpy()
auc_roc = roc_auc_score(label_l, pred_prob)
return auc_roc, loss / num_instance
for epoch in tqdm(range(args.n_epoch)):
lr_pred_prob = np.zeros(len(train_src_l))
np.random.shuffle(idx_list)
tgan = tgan.eval()
lr_model = lr_model.train()
#num_batch
for k in range(num_batch):
s_idx = k * BATCH_SIZE
e_idx = min(num_instance - 1, s_idx + BATCH_SIZE)
src_l_cut = train_src_l[s_idx:e_idx]
dst_l_cut = train_dst_l[s_idx:e_idx]
ts_l_cut = train_ts_l[s_idx:e_idx]
label_l_cut = train_label_l[s_idx:e_idx]
size = len(src_l_cut)
lr_optimizer.zero_grad()
with torch.no_grad():
src_embed = tgan.tem_conv(src_l_cut, ts_l_cut, NODE_LAYER)
src_label = torch.from_numpy(label_l_cut).float().to(device)
lr_prob = lr_model(src_embed).sigmoid()
lr_loss = lr_criterion(lr_prob, src_label)
lr_loss.backward()
lr_optimizer.step()
train_auc, train_loss = eval_epoch(train_src_l, train_dst_l, train_ts_l, train_label_l, BATCH_SIZE, lr_model, tgan)
test_auc, test_loss = eval_epoch(test_src_l, test_dst_l, test_ts_l, test_label_l, BATCH_SIZE, lr_model, tgan)
#torch.save(lr_model.state_dict(), './saved_models/edge_{}_wkiki_node_class.pth'.format(DATA))
logger.info(f'train auc: {train_auc}, test auc: {test_auc}')
test_auc, test_loss = eval_epoch(test_src_l, test_dst_l, test_ts_l, test_label_l, BATCH_SIZE, lr_model, tgan)
#torch.save(lr_model.state_dict(), './saved_models/edge_{}_wkiki_node_class.pth'.format(DATA))
logger.info(f'test auc: {test_auc}')
| 10,050 | 35.549091 | 128 | py |
MoRig | MoRig-master/training/train_skin.py | import sys
sys.path.append("./")
import torch
import torch.backends.cudnn as cudnn
from torch_geometric.loader import DataLoader
from torch.utils.tensorboard import SummaryWriter
import os, shutil, argparse, numpy as np
import models
from models.customized_losses import cross_entropy_with_probs, skin_difference_loss, multi_positive_infonce_skinning, multi_pos_infoNCE, log_ratio_loss
from datasets.dataset_rig import RigDataset
from utils.log_utils import AverageMeter
from utils.os_utils import isdir, mkdir_p, isfile
from utils.io_utils import output_rigging
from utils.rig_parser import Rig
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar', snapshot=None):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
if snapshot and state['epoch'] % snapshot == 0:
shutil.copyfile(filepath, os.path.join(checkpoint, 'checkpoint_{}.pth.tar'.format(state['epoch'])))
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))
def get_bone_names(skin_filename):
with open(skin_filename, 'r') as fin:
lines = fin.readlines()
bone_names = []
for li in lines:
words = li.strip().split()
if words[0] == 'bones':
bone_names.append([words[1], words[2]])
return bone_names
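# Format note (hedged, inferred from the parser above): only lines in the skin file that start
# with the token 'bones' followed by two names are used, e.g. a hypothetical line
#   bones spine_01 spine_02
# would yield the pair ['spine_01', 'spine_02']; all other lines are ignored.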
def post_filter(skin_weights, topology_edge, num_ring=1):
skin_weights_new = np.zeros_like(skin_weights)
for v in range(len(skin_weights)):
adj_verts_multi_ring = []
current_seeds = [v]
for r in range(num_ring):
adj_verts = []
for seed in current_seeds:
adj_edges = topology_edge[:, np.argwhere(topology_edge == seed)[:, 1]]
adj_verts_seed = list(set(adj_edges.flatten().tolist()))
adj_verts_seed.remove(seed)
adj_verts += adj_verts_seed
adj_verts_multi_ring += adj_verts
current_seeds = adj_verts
adj_verts_multi_ring = list(set(adj_verts_multi_ring))
if v in adj_verts_multi_ring:
adj_verts_multi_ring.remove(v)
skin_weights_neighbor = [skin_weights[int(i), :][np.newaxis, :] for i in adj_verts_multi_ring]
skin_weights_neighbor = np.concatenate(skin_weights_neighbor, axis=0)
#max_bone_id = np.argmax(skin_weights[v, :])
#if np.sum(skin_weights_neighbor[:, max_bone_id]) < 0.17 * len(skin_weights_neighbor):
# skin_weights_new[v, :] = np.mean(skin_weights_neighbor, axis=0)
#else:
# skin_weights_new[v, :] = skin_weights[v, :]
skin_weights_new[v, :] = np.mean(skin_weights_neighbor, axis=0)
#skin_weights_new[skin_weights_new.sum(axis=1) == 0, :] = skin_weights[skin_weights_new.sum(axis=1) == 0, :]
return skin_weights_new
def main(args):
global device
lowest_loss = 1e20
# create checkpoint dir and log dir
if not isdir(args.checkpoint):
print("Create new checkpoint folder " + args.checkpoint)
mkdir_p(args.checkpoint)
if not args.resume:
if isdir(args.logdir):
shutil.rmtree(args.logdir)
mkdir_p(args.logdir)
# create model
model = models.__dict__[args.arch](nearest_bone=args.nearest_bone,
use_motion=True,
use_Dg=args.Dg, use_Lf=args.Lf,
motion_dim=args.motion_dim,
num_keyframes=args.num_keyframes,
aggr_method=args.aggr_method)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
lowest_loss = checkpoint['lowest_loss']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
train_loader = DataLoader(RigDataset(root=args.train_folder), batch_size=args.train_batch, shuffle=True)
val_loader = DataLoader(RigDataset(root=args.val_folder), batch_size=args.test_batch, shuffle=False)
test_loader = DataLoader(RigDataset(root=args.test_folder), batch_size=args.test_batch, shuffle=False)
if args.evaluate:
print('\nEvaluation only')
test_losses = test(test_loader, model, args, save_result=True, best_epoch=args.start_epoch)
for loss_name, loss_value in test_losses.items():
print(f"test_{loss_name}: {loss_value:6f}. ", end="")
return
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.schedule, gamma=args.gamma)
logger = SummaryWriter(log_dir=args.logdir)
for epoch in range(args.start_epoch, args.epochs):
lr = scheduler.get_last_lr()
print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr[0]))
train_losses = train(train_loader, model, optimizer, args)
val_losses = test(val_loader, model, args)
test_losses = test(test_loader, model, args)
scheduler.step()
losses = [train_losses, val_losses, test_losses]
for split_id, split_name in enumerate(["train", "val", "test"]):
print(f"Epoch{epoch + 1}. ", end="")
for loss_name, loss_value in losses[split_id].items():
print(f"{split_name}_{loss_name}: {loss_value:6f}. ", end="")
logger.add_scalar(f"{split_name}_{loss_name}", loss_value, epoch + 1)
print("")
# remember best acc and save checkpoint
is_best = val_losses["total_loss"] < lowest_loss
lowest_loss = min(val_losses["total_loss"], lowest_loss)
save_checkpoint({'epoch': epoch + 1, 'state_dict': model.state_dict(), 'lowest_loss': lowest_loss,
'optimizer': optimizer.state_dict()}, is_best, checkpoint=args.checkpoint)
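# One training epoch: motion input is sampled 50/50 from ground-truth or predicted flow; the per-frame and
# aggregated motion embeddings get a contrastive loss (log-ratio or multi-positive InfoNCE), and the
# skinning logits get a masked cross-entropy over the K nearest bones.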
def train(train_loader, model, optimizer, args):
global device
model.train() # switch to train mode
loss_skin_meter = AverageMeter()
loss_motion_meter = AverageMeter()
loss_meter = AverageMeter()
for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
if np.random.uniform() > 0.5:
input_flow = data.gt_flow
else:
input_flow = data.pred_flow
motion_all, motion_aggr, skin_pred = model(data, input_flow)
if args.loss_cont == "logratio":
loss_embedding = 0.0
for t in range(motion_all.shape[1]):
loss_embedding += log_ratio_loss(motion_all[:, t, :], data.gt_skin, data.batch)
loss_embedding += log_ratio_loss(motion_aggr, data.gt_skin, data.batch)
loss_embedding = 0.1 * loss_embedding
else:
loss_embedding = 0.0
for t in range(motion_all.shape[1]):
loss_embedding += multi_pos_infoNCE(motion_all[:, t, :], data.gt_skin, data.batch)
loss_embedding += multi_pos_infoNCE(motion_aggr, data.gt_skin, data.batch)
loss_embedding = 0.01 * loss_embedding
# skinning prediction loss
skin_gt = data.skin_label[:, 0:args.nearest_bone]
loss_mask_batch = data.loss_mask.float()[:, 0:args.nearest_bone]
skin_gt = skin_gt * loss_mask_batch
skin_gt = skin_gt / (torch.sum(torch.abs(skin_gt), dim=1, keepdim=True) + 1e-8)
vert_mask = (torch.abs(skin_gt.sum(dim=1) - 1.0) < 1e-8).float() # mask out vertices whose skinning is missing from the picked K bones.
loss_skin = cross_entropy_with_probs(skin_pred, skin_gt, reduction='none')
loss_skin = (loss_skin * loss_mask_batch * vert_mask.unsqueeze(1)).sum() / (loss_mask_batch * vert_mask.unsqueeze(1)).sum()
loss = loss_skin + loss_embedding
loss.backward()
optimizer.step()
loss_skin_meter.update(loss_skin.item())
loss_motion_meter.update(loss_embedding.item())
loss_meter.update(loss.item())
return {"loss_skin": loss_skin_meter.avg, "loss_motion": loss_motion_meter.avg,
"total_loss": loss_meter.avg}
def test(test_loader, model, args, save_result=False, best_epoch=None):
global device
model.eval() # switch to test mode
loss_skin_meter = AverageMeter()
loss_motion_meter = AverageMeter()
loss_meter = AverageMeter()
outdir = args.checkpoint.split('/')[-1]
for data in test_loader:
data = data.to(device)
with torch.no_grad():
input_flow = data.pred_flow
motion_all, motion_aggr, skin_pred = model(data, input_flow)
if args.loss_cont == "logratio":
loss_embedding = 0.0
for t in range(motion_all.shape[1]):
loss_embedding += log_ratio_loss(motion_all[:, t, :], data.gt_skin, data.batch)
loss_embedding += log_ratio_loss(motion_aggr, data.gt_skin, data.batch)
loss_embedding = loss_embedding * 0.01
else:
loss_embedding = 0.0
for t in range(motion_all.shape[1]):
loss_embedding += multi_pos_infoNCE(motion_all[:, t, :], data.gt_skin, data.batch)
loss_embedding += multi_pos_infoNCE(motion_aggr, data.gt_skin, data.batch)
loss_embedding = 0.1 * loss_embedding
# skinning prediction loss
skin_gt = data.skin_label[:, 0:args.nearest_bone]
loss_mask_batch = data.loss_mask.float()[:, 0:args.nearest_bone]
skin_gt = skin_gt * loss_mask_batch
skin_gt = skin_gt / (torch.sum(torch.abs(skin_gt), dim=1, keepdim=True) + 1e-8)
vert_mask = (torch.abs(skin_gt.sum(dim=1) - 1.0) < 1e-8).float() # mask out vertices whose skinning is missing from the picked K bones.
loss_skin = cross_entropy_with_probs(skin_pred, skin_gt, reduction='none')
loss_skin = (loss_skin * loss_mask_batch * vert_mask.unsqueeze(1)).sum() / (loss_mask_batch * vert_mask.unsqueeze(1)).sum()
loss = loss_skin + loss_embedding
loss_skin_meter.update(loss_skin.item())
loss_motion_meter.update(loss_embedding.item())
loss_meter.update(loss.item())
if save_result:
output_folder = 'results/{:s}/best_{:d}/'.format(outdir, best_epoch)
if not os.path.exists(output_folder):
mkdir_p(output_folder)
for i in range(len(torch.unique(data.batch))):
print('output result for model {:d}'.format(data.name[i].item()))
skin_pred_i = skin_pred[data.batch == i]
bone_names = get_bone_names(os.path.join(args.test_folder, "{:d}_skin.txt".format(data.name[i].item())))
tpl_e = np.loadtxt(os.path.join(args.test_folder, "{:d}_tpl_e.txt".format(data.name[i].item()))).T
loss_mask_sample = data.loss_mask.float()[data.batch == i, 0:args.nearest_bone]
skin_pred_i = torch.softmax(skin_pred_i, dim=1)
skin_pred_i = skin_pred_i * loss_mask_sample
skin_nn_i = data.skin_nn[data.batch == i, 0:args.nearest_bone]
skin_pred_asarray = np.zeros((len(skin_pred_i), len(bone_names)))
for v in range(len(skin_pred_i)):
for nn_id in range(len(skin_nn_i[v, :])):
if loss_mask_sample[v, nn_id] == 1:
skin_pred_asarray[v, skin_nn_i[v, nn_id]] = skin_pred_i[v, nn_id]
skin_pred_asarray = post_filter(skin_pred_asarray, tpl_e, num_ring=1)
skin_pred_asarray[skin_pred_asarray < np.max(skin_pred_asarray, axis=1, keepdims=True) * 0.5] = 0.0
skin_pred_asarray = skin_pred_asarray / (skin_pred_asarray.sum(axis=1, keepdims=True) + 1e-10)
with open(os.path.join(output_folder, "{:d}_bone_names.txt".format(data.name[i].item())), 'w') as fout:
for bone_name in bone_names:
fout.write("{:s} {:s}\n".format(bone_name[0], bone_name[1]))
np.save(os.path.join(output_folder, "{:d}_full_pred.npy".format(data.name[i].item())), skin_pred_asarray)
skel_filename = os.path.join(args.info_folder, "{:d}.txt".format(data.name[i].item()))
output_rigging(skel_filename, skin_pred_asarray, output_folder, data.name[i].item())
return {"loss_skin": loss_skin_meter.avg, "loss_motion": loss_motion_meter.avg,
"total_loss": loss_meter.avg}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='skinnet network')
parser.add_argument('--arch', default='skinnet_motion')
parser.add_argument('--start-epoch', default=0, type=int, help='manual epoch number (useful on restarts)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, help='weight decay (default: 0)')
parser.add_argument('--gamma', type=float, default=0.5, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--epochs', default=100, type=int, help='number of total epochs to run')
parser.add_argument('--lr', '--learning-rate', default=1e-4, type=float, help='initial learning rate')
parser.add_argument('--schedule', type=int, nargs='+', default=[], help='Decrease learning rate at these epochs.')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on val/test set')
parser.add_argument('--train_batch', default=2, type=int, help='train batchsize')
parser.add_argument('--test_batch', default=2, type=int, help='test batchsize')
parser.add_argument('-c', '--checkpoint', default='checkpoints/test', type=str, help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--logdir', default='logs/test', type=str, help='directory to save logs')
parser.add_argument('--resume', default='', type=str, help='path to latest checkpoint (default: none)')
parser.add_argument('--train_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/train/',
type=str, help='folder of training data')
parser.add_argument('--val_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/val/',
type=str, help='folder of validation data')
parser.add_argument('--test_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/test/', type=str, help='folder of testing data')
parser.add_argument('--nearest_bone', type=int, default=5)
    parser.add_argument('--info_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/rig_info_remesh/', type=str, help='folder of rig information')
parser.add_argument('--loss_cont', type=str, default="infonce") # logratio, infonce
    parser.add_argument('--Dg', action='store_true', help='input inverse geodesic distance as an additional feature')
    parser.add_argument('--Lf', action='store_true', help='input is-leaf indicator as an additional feature')
parser.add_argument('--num_keyframes', default=5, type=int)
parser.add_argument('--aggr_method', default="attn", type=str, choices=["max", "mean", "attn"])
parser.add_argument('--motion_dim', default=32, type=int)
print(parser.parse_args())
main(parser.parse_args())
| 15,989 | 54.714286 | 155 | py |
MoRig | MoRig-master/training/train_deform_shape.py | import sys
sys.path.append("./")
import os, shutil, argparse, numpy as np, glob
import torch
import torch.backends.cudnn as cudnn
from torch_geometric.loader import DataLoader
from torch.utils.tensorboard import SummaryWriter
from utils.log_utils import AverageMeter
from utils.os_utils import isdir, mkdir_p, isfile
from utils.io_utils import save_checkpoint
from datasets.dataset_shape import ModelsResourcesShapeDataset
import models
from models.customized_losses import infoNCE
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
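# One training epoch of the deformation network: L1 supervision on the predicted flow, plus the InfoNCE
# correspondence loss and visibility-mask BCE when the feature extractor is being fine-tuned.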
def train(train_loader, model, optimizer, args):
global device
model.train() # switch to train mode
loss_flow_meter = AverageMeter()
loss_match_meter = AverageMeter()
loss_mask_meter = AverageMeter()
for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
pred_flow, vtx_feature, pts_feature, pred_vismask, tau_nce = model(data)
loss_flow = torch.nn.functional.l1_loss(pred_flow, data.flow)
if args.train_extractor:
loss_match = infoNCE(vtx_feature, pts_feature, data.corr_v2p, data.corr_p2v, data.vtx_batch, data.pts_batch,
data.corr_v2p_batch, data.corr_p2v_batch, tau=tau_nce)
loss_mask = torch.nn.functional.binary_cross_entropy_with_logits(pred_vismask, data.vismask[:, None].float())
else:
loss_match = torch.zeros(1).to(loss_flow.device)
loss_mask = torch.zeros(1).to(loss_flow.device)
loss = loss_match + loss_mask + loss_flow
loss.backward()
optimizer.step()
loss_flow_meter.update(loss_flow.item(), n=len(data.name))
loss_match_meter.update(loss_match.item(), n=len(data.name))
loss_mask_meter.update(loss_mask.item(), n=len(data.name))
return {"flow_loss": loss_flow_meter.avg, "corr_loss": loss_match_meter.avg, "vis_loss": loss_mask_meter.avg}
def test(test_loader, model, args, save_result=False):
global device
model.eval() # switch to test mode
loss_flow_meter = AverageMeter()
loss_match_meter = AverageMeter()
loss_mask_meter = AverageMeter()
for data in test_loader:
data = data.to(device)
with torch.no_grad():
pred_flow, vtx_feature, pts_feature, pred_vismask, tau_nce = model(data)
loss_flow = torch.nn.functional.mse_loss(pred_flow, data.flow)
if args.train_extractor:
loss_match = infoNCE(vtx_feature, pts_feature, data.corr_v2p, data.corr_p2v, data.vtx_batch,
data.pts_batch, data.corr_v2p_batch, data.corr_p2v_batch, tau=tau_nce)
loss_mask = torch.nn.functional.binary_cross_entropy_with_logits(pred_vismask, data.vismask[:, None].float())
else:
loss_match = torch.zeros(1).to(loss_flow.device)
loss_mask = torch.zeros(1).to(loss_flow.device)
loss_flow_meter.update(loss_flow.item(), n=len(data.name))
loss_match_meter.update(loss_match.item(), n=len(data.name))
loss_mask_meter.update(loss_mask.item(), n=len(data.name))
if save_result:
for i in range(len(data.name)):
model_name = data.name[i]
print("processing: ", model_name)
pred_flow_np = pred_flow[data.vtx_batch==i].to("cpu").numpy()
gt_flow_np = data.flow[data.vtx_batch==i].to("cpu").numpy()
pts_i = data.pts[data.pts_batch==i].to("cpu").numpy()
vtx_i = data.vtx[data.vtx_batch==i].to("cpu").numpy()
vtx_shift_i = vtx_i + pred_flow_np
np.save("/mnt/neghvar/mnt/DATA_LINUX/zhan/output/mr_flownetG/{:s}_src_vtx.npy".format(model_name), vtx_i)
np.save("/mnt/neghvar/mnt/DATA_LINUX/zhan/output/mr_flownetG/{:s}_shift_vtx.npy".format(model_name), vtx_shift_i)
np.save("/mnt/neghvar/mnt/DATA_LINUX/zhan/output/mr_flownetG/{:s}_tar_pts.npy".format(model_name), pts_i)
return {"flow_loss": loss_flow_meter.avg, "corr_loss": loss_match_meter.avg, "vis_loss": loss_mask_meter.avg}
def main(args):
global device
lowest_loss = 1e20
# create checkpoint dir and log dir
if not isdir(args.checkpoint):
print("Create new checkpoint folder " + args.checkpoint)
mkdir_p(args.checkpoint)
if not args.resume:
if isdir(args.logdir):
shutil.rmtree(args.logdir)
mkdir_p(args.logdir)
# create model
model = models.__dict__[args.arch](tau_nce=args.tau_nce, num_interp=args.num_interp)
model.to(device)
model.corr_extractor.load_state_dict(torch.load(args.init_extractor)['state_dict'])
if not args.train_extractor:
for name, param in model.corr_extractor.named_parameters():
param.requires_grad = False
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.weight_decay)
else:
optimizer = torch.optim.Adam([{"params": model.corr_extractor.parameters(), "lr": 1e-4},
{'params': model.voting.parameters()},
{'params': model.completing.parameters()}],
lr=args.lr, weight_decay=args.weight_decay)
if args.resume: # optionally resume from a checkpoint
if isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint['state_dict'])
#args.start_epoch = checkpoint['epoch']
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
train_loader = DataLoader(ModelsResourcesShapeDataset(root=args.train_folder, transform=None),
batch_size=args.train_batch, shuffle=True,
follow_batch=['vtx', 'pts', 'corr_v2p', 'corr_p2v'], num_workers=args.num_workers)
val_loader = DataLoader(ModelsResourcesShapeDataset(root=args.val_folder, transform=None),
batch_size=args.test_batch, shuffle=False,
follow_batch=['vtx', 'pts', 'corr_v2p', 'corr_p2v'], num_workers=args.num_workers)
test_loader = DataLoader(ModelsResourcesShapeDataset(root=args.test_folder, transform=None),
batch_size=args.test_batch, shuffle=False,
follow_batch=['vtx', 'pts', 'corr_v2p', 'corr_p2v'], num_workers=args.num_workers)
if args.evaluate:
print('\nEvaluation only')
test_losses = test(test_loader, model, args, save_result=True)
for loss_name, loss_value in test_losses.items():
print(f"test_{loss_name}: {loss_value:6f}. ", end="")
return
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.schedule, gamma=args.gamma)
logger = SummaryWriter(log_dir=args.logdir)
for epoch in range(args.start_epoch, args.epochs):
lr = scheduler.get_last_lr()
print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr[0]))
train_losses = train(train_loader, model, optimizer, args)
val_losses = test(val_loader, model, args)
test_losses = test(test_loader, model, args)
scheduler.step()
# remember best acc and save checkpoint
is_best = (val_losses["flow_loss"] + val_losses["corr_loss"] + val_losses["vis_loss"]) < lowest_loss
lowest_loss = min((val_losses["flow_loss"] + val_losses["corr_loss"] + val_losses["vis_loss"]), lowest_loss)
save_checkpoint({'epoch': epoch + 1, 'state_dict': model.state_dict(), 'lowest_loss': lowest_loss,
'optimizer': optimizer.state_dict()},
is_best, checkpoint=args.checkpoint)
losses = [train_losses, val_losses, test_losses]
for split_id, split_name in enumerate(["train", "val", "test"]):
print(f"Epoch{epoch + 1}. ", end="")
for loss_name, loss_value in losses[split_id].items():
print(f"{split_name}_{loss_name}: {loss_value:6f}. ", end="")
logger.add_scalar(f"{split_name}_{loss_name}", loss_value, epoch + 1)
print("")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='scan mesh correspondence')
parser.add_argument('--arch', default='deformnet')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 0)')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--epochs', default=120, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--lr', '--learning-rate', default=5e-4, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--schedule', type=int, nargs='+', default=[40, 100], help='Decrease learning rate at these epochs.')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on val/test set')
parser.add_argument('--num_workers', default=1, type=int, help='number of workers to load data')
parser.add_argument('--train_batch', default=2, type=int, metavar='N', help='train batchsize')
parser.add_argument('--test_batch', default=2, type=int, metavar='N', help='test batchsize')
parser.add_argument('-c', '--checkpoint', default='checkpoints/test', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--logdir', default='logs/test', type=str, metavar='LOG', help='directory to save logs')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--train_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/train_deform/',
type=str, help='folder of training data') #/mnt/neghvar
parser.add_argument('--val_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/val_deform/',
type=str, help='folder of validation data') #/mnt/neghvar
parser.add_argument('--test_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/test_deform/',
type=str, help='folder of testing data') #/mnt/neghvar
parser.add_argument('--init_extractor', default='checkpoints/corr_s/model_best.pth.tar',
type=str, help='folder of testing data')
    parser.add_argument('--num_frames', default=5, type=int, help='randomly sample points from the next num_frames frames')
    parser.add_argument('--tau_nce', default=0.07, type=float, help='temperature in hardest NCE loss')
parser.add_argument('--train_extractor', action='store_true')
parser.add_argument('--num_interp', default=5, type=int)
print(parser.parse_args())
main(parser.parse_args())
| 11,351 | 57.515464 | 135 | py |
MoRig | MoRig-master/training/train_corr_shape.py | import sys
sys.path.append("./")
import os, glob, shutil, argparse, numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
from torch_geometric.loader import DataLoader
from utils.log_utils import AverageMeter
from utils.os_utils import isdir, mkdir_p, isfile
from utils.io_utils import save_checkpoint
import models
from datasets.dataset_shape import *
from models.customized_losses import infoNCE
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
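# One training epoch of the correspondence network: InfoNCE over matched vertex/point features, plus an
# optional visibility-mask BCE term weighted by 5.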
def train(train_loader, model, optimizer, args):
global device
model.train() # switch to train mode
loss_corr_meter = AverageMeter()
loss_mask_meter = AverageMeter()
loss_meter = AverageMeter()
for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
vtx_feature, pts_feature, pred_vismask, temprature = model(data, args.train_vismask)
loss_match = infoNCE(vtx_feature, pts_feature, data.corr_v2p, data.corr_p2v, data.vtx_batch, data.pts_batch,
data.corr_v2p_batch, data.corr_p2v_batch, tau=temprature)
if args.train_vismask:
loss_mask = torch.nn.functional.binary_cross_entropy_with_logits(pred_vismask, data.vismask[:, None].float())
else:
loss_mask = torch.zeros(1).to(loss_match.device)
loss = loss_match + 5 * loss_mask
loss.backward()
optimizer.step()
loss_corr_meter.update(loss_match.item(), n=len(data.name))
loss_mask_meter.update(loss_mask.item(), n=len(data.name))
loss_meter.update(loss.item(), n=len(data.name))
return {"corr_loss": loss_corr_meter.avg, "vis_loss": loss_mask_meter.avg, "total_loss": loss_meter.avg}
def test(test_loader, model, args, save_result=False):
global device
model.eval() # switch to test mode
loss_corr_meter = AverageMeter()
loss_mask_meter = AverageMeter()
loss_meter = AverageMeter()
for data in test_loader:
data = data.to(device)
with torch.no_grad():
vtx_feature, pts_feature, pred_vismask, temprature = model(data, args.train_vismask)
loss_match = infoNCE(vtx_feature, pts_feature, data.corr_v2p, data.corr_p2v, data.vtx_batch, data.pts_batch,
data.corr_v2p_batch, data.corr_p2v_batch, tau=temprature)
if args.train_vismask:
loss_mask = torch.nn.functional.binary_cross_entropy_with_logits(pred_vismask, data.vismask[:, None].float())
else:
loss_mask = torch.zeros(1).to(loss_match.device)
if save_result:
for i in range(len(data.name)):
model_name = data.name[i]
print("processing: ", model_name)
vtx_feature_np = vtx_feature[data.vtx_batch==i].to("cpu").numpy()
pts_feature_np = pts_feature[data.pts_batch==i].to("cpu").numpy()
corr_np = data.corr[data.corr_batch==i].to("cpu").numpy()
similarity = np.matmul(vtx_feature_np, pts_feature_np.T) / temprature.item()
pairwise_nnind = np.argmax(similarity, axis=1)
gt_vismask_np = data.vismask[data.vtx_batch == i].to("cpu").numpy()
pts_i = data.pts[data.pts_batch==i].to("cpu").numpy()
vtx_i = data.vtx[data.vtx_batch==i].to("cpu").numpy()
np.save(f"/mnt/DATA_LINUX/zhan/output/corrnet_shape/{model_name}_corr.npy", corr_np)
np.save(f"/mnt/DATA_LINUX/zhan/output/corrnet_shape/{model_name}_nnind.npy", pairwise_nnind)
np.save(f"/mnt/DATA_LINUX/zhan/output/corrnet_shape/{model_name}_pcd.npy", pts_i)
np.save(f"/mnt/DATA_LINUX/zhan/output/corrnet_shape/{model_name}_vtx.npy", vtx_i)
#np.save(f"/mnt/DATA_LINUX/zhan/output/corrnet_pn2_baseline/{model_name}_{pair_i}_gt_vismask.npy", gt_vismask_np)
if args.train_vismask:
pred_vismask_i = torch.sigmoid(pred_vismask[data.vtx_batch==i])
pred_vismask_np = pred_vismask_i.to("cpu").numpy()
np.save(f"/mnt/DATA_LINUX/zhan/output/corrnet_shape/{model_name}_pred_vismask.npy", pred_vismask_np)
loss = loss_match + 5 * loss_mask
loss_corr_meter.update(loss_match.item(), n=len(data.name))
loss_mask_meter.update(loss_mask.item(), n=len(data.name))
loss_meter.update(loss.item(), n=len(data.name))
return {"corr_loss": loss_corr_meter.avg, "vis_loss": loss_mask_meter.avg, "total_loss": loss_meter.avg}
def main(args):
global device
lowest_loss = 1e20
# create checkpoint dir and log dir
if not isdir(args.checkpoint):
print("Create new checkpoint folder " + args.checkpoint)
mkdir_p(args.checkpoint)
if not args.resume:
if isdir(args.logdir):
shutil.rmtree(args.logdir)
mkdir_p(args.logdir)
# create model
model = models.__dict__[args.arch](input_feature=3, output_feature=args.output_feature, temprature=args.tau_nce)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
train_loader = DataLoader(ModelsResourcesShapeDataset(root=args.train_folder, transform=None),
batch_size=args.train_batch, shuffle=True,
follow_batch=['vtx', 'pts', 'corr_v2p', 'corr_p2v'], num_workers=args.num_workers)
val_loader = DataLoader(ModelsResourcesShapeDataset(root=args.val_folder, transform=None),
batch_size=args.test_batch, shuffle=False,
follow_batch=['vtx', 'pts', 'corr_v2p', 'corr_p2v'], num_workers=args.num_workers)
test_loader = DataLoader(ModelsResourcesShapeDataset(root=args.test_folder, transform=None),
batch_size=args.test_batch, shuffle=False,
follow_batch=['vtx', 'pts', 'corr_v2p', 'corr_p2v'], num_workers=args.num_workers)
if args.evaluate:
print('\nEvaluation only')
test_losses = test(test_loader, model, args, save_result=True)
for loss_name, loss_value in test_losses.items():
print(f"test_{loss_name}: {loss_value:6f}. ", end="")
return
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.schedule, gamma=args.gamma)
logger = SummaryWriter(log_dir=args.logdir)
for epoch in range(args.start_epoch, args.epochs):
        if epoch == args.vis_branch_start_epoch:  # enable the visibility branch once and reset best-loss tracking for the new objective
args.train_vismask = True
lowest_loss = 1e20
lr = scheduler.get_last_lr()
print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr[0]))
train_losses = train(train_loader, model, optimizer, args)
val_losses = test(val_loader, model, args)
test_losses = test(test_loader, model, args)
scheduler.step()
# remember best acc and save checkpoint
is_best = val_losses["total_loss"] < lowest_loss
if is_best:
best_epoch = epoch
lowest_loss = min(val_losses["total_loss"], lowest_loss)
save_checkpoint({'epoch': epoch + 1, 'state_dict': model.state_dict(), 'lowest_loss': lowest_loss,
'optimizer': optimizer.state_dict()},
is_best, checkpoint=args.checkpoint)
losses = [train_losses, val_losses, test_losses]
for split_id, split_name in enumerate(["train", "val", "test"]):
print(f"Epoch{epoch + 1}. ", end="")
for loss_name, loss_value in losses[split_id].items():
print(f"{split_name}_{loss_name}: {loss_value:6f}. ", end="")
logger.add_scalar(f"{split_name}_{loss_name}", loss_value, epoch + 1)
print("")
print(best_epoch)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='mesh shape correspondence')
parser.add_argument('--arch', default='corrnet')
parser.add_argument('--start_epoch', default=0, type=int, help='manual epoch number (useful on restarts)')
parser.add_argument('--weight_decay', '--wd', default=1e-4, type=float, help='weight decay (default: 0)')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--epochs', default=300, type=int, help='number of total epochs to run')
parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float, help='initial learning rate')
parser.add_argument('--schedule', type=int, nargs='+', default=[200], help='Decrease learning rate at these epochs.')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on val/test set')
parser.add_argument('--num_workers', default=1, type=int, help='number of workers to load data')
parser.add_argument('--train_batch', default=2, type=int, help='train batchsize')
parser.add_argument('--test_batch', default=2, type=int, help='test batchsize')
parser.add_argument('-c', '--checkpoint', default='checkpoints/test', type=str, help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--logdir', default='logs/test', type=str, help='directory to save logs')
parser.add_argument('--resume', default='', type=str, help='path to latest checkpoint (default: none)')
parser.add_argument('--train_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/train_deform/',
type=str, help='folder of training data')
parser.add_argument('--val_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/val_deform/',
type=str, help='folder of validation data')
parser.add_argument('--test_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/test_deform/', #/mnt/neghvar
type=str, help='folder of testing data')
    parser.add_argument('--output_feature', default=64, type=int, help='channel number of the output feature')
    parser.add_argument('--tau_nce', default=0.07, type=float, help='temperature in hardest NCE loss')
parser.add_argument('--train_vismask', action='store_true', help='whether or not to train mask branch')
    parser.add_argument('--vis_branch_start_epoch', default=100, type=int, help='epoch at which the visibility-mask branch starts training')
print(parser.parse_args())
main(parser.parse_args())
| 11,073 | 55.213198 | 137 | py |
MoRig | MoRig-master/training/train_rig.py | import sys
sys.path.append("./")
import os, shutil, argparse, numpy as np
from utils.log_utils import AverageMeter
from utils.os_utils import isdir, mkdir_p, isfile
from utils.io_utils import output_point_cloud_ply
import torch
import torch.backends.cudnn as cudnn
from torch_geometric.loader import DataLoader
from torch.utils.tensorboard import SummaryWriter
from datasets.dataset_rig import RigDataset
import models
from models.customized_losses import chamfer_distance_with_average, multi_pos_infoNCE
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar', snapshot=None):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
if snapshot and state['epoch'] % snapshot == 0:
shutil.copyfile(filepath, os.path.join(checkpoint, 'checkpoint_{}.pth.tar'.format(state['epoch'])))
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))
def pairwise_distances(x, y):
    #Input: x is an Nxd matrix
    #       y is an optional Mxd matrix
#Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]
# if y is not given then use 'y=x'.
#i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
x_norm = (x ** 2).sum(1).view(-1, 1)
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
return torch.clamp(dist, 0.0, np.inf)
def meanshift_cluster(pts, bandwidth, weights, args):
"""
meanshift written in pytorch
:param pts: input points
:param weights: weight per point during clustering
:return: clustered points
"""
pts_steps = []
for i in range(args.meanshift_step):
Y = pairwise_distances(pts, pts)
K = torch.nn.functional.relu(bandwidth ** 2 - Y)
if weights is not None:
K = K * weights
P = torch.nn.functional.normalize(K, p=1, dim=0, eps=1e-10)
P = P.transpose(0, 1)
pts = args.step_size * (torch.matmul(P, pts) - pts) + pts
pts_steps.append(pts)
return pts_steps
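# Example usage of meanshift_cluster (hypothetical tensors, for illustration only): shift predicted joint
# candidates toward density modes using per-point attention weights, then take the last step as the result:
#   pts_steps = meanshift_cluster(pred_joints, bandwidth=0.05, weights=attn_pred, args=args)
#   clustered = pts_steps[-1]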
def main(args):
global device
lowest_loss = 1e20
# create checkpoint dir and log dir
if not isdir(args.checkpoint):
print("Create new checkpoint folder " + args.checkpoint)
mkdir_p(args.checkpoint)
if not args.resume:
if isdir(args.logdir):
shutil.rmtree(args.logdir)
mkdir_p(args.logdir)
# create model
if args.arch == "jointnet_motion":
chn_output = 3
elif args.arch == "masknet_motion":
chn_output = 1
else:
raise NotImplementedError
model = models.__dict__[args.arch](chn_output=chn_output, motion_dim=args.motion_dim,
num_keyframes=args.num_keyframes, aggr_method=args.aggr_method)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
lowest_loss = checkpoint['lowest_loss']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
train_loader = DataLoader(RigDataset(root=args.train_folder), batch_size=args.train_batch, shuffle=True, follow_batch=['joints'])
val_loader = DataLoader(RigDataset(root=args.val_folder), batch_size=args.test_batch, shuffle=False, follow_batch=['joints'])
test_loader = DataLoader(RigDataset(root=args.test_folder), batch_size=args.test_batch, shuffle=False, follow_batch=['joints'])
if args.evaluate:
print('\nEvaluation only')
test_losses = test(test_loader, model, args, save_result=True, best_epoch=args.start_epoch)
for loss_name, loss_value in test_losses.items():
print(f"test_{loss_name}: {loss_value:6f}. ", end="")
return
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.schedule, gamma=args.gamma)
logger = SummaryWriter(log_dir=args.logdir)
for epoch in range(args.start_epoch, args.epochs):
lr = scheduler.get_last_lr()
print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr[0]))
train_losses = train(train_loader, model, optimizer, args)
val_losses = test(val_loader, model, args)
test_losses = test(test_loader, model, args)
scheduler.step()
losses = [train_losses, val_losses, test_losses]
for split_id, split_name in enumerate(["train", "val", "test"]):
print(f"Epoch{epoch + 1}. ", end="")
for loss_name, loss_value in losses[split_id].items():
print(f"{split_name}_{loss_name}: {loss_value:6f}. ", end="")
logger.add_scalar(f"{split_name}_{loss_name}", loss_value, epoch + 1)
print("")
# remember best acc and save checkpoint
is_best = val_losses["total_loss"] < lowest_loss
lowest_loss = min(val_losses["total_loss"], lowest_loss)
save_checkpoint({'epoch': epoch + 1, 'state_dict': model.state_dict(), 'lowest_loss': lowest_loss,
'optimizer': optimizer.state_dict()}, is_best, checkpoint=args.checkpoint)
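# One training epoch. masknet_motion: contrastive motion-embedding loss plus BCE on the predicted attention
# mask. jointnet_motion: contrastive loss plus chamfer distance between displaced vertices and ground-truth
# joints, and an L1 loss on the per-vertex offsets. Input flow is sampled 50/50 from ground truth/prediction.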
def train(train_loader, model, optimizer, args):
global device
model.train() # switch to train mode
loss_chamfer_meter = AverageMeter()
loss_l1_meter = AverageMeter()
loss_motion_meter = AverageMeter()
loss_bce_meter = AverageMeter()
loss_meter = AverageMeter()
for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
if args.arch == 'masknet_motion':
if np.random.uniform() > 0.5:
input_flow = data.gt_flow
else:
input_flow = data.pred_flow
motion_all, motion_aggr, mask_pred = model(data, input_flow)
loss_embedding = 0.0
for t in range(motion_all.shape[1]):
loss_embedding += multi_pos_infoNCE(motion_all[:, t, :], data.gt_skin, data.batch)
loss_embedding += multi_pos_infoNCE(motion_aggr, data.gt_skin, data.batch)
loss_mask = torch.nn.functional.binary_cross_entropy_with_logits(mask_pred, data.mask.float().unsqueeze(1), reduction='mean')
loss = 0.1 * loss_embedding + loss_mask
loss_motion_meter.update(0.1 * loss_embedding.item())
loss_bce_meter.update(loss_mask.item())
elif args.arch == 'jointnet_motion':
if np.random.uniform() > 0.5:
input_flow = data.gt_flow
else:
input_flow = data.pred_flow
motion_all, motion_aggr, data_displacement = model(data, input_flow)
data_displacement = torch.tanh(data_displacement)
y_pred = data_displacement + data.pos
loss_embedding = 0.0
for t in range(motion_all.shape[1]):
loss_embedding += multi_pos_infoNCE(motion_all[:, t, :], data.gt_skin, data.batch)
loss_embedding += multi_pos_infoNCE(motion_aggr, data.gt_skin, data.batch)
loss_chamfer = 0.0
for i in range(len(torch.unique(data.joints_batch))):
joint_gt = data.joints[data.joints_batch == i, :]
y_pred_i = y_pred[data.batch == i, :]
loss_chamfer += chamfer_distance_with_average(y_pred_i.unsqueeze(0), joint_gt.unsqueeze(0))
loss_chamfer /= len(torch.unique(data.joints_batch))
loss_l1 = torch.nn.functional.l1_loss(data_displacement, data.offsets)
loss = 0.1 * loss_embedding + loss_chamfer + loss_l1
loss_motion_meter.update(0.1 * loss_embedding.item())
loss_chamfer_meter.update(loss_chamfer.item())
loss_l1_meter.update(loss_l1.item())
else:
raise NotImplementedError
loss.backward()
optimizer.step()
loss_meter.update(loss.item())
return {"loss_chamfer": loss_chamfer_meter.avg, "loss_l1": loss_l1_meter.avg,
"loss_motion": loss_motion_meter.avg, "loss_bce": loss_bce_meter.avg,
"total_loss": loss_meter.avg}
def test(test_loader, model, args, save_result=False, best_epoch=None):
global device
model.eval() # switch to test mode
loss_chamfer_meter = AverageMeter()
loss_l1_meter = AverageMeter()
loss_motion_meter = AverageMeter()
loss_bce_meter = AverageMeter()
loss_meter = AverageMeter()
for data in test_loader:
data = data.to(device)
with torch.no_grad():
if args.arch == 'masknet_motion':
input_flow = data.pred_flow
motion_all, motion_aggr, mask_pred = model(data, input_flow)
loss_embedding = 0.0
for t in range(motion_all.shape[1]):
loss_embedding += multi_pos_infoNCE(motion_all[:, t, :], data.gt_skin, data.batch)
loss_embedding += multi_pos_infoNCE(motion_aggr, data.gt_skin, data.batch)
loss_mask = torch.nn.functional.binary_cross_entropy_with_logits(mask_pred, data.mask.float().unsqueeze(1), reduction='mean')
loss = 0.1 * loss_embedding + loss_mask
loss_motion_meter.update(0.1 * loss_embedding.item())
loss_bce_meter.update(loss_mask.item())
elif args.arch == 'jointnet_motion':
input_flow = data.pred_flow
motion_all, motion_aggr, data_displacement = model(data, input_flow)
data_displacement = torch.tanh(data_displacement)
y_pred = data_displacement + data.pos
loss_embedding = 0.0
for t in range(motion_all.shape[1]):
loss_embedding += multi_pos_infoNCE(motion_all[:, t, :], data.gt_skin, data.batch)
loss_embedding += multi_pos_infoNCE(motion_aggr, data.gt_skin, data.batch)
loss_chamfer = 0.0
for i in range(len(torch.unique(data.joints_batch))):
joint_gt = data.joints[data.joints_batch == i, :]
y_pred_i = y_pred[data.batch == i, :]
loss_chamfer += chamfer_distance_with_average(y_pred_i.unsqueeze(0), joint_gt.unsqueeze(0))
loss_chamfer /= len(torch.unique(data.joints_batch))
loss_l1 = torch.nn.functional.l1_loss(data_displacement, data.offsets)
loss = 0.1 * loss_embedding + loss_chamfer + loss_l1
loss_motion_meter.update(0.1 * loss_embedding.item())
loss_chamfer_meter.update(loss_chamfer.item())
loss_l1_meter.update(loss_l1.item())
else:
raise NotImplementedError
loss_meter.update(loss.item())
if save_result:
output_folder = args.output_folder
if not os.path.exists(output_folder):
mkdir_p(output_folder)
if args.arch == 'masknet_motion':
mask_pred = torch.sigmoid(mask_pred)
for i in range(len(torch.unique(data.batch))):
mask_pred_sample = mask_pred[data.batch == i]
motion_embedding_sample = motion_aggr[data.batch == i]
np.save(os.path.join(output_folder, str(data.name[i].item()) + '_attn.npy'),
mask_pred_sample.data.to("cpu").numpy())
# np.save(os.path.join(output_folder, str(data.name[i].item()) + '_embedding.npy'),
# motion_embedding_sample.data.to("cpu").numpy())
else:
for i in range(len(torch.unique(data.batch))):
y_pred_sample = y_pred[data.batch == i, :]
output_point_cloud_ply(y_pred_sample, name=str(data.name[i].item()),
output_folder=args.output_folder)
return {"loss_chamfer": loss_chamfer_meter.avg, "loss_l1": loss_l1_meter.avg,
"loss_motion": loss_motion_meter.avg, "loss_bce": loss_bce_meter.avg,
"total_loss": loss_meter.avg}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='rigging network')
parser.add_argument('--arch', default='jointnet_motion', choices=['jointnet_motion', 'masknet_motion'])
parser.add_argument('--start-epoch', default=0, type=int, help='manual epoch number (useful on restarts)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, help='weight decay (default: 0)')
parser.add_argument('--gamma', type=float, default=0.2, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--epochs', default=120, type=int, help='number of total epochs to run')
parser.add_argument('--lr', '--learning-rate', default=5e-4, type=float, help='initial learning rate')
parser.add_argument('--schedule', type=int, nargs='+', default=[40, 80], help='Decrease learning rate at these epochs.')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on val/test set')
parser.add_argument('--train_batch', default=2, type=int, help='train batchsize')
parser.add_argument('--test_batch', default=2, type=int, help='test batchsize')
parser.add_argument('-c', '--checkpoint', default='checkpoints/test', type=str, help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--logdir', default='logs/test', type=str, help='directory to save logs')
parser.add_argument('--resume', default='', type=str, help='path to latest checkpoint (default: none)')
parser.add_argument('--train_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/train/', type=str, help='folder of training data')
parser.add_argument('--val_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/val/', type=str, help='folder of validation data')
parser.add_argument('--test_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/test/', type=str, help='folder of testing data')
parser.add_argument('--num_keyframes', default=5, type=int)
parser.add_argument('--aggr_method', default="attn", type=str, choices=["max", "mean", "attn"])
parser.add_argument('--motion_dim', default=32, type=int)
parser.add_argument('--output_folder', default='results/our_results', type=str)
print(parser.parse_args())
main(parser.parse_args())
| 15,151 | 50.362712 | 145 | py |
MoRig | MoRig-master/training/train_corr_pose.py | import sys
sys.path.append("./")
import os, glob, shutil, argparse, numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
from torch_geometric.loader import DataLoader
from utils.log_utils import AverageMeter
from utils.os_utils import isdir, mkdir_p, isfile
from utils.io_utils import save_checkpoint
import models
from datasets.dataset_pose import *
from models.customized_losses import infoNCE
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
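# One training epoch of the pose correspondence network. A source/target frame pair is sampled per batch
# (nearby frames when --sequential_frame, otherwise a reference frame against a later one), and the matching
# slices of the trajectories, correspondences and visibility masks are selected before the forward pass.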
def train(train_loader, model, optimizer, args):
global device
model.train() # switch to train mode
loss_corr_meter = AverageMeter()
loss_mask_meter = AverageMeter()
loss_meter = AverageMeter()
for data in train_loader:
if args.sequential_frame:
if args.dataset == "modelsresource":
src_frame = np.random.choice(19)
tar_frame_candidates = np.arange(max(0, src_frame-2), src_frame+2).tolist()
elif args.dataset == "deformingthings":
src_frame = np.random.choice(18)
tar_frame_candidates = np.arange(max(0, src_frame-3), src_frame+3).tolist()
else:
raise NotImplementedError
else:
if args.dataset == "modelsresource":
src_frame = 0
tar_frame_candidates = np.arange(1, 6).tolist()
elif args.dataset == "deformingthings":
src_frame = np.random.choice(6)
tar_frame_candidates = np.arange(6).tolist()
else:
raise NotImplementedError
tar_frame_candidates = [tid for tid in tar_frame_candidates if tid != src_frame]
tar_frame = np.random.choice(tar_frame_candidates)
data.vtx = data.vtx_traj[:, 3*src_frame:3*src_frame+3]
data.vtx_batch = data.vtx_traj_batch
data.pts = data.pts_traj[:, 3*tar_frame:3*tar_frame+3]
data.pts_batch = data.pts_traj_batch
corr_v2p_id_tar = (data.corr_v2p_all[:,-1]==tar_frame)
data.corr_v2p = data.corr_v2p_all[corr_v2p_id_tar, 0:-1]
data.corr_v2p_batch = data.corr_v2p_all_batch[corr_v2p_id_tar]
corr_p2v_id_tar = (data.corr_p2v_all[:,-1]==tar_frame)
data.corr_p2v = data.corr_p2v_all[corr_p2v_id_tar, 0:-1]
data.corr_p2v_batch = data.corr_p2v_all_batch[corr_p2v_id_tar]
data.vismask = data.vismask_all[:, tar_frame]
data = data.to(device)
optimizer.zero_grad()
vtx_feature, pts_feature, pred_vismask, temprature = model(data, args.train_vismask)
loss_match = infoNCE(vtx_feature, pts_feature, data.corr_v2p, data.corr_p2v, data.vtx_batch, data.pts_batch,
data.corr_v2p_batch, data.corr_p2v_batch, tau=temprature)
if args.train_vismask:
loss_mask = torch.nn.functional.binary_cross_entropy_with_logits(pred_vismask, data.vismask[:, None].float())
else:
loss_mask = torch.zeros(1).to(device)
loss = loss_match + 5 * loss_mask
loss.backward()
optimizer.step()
loss_corr_meter.update(loss_match.item(), n=len(data.name))
loss_mask_meter.update(loss_mask.item(), n=len(data.name))
loss_meter.update(loss.item(), n=len(data.name))
return {"corr_loss": loss_corr_meter.avg, "vis_loss": loss_mask_meter.avg, "total_loss": loss_meter.avg}
def test(test_loader, model, args, save_result=False):
global device
model.eval() # switch to test mode
loss_corr_meter = AverageMeter()
loss_mask_meter = AverageMeter()
loss_meter = AverageMeter()
for data in test_loader:
if args.sequential_frame:
src_frame, tar_frame = 10, 11
else:
src_frame, tar_frame = 0, 3
data.vtx = data.vtx_traj[:, 3 * src_frame:3 * src_frame + 3]
data.vtx_batch = data.vtx_traj_batch
data.pts = data.pts_traj[:, 3 * tar_frame:3 * tar_frame + 3]
data.pts_batch = data.pts_traj_batch
corr_v2p_id_tar = (data.corr_v2p_all[:, -1] == tar_frame)
data.corr_v2p = data.corr_v2p_all[corr_v2p_id_tar, 0:-1]
data.corr_v2p_batch = data.corr_v2p_all_batch[corr_v2p_id_tar]
corr_p2v_id_tar = (data.corr_p2v_all[:, -1] == tar_frame)
data.corr_p2v = data.corr_p2v_all[corr_p2v_id_tar, 0:-1]
data.corr_p2v_batch = data.corr_p2v_all_batch[corr_p2v_id_tar]
data.vismask = data.vismask_all[:, tar_frame]
data = data.to(device)
with torch.no_grad():
vtx_feature, pts_feature, pred_vismask, temprature = model(data, args.train_vismask)
loss_match = infoNCE(vtx_feature, pts_feature, data.corr_v2p, data.corr_p2v, data.vtx_batch, data.pts_batch,
data.corr_v2p_batch, data.corr_p2v_batch, tau=temprature)
if args.train_vismask:
loss_mask = torch.nn.functional.binary_cross_entropy_with_logits(pred_vismask, data.vismask[:, None].float())
else:
loss_mask = torch.zeros(1).to(loss_match.device)
if save_result:
for i in range(len(data.name)):
model_name = data.name[i]
print("processing: ", model_name)
vtx_feature_np = vtx_feature[data.vtx_batch == i].to("cpu").numpy()
pts_feature_np = pts_feature[data.pts_batch == i].to("cpu").numpy()
corr_v2p_np = data.corr_v2p[data.corr_v2p_batch == i].to("cpu").numpy()
similarity = np.matmul(vtx_feature_np, pts_feature_np.T) / temprature.item()
pairwise_nnind = np.argmax(similarity, axis=1)
pts_i = data.pts[data.pts_batch == i].to("cpu").numpy()
vtx_i = data.vtx[data.vtx_batch == i].to("cpu").numpy()
# np.save(f"/mnt/DATA_LINUX/zhan/output/corrnet/{model_name}_gt_corr.npy", corr_v2p_np)
# np.save(f"/mnt/DATA_LINUX/zhan/output/corrnet/{model_name}_nnind.npy", pairwise_nnind)
# np.save(f"/mnt/DATA_LINUX/zhan/output/corrnet/{model_name}_pts.npy", pts_i)
# np.save(f"/mnt/DATA_LINUX/zhan/output/corrnet/{model_name}_vtx.npy", vtx_i)
if args.train_vismask:
pred_vismask_i = torch.sigmoid(pred_vismask[data.vtx_batch == i])
pred_vismask_np = pred_vismask_i.to("cpu").numpy().squeeze(axis=1)
gt_vismask_np = data.vismask[data.vtx_batch == i].to("cpu").numpy()
# np.save(f"/mnt/DATA_LINUX/zhan/output/corrnet/{model_name}_pred_vismask.npy", pred_vismask_np)
# np.save(f"/mnt/DATA_LINUX/zhan/output/corrnet/{model_name}_gt_vismask.npy", gt_vismask_np)
loss = loss_match + 5*loss_mask
loss_corr_meter.update(loss_match.item(), n=len(data.name))
loss_mask_meter.update(loss_mask.item(), n=len(data.name))
loss_meter.update(loss.item(), n=len(data.name))
return {"corr_loss": loss_corr_meter.avg, "vis_loss": loss_mask_meter.avg, "total_loss": loss_meter.avg}
def main(args):
global device
lowest_loss = 1e20
# create checkpoint dir and log dir
if not isdir(args.checkpoint):
print("Create new checkpoint folder " + args.checkpoint)
mkdir_p(args.checkpoint)
if not args.resume:
if isdir(args.logdir):
shutil.rmtree(args.logdir)
mkdir_p(args.logdir)
# create model
model = models.__dict__[args.arch](input_feature=3, output_feature=args.output_feature, temprature=args.tau_nce)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
if args.sequential_frame:
if args.dataset == "deformingthings":
train_loader = DataLoader(SeqDeformingThingsDataset(root=args.train_folder, transform=None),
batch_size=args.train_batch, shuffle=True, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
val_loader = DataLoader(SeqDeformingThingsDataset(root=args.val_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
test_loader = DataLoader(SeqDeformingThingsDataset(root=args.test_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
elif args.dataset == "modelsresource":
train_loader = DataLoader(SeqModelsResourcesDataset(root=args.train_folder, transform=None),
batch_size=args.train_batch, shuffle=True, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
val_loader = DataLoader(SeqModelsResourcesDataset(root=args.val_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
test_loader = DataLoader(SeqModelsResourcesDataset(root=args.test_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
else:
raise NotImplementedError
else:
if args.dataset == "deformingthings":
train_loader = DataLoader(DeformingThingsDataset(root=args.train_folder, transform=None),
batch_size=args.train_batch, shuffle=True, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
val_loader = DataLoader(DeformingThingsDataset(root=args.val_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
test_loader = DataLoader(DeformingThingsDataset(root=args.test_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
elif args.dataset == "modelsresource":
train_loader = DataLoader(ModelsResourcesDataset(root=args.train_folder, transform=None),
batch_size=args.train_batch, shuffle=True, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
val_loader = DataLoader(ModelsResourcesDataset(root=args.val_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
test_loader = DataLoader(ModelsResourcesDataset(root=args.test_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
else:
raise NotImplementedError
if args.evaluate:
print('\nEvaluation only')
test_losses = test(test_loader, model, args, save_result=True)
for loss_name, loss_value in test_losses.items():
print(f"test_{loss_name}: {loss_value:6f}. ", end="")
return
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.schedule, gamma=args.gamma)
logger = SummaryWriter(log_dir=args.logdir)
for epoch in range(args.start_epoch, args.epochs):
if epoch == args.vis_branch_start_epoch:
args.train_vismask = True
lowest_loss = 1e20
lr = scheduler.get_last_lr()
print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr[0]))
train_losses = train(train_loader, model, optimizer, args)
val_losses = test(val_loader, model, args)
test_losses = test(test_loader, model, args)
scheduler.step()
# remember best acc and save checkpoint
is_best = val_losses["total_loss"] < lowest_loss
if is_best:
best_epoch = epoch
lowest_loss = min(val_losses["total_loss"], lowest_loss)
save_checkpoint({'epoch': epoch + 1, 'state_dict': model.state_dict(), 'lowest_loss': lowest_loss,
'optimizer': optimizer.state_dict()},
is_best, checkpoint=args.checkpoint)
losses = [train_losses, val_losses, test_losses]
for split_id, split_name in enumerate(["train", "val", "test"]):
print(f"Epoch{epoch + 1}. ", end="")
for loss_name, loss_value in losses[split_id].items():
print(f"{split_name}_{loss_name}: {loss_value:6f}. ", end="")
logger.add_scalar(f"{split_name}_{loss_name}", loss_value, epoch + 1)
print("")
print("Best epoch:", best_epoch)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='mesh depth correspondence')
parser.add_argument('--arch', default='corrnet')
parser.add_argument('--start_epoch', default=0, type=int, help='manual epoch number (useful on restarts)')
parser.add_argument('--weight_decay', '--wd', default=1e-4, type=float, help='weight decay (default: 0)')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--epochs', default=300, type=int, help='number of total epochs to run')
parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float, help='initial learning rate')
parser.add_argument('--schedule', type=int, nargs='+', default=[200], help='Decrease learning rate at these epochs.')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on val/test set')
parser.add_argument('--num_workers', default=1, type=int, help='number of workers to load data')
parser.add_argument('--train_batch', default=2, type=int, help='train batchsize')
parser.add_argument('--test_batch', default=2, type=int, help='test batchsize')
parser.add_argument('-c', '--checkpoint', default='checkpoints/test', type=str, help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--logdir', default='logs/test', type=str, help='directory to save logs')
parser.add_argument('--resume', default='', type=str, help='path to latest checkpoint (default: none)')
parser.add_argument('--train_folder', default='/mnt/DATA_LINUX2/zhan/morig/DeformingThings4D/train/',
type=str, help='folder of training data') # /mnt/neghvar
parser.add_argument('--val_folder', default='/mnt/DATA_LINUX2/zhan/morig/DeformingThings4D/val/',
type=str, help='folder of validation data')
parser.add_argument('--test_folder', default='/mnt/DATA_LINUX2/zhan/morig/DeformingThings4D/test/',
type=str, help='folder of testing data')
    parser.add_argument('--output_feature', default=64, type=int, help='channel dimension of the output feature')
    parser.add_argument('--tau_nce', default=0.07, type=float, help='temperature in the hardest NCE loss')
parser.add_argument('--train_vismask', action='store_true', help='whether or not to train mask branch')
parser.add_argument('--vis_branch_start_epoch', default=100, type=int)
parser.add_argument('--sequential_frame', action='store_true')
parser.add_argument('--dataset', default='modelsresource', choices=['deformingthings', 'modelsresource'])
print(parser.parse_args())
main(parser.parse_args())
| 17,016 | 58.919014 | 137 | py |
MoRig | MoRig-master/training/train_deform_pose.py | import sys
sys.path.append("./")
import os, shutil, argparse, numpy as np, glob
import torch
import torch.backends.cudnn as cudnn
from torch_geometric.loader import DataLoader
from torch.utils.tensorboard import SummaryWriter
from utils.log_utils import AverageMeter
from utils.os_utils import isdir, mkdir_p, isfile
from utils.io_utils import save_checkpoint
from datasets.dataset_pose import *
import models
from models.customized_losses import infoNCE
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def train(train_loader, model, optimizer, args):
global device
model.train() # switch to train mode
loss_flow_meter = AverageMeter()
loss_match_meter = AverageMeter()
loss_mask_meter = AverageMeter()
for data in train_loader:
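        # Pick a source frame and a nearby target frame (the exact ranges depend on the dataset and on
        # whether frames are sampled sequentially); the pair defines the deformation the model must predict.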
if args.sequential_frame:
if args.dataset == "modelsresource":
src_frame = np.random.choice(19)
tar_frame_candidates = np.arange(max(0, src_frame-2), src_frame+2).tolist()
elif args.dataset == "deformingthings":
src_frame = np.random.choice(18)
tar_frame_candidates = np.arange(max(0, src_frame-3), src_frame+3).tolist()
else:
raise NotImplementedError
else:
if args.dataset == "modelsresource":
src_frame = 0
tar_frame_candidates = np.arange(1, 6).tolist()
elif args.dataset == "deformingthings":
src_frame = np.random.choice(6)
tar_frame_candidates = np.arange(6).tolist()
else:
raise NotImplementedError
tar_frame_candidates = [tid for tid in tar_frame_candidates if tid != src_frame]
tar_frame = np.random.choice(tar_frame_candidates)
data.vtx = data.vtx_traj[:, 3 * src_frame:3 * src_frame + 3]
data.vtx_batch = data.vtx_traj_batch
data.pts = data.pts_traj[:, 3 * tar_frame:3 * tar_frame + 3]
data.pts_batch = data.pts_traj_batch
corr_v2p_id_tar = (data.corr_v2p_all[:, -1] == tar_frame)
data.corr_v2p = data.corr_v2p_all[corr_v2p_id_tar, 0:-1]
data.corr_v2p_batch = data.corr_v2p_all_batch[corr_v2p_id_tar]
corr_p2v_id_tar = (data.corr_p2v_all[:, -1] == tar_frame)
data.corr_p2v = data.corr_p2v_all[corr_p2v_id_tar, 0:-1]
data.corr_p2v_batch = data.corr_p2v_all_batch[corr_p2v_id_tar]
data.vismask = data.vismask_all[:, tar_frame]
data = data.to(device)
optimizer.zero_grad()
pred_flow, vtx_feature, pts_feature, pred_vismask, tau_nce = model(data)
loss_flow = torch.nn.functional.l1_loss(pred_flow, data.vtx_traj[:, 3 * tar_frame:3 * tar_frame + 3] - data.vtx)
if args.train_extractor:
loss_match = infoNCE(vtx_feature, pts_feature, data.corr_v2p, data.corr_p2v, data.vtx_batch, data.pts_batch,
data.corr_v2p_batch, data.corr_p2v_batch, tau=tau_nce)
loss_mask = torch.nn.functional.binary_cross_entropy_with_logits(pred_vismask, data.vismask[:, None].float())
else:
loss_match = torch.zeros(1).to(loss_flow.device)
loss_mask = torch.zeros(1).to(loss_flow.device)
loss = loss_match + loss_mask + loss_flow
loss.backward()
optimizer.step()
loss_flow_meter.update(loss_flow.item(), n=len(data.name))
loss_match_meter.update(loss_match.item(), n=len(data.name))
loss_mask_meter.update(loss_mask.item(), n=len(data.name))
return {"flow_loss": loss_flow_meter.avg, "corr_loss": loss_match_meter.avg, "vis_loss": loss_mask_meter.avg}
def test(test_loader, model, args, save_result=False):
global device
model.eval() # switch to test mode
loss_flow_meter = AverageMeter()
loss_match_meter = AverageMeter()
loss_mask_meter = AverageMeter()
for data in test_loader:
if args.sequential_frame:
src_frame, tar_frame = 10, 11
else:
src_frame, tar_frame = 0, 3
data.vtx = data.vtx_traj[:, 3 * src_frame:3 * src_frame + 3]
data.vtx_batch = data.vtx_traj_batch
data.pts = data.pts_traj[:, 3 * tar_frame:3 * tar_frame + 3]
data.pts_batch = data.pts_traj_batch
corr_v2p_id_tar = (data.corr_v2p_all[:, -1] == tar_frame)
data.corr_v2p = data.corr_v2p_all[corr_v2p_id_tar, 0:-1]
data.corr_v2p_batch = data.corr_v2p_all_batch[corr_v2p_id_tar]
corr_p2v_id_tar = (data.corr_p2v_all[:, -1] == tar_frame)
data.corr_p2v = data.corr_p2v_all[corr_p2v_id_tar, 0:-1]
data.corr_p2v_batch = data.corr_p2v_all_batch[corr_p2v_id_tar]
data.vismask = data.vismask_all[:, tar_frame]
data = data.to(device)
with torch.no_grad():
pred_flow, vtx_feature, pts_feature, pred_vismask, tau_nce = model(data)
loss_flow = torch.nn.functional.l1_loss(pred_flow, data.vtx_traj[:, 3 * tar_frame:3 * tar_frame + 3] - data.vtx)
if args.train_extractor:
loss_match = infoNCE(vtx_feature, pts_feature, data.corr_v2p, data.corr_p2v, data.vtx_batch,
data.pts_batch, data.corr_v2p_batch, data.corr_p2v_batch, tau=tau_nce)
loss_mask = torch.nn.functional.binary_cross_entropy_with_logits(pred_vismask, data.vismask[:, None].float())
else:
loss_match = torch.zeros(1).to(loss_flow.device)
loss_mask = torch.zeros(1).to(loss_flow.device)
loss_flow_meter.update(loss_flow.item(), n=len(data.name))
loss_match_meter.update(loss_match.item(), n=len(data.name))
loss_mask_meter.update(loss_mask.item(), n=len(data.name))
if save_result:
for i in range(len(data.name)):
model_name = data.name[i]
print("processing: ", model_name, tar_frame)
vtx_tar_i = data.vtx_traj[data.vtx_batch==i, 3 * tar_frame:3 * tar_frame + 3].to("cpu").numpy()
vtx_src_i = data.vtx[data.vtx_batch==i].to("cpu").numpy()
pred_flow_np = pred_flow[data.vtx_batch==i].to("cpu").numpy()
pts_i = data.pts[data.pts_batch==i].to("cpu").numpy()
np.save(f"/mnt/DATA_LINUX/zhan/output/deformnet_modelsresources/{model_name}_pred_flow.npy", pred_flow_np)
np.save(f"/mnt/DATA_LINUX/zhan/output/deformnet_modelsresources/{model_name}_tar_pts.npy", pts_i)
np.save(f"/mnt/DATA_LINUX/zhan/output/deformnet_modelsresources/{model_name}_tar_vtx.npy", vtx_tar_i)
np.save(f"/mnt/DATA_LINUX/zhan/output/deformnet_modelsresources/{model_name}_src_vtx.npy", vtx_src_i)
return {"flow_loss": loss_flow_meter.avg, "corr_loss": loss_match_meter.avg, "vis_loss": loss_mask_meter.avg}
def main(args):
global device
lowest_loss = 1e20
# create checkpoint dir and log dir
if not isdir(args.checkpoint):
print("Create new checkpoint folder " + args.checkpoint)
mkdir_p(args.checkpoint)
if not args.resume:
if isdir(args.logdir):
shutil.rmtree(args.logdir)
mkdir_p(args.logdir)
# create model
model = models.__dict__[args.arch](tau_nce=args.tau_nce, num_interp=args.num_interp)
model.to(device)
model.corr_extractor.load_state_dict(torch.load(args.init_extractor)['state_dict'])
if not args.train_extractor:
for name, param in model.corr_extractor.named_parameters():
param.requires_grad = False
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.weight_decay)
else:
optimizer = torch.optim.Adam([{"params": model.corr_extractor.parameters(), "lr": 1e-4},
                                      # note: DeformNet defines no `voting` submodule; only `completing` joins the base-lr group
                                      {'params': model.completing.parameters()}],
lr=args.lr, weight_decay=args.weight_decay)
if args.resume: # optionally resume from a checkpoint
if isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint['state_dict'])
#args.start_epoch = checkpoint['epoch']
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
if args.sequential_frame:
if args.dataset == "deformingthings":
train_loader = DataLoader(SeqDeformingThingsDataset(root=args.train_folder, transform=None),
batch_size=args.train_batch, shuffle=True, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
val_loader = DataLoader(SeqDeformingThingsDataset(root=args.val_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
test_loader = DataLoader(SeqDeformingThingsDataset(root=args.test_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
elif args.dataset == "modelsresource":
train_loader = DataLoader(SeqModelsResourcesDataset(root=args.train_folder, transform=None),
batch_size=args.train_batch, shuffle=True, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
val_loader = DataLoader(SeqModelsResourcesDataset(root=args.val_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
test_loader = DataLoader(SeqModelsResourcesDataset(root=args.test_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
else:
raise NotImplementedError
else:
if args.dataset == "deformingthings":
train_loader = DataLoader(DeformingThingsDataset(root=args.train_folder, transform=None),
batch_size=args.train_batch, shuffle=True, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
val_loader = DataLoader(DeformingThingsDataset(root=args.val_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
test_loader = DataLoader(DeformingThingsDataset(root=args.test_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
elif args.dataset == "modelsresource":
train_loader = DataLoader(ModelsResourcesDataset(root=args.train_folder, transform=None),
batch_size=args.train_batch, shuffle=True, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
val_loader = DataLoader(ModelsResourcesDataset(root=args.val_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
test_loader = DataLoader(ModelsResourcesDataset(root=args.test_folder, transform=None),
batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers,
follow_batch=['vtx_traj', 'pts_traj', 'corr_v2p_all', 'corr_p2v_all'])
else:
raise NotImplementedError
if args.evaluate:
print('\nEvaluation only')
test_losses = test(test_loader, model, args, save_result=True)
for loss_name, loss_value in test_losses.items():
print(f"test_{loss_name}: {loss_value:6f}. ", end="")
return
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.schedule, gamma=args.gamma)
logger = SummaryWriter(log_dir=args.logdir)
for epoch in range(args.start_epoch, args.epochs):
lr = scheduler.get_last_lr()
print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr[0]))
train_losses = train(train_loader, model, optimizer, args)
val_losses = test(val_loader, model, args)
test_losses = test(test_loader, model, args)
scheduler.step()
# remember best acc and save checkpoint
is_best = (val_losses["flow_loss"] + val_losses["corr_loss"] + val_losses["vis_loss"]) < lowest_loss
if is_best:
best_epoch = epoch + 1
lowest_loss = min((val_losses["flow_loss"] + val_losses["corr_loss"] + val_losses["vis_loss"]), lowest_loss)
save_checkpoint({'epoch': epoch + 1, 'state_dict': model.state_dict(), 'lowest_loss': lowest_loss,
'optimizer': optimizer.state_dict()},
is_best, checkpoint=args.checkpoint)
losses = [train_losses, val_losses, test_losses]
for split_id, split_name in enumerate(["train", "val", "test"]):
print(f"Epoch{epoch + 1}. ", end="")
for loss_name, loss_value in losses[split_id].items():
print(f"{split_name}_{loss_name}: {loss_value:6f}. ", end="")
logger.add_scalar(f"{split_name}_{loss_name}", loss_value, epoch + 1)
print("")
print("best epoch is", best_epoch)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='scan-mesh correspondence')
parser.add_argument('--arch', default='deformnet')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 0)')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--epochs', default=120, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--lr', '--learning-rate', default=1e-4, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--schedule', type=int, nargs='+', default=[60, 120], help='Decrease learning rate at these epochs.')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on val/test set')
parser.add_argument('--num_workers', default=1, type=int, help='number of workers to load data')
parser.add_argument('--train_batch', default=2, type=int, metavar='N', help='train batchsize')
parser.add_argument('--test_batch', default=2, type=int, metavar='N', help='test batchsize')
parser.add_argument('-c', '--checkpoint', default='checkpoints/test', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--logdir', default='logs/test', type=str, metavar='LOG', help='directory to save logs')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--train_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/train/',
type=str, help='folder of training data')
parser.add_argument('--val_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/val/',
type=str, help='folder of validation data')
parser.add_argument('--test_folder', default='/mnt/DATA_LINUX2/zhan/morig/ModelsResources/test/',
type=str, help='folder of testing data') #/mnt/neghvar
parser.add_argument('--init_extractor', default='checkpoints/corr_p_mr/model_best.pth.tar',
                        type=str, help='path to the pretrained correspondence extractor checkpoint')
parser.add_argument('--num_frames', default=5, type=int, help='random sample points from the next num_frame frames')
    parser.add_argument('--tau_nce', default=0.07, type=float, help='temperature in the hardest NCE loss')
parser.add_argument('--train_extractor', action='store_true')
parser.add_argument('--num_interp', default=5, type=int)
parser.add_argument('--sequential_frame', action='store_true')
parser.add_argument('--dataset', default='modelsresource', choices=['deformingthings', 'modelsresource'])
print(parser.parse_args())
main(parser.parse_args())
| 17,600 | 60.757895 | 135 | py |
MoRig | MoRig-master/models/rootnet.py | #-------------------------------------------------------------------------------
# Name: Root_GCN.py
# Purpose: definition of root prediction module.
# RigNet Copyright 2020 University of Massachusetts
# RigNet is made available under General Public License Version 3 (GPLv3), or under a Commercial License.
# Please see the LICENSE README.txt file in the main directory for more information and instruction on using and licensing RigNet.
#-------------------------------------------------------------------------------
import torch
from models.basic_modules import MLP, SAModule, GlobalSAModule, FPModule
from torch_scatter import scatter_max
from torch.nn import Sequential, Linear
from models.bonenet import GCU
__all__ = ['ROOTNET']
class ShapeEncoder(torch.nn.Module):
def __init__(self, aggr='max'):
super(ShapeEncoder, self).__init__()
self.gcu_1 = GCU(in_channels=3, out_channels=64, aggr=aggr)
self.gcu_2 = GCU(in_channels=64, out_channels=128, aggr=aggr)
self.gcu_3 = GCU(in_channels=128, out_channels=256, aggr=aggr)
self.mlp_glb = MLP([(64 + 128 + 256), 128])
#self.mlp_glb = MLP([(64 + 128 + 256), 512])
def forward(self, data):
x_1 = self.gcu_1(data.pos, data.tpl_edge_index, data.geo_edge_index)
x_2 = self.gcu_2(x_1, data.tpl_edge_index, data.geo_edge_index)
x_3 = self.gcu_3(x_2, data.tpl_edge_index, data.geo_edge_index)
x_4 = self.mlp_glb(torch.cat([x_1, x_2, x_3], dim=1))
x_global, _ = scatter_max(x_4, data.batch, dim=0)
return x_global
class JointEncoder(torch.nn.Module):
def __init__(self):
super(JointEncoder, self).__init__()
'''self.mlp_1 = MLP([4, 64])
self.mlp_2 = MLP([64, 128, 1024])
self.mlp_3 = MLP([1088, 512, 256, 128, 64])'''
self.sa1_joint = SAModule(0.999, 0.4, MLP([4, 64, 64, 128]), max_num_neighbors=64)
self.sa2_joint = SAModule(0.33, 0.6, MLP([128 + 3, 128, 128, 256]), max_num_neighbors=64)
self.sa3_joint = GlobalSAModule(MLP([256 + 3, 256, 256, 512]))
self.fp3_joint = FPModule(1, MLP([512 + 256, 256, 256]))
self.fp2_joint = FPModule(3, MLP([256 + 128, 128, 128]))
self.fp1_joint = FPModule(3, MLP([128 + 1, 128, 128]))
def forward(self, x, pos, batch):
'''x1= self.mlp_1(torch.cat((pos, x), dim=1))
x2 = self.mlp_2(x1)
x_glb, _ = scatter_max(x2, batch, dim=0)
x_glb = torch.repeat_interleave(x_glb, torch.bincount(batch), dim=0)
x3 = self.mlp_3(torch.cat((x_glb, x1), dim=1))
return x3'''
sa0_joint = (x, pos, batch)
sa1_joint = self.sa1_joint(*sa0_joint)
sa2_joint = self.sa2_joint(*sa1_joint)
sa3_joint = self.sa3_joint(*sa2_joint)
fp3_joint = self.fp3_joint(*sa3_joint, *sa2_joint)
fp2_joint = self.fp2_joint(*fp3_joint, *sa1_joint)
x_joint, _, _ = self.fp1_joint(*fp2_joint, *sa0_joint)
return x_joint
class ROOTNET(torch.nn.Module):
def __init__(self):
super(ROOTNET, self).__init__()
self.shape_encoder = ShapeEncoder()
self.joint_encoder = JointEncoder()
self.back_layers = Sequential(MLP([128 + 128, 200, 64]), Linear(64, 1))
def forward(self, data, shuffle=True):
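        # The first joint of each sample is taken as the ground-truth root: build a per-joint binary
        # label and (optionally) shuffle joints and labels with the same permutation.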
joints_label = []
joints_shuffle = []
for i in range(len(torch.unique(data.joints_batch))):
joint_i = data.joints[data.joints_batch==i]
label_i = joint_i.new(torch.Size((joint_i.shape[0], 1))).zero_()
label_i[0, 0] = 1
# random shuffle
if shuffle:
idx = torch.randperm(label_i.nelement())
label_i = label_i[idx]
joint_i = joint_i[idx]
joints_shuffle.append(joint_i)
joints_label.append(label_i)
joints_shuffle = torch.cat(joints_shuffle, dim=0)
joints_label = torch.cat(joints_label)
x_glb_shape = self.shape_encoder(data)
shape_feature = torch.repeat_interleave(x_glb_shape, torch.bincount(data.joints_batch), dim=0)
joint_feature = self.joint_encoder(torch.abs(joints_shuffle[:,0:1]), joints_shuffle, data.joints_batch)
x_joint = torch.cat([shape_feature, joint_feature], dim=1)
x_joint = self.back_layers(x_joint)
return x_joint, joints_label
| 4,353 | 45.319149 | 130 | py |
MoRig | MoRig-master/models/deformnet.py | import numpy as np
import torch
import torch.nn.functional as F
from torch.nn import Sequential, Linear
from torch_scatter import scatter_max, scatter_add
from models.corrnet import CorrNet
from models.basic_modules import MLP, GCUMotion
from torch_geometric.nn import knn
__all__ = ['deformnet']
class GCNDeform(torch.nn.Module):
def __init__(self, chn_in, chn_output, aggr='max'):
super(GCNDeform, self).__init__()
self.gcu_1 = GCUMotion(in_channels=chn_in, out_channels=128, aggr=aggr)
self.gcu_2 = GCUMotion(in_channels=128, out_channels=256, aggr=aggr)
self.gcu_3 = GCUMotion(in_channels=256, out_channels=512, aggr=aggr)
self.mlp_glb = MLP([(128 + 256 + 512), 1024])
self.mlp_tramsform = Sequential(MLP([1024 + 3 + chn_in + 128 + 256 + 512, 1024, 256]), Linear(256, chn_output))
def forward(self, pos, feature, geo_edge_index, tpl_edge_index, batch):
x_1 = self.gcu_1(pos, feature, tpl_edge_index, geo_edge_index)
x_2 = self.gcu_2(pos, x_1, tpl_edge_index, geo_edge_index)
x_3 = self.gcu_3(pos, x_2, tpl_edge_index, geo_edge_index)
x_4 = self.mlp_glb(torch.cat([x_1, x_2, x_3], dim=1))
x_global, _ = scatter_max(x_4, batch, dim=0)
x_global = torch.repeat_interleave(x_global, torch.bincount(batch), dim=0)
x_5 = torch.cat([x_global, pos, feature, x_1, x_2, x_3], dim=1)
out = self.mlp_tramsform(x_5)
return out
class DeformNet(torch.nn.Module):
def __init__(self, tau_nce, num_interp):
super(DeformNet, self).__init__()
self.corr_extractor = CorrNet(3, 64, temprature=tau_nce)
self.completing = GCNDeform(chn_in=4, chn_output=3)
self.num_interp = num_interp
def forward(self, data):
vtx_feature, pts_feature, pred_vismask, tau = self.corr_extractor(data, train_vismask=True)
pred_vismask = torch.sigmoid(pred_vismask)
for i in range(len(torch.unique(data.vtx_batch))):
pred_vismask[data.vtx_batch == i] = (pred_vismask[data.vtx_batch == i] - pred_vismask[data.vtx_batch == i].min()) / \
(pred_vismask[data.vtx_batch == i].max() - pred_vismask[data.vtx_batch == i].min())
# for visible part
assign_index = knn(pts_feature, vtx_feature, self.num_interp, data.pts_batch, data.vtx_batch, cosine=True)
euclidean_dist = data.pts[assign_index[1]] - data.vtx[assign_index[0]]
        # In practice, we find the following simplification more efficient; feature_sim stays positive because only the num_interp most similar pairs are kept.
feature_sim = torch.sum(pts_feature[assign_index[1]] * vtx_feature[assign_index[0]], dim=-1, keepdim=True)
feature_sim = feature_sim * pred_vismask.repeat_interleave(self.num_interp, dim=0) # maybe remove pred_vismask here??
flow_init = scatter_add(euclidean_dist * feature_sim, assign_index[0], dim=0) / scatter_add(feature_sim, assign_index[0], dim=0)
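        # Written out (a sketch of the line above, not part of the original code):
        #   flow_init[v] = sum_j s_vj * (p_j - x_v) / sum_j s_vj
        # where p_j ranges over the num_interp most feature-similar target points of vertex v and s_vj is
        # the (visibility-scaled) cosine similarity; the per-vertex visibility factor cancels in the ratio.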
# for invisible part
vis_vids = (pred_vismask >= 0.5).squeeze(dim=1)
invis_vids = (pred_vismask < 0.5).squeeze(dim=1)
vis_vtx_feature = vtx_feature[vis_vids]
invis_vtx_feature = vtx_feature[invis_vids]
vis_vtx_batch = data.vtx_batch[vis_vids]
invis_vtx_batch = data.vtx_batch[invis_vids]
vis_flow_init = flow_init[vis_vids]
# debug here
# import open3d as o3d
# from utils.vis_utils import drawSphere
# for i in range(len(torch.unique(data.vtx_batch))):
# vis_vtx_i = data.vtx[vis_vids][vis_vtx_batch == i].detach().to("cpu").numpy()
# invis_vtx_i = data.vtx[invis_vids][invis_vtx_batch == i].detach().to("cpu").numpy()
# invis_vtx_feature_i = invis_vtx_feature[invis_vtx_batch == i].detach().to("cpu").numpy()
# vis_vtx_feature_i = vis_vtx_feature[vis_vtx_batch == i].detach().to("cpu").numpy()
# sim = np.matmul(invis_vtx_feature_i, vis_vtx_feature_i.T)
# nnidx = np.argsort(sim, axis=1)[:, -10:]
# pcd = o3d.geometry.PointCloud(points=o3d.utility.Vector3dVector(data.vtx[data.vtx_batch==i].detach().to("cpu").numpy()))
# pcd.paint_uniform_color([0.8, 0.8, 0.8])
# for t in range(3):
# vis = o3d.visualization.Visualizer()
# vis.create_window()
# vis.add_geometry(pcd)
# vis.add_geometry(drawSphere(invis_vtx_i[len(invis_vtx_i)//4*t], color=[1.0, 0.0, 0.0], radius=0.007))
# nnidx_j = nnidx[len(invis_vtx_i) // 4 * t]
# for j in range(len(nnidx_j)):
# vis.add_geometry(drawSphere(vis_vtx_i[nnidx_j[j]], color=[0.0, 0.0, 1.0], radius=0.007))
# vis.run()
# #vis.capture_screen_image(f"{i}_{t}.png")
# vis.destroy_window()
        # We originally used nearest geodesic neighbors here, which requires pre-computing geodesic distances among vertices.
        # To make the dataset and training procedure less painful, we changed to Euclidean nearest neighbors.
        # We found the influence on performance to be negligible.
assign_index2 = knn(vis_vtx_feature, invis_vtx_feature, self.num_interp, vis_vtx_batch, invis_vtx_batch, cosine=True)
feature_sim = torch.sum(vis_vtx_feature[assign_index2[1]] * invis_vtx_feature[assign_index2[0]], dim=-1, keepdim=True)
invis_flow_init = scatter_add(vis_flow_init[assign_index2[1]] * feature_sim, assign_index2[0], dim=0) / scatter_add(feature_sim, assign_index2[0], dim=0)
flow_init[invis_vids] = invis_flow_init
l1_points = torch.cat((flow_init, pred_vismask), dim=-1)
pred_flow = self.completing(data.vtx, l1_points, data.geo_edge_index, data.tpl_edge_index, data.vtx_batch)
return pred_flow, vtx_feature, pts_feature, pred_vismask, tau
def deformnet(**kwargs):
model = DeformNet(tau_nce=kwargs["tau_nce"], num_interp=kwargs["num_interp"])
return model
| 6,050 | 56.628571 | 161 | py |
MoRig | MoRig-master/models/basic_modules.py | import numpy as np
import torch
from torch.nn import Sequential as Seq, Linear as Lin, ReLU, BatchNorm1d as BN
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import remove_self_loops, add_self_loops
from torch_geometric.nn import knn_interpolate, fps, radius, global_max_pool, PointConv
def radius_cpu(x, y, r, max_num_neighbors):
""" Finds for each element in: obj: `y` all points in: obj:`x` within distance: obj:`r`."""
dist = torch.cdist(y.unsqueeze(0), x.unsqueeze(0)).squeeze(0)
valid_positions = (dist <= r)
reduced_rows = torch.where(torch.sum(valid_positions, dim=1) > max_num_neighbors)[0]
reserve_rows = torch.where(torch.sum(valid_positions, dim=1) <= max_num_neighbors)[0]
if len(reduced_rows) > 0:
col_for_reduced_rows = torch.multinomial(valid_positions[reduced_rows].float(), max_num_neighbors)
edge_for_reduced_rows = torch.stack([col_for_reduced_rows.flatten(),
torch.repeat_interleave(reduced_rows, max_num_neighbors, dim=0).to(
valid_positions.device)], dim=0)
_, col_for_reserve_rows = torch.where(valid_positions[reserve_rows])
num_edge_for_reserve_rows = valid_positions[reserve_rows].sum(dim=1)
edge_for_reserve_rows = torch.stack([col_for_reserve_rows,
torch.repeat_interleave(reserve_rows.to(valid_positions.device),
num_edge_for_reserve_rows, dim=0)], dim=0)
if len(reduced_rows) > 0:
edges = torch.cat((edge_for_reserve_rows, edge_for_reduced_rows), dim=1)
else:
edges = edge_for_reserve_rows
return edges
def MLP(channels, batch_norm=True):
if batch_norm:
return Seq(*[Seq(Lin(channels[i - 1], channels[i]), ReLU(), BN(channels[i], momentum=0.1))
for i in range(1, len(channels))])
else:
return Seq(*[Seq(Lin(channels[i - 1], channels[i]), ReLU()) for i in range(1, len(channels))])
def batch_fps(pts, K):
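    """Farthest point sampling run independently on each point set in the batch (CPU implementation):
    iteratively pick the point farthest from the already-selected set. Returns the K sampled points
    and their indices, stacked along the batch dimension."""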
calc_distances = lambda p0, pts: ((p0 - pts) ** 2).sum(dim=1)
#np.random.seed(0)
def fps(x):
pts, K = x
farthest_idx = torch.LongTensor(K)
farthest_idx.zero_()
farthest_idx[0] = np.random.randint(len(pts))
distances = calc_distances(pts[farthest_idx[0]], pts)
for i in range(1, K):
farthest_idx[i] = torch.max(distances, dim=0)[1]
farthest_pts = pts[farthest_idx[i]]
distances = torch.min(distances, calc_distances(farthest_pts, pts))
pts_sampled = pts[farthest_idx, :]
return pts_sampled, farthest_idx
fps_res = list(map(fps, [(pts[i].to('cpu'), K) for i in range(len(pts))]))
batch_pts = [i[0] for i in fps_res]
batch_pts = torch.stack(batch_pts, dim=0).to(pts[0].device)
batch_id = [i[1] for i in fps_res]
batch_id = torch.stack(batch_id, dim=0).long().to(pts[0].device)
return batch_pts, batch_id
"""PointNet2 components"""
class SAModule(torch.nn.Module):
def __init__(self, ratio, r, nn, max_num_neighbors):
super(SAModule, self).__init__()
self.ratio = ratio
self.r = r
self.max_num_neighbors = max_num_neighbors
self.conv = PointConv(nn)
def forward(self, x, pos, batch, random_start=True):
idx = fps(pos, batch, ratio=self.ratio, random_start=random_start) #, random_start=False
if torch.cuda.is_available():
row, col = radius(pos, pos[idx], self.r, batch, batch[idx], max_num_neighbors=self.max_num_neighbors)
edge_index = torch.stack([col, row], dim=0)
else:
edge_index = radius_cpu(pos, pos[idx], self.r, max_num_neighbors=self.max_num_neighbors)
if x is None:
x = self.conv((None, None), (pos, pos[idx]), edge_index)
else:
x = self.conv((x, x[idx]), (pos, pos[idx]), edge_index)
pos, batch = pos[idx], batch[idx]
return x, pos, batch
class SAModule_msg(torch.nn.Module):
def __init__(self, ratio, r_list, mlp_list, max_num_neighbors_list):
super(SAModule_msg, self).__init__()
self.ratio = ratio
self.r_list = r_list
self.max_num_neighbors_list = max_num_neighbors_list
self.mlp_list = torch.nn.ModuleList()
for nn in mlp_list:
self.mlp_list.append(PointConv(nn))
def forward(self, x, pos, batch):
idx = fps(pos, batch, ratio=self.ratio) #, random_start=False
new_points_list = []
for i in range(len(self.mlp_list)):
if torch.cuda.is_available():
row, col = radius(pos, pos[idx], self.r_list[i], batch, batch[idx], max_num_neighbors=self.max_num_neighbors_list[i])
edge_index = torch.stack([col, row], dim=0)
else:
                edge_index = radius_cpu(pos, pos[idx], self.r_list[i], max_num_neighbors=self.max_num_neighbors_list[i])
if x is None:
new_points_list.append(self.mlp_list[i]((None, None), (pos, pos[idx]), edge_index))
else:
new_points_list.append(self.mlp_list[i]((x, x[idx]), (pos, pos[idx]), edge_index))
x = torch.cat(new_points_list, dim=1)
pos, batch = pos[idx], batch[idx]
return x, pos, batch
class GlobalSAModule(torch.nn.Module):
def __init__(self, nn):
super(GlobalSAModule, self).__init__()
self.nn = nn
def forward(self, x, pos, batch):
x = self.nn(torch.cat([x, pos], dim=1))
x = global_max_pool(x, batch)
pos = pos.new_zeros((x.size(0), 3))
batch = torch.arange(x.size(0), device=batch.device)
return x, pos, batch
class FPModule(torch.nn.Module):
def __init__(self, k, nn):
super(FPModule, self).__init__()
self.k = k
self.nn = nn
def forward(self, x, pos, batch, x_skip, pos_skip, batch_skip):
x = knn_interpolate(x, pos, pos_skip, batch, batch_skip, k=self.k)
if x_skip is not None:
x = torch.cat([x, x_skip], dim=1)
x = self.nn(x)
return x, pos_skip, batch_skip
"""GCN components"""
class EdgeConv(MessagePassing):
def __init__(self, nn_pos, aggr='max', **kwargs):
super(EdgeConv, self).__init__(aggr=aggr, **kwargs)
self.nn_pos = nn_pos
def forward(self, x, edge_index):
""""""
edge_index, _ = remove_self_loops(edge_index)
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
return self.propagate(edge_index, x=x)
def message(self, x_i, x_j):
pos_feat = self.nn_pos(torch.cat([x_i, (x_j - x_i)], dim=1))
return pos_feat
def update(self, aggr_out):
aggr_out = aggr_out.view(-1, aggr_out.shape[-1])
return aggr_out
def __repr__(self):
        return '{}(nn_pos={})'.format(self.__class__.__name__, self.nn_pos)
class GCU(torch.nn.Module):
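    """Graph convolution unit: one EdgeConv over mesh-topology edges and one over geodesic edges,
    whose outputs are concatenated and mixed by an MLP."""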
def __init__(self, in_channels, out_channels, aggr='max'):
super(GCU, self).__init__()
self.edge_conv_tpl = EdgeConv(nn_pos=MLP([in_channels * 2, out_channels // 2, out_channels // 2]), aggr=aggr)
self.edge_conv_geo = EdgeConv(nn_pos=MLP([in_channels * 2, out_channels // 2, out_channels // 2]), aggr=aggr)
self.mlp = MLP([out_channels, out_channels])
def forward(self, pos, tpl_edge_index, geo_edge_index):
x_tpl = self.edge_conv_tpl(pos, tpl_edge_index)
x_geo = self.edge_conv_geo(pos, geo_edge_index)
x_out = torch.cat([x_tpl, x_geo], dim=1)
x_out = self.mlp(x_out)
return x_out
class EdgeConvMotion(MessagePassing):
def __init__(self, nn_x, nn_pos, aggr='max', **kwargs):
super(EdgeConvMotion, self).__init__(aggr=aggr, **kwargs)
self.nn_x = nn_x
self.nn_pos = nn_pos
def forward(self, pos, x, edge_index):
""""""
x = x.unsqueeze(-1) if x.dim() == 1 else x
edge_index, _ = remove_self_loops(edge_index)
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
return self.propagate(edge_index, pos=pos, x=x)
def message(self, pos_i, pos_j, x_i, x_j):
x_feat = self.nn_x(torch.cat([x_i, (x_j - x_i)], dim=1))
pos_feat = self.nn_pos(torch.cat([pos_i, (pos_j - pos_i)], dim=1))
return torch.cat([x_feat, pos_feat], dim=1)
def update(self, aggr_out):
aggr_out = aggr_out.view(-1, aggr_out.shape[-1])
return aggr_out
def __repr__(self):
        return '{}(nn_x={}, nn_pos={})'.format(self.__class__.__name__, self.nn_x, self.nn_pos)
class GCUMotion(torch.nn.Module):
def __init__(self, in_channels, out_channels, in_channel_pos=3, dim_pos_feat=16, aggr='max'):
super(GCUMotion, self).__init__()
self.edge_conv_tpl = EdgeConvMotion(nn_x=MLP([in_channels * 2, out_channels // 2, out_channels // 2]),
nn_pos=MLP([in_channel_pos * 2, dim_pos_feat, dim_pos_feat]), aggr=aggr)
self.edge_conv_geo = EdgeConvMotion(nn_x=MLP([in_channels * 2, out_channels // 2, out_channels // 2]),
nn_pos=MLP([in_channel_pos * 2, dim_pos_feat, dim_pos_feat]), aggr=aggr)
self.mlp = MLP([out_channels + dim_pos_feat * 2, out_channels])
def forward(self, pos, x, tpl_edge_index, geo_edge_index):
x_tpl = self.edge_conv_tpl(pos, x, tpl_edge_index)
x_geo = self.edge_conv_geo(pos, x, geo_edge_index)
x_out = torch.cat([x_tpl, x_geo], dim=1)
x_out = self.mlp(x_out)
return x_out
| 9,567 | 42.490909 | 133 | py |
MoRig | MoRig-master/models/bonenet.py | #-------------------------------------------------------------------------------
# Name: PairCls_GCN.py
# Purpose: definition of connectivity prediction module.
# RigNet Copyright 2020 University of Massachusetts
# RigNet is made available under General Public License Version 3 (GPLv3), or under a Commercial License.
# Please see the LICENSE README.txt file in the main directory for more information and instruction on using and licensing RigNet.
#-------------------------------------------------------------------------------
import numpy as np
import torch
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops, remove_self_loops, softmax
from models.basic_modules import MLP, SAModule, GlobalSAModule, FPModule
from torch.nn import Sequential, Dropout, Linear
from torch_scatter import scatter_max
class EdgeConv(MessagePassing):
def __init__(self, in_channels, out_channels, nn, aggr='max', **kwargs):
super(EdgeConv, self).__init__(aggr=aggr, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.nn = nn
def forward(self, x, edge_index):
""""""
x = x.unsqueeze(-1) if x.dim() == 1 else x
edge_index, _ = remove_self_loops(edge_index)
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
return self.propagate(edge_index, x=x)
def message(self, x_i, x_j):
return self.nn(torch.cat([x_i, (x_j - x_i)], dim=1))
def update(self, aggr_out):
aggr_out = aggr_out.view(-1, self.out_channels)
return aggr_out
def __repr__(self):
return '{}(nn={})'.format(self.__class__.__name__, self.nn)
class GCU(torch.nn.Module):
def __init__(self, in_channels, out_channels, aggr='max'):
super(GCU, self).__init__()
self.edge_conv_tpl = EdgeConv(in_channels=in_channels, out_channels=out_channels // 2,
nn=MLP([in_channels * 2, out_channels // 2, out_channels // 2]), aggr=aggr)
self.edge_conv_geo = EdgeConv(in_channels=in_channels, out_channels=out_channels // 2,
nn=MLP([in_channels * 2, out_channels // 2, out_channels // 2]), aggr=aggr)
self.mlp = MLP([out_channels, out_channels])
def forward(self, x, tpl_edge_index, geo_edge_index):
x_tpl = self.edge_conv_tpl(x, tpl_edge_index)
x_geo = self.edge_conv_geo(x, geo_edge_index)
x_out = torch.cat([x_tpl, x_geo], dim=1)
x_out = self.mlp(x_out)
return x_out
class ShapeEncoder(torch.nn.Module):
def __init__(self, aggr='max'):
super(ShapeEncoder, self).__init__()
self.gcu_1 = GCU(in_channels=3, out_channels=64, aggr=aggr)
self.gcu_2 = GCU(in_channels=64, out_channels=128, aggr=aggr)
self.gcu_3 = GCU(in_channels=128, out_channels=256, aggr=aggr)
self.mlp_glb = MLP([(64 + 128 + 256), 256, 64])
def forward(self, data):
x_1 = self.gcu_1(data.pos, data.tpl_edge_index, data.geo_edge_index)
x_2 = self.gcu_2(x_1, data.tpl_edge_index, data.geo_edge_index)
x_3 = self.gcu_3(x_2, data.tpl_edge_index, data.geo_edge_index)
x_4 = self.mlp_glb(torch.cat([x_1, x_2, x_3], dim=1))
x_global_shape, _ = scatter_max(x_4, data.batch, dim=0)
return x_global_shape
class JointEncoder(torch.nn.Module):
def __init__(self):
super(JointEncoder, self).__init__()
#self.mlp_1 = MLP([3, 64, 128, 1024])
#self.mlp_2 = MLP([1024, 256, 128])
self.sa1_module_joints = SAModule(0.999, 0.4, MLP([3, 64, 64, 128]), max_num_neighbors=64)
self.sa2_module_joints = SAModule(0.33, 0.6, MLP([128 + 3, 128, 128, 256]), max_num_neighbors=64)
self.sa3_module_joints = GlobalSAModule(MLP([256 + 3, 256, 256, 512, 256, 128]))
def forward(self, joints, joints_batch):
'''x1 = self.mlp_1(joints_norepeat)
x_glb, _ = scatter_max(x1, joints_batch, dim=0)
x_glb = self.mlp_2(x_glb)
return x_glb'''
sa0_joints = (None, joints, joints_batch)
sa1_joints = self.sa1_module_joints(*sa0_joints)
sa2_joints = self.sa2_module_joints(*sa1_joints)
sa3_joints = self.sa3_module_joints(*sa2_joints)
x_glb_joint = sa3_joints[0]
return x_glb_joint
class PairCls(torch.nn.Module):
def __init__(self):
super(PairCls, self).__init__()
self.expand_joint_feature = Sequential(MLP([8, 32, 64, 128, 256]))
self.shape_encoder = ShapeEncoder()
self.joint_encoder = JointEncoder()
input_concat_dim = 448
self.mix_transform = Sequential(MLP([input_concat_dim, 128, 64]), Dropout(0.7), Linear(64, 1))
def forward(self, data, permute_joints=True):
joint_feature = self.joint_encoder(data.joints, data.joints_batch)
joint_feature = torch.repeat_interleave(joint_feature, torch.bincount(data.pairs_batch), dim=0)
shape_feature = self.shape_encoder(data)
shape_feature = torch.repeat_interleave(shape_feature, torch.bincount(data.pairs_batch), dim=0)
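        # When permute_joints is set, the two joints of each candidate pair are randomly swapped so
        # the connectivity classifier does not depend on pair ordering.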
if permute_joints:
rand_permute = (torch.rand(len(data.pairs))>=0.5).long().to(data.pairs.device)
joints_pair = torch.cat((data.joints[torch.gather(data.pairs, dim=1, index=rand_permute.unsqueeze(dim=1)).squeeze(dim=1).long()],
data.joints[torch.gather(data.pairs, dim=1, index=1-rand_permute.unsqueeze(dim=1)).squeeze(dim=1).long()],
data.pair_attr[:, :-1]), dim=1)
else:
joints_pair = torch.cat((data.joints[data.pairs[:,0].long()], data.joints[data.pairs[:,1].long()], data.pair_attr[:, :-1]), dim=1)
pair_feature = self.expand_joint_feature(joints_pair)
pair_feature = torch.cat((shape_feature, joint_feature, pair_feature), dim=1)
pre_label = self.mix_transform(pair_feature)
gt_label = data.pair_attr[:, -1].unsqueeze(1)
return pre_label, gt_label
| 6,046 | 46.992063 | 143 | py |
MoRig | MoRig-master/models/rignet.py | import torch
from torch.nn import Sequential, Linear
from torch_scatter import scatter_max
from models.basic_modules import MLP, GCUMotion
import numpy as np
__all__ = ['jointnet_motion', 'masknet_motion', 'skinnet_motion']
class TemporalAttn(torch.nn.Module):
def __init__(self, input_size, num_heads, hidden_size, dim_feedforward, output_size):
"""Different from transformer encoder layer, hidden size is defined for a single head, not for all heads"""
super(TemporalAttn, self).__init__()
self.num_heads = num_heads
self.w_qs = Linear(input_size, hidden_size * num_heads, bias=False)
self.w_ks = Linear(input_size, hidden_size * num_heads, bias=False)
self.w_vs = Linear(input_size, hidden_size * num_heads, bias=False)
self.w_o = Linear(hidden_size * num_heads, hidden_size, bias=False)
self.feedforward = MLP([hidden_size, dim_feedforward, output_size])
self.cls_token = torch.nn.Parameter(torch.randn(1, 1, input_size))
def transpose_qkv(self, X):
"""Transposition for parallel computation of multiple attention heads."""
"""From https://d2l.ai/chapter_attention-mechanisms/multihead-attention.html"""
X = X.reshape(X.shape[0], X.shape[1], self.num_heads, -1)
X = X.permute(0, 2, 1, 3)
return X.reshape(-1, X.shape[2], X.shape[3])
def transpose_output(self, X):
"""Reverse the operation of `transpose_qkv`."""
"""From https://d2l.ai/chapter_attention-mechanisms/multihead-attention.html"""
X = X.reshape(-1, self.num_heads, X.shape[1], X.shape[2])
X = X.permute(0, 2, 1, 3)
return X.reshape(X.shape[0], X.shape[1], -1)
def forward(self, x):
cls_token = self.cls_token.expand([x.shape[0], -1, -1])
x_expand = torch.cat([cls_token, x], dim=1)
q, k, v = self.w_qs(x_expand), self.w_ks(x_expand), self.w_vs(x_expand)
q, k, v = self.transpose_qkv(q), self.transpose_qkv(k), self.transpose_qkv(v)
attn = torch.bmm(q, k.transpose(-2, -1))
attn = torch.nn.functional.softmax(attn / np.sqrt(k.size(-1)), dim=-1)
res = torch.bmm(attn, v)
res = self.w_o(self.transpose_output(res))
res = self.feedforward(res[:, 0, :]) # index only the cls token for classification
return res
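# Usage sketch for TemporalAttn (shapes inferred from the callers below; illustrative only):
#   attn = TemporalAttn(input_size=32, num_heads=2, hidden_size=64, dim_feedforward=512, output_size=64)
#   x = torch.randn(num_vertices, num_keyframes, 32)   # per-vertex motion features over keyframes
#   out = attn(x)                                       # -> (num_vertices, 64), read from the prepended cls token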
class GCNRig(torch.nn.Module):
def __init__(self, chn_feature, chn_output, aggr='max'):
super(GCNRig, self).__init__()
self.gcu_1 = GCUMotion(in_channels=chn_feature, out_channels=64, dim_pos_feat=16, aggr=aggr)
self.gcu_2 = GCUMotion(in_channels=64, out_channels=256, dim_pos_feat=16, aggr=aggr)
self.gcu_3 = GCUMotion(in_channels=256, out_channels=512, dim_pos_feat=16, aggr=aggr)
self.mlp_glb = MLP([(64 + 256 + 512), 1024])
self.mlp_transform = Sequential(MLP([1024 + 3 + chn_feature + 64 + 256 + 512, 1024, 256]), Linear(256, chn_output))
def forward(self, pos, feature, tpl_edge_index, geo_edge_index, batch):
x_1 = self.gcu_1(pos, feature, tpl_edge_index, geo_edge_index)
x_2 = self.gcu_2(pos, x_1, tpl_edge_index, geo_edge_index)
x_3 = self.gcu_3(pos, x_2, tpl_edge_index, geo_edge_index)
x_4 = self.mlp_glb(torch.cat([x_1, x_2, x_3], dim=1))
x_global, _ = scatter_max(x_4, batch, dim=0)
x_global = torch.repeat_interleave(x_global, torch.bincount(batch), dim=0)
x_5 = torch.cat([x_global, pos, feature, x_1, x_2, x_3], dim=1)
x_out = self.mlp_transform(x_5)
return x_out
class JointNetMotion(torch.nn.Module):
def __init__(self, num_keyframes, chn_output, aggr_method, aggr='max'):
super(JointNetMotion, self).__init__()
self.num_keyframes = num_keyframes
self.aggr_method = aggr_method
self.motionNet = GCNRig(chn_feature=3, chn_output=32, aggr=aggr)
if self.aggr_method == "attn":
self.aggragator = TemporalAttn(input_size=32, num_heads=2, hidden_size=64, dim_feedforward=512, output_size=64)
self.jointnet = GCNRig(chn_feature=64, chn_output=chn_output, aggr=aggr)
else:
self.jointnet = GCNRig(chn_feature=32, chn_output=chn_output, aggr=aggr)
def forward(self, data, input_flow):
geo_edge_index, tpl_edge_index, batch = data.geo_edge_index, data.tpl_edge_index, data.batch
motion_all = []
for t in range(self.num_keyframes):
motion_t = self.motionNet(data.pos, input_flow[:, 3 * t:3 * t + 3], tpl_edge_index, geo_edge_index, batch)
motion_t = torch.nn.functional.normalize(motion_t, dim=1)
motion_all.append(motion_t)
motion_all = torch.stack(motion_all, dim=1)
if self.aggr_method == "attn":
motion_aggr = self.aggragator(motion_all)
elif self.aggr_method == "mean":
motion_aggr = torch.mean(motion_all, dim=1)
elif self.aggr_method == "max":
motion_aggr = torch.max(motion_all, dim=1)[0]
else:
raise NotImplementedError
motion_aggr = torch.nn.functional.normalize(motion_aggr, dim=1)
pred_shift = self.jointnet(data.pos, motion_aggr, tpl_edge_index, geo_edge_index, batch)
return motion_all, motion_aggr, pred_shift
class MaskNetMotion(torch.nn.Module):
def __init__(self, num_keyframes, chn_output, aggr_method, aggr='max'):
super(MaskNetMotion, self).__init__()
self.num_keyframes = num_keyframes
self.aggr_method = aggr_method
self.motionNet = GCNRig(chn_feature=3, chn_output=32, aggr=aggr)
if self.aggr_method == "attn":
self.aggragator = TemporalAttn(input_size=32, num_heads=2, hidden_size=64, dim_feedforward=512, output_size=64)
self.masknet = GCNRig(chn_feature=64, chn_output=chn_output, aggr=aggr)
else:
self.masknet = GCNRig(chn_feature=32, chn_output=chn_output, aggr=aggr)
def forward(self, data, input_flow):
geo_edge_index, tpl_edge_index, batch = data.geo_edge_index, data.tpl_edge_index, data.batch
motion_all = []
for t in range(self.num_keyframes):
motion_t = self.motionNet(data.pos, input_flow[:, 3 * t:3 * t + 3], tpl_edge_index, geo_edge_index, batch)
motion_t = torch.nn.functional.normalize(motion_t, dim=1)
motion_all.append(motion_t)
motion_all = torch.stack(motion_all, dim=1)
if self.aggr_method == "attn":
motion_aggr = self.aggragator(motion_all)
elif self.aggr_method == "mean":
motion_aggr = torch.mean(motion_all, dim=1)
elif self.aggr_method == "max":
motion_aggr = torch.max(motion_all, dim=1)[0]
else:
raise NotImplementedError
motion_aggr = torch.nn.functional.normalize(motion_aggr, dim=1)
pred_mask = self.masknet(data.pos, motion_aggr, tpl_edge_index, geo_edge_index, batch)
return motion_all, motion_aggr, pred_mask
class SkinNet_inner(torch.nn.Module):
def __init__(self, nearest_bone, use_Dg, use_Lf, motion_dim, use_motion, aggr='max'):
super(SkinNet_inner, self).__init__()
self.use_Dg = use_Dg
self.use_Lf = use_Lf
self.num_nearest_bone = nearest_bone
if self.use_Dg and self.use_Lf:
input_dim = 3 + self.num_nearest_bone * 8
elif self.use_Dg and not self.use_Lf:
input_dim = 3 + self.num_nearest_bone * 7
elif self.use_Lf and not self.use_Dg:
input_dim = 3 + self.num_nearest_bone * 7
else:
input_dim = 3 + self.num_nearest_bone * 6
self.gcu1 = GCUMotion(in_channels=motion_dim, out_channels=256, in_channel_pos=input_dim, dim_pos_feat=64, aggr=aggr)
self.gcu2 = GCUMotion(in_channels=256, out_channels=256, in_channel_pos=input_dim, dim_pos_feat=64, aggr=aggr)
self.gcu3 = GCUMotion(in_channels=256, out_channels=256, in_channel_pos=input_dim, dim_pos_feat=64, aggr=aggr)
self.multi_layer_tranform2 = MLP([256, 512, 1024])
self.cls_branch = Sequential(MLP([1024 + 256, 1024, 512]), Linear(512, self.num_nearest_bone))
def forward(self, data, motion):
samples = data.skin_input
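        # data.skin_input packs 8 attributes per candidate bone; judging from the masking below,
        # index 6 of each 8-attribute group is the Dg term and index 7 the Lf term, and disabled
        # terms are dropped before the remaining columns are fed to the network.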
if self.use_Dg and self.use_Lf:
samples = samples[:, 0: 8 * self.num_nearest_bone]
elif self.use_Dg and not self.use_Lf:
samples = samples[:, np.arange(samples.shape[1]) % 8 != 7]
samples = samples[:, 0: 7 * self.num_nearest_bone]
elif self.use_Lf and not self.use_Dg:
samples = samples[:, np.arange(samples.shape[1]) % 8 != 6]
samples = samples[:, 0: 7 * self.num_nearest_bone]
else:
samples = samples[:, np.arange(samples.shape[1]) % 8 != 7]
samples = samples[:, np.arange(samples.shape[1]) % 7 != 6]
samples = samples[:, 0: 6 * self.num_nearest_bone]
raw_input = torch.cat([data.pos, samples], dim=1)
x_1 = self.gcu1(raw_input, motion, data.tpl_edge_index, data.geo_edge_index)
x_global = self.multi_layer_tranform2(x_1)
x_global, _ = scatter_max(x_global, data.batch, dim=0)
x_2 = self.gcu2(raw_input, x_1, data.tpl_edge_index, data.geo_edge_index)
x_3 = self.gcu3(raw_input, x_2, data.tpl_edge_index, data.geo_edge_index)
x_global = torch.repeat_interleave(x_global, torch.bincount(data.batch), dim=0)
x_4 = torch.cat([x_3, x_global], dim=1)
skin_cls_pred = self.cls_branch(x_4)
return skin_cls_pred
class SkinMotion(torch.nn.Module):
def __init__(self, nearest_bone, use_Dg, use_Lf, num_keyframes, use_motion, motion_dim, aggr='max'):
super(SkinMotion, self).__init__()
self.num_keyframes = num_keyframes
self.motion_dim = motion_dim
self.motionNet = GCNRig(chn_feature=3, chn_output=motion_dim, aggr=aggr)
self.aggragator = TemporalAttn(input_size=motion_dim, num_heads=2, hidden_size=64, dim_feedforward=512, output_size=motion_dim)
self.skinNet = SkinNet_inner(nearest_bone, use_Dg, use_Lf, motion_dim, use_motion, aggr)
def forward(self, data, input_flow):
geo_edge_index, tpl_edge_index, batch = data.geo_edge_index, data.tpl_edge_index, data.batch
motion_all = []
for t in range(self.num_keyframes):
motion_t = self.motionNet(data.pos, input_flow[:, 3 * t:3 * t + 3], tpl_edge_index, geo_edge_index, batch)
motion_t = torch.nn.functional.normalize(motion_t, dim=1)
motion_all.append(motion_t)
motion_all = torch.stack(motion_all, dim=1)
motion_aggr = self.aggragator(motion_all)
motion_aggr = torch.nn.functional.normalize(motion_aggr, dim=1)
skin_cls_pred = self.skinNet(data, motion_aggr)
return motion_all, motion_aggr, skin_cls_pred
def jointnet_motion(**kwargs):
model = JointNetMotion(num_keyframes=kwargs["num_keyframes"], chn_output=kwargs["chn_output"], aggr_method=kwargs["aggr_method"])
return model
def masknet_motion(**kwargs):
model = MaskNetMotion(num_keyframes=kwargs["num_keyframes"], chn_output=kwargs["chn_output"], aggr_method=kwargs["aggr_method"])
return model
def skinnet_motion(**kwargs):
model = SkinMotion(nearest_bone=kwargs["nearest_bone"], use_Dg=kwargs["use_Dg"],
use_Lf=kwargs["use_Lf"], num_keyframes=kwargs["num_keyframes"],
use_motion=kwargs["use_motion"], motion_dim=kwargs["motion_dim"])
    return model
| 11,560 | 51.55 | 135 | py |
MoRig | MoRig-master/models/nn_util.py | """
This file belongs to the MultiBodySync code repository and is distributed for free.
Author: Jiahui Huang <huang-jh18@mails.tsinghua.edu.cn>
"""
import numpy as np
import torch
import torch.nn as nn
class GroupNorm(nn.Sequential):
def __init__(self, in_size, num_groups, name=""):
super(GroupNorm, self).__init__()
self.add_module(name + "gn", nn.GroupNorm(num_groups, in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0.0)
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=""):
super(_BNBase, self).__init__()
self.add_module(name + "bn", batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0)
class BatchNorm1d(_BNBase):
def __init__(self, in_size, name=""):
super(BatchNorm1d, self).__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class BatchNorm2d(_BNBase):
def __init__(self, in_size, name=""):
super(BatchNorm2d, self).__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
class _INBase(nn.Sequential):
def __init__(self, in_size, instance_norm=None, name=""):
super(_INBase, self).__init__()
self.add_module(name + "in", instance_norm(in_size))
class InstanceNorm1d(_INBase):
def __init__(self, in_size, name=""):
super(InstanceNorm1d, self).__init__(in_size, instance_norm=nn.InstanceNorm1d, name=name)
class InstanceNorm2d(_INBase):
def __init__(self, in_size, name=""):
super(InstanceNorm2d, self).__init__(in_size, instance_norm=nn.InstanceNorm2d, name=name)
def get_norm_layer(layer_def, dimension, **kwargs):
if layer_def is None:
return nn.Identity()
class_name = layer_def["class"]
kwargs.update(layer_def)
del kwargs["class"]
return {
"GroupNorm": GroupNorm,
"BatchNorm": [BatchNorm1d, BatchNorm2d][dimension - 1],
"InstanceNorm": [InstanceNorm1d, InstanceNorm2d][dimension - 1]
}[class_name](**kwargs)
class _ConvBase(nn.Sequential):
def __init__(self, in_size, out_size, kernel_size, stride, padding, dilation,
activation, bn, bn_dim, init, conv=None,
bias=True, preact=False, name=""):
super(_ConvBase, self).__init__()
bias = bias and (bn is None)
conv_unit = conv(
in_size,
out_size,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
init(conv_unit.weight)
if bias:
nn.init.constant_(conv_unit.bias, 0)
if bn is not None:
if not preact:
bn_unit = get_norm_layer(bn, bn_dim, in_size=out_size)
else:
bn_unit = get_norm_layer(bn, bn_dim, in_size=in_size)
if preact:
if bn is not None:
self.add_module(name + 'normlayer', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'conv', conv_unit)
if not preact:
if bn is not None:
self.add_module(name + 'normlayer', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
class Conv1d(_ConvBase):
def __init__(self, in_size, out_size, kernel_size=1, stride=1, padding=0, dilation=1,
activation=nn.ReLU(inplace=True), bn=None, init=nn.init.kaiming_normal_,
bias=True, preact=False, name=""):
super(Conv1d, self).__init__(
in_size, out_size, kernel_size, stride, padding, dilation,
activation, bn, 1,
init, conv=nn.Conv1d,
bias=bias, preact=preact, name=name)
class Conv2d(_ConvBase):
def __init__(self, in_size, out_size, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1),
activation=nn.ReLU(inplace=True), bn=None, init=nn.init.kaiming_normal_,
bias=True, preact=False, name=""):
super(Conv2d, self).__init__(
in_size, out_size, kernel_size, stride, padding, dilation,
activation, bn, 2,
init, conv=nn.Conv2d,
bias=bias, preact=preact, name=name)
class FC(nn.Sequential):
def __init__(self,
in_size,
out_size,
activation=nn.ReLU(inplace=True),
bn=None,
init=None,
preact=False,
name=""):
super(FC, self).__init__()
fc = nn.Linear(in_size, out_size, bias=bn is None)
if init is not None:
init(fc.weight)
if bn is None:
nn.init.constant_(fc.bias, 0)
if bn is not None:
if not preact:
bn_unit = get_norm_layer(bn, 1, in_size=out_size)
else:
bn_unit = get_norm_layer(bn, 1, in_size=in_size)
if preact:
if bn is not None:
self.add_module(name + 'normlayer', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'fc', fc)
if not preact:
if bn is not None:
self.add_module(name + 'normlayer', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
class SharedMLP(nn.Sequential):
def __init__(self, args,
bn=None, activation=nn.ReLU(inplace=True),
preact=False, first=False, name=""):
super(SharedMLP, self).__init__()
for i in range(len(args) - 1):
self.add_module(
name + 'layer{}'.format(i),
Conv2d(
args[i],
args[i + 1],
bn=bn if (not first or not preact or (i != 0)) else None,
activation=activation if (not first or not preact or
(i != 0)) else None,
preact=preact
))
def knead_leading_dims(n_dim: int, data: torch.Tensor):
if data is None:
return None
data_dim = list(data.size())
knead_size = np.prod(data_dim[:n_dim])
new_size = tuple([knead_size, *data_dim[n_dim:]])
return data.view(new_size)
def break_leading_dim(dim_size: list, data: torch.Tensor):
if data is None:
return None
data_dim = list(data.size())
new_size = tuple([*dim_size, *data_dim[1:]])
return data.view(new_size)
class Seq(nn.Sequential):
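    """Fluent builder over nn.Sequential: every method appends a layer and returns self, e.g.
    (illustrative sketch) Seq(3).conv1d(64).conv1d(128).fc(10) builds a 3->64->128->10 stack."""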
def __init__(self, input_channels):
super(Seq, self).__init__()
self.count = 0
self.current_channels = input_channels
def conv1d(self, out_size, kernel_size=1, stride=1, padding=0, dilation=1, activation=nn.ReLU(inplace=True), leaky=False,
bn=None, init=nn.init.kaiming_normal_, bias=True, preact=False, name=""):
if leaky:
activation = nn.LeakyReLU(0.1, inplace=True)
self.add_module(
str(self.count),
Conv1d(
self.current_channels, out_size, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, activation=activation, bn=bn, init=init,
bias=bias, preact=preact, name=name))
self.count += 1
self.current_channels = out_size
return self
def conv2d(self, out_size, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1),
activation=nn.ReLU(inplace=True), bn=None, init=nn.init.kaiming_normal_, bias=True, preact=False, name=""):
self.add_module(
str(self.count),
Conv2d(
self.current_channels, out_size, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, activation=activation, bn=bn, init=init,
bias=bias, preact=preact, name=name))
self.count += 1
self.current_channels = out_size
return self
def fc(self, out_size, activation=nn.ReLU(inplace=True), bn=None, init=None, preact=False, name=""):
self.add_module(
str(self.count),
FC(self.current_channels,
out_size, activation=activation, bn=bn, init=init, preact=preact, name=name))
self.count += 1
self.current_channels = out_size
return self
def dropout(self, p=0.5):
self.add_module(str(self.count), nn.Dropout(p=p))
self.count += 1
return self
def maxpool2d(self, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False):
self.add_module(
str(self.count),
nn.MaxPool2d(
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
return_indices=return_indices, ceil_mode=ceil_mode))
self.count += 1
        return self
| 9,112 | 32.627306 | 125 | py |
MoRig | MoRig-master/models/corrnet.py | import torch
from torch.nn import Sequential as Seq, Linear as Lin, Parameter
from torch_geometric.nn import knn
from torch_scatter import scatter_max
from models.basic_modules import MLP, SAModule, GlobalSAModule, FPModule, GCU
__all__ = ['corrnet']
class CorrNet(torch.nn.Module):
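    """Correspondence feature extractor: a GCU stack embeds mesh vertices, a PointNet++-style
    set-abstraction/feature-propagation branch embeds the observed point cloud, and both embeddings
    are L2-normalized so cosine similarity can be used for matching; an optional head predicts
    per-vertex visibility from each vertex and its best-matching point."""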
def __init__(self, input_feature, output_feature, temprature, aggr='max'):
super(CorrNet, self).__init__()
self.input_feature = input_feature
self.output_feature = output_feature
self.temprature = Parameter(torch.Tensor([temprature]))
self.vtx_gcu_1 = GCU(in_channels=3, out_channels=32, aggr=aggr)
self.vtx_gcu_2 = GCU(in_channels=32, out_channels=64, aggr=aggr)
self.vtx_gcu_3 = GCU(in_channels=64, out_channels=256, aggr=aggr)
self.vtx_gcu_4 = GCU(in_channels=256, out_channels=512, aggr=aggr)
self.vtx_mlp_glb = MLP([(32 + 64 + 256 + 512), 1024])
self.vtx_mlp = Seq(MLP([1024 + 3 + 32 + 64 + 256 + 512, 1024, 256]), Lin(256, output_feature))
self.pts_sa1_module = SAModule(0.5, 0.12, MLP([input_feature, 32, 32, 64]), max_num_neighbors=64)
self.pts_sa2_module = SAModule(0.25, 0.25, MLP([64 + 3, 64, 64, 128]), max_num_neighbors=64)
self.pts_sa3_module = SAModule(0.25, 0.5, MLP([128 + 3, 256, 256, 256]), max_num_neighbors=64)
self.pts_sa4_module = GlobalSAModule(MLP([256 + 3, 256, 256, 512]))
self.pts_fp4_module = FPModule(1, MLP([512 + 256, 256, 256]))
self.pts_fp3_module = FPModule(3, MLP([256 + 128, 256, 128]))
self.pts_fp2_module = FPModule(3, MLP([128 + 64, 128, 64]))
self.pts_fp1_module = FPModule(3, MLP([64, 64, 64]))
self.pts_mlp = Seq(MLP([64, 64]), Lin(64, output_feature))
self.lin_vismask = Seq(MLP([2 * output_feature + 1, 256, 128, 64]), Lin(64, 1))
def forward(self, data, train_vismask, random_start=True):
geo_edge_index, tpl_edge_index, batch = data.geo_edge_index, data.tpl_edge_index, data.vtx_batch
        # mesh branch: stacked graph convolutions over topological and geodesic edges
        x_1 = self.vtx_gcu_1(data.vtx, tpl_edge_index, geo_edge_index)
x_2 = self.vtx_gcu_2(x_1, tpl_edge_index, geo_edge_index)
x_3 = self.vtx_gcu_3(x_2, tpl_edge_index, geo_edge_index)
x_4 = self.vtx_gcu_4(x_3, tpl_edge_index, geo_edge_index)
x_5 = self.vtx_mlp_glb(torch.cat([x_1, x_2, x_3, x_4], dim=1))
x_global, _ = scatter_max(x_5, data.vtx_batch, dim=0)
x_global = torch.repeat_interleave(x_global, torch.bincount(data.vtx_batch), dim=0)
x_6 = torch.cat([x_global, data.vtx, x_1, x_2, x_3, x_4], dim=1)
out_vtx = self.vtx_mlp(x_6)
out_vtx = torch.nn.functional.normalize(out_vtx, dim=1)
        # point-cloud branch: PointNet++-style set abstraction and feature propagation
        pts_sa0_out = (None, data.pts, data.pts_batch)
pts_sa1_out = self.pts_sa1_module(*pts_sa0_out, random_start)
pts_sa2_out = self.pts_sa2_module(*pts_sa1_out, random_start)
pts_sa3_out = self.pts_sa3_module(*pts_sa2_out, random_start)
pts_sa4_out = self.pts_sa4_module(*pts_sa3_out)
pts_fp4_out = self.pts_fp4_module(*pts_sa4_out, *pts_sa3_out)
pts_fp3_out = self.pts_fp3_module(*pts_fp4_out, *pts_sa2_out)
pts_fp2_out = self.pts_fp2_module(*pts_fp3_out, *pts_sa1_out)
out_pts, _, _ = self.pts_fp1_module(*pts_fp2_out, *pts_sa0_out)
out_pts = self.pts_mlp(out_pts)
out_pts = torch.nn.functional.normalize(out_pts, dim=1)
if train_vismask: # torch_geometric.nn.knn only works on cuda
if torch.cuda.is_available():
assign_index = knn(out_pts, out_vtx, 1, data.pts_batch, data.vtx_batch, cosine=True)
out_combine = torch.cat([out_vtx[assign_index[0]], out_pts[assign_index[1]],torch.sum(out_vtx[assign_index[0]] * out_pts[assign_index[1]], dim=1)[:, None]], dim=1)
else:
out_combine = []
for i in range(len(torch.unique(data.vtx_batch))):
with torch.no_grad():
feature_similarity_i = torch.matmul(out_vtx[data.vtx_batch==i], out_pts[data.pts_batch==i].transpose(0, 1))
max_sim, nnidx = torch.max(feature_similarity_i, dim=1)
out_combine.append(torch.cat([out_vtx[data.vtx_batch==i], out_pts[data.pts_batch==i][nnidx], max_sim.unsqueeze(dim=1)], dim=1))
out_combine = torch.cat(out_combine, dim=0)
out_vismask = self.lin_vismask(out_combine)
else:
out_vismask = None
return out_vtx, out_pts, out_vismask, self.temprature
def corrnet(**kwargs):
model = CorrNet(input_feature=kwargs['input_feature'], output_feature=kwargs['output_feature'], temprature=kwargs['temprature'])
return model | 4,690 | 56.207317 | 179 | py |
MoRig | MoRig-master/models/customized_losses.py | import numpy as np
import torch
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch_scatter import scatter_max
from torch_cluster import fps
from itertools import combinations
def log_ratio_loss(pred_feature, gt_skin, batch):
num_sample = 50
epsilon = 1e-6
pairs = np.array(list(combinations(np.arange(num_sample), 2)))
feature_sample = []
gt_skin_sample = []
for i in range(len(torch.unique(batch))):
sample_ids = np.random.choice((batch == i).sum().item(), num_sample, replace=False)
feature_i = pred_feature[batch == i][sample_ids]
gt_skin_i = gt_skin[batch == i][sample_ids]
feature_sample.append(feature_i)
gt_skin_sample.append(gt_skin_i)
feature_sample = torch.stack(feature_sample, dim=0)
gt_skin_sample = torch.stack(gt_skin_sample, dim=0)
dist = torch.sum((feature_sample[:, pairs[:, 0], None, :] - feature_sample[:, None, pairs[:, 1], :])**2, dim=-1)
gt_dist = torch.sum((gt_skin_sample[:, pairs[:, 0], None, :] - gt_skin_sample[:, None, pairs[:, 1], :])**2, dim=-1)
log_dist = torch.log(dist + epsilon)
log_gt_dist = torch.log(gt_dist + epsilon)
diff_log_dist = log_dist.permute((0, 2, 1)) - log_dist
diff_log_gt_dist = log_gt_dist.permute((0, 2, 1)) - log_gt_dist
log_ratio_loss = (diff_log_dist - diff_log_gt_dist).pow(2)
# uniform weight coefficients
    idxs = torch.arange(len(pairs)).to(pred_feature.device)
indc = idxs.repeat(len(pairs),1).t() < idxs.repeat(len(pairs), 1)
wgt = indc.clone().float()
wgt = wgt.div(wgt.sum())
loss = log_ratio_loss.mul(wgt).sum()
return loss / len(torch.unique(batch))
def hungarian_matching(pred_seg, gt_seg):
interset = np.matmul(pred_seg.T, gt_seg)
matching_cost = 1-np.divide(interset, np.expand_dims(np.sum(pred_seg,0), 1)+np.sum(gt_seg, axis=0, keepdims=True)-interset+1e-8)
row_ind, col_ind = linear_sum_assignment(matching_cost)
return np.vstack((row_ind, col_ind))
def motionLoss(pred_Rs, pred_ts, xyz, gt_flow, gt_seg):
""" pred_Rs: B x nsmp x 3 x 3,
pred_ts: B x nsmp x 1 x 3,
xyz: B x nsmp x 3,
gt_flow: B x nsmp x 3
gt_seg: B x nsmp x nstep """
ppdist = torch.unsqueeze(xyz, 1) - torch.unsqueeze(xyz, 2) # B x nsmp x nsmp x 3
ppdist = torch.matmul(ppdist, pred_Rs) + pred_ts + torch.unsqueeze(gt_flow, 2) # B x nsmp x nsmp x 3
loss_motion = ppdist-torch.unsqueeze(gt_flow,1)
gt_seg = torch.sum(torch.mul(torch.unsqueeze(gt_seg, 2), torch.unsqueeze(gt_seg, 1)), dim=-1)
gt_seg_normalized = torch.div(gt_seg, gt_seg.sum(dim=2, keepdim=True)+1e-8)
loss_motion = torch.sum(torch.square(loss_motion), dim=-1) # B x nsmp x nsmp
loss_motion = torch.mul(loss_motion, gt_seg_normalized)
loss_motion = torch.div(torch.sum(loss_motion), torch.sum(gt_seg_normalized))
return loss_motion
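# Illustrative usage sketch for motionLoss (added for clarity; not part of the
# original file). The helper name and the toy shapes below are our own choices:
# B=1 shape, nsmp=8 sampled points, and a 2-column one-hot part indicator.
def _motion_loss_shape_example():
    B, n = 1, 8
    pred_Rs = torch.eye(3).expand(B, n, 3, 3)            # per-point rotations
    pred_ts = torch.zeros(B, n, 1, 3)                    # per-point translations
    xyz = torch.rand(B, n, 3)
    gt_flow = torch.rand(B, n, 3)
    gt_seg = torch.eye(2)[torch.randint(0, 2, (B, n))]   # B x nsmp x nstep
    return motionLoss(pred_Rs, pred_ts, xyz, gt_flow, gt_seg)  # scalar loss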
def groupingLoss(pred_support_matrix, seg_sub):
""" pred_support_matrix: B x nsmp x nsmp,
gt_seg: B x nsmp x nstep """
gt_seg = torch.sum(torch.mul(torch.unsqueeze(seg_sub, 2), torch.unsqueeze(seg_sub, 1)), dim=-1)
loss_group = torch.nn.functional.binary_cross_entropy_with_logits(pred_support_matrix, gt_seg.float())
return loss_group
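# Illustrative usage sketch for groupingLoss (added for clarity; not part of the
# original file). Shapes follow the docstring above: the support matrix holds
# pairwise same-part logits and seg_sub is a one-hot part indicator.
def _grouping_loss_example():
    seg_sub = torch.eye(2)[torch.tensor([0, 0, 1, 1])].unsqueeze(0)  # 1 x 4 x 2
    pred_support_logits = torch.randn(1, 4, 4)                       # 1 x 4 x 4
    return groupingLoss(pred_support_logits, seg_sub)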
def iouLoss(pred_seg, gt_seg, batch):
"""
pred_seg: B, nsmp
gt_seg: B,
batch: B
"""
pred_seg_np = pred_seg.data.to("cpu").numpy()
gt_seg_np = gt_seg.data.to("cpu").numpy()
batch_np = batch.data.to("cpu").numpy()
loss = 0.0
for i in range(len(torch.unique(batch))):
gt_seg_i = gt_seg[batch==i]
pred_seg_np_i = pred_seg_np[batch_np==i]
gt_seg_np_i = gt_seg_np[batch_np==i]
gt_seg_expand_i = torch.zeros((len(gt_seg_np_i), np.max(gt_seg_np_i)+1)).long().to(pred_seg.device)
        gt_seg_expand_np_i = np.zeros((len(gt_seg_np_i), np.max(gt_seg_np_i)+1), dtype=int)
np.put_along_axis(gt_seg_expand_np_i, indices=gt_seg_np_i[:, None], values=1, axis=1)
gt_seg_expand_i.scatter_(dim=1, index=gt_seg_i[:, None], src=torch.ones_like(gt_seg_i[:, None]))
matching_id_i = hungarian_matching(pred_seg_np_i, gt_seg_expand_np_i)
pred_seg_i_reorder = pred_seg[batch==i][:, matching_id_i[0]]
gt_seg_i_reorder = gt_seg_expand_i[:, matching_id_i[1]]
interset = torch.sum(pred_seg_i_reorder * gt_seg_i_reorder, dim=0)
cost_i = 1 - torch.div(interset, pred_seg_i_reorder.sum(dim=0) + gt_seg_i_reorder.sum(dim=0) - interset + 1e-8)
loss = loss + cost_i.mean()
return loss / len(torch.unique(batch))
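# Illustrative usage sketch for iouLoss (added for clarity; not part of the
# original file): 6 points of a single shape with 2 soft predicted segments,
# matched against the integer ground-truth labels via hungarian_matching above.
def _iou_loss_example():
    pred_seg = torch.tensor([[0.9, 0.1], [0.8, 0.2], [0.7, 0.3],
                             [0.2, 0.8], [0.1, 0.9], [0.3, 0.7]])
    gt_seg = torch.tensor([0, 0, 0, 1, 1, 1])
    batch = torch.zeros(6, dtype=torch.long)
    return iouLoss(pred_seg, gt_seg, batch)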
def infoNCE(vtx_feature, pts_feature, corr_v2p, corr_p2v, vtx_batch, pts_batch, corr_v2p_batch, corr_p2v_batch, tau):
cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
loss = 0.0
for i in range(len(torch.unique(vtx_batch))):
vtx_feature_i = vtx_feature[vtx_batch == i]
pts_feature_i = pts_feature[pts_batch == i]
# v2p
corr_v2p_i = corr_v2p[corr_v2p_batch == i]
if len(corr_v2p_i) == 0:
loss += 0.0
continue
anchor = vtx_feature_i[corr_v2p_i[:, 0]]
prod = torch.mm(anchor, pts_feature_i.T) / tau
label_i = corr_v2p_i[:, 1]
loss_i = cross_entropy_loss(prod, label_i)
loss += loss_i.mean()
# p2v
corr_p2v_i = corr_p2v[corr_p2v_batch == i]
if len(corr_p2v_i) == 0:
loss += 0.0
continue
anchor = pts_feature_i[corr_p2v_i[:, 0]]
prod = torch.mm(anchor, vtx_feature_i.T) / tau
label_i = corr_p2v_i[:, 1]
loss_i = cross_entropy_loss(prod, label_i)
loss += loss_i.mean()
return loss / len(torch.unique(vtx_batch))
def multi_pos_infoNCE(pred_feature, gt_skin, batch):
cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='mean')
loss = 0.0
for i in range(len(torch.unique(batch))):
sample_ids = np.random.choice((batch == i).sum().item(), 512, replace=False)
feature_i = pred_feature[batch == i][sample_ids]
gt_skin_i = gt_skin[batch == i][sample_ids]
gt_sim = (2 - torch.sum(torch.abs(gt_skin_i[None] - gt_skin_i[:, None]), axis=-1)) / 2.0
gt_sim = (gt_sim > 0.9).float()
pos_ids = torch.multinomial(gt_sim, 10, replacement=True)
neg_ids = torch.multinomial(1 - gt_sim, 200, replacement=True)
prod = torch.mm(feature_i, feature_i.T)
prod_neg = torch.gather(prod, dim=1, index=neg_ids)
loss_i = 0.0
for j in range(10):
prod_pos = torch.gather(prod, dim=1, index=pos_ids[:, j][:, None])
loss_i += cross_entropy_loss(torch.cat((prod_pos, prod_neg), dim=1), torch.zeros(512).long().to(pred_feature.device))
loss = loss + loss_i / 10
return loss / len(torch.unique(batch))
def hingeLoss(pred_feature, gt_label, batch_sub):
loss = 0.0
for i in range(len(torch.unique(batch_sub))):
sample_ids = np.random.choice((batch_sub == i).sum().item(), 256, replace=False)
gt_label_i = gt_label[batch_sub == i][sample_ids]
#gt_seg_expand_i = torch.nn.functional.one_hot(gt_label_i, num_classes=gt_label_i.max()+1)
pred_sim = torch.matmul(pred_feature[batch_sub == i][sample_ids], pred_feature[batch_sub==i][sample_ids].transpose(0, 1))
pred_dist = (1 - pred_sim) / 2.0
#gt_sim = torch.matmul(gt_seg_expand_i.float(), gt_seg_expand_i.transpose(0, 1).float())
gt_sim = (2 - torch.sum(torch.abs(gt_label_i[None] - gt_label_i[:, None]), axis=-1)) / 2.0
gt_sim = (gt_sim > 0.9).float()
weight = 10 * gt_sim
weight[weight == 0] = 1
gt_sim[gt_sim == 0] = -1
#loss += torch.nn.functional.hinge_embedding_loss(pred_dist, gt_sim, margin=0.2)
        loss_i = torch.nn.functional.hinge_embedding_loss(pred_dist, gt_sim, margin=0.2, reduction="none")
        # weighted mean over pairs: positive pairs are up-weighted by 10x (weight applied once)
        loss = loss + (loss_i * weight).sum() / weight.sum()
return loss / len(torch.unique(batch_sub))
def transLoss(adj_matrix, gt_seg, batch):
"""
adj_matrix: BKxKxT
gt_seg: BK
batch: BK
"""
loss = 0.0
for i in range(len(torch.unique(batch))):
gt_seg_i = gt_seg[batch==i]
adj_matrix_i = adj_matrix[batch==i]
gt_seg_expand_i = torch.nn.functional.one_hot(gt_seg_i, num_classes=gt_seg_i.max()+1)
gt_sim = torch.matmul(gt_seg_expand_i.float(), gt_seg_expand_i.transpose(0, 1).float())
idx_same_part = torch.nonzero(gt_sim, as_tuple=False)
dist_i = adj_matrix_i[idx_same_part[:, 0], idx_same_part[:, 1], :]
loss += dist_i.mean()
return loss / len(torch.unique(batch))
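# Illustrative usage sketch for transLoss (added for clarity; not part of the
# original file): K=4 keypoints of one shape, T=3 time steps, two rigid parts.
def _trans_loss_example():
    adj_matrix = torch.rand(4, 4, 3)               # BK x K x T pairwise distances
    gt_seg = torch.tensor([0, 0, 1, 1])            # part label per keypoint
    batch = torch.zeros(4, dtype=torch.long)
    return transLoss(adj_matrix, gt_seg, batch)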
def multiLableBCE(feature_in, gt_seg, batch, tau=0.05):
cross_entropy_loss = torch.nn.BCEWithLogitsLoss(reduction='mean')
loss = 0.0
for i in range(len(torch.unique(batch))):
feature_i = feature_in[batch==i]
prod = torch.mm(feature_i, feature_i.T) / tau
gt_seg_i = gt_seg[batch==i]
gt_seg_expand_i = torch.nn.functional.one_hot(gt_seg_i, num_classes=gt_seg_i.max()+1)
gt_sim = torch.matmul(gt_seg_expand_i.float(), gt_seg_expand_i.transpose(0, 1).float())
loss += cross_entropy_loss(prod, gt_sim)
loss = loss / len(torch.unique(batch))
return loss
def cross_entropy_with_probs(input, target, weight=None, reduction="mean"):
input_logsoftmax = F.log_softmax(input, dim=1)
cum_losses = -target * input_logsoftmax
if weight is not None:
cum_losses = cum_losses * weight
if reduction == "none":
return cum_losses
elif reduction == "mean":
return cum_losses.sum(dim=1).mean()
elif reduction == "sum":
return cum_losses.sum()
else:
raise ValueError("Keyword 'reduction' must be one of ['none', 'mean', 'sum']")
def chamfer_distance_with_average(p1, p2):
    '''
    Calculate the Chamfer distance between two point sets.
    :param p1: size [1, N, D]
    :param p2: size [1, M, D]
    :return: average of the two directed Chamfer distances between the point sets
    '''
assert p1.size(0) == 1 and p2.size(0) == 1
assert p1.size(2) == p2.size(2)
p1 = p1.repeat(p2.size(1), 1, 1)
p1 = p1.transpose(0, 1)
p2 = p2.repeat(p1.size(0), 1, 1)
dist = torch.add(p1, torch.neg(p2))
dist_norm = torch.norm(dist, 2, dim=2)
dist1 = torch.min(dist_norm, dim=1)[0]
dist2 = torch.min(dist_norm, dim=0)[0]
loss = 0.5 * ((torch.mean(dist1)) + (torch.mean(dist2)))
return loss
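# Illustrative usage sketch for chamfer_distance_with_average (added for clarity;
# not part of the original file). Both point sets need a leading batch dim of 1.
def _chamfer_example():
    p1 = torch.rand(1, 128, 3)
    p2 = torch.rand(1, 256, 3)
    return chamfer_distance_with_average(p1, p2)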
def skin_difference_loss(pred_skin, gt_skin, pos, batch):
ids_sub = fps(pos, batch=batch, ratio=0.25, random_start=True)
pred_skin_sub = pred_skin[ids_sub]
gt_skin_sub = gt_skin[ids_sub]
batch_sub = batch[ids_sub]
loss = 0.0
for i in range(len(torch.unique(batch_sub))):
pred_skin_i = pred_skin_sub[batch_sub == i]
gt_skin_i = gt_skin_sub[batch_sub == i]
pred_diffmat = torch.sum(torch.abs(pred_skin_i[:, None, :] - pred_skin_i[None, ...]), dim=-1)
gt_diffmat = torch.sum(torch.abs(gt_skin_i[:, None, :] - gt_skin_i[None, ...]), dim=-1)
pred_diffmat = pred_diffmat * (torch.abs(gt_diffmat) < 1e-6).float()
loss += pred_diffmat.mean()
return loss / len(torch.unique(batch))
def multi_positive_infonce_skinning(pred_feature, gt_skin, batch):
cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='mean')
loss = 0.0
for i in range(len(torch.unique(batch))):
sample_ids = np.random.choice((batch == i).sum().item(), 512, replace=False)
feature_i = pred_feature[batch == i][sample_ids]
gt_skin_i = gt_skin[batch == i][sample_ids]
gt_sim = (2 - torch.sum(torch.abs(gt_skin_i[None] - gt_skin_i[:, None]), dim=-1)) / 2.0
gt_sim = (gt_sim > 0.9).float()
pos_ids = torch.multinomial(gt_sim, 10, replacement=True)
neg_ids = torch.multinomial(1 - gt_sim, 200, replacement=True)
pred_sim = (2 - torch.sum(torch.abs(feature_i[:, None, :] - feature_i[None, ...]), dim=-1)) / 2.0
pred_neg = torch.gather(pred_sim, dim=1, index=neg_ids)
loss_i = 0.0
for j in range(10):
pred_pos = torch.gather(pred_sim, dim=1, index=pos_ids[:, j][:, None])
loss_i += cross_entropy_loss(torch.cat((pred_pos, pred_neg), dim=1), torch.zeros(512).long().to(pred_feature.device))
loss = loss + loss_i / 10
return loss / len(torch.unique(batch)) | 12,523 | 42.037801 | 132 | py |
MoRig | MoRig-master/datasets/dataset_shape.py | import os
import torch
import numpy as np
import glob
import open3d as o3d
from tqdm import tqdm
from torch_geometric.data import Data, InMemoryDataset
from torch_geometric.utils import add_self_loops
class GraphData(Data):
def __init__(self, vtx=None, pts=None, corr_v2p=None, corr_p2v=None, vismask=None, flow=None,
tpl_edge_index=None, geo_edge_index=None, name=None):
super(GraphData, self).__init__()
self.vtx = vtx
self.pts = pts
self.corr_v2p = corr_v2p
self.corr_p2v = corr_p2v
self.vismask = vismask
self.flow = flow
self.tpl_edge_index = tpl_edge_index
self.geo_edge_index = geo_edge_index
self.name = name
def __inc__(self, key, value, *args, **kwargs):
if "edge_index" in key:
return self.vtx.size(0)
else:
return super(GraphData, self).__inc__(key, value)
class ModelsResourcesShapeDataset(InMemoryDataset):
def __init__(self, root, transform=None, pre_transform=None):
super(ModelsResourcesShapeDataset, self).__init__(root, transform=transform, pre_transform=pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
raw_filelist = glob.glob(os.path.join(self.root, f'*_0.obj'))
return raw_filelist
@property
def processed_file_names(self):
return '{:s}_mr_shape_data.pt'.format(self.root.split('/')[-1])
def __len__(self):
return len(self.raw_paths)
def download(self):
pass
def process(self):
data_list = []
for i in tqdm(range(len(self.raw_paths))):
name = self.raw_paths[i].split('/')[-1].split('_')[0]
# load everything
mesh = o3d.io.read_triangle_mesh(self.raw_paths[i])
vtx = np.asarray(mesh.vertices)
pts = np.load(self.raw_paths[i].replace("_0.obj", "_pts.npy"))
flow = np.load(self.raw_paths[i].replace("_0.obj", "_flow.npy"))
corr_v2p = np.load(self.raw_paths[i].replace("_0.obj", "_corr_v2p.npy"))
corr_p2v = np.load(self.raw_paths[i].replace("_0.obj", "_corr_p2v.npy"))
vismask = np.load(self.raw_paths[i].replace("_0.obj", "_vismask.npy"))
tpl_e = np.loadtxt(self.raw_paths[i].replace('_0.obj', '_tpl_e.txt')).T
geo_e = np.loadtxt(self.raw_paths[i].replace('_0.obj', '_geo_e.txt')).T
vtx = torch.from_numpy(vtx).float()
pts = torch.from_numpy(pts).float()
flow = torch.from_numpy(flow).float()
corr_v2p = torch.from_numpy(corr_v2p).long()
corr_p2v = torch.from_numpy(corr_p2v).long()
vismask = torch.from_numpy(vismask).float()
tpl_e = torch.from_numpy(tpl_e).long()
geo_e = torch.from_numpy(geo_e).long()
tpl_e, _ = add_self_loops(tpl_e, num_nodes=vtx.size(0))
geo_e, _ = add_self_loops(geo_e, num_nodes=vtx.size(0))
# add to data class
data = GraphData(vtx=vtx, pts=pts, corr_v2p=corr_v2p, corr_p2v=corr_p2v, vismask=vismask, flow=flow,
tpl_edge_index=tpl_e, geo_edge_index=geo_e, name=name)
data_list.append(data)
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
| 3,400 | 39.975904 | 113 | py |
MoRig | MoRig-master/datasets/dataset_rig.py | import os
import torch
import numpy as np
import glob
from tqdm import tqdm
from torch_geometric.data import Data, InMemoryDataset
from torch_geometric.utils import add_self_loops
from utils.rig_parser import Rig
class RigDataset(InMemoryDataset):
def __init__(self, root):
super(RigDataset, self).__init__(root)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
raw_v_filelist = glob.glob(os.path.join(self.root, '*_vtx_traj.npy'))
return raw_v_filelist
@property
def processed_file_names(self):
return '{:s}_rig_data.pt'.format(self.root.split('/')[-1])
def __len__(self):
return len(self.raw_paths)
def download(self):
pass
def load_skin(self, filename):
with open(filename, 'r') as fin:
lines = fin.readlines()
bones = []
bone_names = []
input = []
label = []
nearest_bone_ids = []
loss_mask_all = []
for li in lines:
words = li.strip().split()
if words[0] == 'bones':
bone_names.append([words[1], words[2]])
bones.append([float(w) for w in words[3:]])
elif words[0] == 'bind':
words = [float(w) for w in words[1:]]
sample_input = []
sample_nearest_bone_ids = []
loss_mask = []
for i in range(self.num_nearest_bone):
if int(words[3 * i + 1]) == -1:
                    ## workaround: reuse the first nearest bone; however, words[3] may also be invalid.
sample_nearest_bone_ids.append(int(words[1]))
sample_input += bones[int(words[1])]
sample_input.append(words[2])
sample_input.append(int(words[3]))
loss_mask.append(0)
else:
sample_nearest_bone_ids.append(int(words[3 * i + 1]))
sample_input += bones[int(words[3 * i + 1])]
sample_input.append(words[3 * i + 2])
sample_input.append(int(words[3 * i + 3]))
loss_mask.append(1)
input.append(np.array(sample_input)[np.newaxis, :])
nearest_bone_ids.append(np.array(sample_nearest_bone_ids)[np.newaxis, :])
loss_mask_all.append(np.array(loss_mask)[np.newaxis, :])
elif words[0] == 'influence':
sample_label = np.array([float(w) for w in words[1:]])[np.newaxis, :]
label.append(sample_label)
input = np.concatenate(input, axis=0)
nearest_bone_ids = np.concatenate(nearest_bone_ids, axis=0)
label = np.concatenate(label, axis=0)
loss_mask_all = np.concatenate(loss_mask_all, axis=0)
return input, nearest_bone_ids, label, loss_mask_all, bone_names
def process(self):
data_list = []
self.num_nearest_bone = 20
num_max_joint = 48
for vtx_filename in tqdm(self.raw_paths):
v_traj = np.load(vtx_filename)
m = np.loadtxt(vtx_filename.replace('_vtx_traj.npy', '_attn.txt'))
tpl_e = np.loadtxt(vtx_filename.replace('_vtx_traj.npy', '_tpl_e.txt')).T
geo_e = np.loadtxt(vtx_filename.replace('_vtx_traj.npy', '_geo_e.txt')).T
rig = Rig(vtx_filename.replace('_vtx_traj.npy', '_rig.txt'))
joints = rig.pos
name = int(vtx_filename.split('/')[-1].split('_')[0])
nearest_jid = np.argmin(np.sum((joints[:, None, :] - v_traj[:, 0, :][None, ...])**2, axis=-1), axis=0)
offsets = joints[nearest_jid] - v_traj[:, 0, :]
gt_skin = np.zeros((rig.skins.shape[0], num_max_joint))
gt_skin[:, 0:rig.skins.shape[1]] = rig.skins
skin_input, skin_nn, skin_label, loss_mask, bone_names = self.load_skin(vtx_filename.replace('_vtx_traj.npy', '_skin.txt'))
# get nearest joint IDs for alignment
skin_nnjids = []
for vid in range(len(v_traj)):
skin_nnjids_v = []
for n in skin_nn[vid]:
skin_nnjids_v.append(rig.names.index(bone_names[n][0]))
skin_nnjids.append(np.array(skin_nnjids_v))
skin_nnjids = np.stack(skin_nnjids, 0)
# gt flow
gt_flow = []
for key_t in np.arange(20, 110, 20):
gt_flow.append(v_traj[:, key_t, :] - v_traj[:, 0, :])
gt_flow = np.concatenate(gt_flow, axis=1)
# pred flow: you need to first train a deformation module to get them ;)
pred_flow = []
for key_t in np.arange(1, 6):
pred_flow_t = np.load(os.path.join(self.root, f"pred_flow/{name}_{key_t}_pred_flow.npy"))
pred_flow.append(pred_flow_t)
pred_flow = np.concatenate(pred_flow, axis=1)
pos = torch.from_numpy(v_traj[:, 0, :]).float()
m = torch.from_numpy(m).float()
tpl_e = torch.from_numpy(tpl_e).long()
geo_e = torch.from_numpy(geo_e).long()
tpl_e, _ = add_self_loops(tpl_e, num_nodes=pos.size(0))
geo_e, _ = add_self_loops(geo_e, num_nodes=pos.size(0))
offsets = torch.from_numpy(offsets).float()
gt_flow = torch.from_numpy(gt_flow).float()
pred_flow = torch.from_numpy(pred_flow).float()
joints = torch.from_numpy(joints).float()
skin_input = torch.from_numpy(skin_input).float()
skin_label = torch.from_numpy(skin_label).float()
skin_nn = torch.from_numpy(skin_nn).long()
skin_nnjids = torch.from_numpy(skin_nnjids).long()
loss_mask = torch.from_numpy(loss_mask).long()
gt_skin = torch.from_numpy(gt_skin).float()
data_list.append(Data(pos=pos, tpl_edge_index=tpl_e, geo_edge_index=geo_e,
pred_flow=pred_flow, gt_flow=gt_flow, name=name,
mask=m, joints=joints, offsets=offsets, gt_skin=gt_skin,
skin_input=skin_input, skin_label=skin_label, skin_nn=skin_nn,
skin_nnjids=skin_nnjids, loss_mask=loss_mask))
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
| 6,470 | 44.893617 | 135 | py |
MoRig | MoRig-master/datasets/dataset_pose.py | import os, glob, numpy as np
from tqdm import tqdm
import torch
from torch_geometric.data import Data, Dataset, InMemoryDataset
from torch_geometric.utils import add_self_loops
class GraphData(Data):
def __init__(self, vtx_traj=None, pts_traj=None, corr_v2p_all=None, corr_p2v_all=None,
vismask_all=None, tpl_edge_index=None, geo_edge_index=None, name=None):
super(GraphData, self).__init__()
self.vtx_traj = vtx_traj
self.pts_traj = pts_traj
self.corr_v2p_all = corr_v2p_all
self.corr_p2v_all = corr_p2v_all
self.vismask_all = vismask_all
self.tpl_edge_index = tpl_edge_index
self.geo_edge_index = geo_edge_index
self.name = name
def __inc__(self, key, value, *args, **kwargs):
if "edge_index" in key:
return self.vtx_traj.size(0)
else:
return super(GraphData, self).__inc__(key, value)
class ModelsResourcesDataset(InMemoryDataset):
def __init__(self, root, transform=None, pre_transform=None):
super(ModelsResourcesDataset, self).__init__(root, transform=transform, pre_transform=pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
raw_filelist = glob.glob(os.path.join(self.root, f'*_vtx_traj.npy'))
return raw_filelist
@property
def processed_file_names(self):
return '{:s}_mr_pose_data.pt'.format(self.root.split('/')[-1])
def __len__(self):
return len(self.raw_paths)
def download(self):
pass
def process(self):
data_list = []
for i in tqdm(range(len(self.raw_paths))):
name = self.raw_paths[i].split('/')[-1].split('_')[0]
vtx_traj = np.load(self.raw_paths[i])
vtx_traj = vtx_traj.reshape(-1, 303)
pts_traj = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_pts_traj.npy"))
corr_v2p = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_corr_v2p.npy"))
corr_p2v = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_corr_p2v.npy"))
vismask = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_vismask.npy"))
tpl_e = np.loadtxt(self.raw_paths[i].replace('_vtx_traj.npy', '_tpl_e.txt')).T
geo_e = np.loadtxt(self.raw_paths[i].replace('_vtx_traj.npy', '_geo_e.txt')).T
# pick frames
vtx_frames = []
pts_frames = []
corr_v2p_frames = []
corr_p2v_frames = []
vismask_frames = []
for key_t in np.arange(0, 110, 20):
vtx_frames.append(vtx_traj[:, 3*key_t:3*(key_t+1)])
pts_frames.append(pts_traj[:, 3*key_t:3*(key_t+1)])
corr_v2p_frames.append(corr_v2p[corr_v2p[:, -1] == key_t])
corr_p2v_frames.append(corr_p2v[corr_p2v[:, -1] == key_t])
vismask_frames.append(vismask[:, key_t])
vtx_frames = np.concatenate(vtx_frames, axis=1)
pts_frames = np.concatenate(pts_frames, axis=1)
corr_v2p_frames = np.concatenate(corr_v2p_frames, axis=0)
corr_p2v_frames = np.concatenate(corr_p2v_frames, axis=0)
corr_v2p_frames[:, -1] = corr_v2p_frames[:, -1] / 20
corr_p2v_frames[:, -1] = corr_p2v_frames[:, -1] / 20
vismask_frames = np.stack(vismask_frames, axis=1)
# convert to tensor
vtx_traj = torch.from_numpy(vtx_frames).float()
pts_traj = torch.from_numpy(pts_frames).float()
corr_v2p = torch.from_numpy(corr_v2p_frames).long()
corr_p2v = torch.from_numpy(corr_p2v_frames).long()
vismask = torch.from_numpy(vismask_frames).float()
tpl_e = torch.from_numpy(tpl_e).long()
geo_e = torch.from_numpy(geo_e).long()
tpl_e, _ = add_self_loops(tpl_e, num_nodes=vtx_traj.size(0))
geo_e, _ = add_self_loops(geo_e, num_nodes=vtx_traj.size(0))
# add to data class
data = GraphData(vtx_traj=vtx_traj, pts_traj=pts_traj,
corr_v2p_all=corr_v2p, corr_p2v_all=corr_p2v, vismask_all=vismask,
tpl_edge_index=tpl_e, geo_edge_index=geo_e, name=name)
data_list.append(data)
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
class SeqModelsResourcesDataset(InMemoryDataset):
def __init__(self, root, transform=None, pre_transform=None):
super(SeqModelsResourcesDataset, self).__init__(root, transform=transform, pre_transform=pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
raw_filelist = glob.glob(os.path.join(self.root, f'*_vtx_traj.npy'))
return raw_filelist
@property
def processed_file_names(self):
return '{:s}_mr_seq_pose_data.pt'.format(self.root.split('/')[-1])
def __len__(self):
return len(self.raw_paths)
def download(self):
pass
def process(self):
data_list = []
for i in tqdm(range(len(self.raw_paths))):
name = self.raw_paths[i].split('/')[-1].split('_')[0]
vtx_traj = np.load(self.raw_paths[i])
vtx_traj = vtx_traj.reshape(-1, 303)
pts_traj = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_pts_traj.npy"))
corr_v2p = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_corr_v2p.npy"))
corr_p2v = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_corr_p2v.npy"))
vismask = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_vismask.npy"))
tpl_e = np.loadtxt(self.raw_paths[i].replace('_vtx_traj.npy', '_tpl_e.txt')).T
geo_e = np.loadtxt(self.raw_paths[i].replace('_vtx_traj.npy', '_geo_e.txt')).T
# pick frames
vtx_frames = []
pts_frames = []
corr_v2p_frames = []
corr_p2v_frames = []
vismask_frames = []
for key_t in np.arange(0, 21):
vtx_frames.append(vtx_traj[:, 3*key_t:3*(key_t+1)])
pts_frames.append(pts_traj[:, 3*key_t:3*(key_t+1)])
corr_v2p_frames.append(corr_v2p[corr_v2p[:, -1] == key_t])
corr_p2v_frames.append(corr_p2v[corr_p2v[:, -1] == key_t])
vismask_frames.append(vismask[:, key_t])
vtx_frames = np.concatenate(vtx_frames, axis=1)
pts_frames = np.concatenate(pts_frames, axis=1)
corr_v2p_frames = np.concatenate(corr_v2p_frames, axis=0)
corr_p2v_frames = np.concatenate(corr_p2v_frames, axis=0)
corr_v2p_frames[:, -1] = corr_v2p_frames[:, -1]
corr_p2v_frames[:, -1] = corr_p2v_frames[:, -1]
vismask_frames = np.stack(vismask_frames, axis=1)
# convert to tensor
vtx_traj = torch.from_numpy(vtx_frames).float()
pts_traj = torch.from_numpy(pts_frames).float()
corr_v2p = torch.from_numpy(corr_v2p_frames).long()
corr_p2v = torch.from_numpy(corr_p2v_frames).long()
vismask = torch.from_numpy(vismask_frames).float()
tpl_e = torch.from_numpy(tpl_e).long()
geo_e = torch.from_numpy(geo_e).long()
tpl_e, _ = add_self_loops(tpl_e, num_nodes=vtx_traj.size(0))
geo_e, _ = add_self_loops(geo_e, num_nodes=vtx_traj.size(0))
# add to data class
data = GraphData(vtx_traj=vtx_traj, pts_traj=pts_traj,
corr_v2p_all=corr_v2p, corr_p2v_all=corr_p2v, vismask_all=vismask,
tpl_edge_index=tpl_e, geo_edge_index=geo_e, name=name)
data_list.append(data)
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
class DeformingThingsDataset(InMemoryDataset):
def __init__(self, root, transform=None, pre_transform=None):
super(DeformingThingsDataset, self).__init__(root, transform=transform, pre_transform=pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
raw_filelist = glob.glob(os.path.join(self.root, f'*_vtx_traj.npy'))
return raw_filelist
@property
def processed_file_names(self):
return '{:s}_dt_pose_data.pt'.format(self.root.split('/')[-1])
def __len__(self):
return len(self.raw_paths)
def download(self):
pass
def process(self):
data_list = []
for i in tqdm(range(len(self.raw_paths))):
name = self.raw_paths[i].split('/')[-1].split('_vtx_traj.npy')[0]
vtx_traj = np.load(self.raw_paths[i])
pts_traj = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_pts_traj.npy"))
vtx_traj = vtx_traj.reshape(-1, 300)
pts_traj = pts_traj.reshape(-1, 300)
corr_v2p = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_corr_v2p.npy"))
corr_p2v = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_corr_p2v.npy"))
vismask = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_vismask.npy"))
tpl_e = np.loadtxt(self.raw_paths[i].replace('_vtx_traj.npy', '_tpl_e.txt')).T
geo_e = np.loadtxt(self.raw_paths[i].replace('_vtx_traj.npy', '_geo_e.txt')).T
# pick frames
vtx_frames = []
pts_frames = []
corr_v2p_frames = []
corr_p2v_frames = []
vismask_frames = []
for key_t in np.arange(0, 100, 19):
vtx_frames.append(vtx_traj[:, 3*key_t:3*(key_t+1)])
pts_frames.append(pts_traj[:, 3*key_t:3*(key_t+1)])
corr_v2p_frames.append(corr_v2p[corr_v2p[:, -1] == key_t])
corr_p2v_frames.append(corr_p2v[corr_p2v[:, -1] == key_t])
vismask_frames.append(vismask[:, key_t])
vtx_frames = np.concatenate(vtx_frames, axis=1)
pts_frames = np.concatenate(pts_frames, axis=1)
corr_v2p_frames = np.concatenate(corr_v2p_frames, axis=0)
corr_p2v_frames = np.concatenate(corr_p2v_frames, axis=0)
corr_v2p_frames[:, -1] = corr_v2p_frames[:, -1] / 19
corr_p2v_frames[:, -1] = corr_p2v_frames[:, -1] / 19
vismask_frames = np.stack(vismask_frames, axis=1)
# convert to tensor
vtx_traj = torch.from_numpy(vtx_frames).float()
pts_traj = torch.from_numpy(pts_frames).float()
corr_v2p = torch.from_numpy(corr_v2p_frames).long()
corr_p2v = torch.from_numpy(corr_p2v_frames).long()
vismask = torch.from_numpy(vismask_frames).float()
tpl_e = torch.from_numpy(tpl_e).long()
geo_e = torch.from_numpy(geo_e).long()
tpl_e, _ = add_self_loops(tpl_e, num_nodes=vtx_traj.size(0))
geo_e, _ = add_self_loops(geo_e, num_nodes=vtx_traj.size(0))
# add to data class
data = GraphData(vtx_traj=vtx_traj, pts_traj=pts_traj,
corr_v2p_all=corr_v2p, corr_p2v_all=corr_p2v, vismask_all=vismask,
tpl_edge_index=tpl_e, geo_edge_index=geo_e, name=name)
data_list.append(data)
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
class SeqDeformingThingsDataset(InMemoryDataset):
def __init__(self, root, transform=None, pre_transform=None):
super(SeqDeformingThingsDataset, self).__init__(root, transform=transform, pre_transform=pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
raw_filelist = glob.glob(os.path.join(self.root, f'*_vtx_traj.npy'))
return raw_filelist
@property
def processed_file_names(self):
return '{:s}_dt_seq_pose_data.pt'.format(self.root.split('/')[-1])
def __len__(self):
return len(self.raw_paths)
def download(self):
pass
def process(self):
data_list = []
for i in tqdm(range(len(self.raw_paths))):
name = self.raw_paths[i].split('/')[-1].split('_vtx_traj')[0]
vtx_traj = np.load(self.raw_paths[i])
pts_traj = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_pts_traj.npy"))
vtx_traj = vtx_traj.reshape(-1, 300)
pts_traj = pts_traj.reshape(-1, 300)
corr_v2p = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_corr_v2p.npy"))
corr_p2v = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_corr_p2v.npy"))
vismask = np.load(self.raw_paths[i].replace("_vtx_traj.npy", "_vismask.npy"))
tpl_e = np.loadtxt(self.raw_paths[i].replace('_vtx_traj.npy', '_tpl_e.txt')).T
geo_e = np.loadtxt(self.raw_paths[i].replace('_vtx_traj.npy', '_geo_e.txt')).T
# pick frames
vtx_frames = []
pts_frames = []
corr_v2p_frames = []
corr_p2v_frames = []
vismask_frames = []
for key_t in np.arange(0, 21):
vtx_frames.append(vtx_traj[:, 3*key_t:3*(key_t+1)])
pts_frames.append(pts_traj[:, 3*key_t:3*(key_t+1)])
corr_v2p_frames.append(corr_v2p[corr_v2p[:, -1] == key_t])
corr_p2v_frames.append(corr_p2v[corr_p2v[:, -1] == key_t])
vismask_frames.append(vismask[:, key_t])
vtx_frames = np.concatenate(vtx_frames, axis=1)
pts_frames = np.concatenate(pts_frames, axis=1)
corr_v2p_frames = np.concatenate(corr_v2p_frames, axis=0)
corr_p2v_frames = np.concatenate(corr_p2v_frames, axis=0)
corr_v2p_frames[:, -1] = corr_v2p_frames[:, -1]
corr_p2v_frames[:, -1] = corr_p2v_frames[:, -1]
vismask_frames = np.stack(vismask_frames, axis=1)
# convert to tensor
vtx_traj = torch.from_numpy(vtx_frames).float()
pts_traj = torch.from_numpy(pts_frames).float()
corr_v2p = torch.from_numpy(corr_v2p_frames).long()
corr_p2v = torch.from_numpy(corr_p2v_frames).long()
vismask = torch.from_numpy(vismask_frames).float()
tpl_e = torch.from_numpy(tpl_e).long()
geo_e = torch.from_numpy(geo_e).long()
tpl_e, _ = add_self_loops(tpl_e, num_nodes=vtx_traj.size(0))
geo_e, _ = add_self_loops(geo_e, num_nodes=vtx_traj.size(0))
# add to data class
data = GraphData(vtx_traj=vtx_traj, pts_traj=pts_traj,
corr_v2p_all=corr_v2p, corr_p2v_all=corr_p2v, vismask_all=vismask,
tpl_edge_index=tpl_e, geo_edge_index=geo_e, name=name)
data_list.append(data)
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0]) | 15,087 | 46.297806 | 111 | py |
MoRig | MoRig-master/evaluate/eval_tracking.py | import sys
sys.path.append("./")
import argparse, os, cv2, glob, copy, numpy as np, open3d as o3d, matplotlib.pyplot as plt, time
from scipy.spatial.transform import Rotation
from tqdm import tqdm
from sklearn.cluster import KMeans
import torch
from torch_geometric.utils import add_self_loops
import models
from datasets.dataset_pose import GraphData
from utils.rig_parser import Rig
from utils.os_utils import mkdir_p
from utils.vis_utils import visualize_seg, visualize_track, show_obj_rig
from utils.deform_ik import Deform_IK
from utils.piecewise_ransac import Piecewise_RANSAC
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def load_deformnet(deformnet_path):
deformnet = models.__dict__["deformnet"](tau_nce=0.07, num_interp=5)
deformnet.to(device)
if torch.cuda.is_available():
deformnet.load_state_dict(torch.load(deformnet_path)['state_dict'])
else:
deformnet.load_state_dict(torch.load(deformnet_path, map_location=torch.device('cpu'))['state_dict'])
deformnet.eval()
return deformnet
def run_deform_net_inference(flow_net, vtx_in, pts_in, tpl_e, geo_e):
vtx = torch.from_numpy(vtx_in).float()
pts = torch.from_numpy(pts_in).float()
tpl_e_tensor = torch.from_numpy(tpl_e).long()
geo_e_tensor = torch.from_numpy(geo_e).long()
tpl_e_tensor, _ = add_self_loops(tpl_e_tensor, num_nodes=len(vtx))
geo_e_tensor, _ = add_self_loops(geo_e_tensor, num_nodes=len(vtx))
data = GraphData(tpl_edge_index=tpl_e_tensor, geo_edge_index=geo_e_tensor)
data.vtx = torch.from_numpy(vtx_in).float()
data.pts = torch.from_numpy(pts_in).float()
data.vtx_batch = torch.zeros(len(vtx), dtype=torch.long)
data.pts_batch = torch.zeros(len(pts), dtype=torch.long)
data.to(device)
with torch.no_grad():
pred_flow, vtx_feature, pts_feature, pred_vismask, tau = flow_net(data)
pred_flow = pred_flow.to("cpu").numpy()
pred_vismask = pred_vismask.to("cpu").numpy()
vtx_feature = vtx_feature.to("cpu").numpy()
pts_feature = pts_feature.to("cpu").numpy()
corr_matrix = np.matmul(vtx_feature, pts_feature.T)
vert_shift = vtx_in + pred_flow
return vert_shift, pred_vismask, corr_matrix
def ik_drag(vtx_src, vtx_dst, pts_dst, rig, corrmat, vismask):
vert_src = np.column_stack((vtx_src, np.ones(len(vtx_src)))).T
vert_src = torch.from_numpy(vert_src)
vert_dst = torch.from_numpy(vtx_dst)
rig_globals_inv = torch.inverse(torch.from_numpy(rig.global_transforms_homogeneous))
vert_src_local = torch.matmul(rig_globals_inv, vert_src[None, ...])
deformer = Deform_IK(vismask_thrd=0.3) # vismask_thrd=0.30
locals_update, globals_update, jpos = \
deformer.run(locals_in=torch.from_numpy(rig.local_frames).float(),
offsets=torch.from_numpy(rig.offset).float(),
parent=rig.hierarchy,
root_id=rig.root_id,
vert_local=vert_src_local.float(),
skinning=torch.from_numpy(rig.skins).float(),
constraints=vert_dst.float(),
vismask=torch.from_numpy(vismask).float(),
iter_time=200)
locals_update = locals_update.detach().numpy()
jpos = jpos.detach().numpy()
rig_update = copy.deepcopy(rig)
rig_update.pos = jpos
rig_update.local_frames = locals_update
rig_update.FK()
vert_update = np.matmul(rig_update.global_transforms_homogeneous, vert_src_local.numpy())
vtx_update_glb = np.sum(vert_update * rig_update.skins.T[:, None, :], axis=0)[0:3].T
if corrmat is not None:
''' 2nd time: t2 mesh -> gt points IK '''
max_sim = np.max(corrmat, axis=1)
nnidx = np.argmax(corrmat, axis=1)
corr_list = np.zeros((corrmat.shape[1], 3))
for idx in range(len(nnidx)):
if (max_sim[idx] > corr_list[nnidx[idx], -1]):
corr_list[nnidx[idx], 0] = idx
corr_list[nnidx[idx], 1] = nnidx[idx]
corr_list[nnidx[idx], 2] = max_sim[idx]
# cor dis
thd = 0.5
nnidx = np.where(corr_list[:, -1] > thd)[0]
corr_list = corr_list[nnidx, 0:2]
vert_src = np.column_stack((vtx_update_glb, np.ones(len(vtx_update_glb)))).T
vert_src = torch.from_numpy(vert_src)
vert_src_all = copy.deepcopy(vert_src)
vert_dst = torch.from_numpy(pts_dst)
vert_src = vert_src[:, corr_list[:, 0]]
vert_dst = vert_dst[corr_list[:, 1], :]
# l2 dis
l2_dis = torch.sum((vert_src[0:3, :].T - vert_dst)**2, dim=-1)
nnidx = torch.where(l2_dis < 1e-2)[0]
#print('close correspondent pair len after corr_matrix', corr_list.shape[0], ', after L2 constrain', nnidx.shape[0])
vert_src = vert_src[:, nnidx]
vert_dst = vert_dst[nnidx, :]
corr_list = corr_list[nnidx, :]
rig_globals_inv = torch.inverse(torch.from_numpy(rig_update.global_transforms_homogeneous))
vert_src_local = torch.matmul(rig_globals_inv, vert_src[None, ...])
locals_update, globals_update, jpos = \
deformer.run(locals_in=torch.from_numpy(rig_update.local_frames).float(),
offsets=torch.from_numpy(rig_update.offset).float(),
parent=rig_update.hierarchy,
root_id=rig_update.root_id,
vert_local=vert_src_local.float(),
skinning=torch.from_numpy(rig_update.skins).float()[corr_list[:, 0]],
constraints=vert_dst.float(),
vismask=torch.from_numpy(vismask).float()[corr_list[:, 0]],
iter_time=400, lr=1e-3, w_invis=0.0)
locals_update = locals_update.detach().numpy()
jpos = jpos.detach().numpy()
rig_update2 = copy.deepcopy(rig_update)
rig_update2.pos = jpos
rig_update2.local_frames = locals_update
rig_update2.FK()
vert_src_all_local = torch.matmul(rig_globals_inv, vert_src_all[None, ...])
vert_update = np.matmul(rig_update2.global_transforms_homogeneous,
vert_src_all_local.numpy())
vtx_update_glb = np.sum(vert_update * rig_update2.skins.T[:, None, :], axis=0)[0:3].T
# pcd_1 = o3d.geometry.PointCloud(points=o3d.utility.Vector3dVector(vtx_src))
# pcd_1.paint_uniform_color([0.0, 0.0, 1.0])
# pcd_2 = o3d.geometry.PointCloud(points=o3d.utility.Vector3dVector(vtx_update_glb))
# pcd_2.paint_uniform_color([0.0, 0.0, 1.0])
# pcd_gt = o3d.geometry.PointCloud(points=o3d.utility.Vector3dVector(pts_dst))
# pcd_gt.paint_uniform_color([0.0, 0.0, 1.0])
# vis = o3d.visualization.Visualizer()
# vis.create_window()
# vis.add_geometry(pcd_1)
# vis.add_geometry(pcd_2.translate([1.0, 0.0, 0.0]))
# vis.add_geometry(pcd_gt.translate([2.0, 0.0, 0.0]))
# vis.run()
# vis.destroy_window()
return vtx_update_glb, Rotation.from_matrix(rig_update2.local_frames).as_quat()
else:
return vtx_update_glb, Rotation.from_matrix(rig_update.local_frames).as_quat()
def tracking_one(vtx_ori, rig, pts_traj, tpl_e, geo_e, deformnet):
seq_length = pts_traj.shape[1]
pred_vtx_traj, pred_vismask, pred_quats = [vtx_ori], [], []
for t in tqdm(range(1, seq_length)):
pts_tar = pts_traj[:, t, :]
vert_shift, pred_vismask_t, corr_matrix_t = run_deform_net_inference(deformnet, pred_vtx_traj[-1], pts_tar, tpl_e, geo_e)
vert_shift, quats_t = ik_drag(pred_vtx_traj[0], vert_shift, pts_tar, rig, corr_matrix_t, pred_vismask_t.squeeze(axis=1))
pred_vtx_traj.append(vert_shift)
pred_vismask.append(pred_vismask_t.squeeze(axis=1))
pred_quats.append(quats_t)
pred_vtx_traj = np.stack(pred_vtx_traj[1:], axis=1)
pred_vismask = np.stack(pred_vismask, axis=1)
pred_quats = np.stack(pred_quats, axis=1)
return pred_vtx_traj, pred_vismask, pred_quats
def plot(type="full"):
none_folder = "/mnt/neghvar/mnt/DATA_LINUX/zhan/output/none_rigs/tracking_loss_pred_flow/"
rignet_folder = "/mnt/neghvar/mnt/DATA_LINUX/zhan/output/rignet_rigs/tracking_loss_pred_flow/"
ours_folder = "/mnt/neghvar/mnt/DATA_LINUX/zhan/output/ours_rigs_no_dup/tracking_loss_pred_flow/"
ours_gt_flow_folder = "/mnt/neghvar/mnt/DATA_LINUX/zhan/output/ours_rigs_gt_flow/tracking_loss_gt_flow/"
skerig_folder = "/mnt/neghvar/mnt/DATA_LINUX/zhan/output/SkeRig_pred_flow/tracking_loss_pred_flow/"
skerig_gt_flow_folder = "/mnt/neghvar/mnt/DATA_LINUX/zhan/output/SkeRig_gt_flow/tracking_loss_gt_flow/"
model_list = ['4140', '8184', '18897', '3621', '3618', '8318', '7163', '8306', '8301', '8334', '8234', '2921', '3625', '8328', '17711', '14188', '9871', '10087', '3220', '9876', '7495', '9836', '6383', '18592', '8235', '10104', '3672', '8247', '3725', '15631', '3535', '17224', '3670', '6516', '15671', '10080', '8210', '7983', '18596', '783', '14414', '14051', '3640', '16282', '9870', '9453', '8336', '17736', '16537', '7201', '15687', '9853', '8236', '8233', '3675', '14836', '3724', '781', '8227', '17161', '8193', '496', '7216', '9835', '14455', '8335', '18063', '7154', '7979', '7157', '9852', '12106', '3641', '2586', '510', '7179', '8333', '8248', '3642', '8330', '14466', '15677', '15559', '6387', '10518', '1236', '7771', '4518', '8331', '9479', '3540', '10503', '9484', '1262', '19193', '6384', '14521', '17284', '9466', '11786', '8478', '14509', '10108', '6521', '18020', '14697', '14602', '7191', '432', '12108', '2317', '10560', '15472', '5590', '10559', '8245', '13463', '1317', '3685', '8304', '10557', '1307', '16314', '15930', '14471', '3645', '14373', '16261', '18617', '12121', '10110', '16552', '15458', '1276', '14462', '1280', '16740', '14372', '425', '10514', '9477', '15012', '2923', '7665', '3644', '18608', '2132', '8320', '15906', '14726']
errors = []
for folder in [none_folder, ours_gt_flow_folder, ours_folder, rignet_folder, skerig_folder, skerig_gt_flow_folder]:
error_list = glob.glob(os.path.join(folder, f"*_{type}_flow_error.npy"))
error = []
for error_filename in error_list:
#model_id = error_filename.split("/")[-1].split("_")[0]
#if model_id in model_list:
error.append(np.load(error_filename))
error = np.concatenate(error, axis=0)
error = error.mean(axis=0)
errors.append(error)
t = np.arange(101) # 101
plt.plot(t, errors[0], 'm--', label="none")
#plt.plot(t, errors[1], 'g--', label="ours_gt_flow")
plt.plot(t, errors[2], 'y--', label="ours_pred_flow")
plt.plot(t, errors[3], 'b--', label="rignet_pred_flow")
plt.plot(t, errors[4], 'r--', label="SkeRig_pred_flow")
#plt.plot(t, errors[5], 'k--', label="SkeRig_gt_flow")
plt.legend(loc="upper left")
plt.show()
if __name__ == "__main__":
testset_folder = "/mnt/DATA_LINUX2/zhan/morig/ModelsResources/test/"
mesh_folder = "/mnt/DATA_LINUX2/zhan/morig/ModelsResources/obj_remesh/"
deformnet_seq_path = "checkpoints/deform_p_mr_seq/model_best.pth.tar"
rig_folder = "results/our_results"
deformnet = load_deformnet(deformnet_path=deformnet_seq_path)
pts_traj_filelist = glob.glob(os.path.join(testset_folder, f"*_pts_traj.npy"))
mkdir_p(os.path.join(rig_folder, "tracking_loss/"))
for pts_traj_filename in tqdm(pts_traj_filelist):
model_id = pts_traj_filename.split("/")[-1].split("_")[0]
gt_pts_traj = np.load(pts_traj_filename).reshape(-1, 101, 3)
gt_vtx_traj = np.load(pts_traj_filename.replace("_pts_traj.npy", "_vtx_traj.npy"))
gt_vismask = np.load(pts_traj_filename.replace("_pts_traj.npy", "_vismask.npy"))
rig = Rig(os.path.join(rig_folder, f"{model_id}_rig2.txt"))
tpl_e = np.loadtxt(os.path.join(testset_folder, f"{model_id}_tpl_e.txt")).T
geo_e = np.loadtxt(os.path.join(testset_folder, f"{model_id}_geo_e.txt")).T
pred_vtx_traj, pred_vismask, pred_quats = \
tracking_one(vtx_ori=gt_vtx_traj[:, 0, :],
rig=rig, pts_traj=gt_pts_traj,
tpl_e=tpl_e, geo_e=geo_e,
deformnet=deformnet)
# evaluate
full_flow_error = np.sqrt(np.sum((pred_vtx_traj - gt_vtx_traj[:, 1:, :]) ** 2, axis=2)).mean()
vis_flow_error = ((np.sqrt(np.sum((pred_vtx_traj - gt_vtx_traj[:, 1:, :]) ** 2, axis=2)) * (gt_vismask[:, 1:] > 0.5)).sum()) / (gt_vismask[:, 1:] > 0.5).sum()
# save results
np.savez(os.path.join(rig_folder, f"tracking_loss/{model_id}_tracking.npz"),
pred_quats=pred_quats, pred_vtx_traj=pred_vtx_traj, pred_vismask=pred_vismask,
full_flow_error=full_flow_error, vis_flow_error=vis_flow_error)
| 12,890 | 53.622881 | 1,271 | py |
MoRig | MoRig-master/evaluate/joint2rig.py | import sys
sys.path.append("./")
import glob, os, open3d as o3d, numpy as np, itertools as it, cv2, copy, sys, time, trimesh
from tqdm import tqdm
from scipy.sparse import lil_matrix
from scipy.sparse.csgraph import dijkstra
import torch
from torch_geometric.data import Data
from torch_geometric.utils import add_self_loops
from utils import binvox_rw
from utils.vis_utils import draw_shifted_pts, show_obj_rig, drawCone, drawSphere
from utils.rig_parser import Info, Rig, Node, TreeNode
from utils.mst_utils import inside_check, sample_on_bone, increase_cost_for_outside_bone, primMST, primMST_symmetry
from data_proc.common_ops import calc_surface_geodesic
from data_proc.gen_skin_data import get_bones
from models.rootnet import ROOTNET
from models.bonenet import PairCls as BONENET
from models.rignet import SkinMotion
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def getInitId(data, model):
"""
predict root joint ID via rootnet
    :param data: wrapped input data
    :param model: root prediction network (rootnet)
    :return: index of the predicted root joint
"""
with torch.no_grad():
root_prob, _ = model(data, shuffle=False)
root_prob = torch.sigmoid(root_prob).data.cpu().numpy()
root_id = np.argmax(root_prob)
return root_id
def pts2line(pts, lines):
'''
    Calculate point-to-bone distances. Point-to-line-segment distance follows
    https://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment
    :param pts: N*3
    :param lines: N*6, where [N,0:3] is the starting position and [N, 3:6] is the ending position
    :return: origins are the nearest projected positions of the points on the lines.
    ends are the points themselves.
    dist is the distance in between, i.e. the distance from each point to each line.
    Origins and ends will be used to generate rays.
'''
l2 = np.sum((lines[:, 3:6] - lines[:, 0:3]) ** 2, axis=1)
origins = np.zeros((len(pts) * len(lines), 3))
ends = np.zeros((len(pts) * len(lines), 3))
dist = np.zeros((len(pts) * len(lines)))
for l in range(len(lines)):
if np.abs(l2[l]) < 1e-8: # for zero-length edges
origins[l * len(pts):(l + 1) * len(pts)] = lines[l][0:3]
else: # for other edges
t = np.sum((pts - lines[l][0:3][np.newaxis, :]) * (lines[l][3:6] - lines[l][0:3])[np.newaxis, :], axis=1) / \
l2[l]
t = np.clip(t, 0, 1)
t_pos = lines[l][0:3][np.newaxis, :] + t[:, np.newaxis] * (lines[l][3:6] - lines[l][0:3])[np.newaxis, :]
origins[l * len(pts):(l + 1) * len(pts)] = t_pos
ends[l * len(pts):(l + 1) * len(pts)] = pts
dist[l * len(pts):(l + 1) * len(pts)] = np.linalg.norm(
origins[l * len(pts):(l + 1) * len(pts)] - ends[l * len(pts):(l + 1) * len(pts)], axis=1)
return origins, ends, dist
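# Illustrative usage sketch for pts2line (added for clarity; not part of the
# original file): distances from 4 surface points to 2 bones. The flat outputs
# are bone-major and can be reshaped to a V x B point-to-bone distance matrix,
# mirroring how calc_geodesic_matrix consumes them below.
def _pts2line_example():
    pts = np.random.rand(4, 3)
    bones = np.array([[0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                      [0.0, 1.0, 0.0, 1.0, 1.0, 0.0]])
    origins, ends, dist = pts2line(pts, bones)
    return dist.reshape(len(bones), len(pts)).T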
def calc_pts2bone_visible_mat(mesh, origins, ends):
'''
    Check whether each surface point is visible to the internal bone sample.
    "Visible" means there is no occlusion on the path between them.
    :param mesh: trimesh mesh used for ray casting
    :param origins: origins of the rays (bone samples)
    :param ends: ends of the rays; together with origins, they define the ray directions.
    :return: binary visibility vector with one entry per ray, where 1 indicates the ray reaches its end point unoccluded
'''
ray_dir = ends - origins
RayMeshIntersector = trimesh.ray.ray_triangle.RayMeshIntersector(mesh)
locations, index_ray, index_tri = RayMeshIntersector.intersects_location(origins, ray_dir + 1e-15)
locations_per_ray = [locations[index_ray == i] for i in range(len(ray_dir))]
min_hit_distance = []
for i in range(len(locations_per_ray)):
if len(locations_per_ray[i]) == 0:
min_hit_distance.append(np.linalg.norm(ray_dir[i]))
else:
min_hit_distance.append(np.min(np.linalg.norm(locations_per_ray[i] - origins[i], axis=1)))
min_hit_distance = np.array(min_hit_distance)
distance = np.linalg.norm(ray_dir, axis=1)
vis_mat = (np.abs(min_hit_distance - distance) < 1e-4)
return vis_mat
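# Illustrative usage sketch for calc_pts2bone_visible_mat (added for clarity;
# not part of the original file): a ray from the centre of a unit box to a point
# on one of its faces is unoccluded, so the returned mask entry is True.
def _visibility_example():
    box = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
    origins = np.array([[0.0, 0.0, 0.0]])
    ends = np.array([[0.5, 0.1, 0.2]])   # a point on the +x face of the box
    return calc_pts2bone_visible_mat(box, origins, ends)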
def add_duplicate_joints(rig):
    """For every joint with multiple children, insert a duplicated copy of the joint
    (slightly offset toward each child) so that each bone has its own parent joint."""
    this_level = [rig.root_id]
pos_new = [rig.pos[rig.root_id]]
hier_new = [-1]
names_new = [rig.root_name]
while this_level:
next_level = []
for pid in this_level:
ch_ids = np.argwhere(rig.hierarchy == pid).squeeze(axis=1)
if len(ch_ids) > 1:
for dup_id, ch_id in enumerate(ch_ids):
# duplicate parent node
pos_new.append(rig.pos[pid] + 0.01 * (rig.pos[ch_id] - rig.pos[pid]))
names_new.append(rig.names[pid] + f"_dup_{dup_id}")
hier_new.append(names_new.index(rig.names[pid]))
# add child node
pos_new.append(rig.pos[ch_id])
names_new.append(rig.names[ch_id])
hier_new.append(names_new.index(rig.names[pid] + f"_dup_{dup_id}"))
elif len(ch_ids) == 1:
ch_id = ch_ids[0]
pos_new.append(rig.pos[ch_id])
names_new.append(rig.names[ch_id])
hier_new.append(names_new.index(rig.names[pid]))
else: # no child
pass
next_level += ch_ids.tolist()
this_level = next_level
rig_new = Rig()
rig_new.pos = np.array(pos_new)
rig_new.hierarchy = np.array(hier_new)
rig_new.root_id = 0
rig_new.root_name = rig.root_name
rig_new.names = names_new
rig_new.calc_frames_and_offsets()
return rig_new
def mapping_bone_index(bones_old, bones_new):
bone_map = {}
for i in range(len(bones_old)):
bone_old = bones_old[i][np.newaxis, :]
dist = np.linalg.norm(bones_new - bone_old, axis=1)
ni = np.argmin(dist)
bone_map[i] = ni
return bone_map
def assemble_skel_skin(skel, attachment):
bones_old, bone_names_old, _ = get_bones(skel)
rig_new = add_duplicate_joints(skel)
bones_new, bone_names_new, _ = get_bones(rig_new)
bone_mapping = mapping_bone_index(bones_old, bones_new)
for v in range(len(attachment)):
skw = attachment[v]
skw_new = np.zeros(len(rig_new.names))
for i in range(len(skw)):
if skw[i] > 1e-5:
bind_joint_name = bone_names_new[bone_mapping[i]][0]
bind_weight = skw[i]
skw_new[rig_new.names.index(bind_joint_name)] = bind_weight
rig_new.skins.append(skw_new)
rig_new.skins = np.stack(rig_new.skins, axis=0)
return rig_new
def post_filter(skin_weights, topology_edge, num_ring=1):
    """Smooth per-vertex skinning weights by averaging them over the num_ring topological neighborhood."""
    skin_weights_new = np.zeros_like(skin_weights)
for v in range(len(skin_weights)):
adj_verts_multi_ring = []
current_seeds = [v]
for r in range(num_ring):
adj_verts = []
for seed in current_seeds:
adj_edges = topology_edge[:, np.argwhere(topology_edge == seed)[:, 1]]
adj_verts_seed = list(set(adj_edges.flatten().tolist()))
adj_verts_seed.remove(seed)
adj_verts += adj_verts_seed
adj_verts_multi_ring += adj_verts
current_seeds = adj_verts
adj_verts_multi_ring = list(set(adj_verts_multi_ring))
if len(adj_verts_multi_ring) == 0:
skin_weights_new[v, :] = skin_weights[v]
else:
if v in adj_verts_multi_ring:
adj_verts_multi_ring.remove(v)
skin_weights_neighbor = [skin_weights[int(i), :][np.newaxis, :] for i in adj_verts_multi_ring]
skin_weights_neighbor = np.concatenate(skin_weights_neighbor, axis=0)
#max_bone_id = np.argmax(skin_weights[v, :])
#if np.sum(skin_weights_neighbor[:, max_bone_id]) < 0.17 * len(skin_weights_neighbor):
# skin_weights_new[v, :] = np.mean(skin_weights_neighbor, axis=0)
#else:
# skin_weights_new[v, :] = skin_weights[v, :]
skin_weights_new[v, :] = np.mean(skin_weights_neighbor, axis=0)
#skin_weights_new[skin_weights_new.sum(axis=1) == 0, :] = skin_weights[skin_weights_new.sum(axis=1) == 0, :]
return skin_weights_new
def predict_skeleton(input_data, vox, root_pred_net, bone_pred_net):
"""
    Predict skeleton structure based on predicted joints
    :param input_data: wrapped data
    :param vox: voxelized mesh
    :param root_pred_net: network to predict the root joint
    :param bone_pred_net: network to predict pairwise connectivity cost
    :return: predicted skeleton structure
"""
root_id = getInitId(input_data, root_pred_net)
pred_joints = input_data.joints.data.cpu().numpy()
with torch.no_grad():
connect_prob, _ = bone_pred_net(input_data, permute_joints=False)
connect_prob = torch.sigmoid(connect_prob)
pair_idx = input_data.pairs.long().data.cpu().numpy()
prob_matrix = np.zeros((len(input_data.joints), len(input_data.joints)))
prob_matrix[pair_idx[:, 0], pair_idx[:, 1]] = connect_prob.data.cpu().numpy().squeeze()
prob_matrix = prob_matrix + prob_matrix.transpose()
cost_matrix = -np.log(prob_matrix + 1e-10)
cost_matrix = increase_cost_for_outside_bone(cost_matrix, pred_joints, vox)
parent, key = primMST(cost_matrix, root_id)
#parent, key, root_id = primMST_symmetry(cost_matrix, root_id, pred_joints)
pred_skel = Rig()
pred_skel.pos = pred_joints
pred_skel.root_id = root_id
pred_skel.names = [f'joint_{i}' for i in range(len(pred_joints))]
pred_skel.root_name = pred_skel.names[root_id]
pred_skel.hierarchy = parent
pred_skel.calc_frames_and_offsets()
return pred_skel #, img
def create_one_data(v, joints, tpl_e, geo_e, vox, motion=None, flow=None):
# prepare and add new data members
pairs = list(it.combinations(range(joints.shape[0]), 2))
pair_attr = []
for pr in pairs:
dist = np.linalg.norm(joints[pr[0]] - joints[pr[1]])
bone_samples = sample_on_bone(joints[pr[0]], joints[pr[1]], step_size=0.01)
bone_samples_inside, _ = inside_check(bone_samples, vox)
outside_proportion = len(bone_samples_inside) / (len(bone_samples) + 1e-10)
attr = np.array([dist, outside_proportion, 1])
pair_attr.append(attr)
pairs = np.array(pairs)
pair_attr = np.array(pair_attr)
pairs = torch.from_numpy(pairs).float()
pair_attr = torch.from_numpy(pair_attr).float()
joints = torch.from_numpy(joints).float()
joints_batch = torch.zeros(len(joints), dtype=torch.long)
pairs_batch = torch.zeros(len(pairs), dtype=torch.long)
v = torch.from_numpy(v).float()
tpl_e = torch.from_numpy(tpl_e).long()
tpl_e, _ = add_self_loops(tpl_e, num_nodes=v.size(0))
geo_e = torch.from_numpy(geo_e).long()
geo_e, _ = add_self_loops(geo_e, num_nodes=v.size(0))
if motion is not None:
motion = torch.from_numpy(motion).float()
if flow is not None:
flow = torch.from_numpy(flow).float()
# batch
batch = torch.zeros(len(v), dtype=torch.long)
data = Data(pos=v, tpl_edge_index=tpl_e, geo_edge_index=geo_e, batch=batch, joints=joints, motion=motion,
flow=flow, pairs=pairs, pair_attr=pair_attr, joints_batch=joints_batch, pairs_batch=pairs_batch)
return data
def pred_skel_func(res_folder):
mesh_folder = "/mnt/DATA_LINUX2/zhan/morig/ModelsResources/obj_remesh/"
vox_folder = "/mnt/DATA_LINUX2/zhan/morig/ModelsResources/vox/"
testset_folder = "/mnt/DATA_LINUX2/zhan/morig/ModelsResources/test/"
rootNet = ROOTNET()
rootNet.to(device)
rootNet.eval()
rootNet_checkpoint = torch.load('checkpoints/rootnet/model_best.pth.tar')
rootNet.load_state_dict(rootNet_checkpoint['state_dict'])
print(" root prediction network loaded.")
boneNet = BONENET()
boneNet.to(device)
boneNet.eval()
boneNet_checkpoint = torch.load('checkpoints/bonenet/model_best.pth.tar')
boneNet.load_state_dict(boneNet_checkpoint['state_dict'])
print(" bone prediction network loaded.")
joint_filelist = glob.glob(os.path.join(res_folder, "*_joint.npy"))
for joint_filename in tqdm(joint_filelist):
model_id = joint_filename.split("/")[-1].split("_")[0]
if os.path.exists(os.path.join(res_folder, f"{model_id}_skel.txt")):
continue
mesh_filename = os.path.join(mesh_folder, f"{model_id}.obj")
mesh = o3d.io.read_triangle_mesh(mesh_filename)
v = np.asarray(mesh.vertices)
tpl_e = np.loadtxt(os.path.join(testset_folder, f"{model_id}_tpl_e.txt")).T
geo_e = np.loadtxt(os.path.join(testset_folder, f"{model_id}_geo_e.txt")).T
with open(os.path.join(vox_folder, f"{model_id}.binvox"), 'rb') as fvox:
vox = binvox_rw.read_as_3d_array(fvox)
joints = np.load(joint_filename)
#img = draw_shifted_pts(mesh_filename, joints)
data = create_one_data(v, joints, tpl_e, geo_e, vox)
data.to(device)
pred_skeleton = predict_skeleton(data, vox, rootNet, boneNet)
#show_obj_rig(mesh, pred_skeleton)
pred_skeleton.save(os.path.join(res_folder, f"{model_id}_skel.txt"))
#cv2.imwrite(os.path.join(res_folder, f"{model_id}_skel.png"), img[:,:,::-1])
def calc_geodesic_matrix(bones, mesh_v, surface_geodesic, mesh_filename, subsampling=False):
"""
calculate volumetric geodesic distance from vertices to each bones
:param bones: B*6 numpy array where each row stores the starting and ending joint position of a bone
:param mesh_v: V*3 mesh vertices
:param surface_geodesic: geodesic distance matrix of all vertices
:param mesh_filename: mesh filename
    :return: an approximate volumetric geodesic distance matrix V*B, where (v,b) is the distance from vertex v to bone b
"""
if subsampling:
mesh0 = o3d.io.read_triangle_mesh(mesh_filename)
mesh0 = mesh0.simplify_quadric_decimation(3000)
o3d.io.write_triangle_mesh(mesh_filename.replace(".obj", "_simplified.obj"), mesh0)
mesh_trimesh = trimesh.load(mesh_filename.replace(".obj", "_simplified.obj"))
subsamples_ids = np.random.choice(len(mesh_v), np.min((len(mesh_v), 1500)), replace=False)
subsamples = mesh_v[subsamples_ids, :]
surface_geodesic = surface_geodesic[subsamples_ids, :][:, subsamples_ids]
else:
mesh_trimesh = trimesh.load(mesh_filename)
subsamples = mesh_v
origins, ends, pts_bone_dist = pts2line(subsamples, bones)
pts_bone_visibility = calc_pts2bone_visible_mat(mesh_trimesh, origins, ends)
pts_bone_visibility = pts_bone_visibility.reshape(len(bones), len(subsamples)).transpose()
pts_bone_dist = pts_bone_dist.reshape(len(bones), len(subsamples)).transpose()
# remove visible points which are too far
for b in range(pts_bone_visibility.shape[1]):
visible_pts = np.argwhere(pts_bone_visibility[:, b] == 1).squeeze(1)
if len(visible_pts) == 0:
continue
threshold_b = np.percentile(pts_bone_dist[visible_pts, b], 15)
pts_bone_visibility[pts_bone_dist[:, b] > 1.3 * threshold_b, b] = False
visible_matrix = np.zeros(pts_bone_visibility.shape)
visible_matrix[np.where(pts_bone_visibility == 1)] = pts_bone_dist[np.where(pts_bone_visibility == 1)]
for c in range(visible_matrix.shape[1]):
unvisible_pts = np.argwhere(pts_bone_visibility[:, c] == 0).squeeze(1)
visible_pts = np.argwhere(pts_bone_visibility[:, c] == 1).squeeze(1)
if len(visible_pts) == 0:
visible_matrix[:, c] = pts_bone_dist[:, c]
continue
for r in unvisible_pts:
dist1 = np.min(surface_geodesic[r, visible_pts])
nn_visible = visible_pts[np.argmin(surface_geodesic[r, visible_pts])]
if np.isinf(dist1):
visible_matrix[r, c] = 8.0 + pts_bone_dist[r, c]
else:
visible_matrix[r, c] = dist1 + visible_matrix[nn_visible, c]
if subsampling:
nn_dist = np.sum((mesh_v[:, np.newaxis, :] - subsamples[np.newaxis, ...])**2, axis=2)
nn_ind = np.argmin(nn_dist, axis=1)
visible_matrix = visible_matrix[nn_ind, :]
os.remove(mesh_filename.replace(".obj", "_simplified.obj"))
return visible_matrix
def remove_dup_joints(rig_ori):
this_level = [rig_ori.root_id]
joint_res = []
skin_res = []
hier_res = [-1]
names_res = [rig_ori.root_name]
while this_level:
next_level = []
for p_id in this_level:
ch_ids = np.argwhere(rig_ori.hierarchy == p_id).squeeze(axis=1)
for ch_id in ch_ids:
if "_dup" in rig_ori.names[ch_id]:
ch_id_of_ch = np.argwhere(rig_ori.hierarchy == ch_id).squeeze(axis=1)[0]
next_level.append(ch_id_of_ch)
names_res.append(rig_ori.names[ch_id_of_ch])
rig_ori.skins[:, p_id] += rig_ori.skins[:, ch_id]
else:
next_level.append(ch_id)
names_res.append(rig_ori.names[ch_id])
hier_res.append(names_res.index(rig_ori.names[p_id]))
joint_res.append(rig_ori.pos[p_id])
skin_res.append(rig_ori.skins[:, p_id])
this_level = next_level
rig_res = Rig()
rig_res.pos = np.stack(joint_res, axis=0)
rig_res.hierarchy = np.array(hier_res)
rig_res.names = names_res
rig_res.root_id = 0
rig_res.root_name = rig_ori.root_name
rig_res.calc_frames_and_offsets()
rig_res.skins = np.stack(skin_res, axis=1)
return rig_res
def predict_skinning(input_data, pred_skel, skin_pred_net, surface_geodesic, mesh_filename, subsampling=False):
"""
predict skinning
:param input_data: wrapped input data
:param pred_skel: predicted skeleton
:param skin_pred_net: network to predict skinning weights
:param surface_geodesic: geodesic distance matrix of all vertices
:param mesh_filename: mesh filename
:return: predicted rig with skinning weights information
"""
global device
num_nearest_bone = 5
bones, bone_names, bone_isleaf = get_bones(pred_skel)
mesh_v = input_data.pos.data.cpu().numpy()
print(" calculating volumetric geodesic distance from vertices to bone. This step takes some time...")
geo_dist = calc_geodesic_matrix(bones, mesh_v, surface_geodesic, mesh_filename, subsampling=subsampling)
input_samples = [] # joint_pos (x, y, z), (bone_id, 1/D)*5
loss_mask = []
skin_nn = []
for v_id in range(len(mesh_v)):
geo_dist_v = geo_dist[v_id]
bone_id_near_to_far = np.argsort(geo_dist_v)
this_sample = []
this_nn = []
this_mask = []
for i in range(num_nearest_bone):
if i >= len(bones):
this_sample += bones[bone_id_near_to_far[0]].tolist()
this_sample.append(1.0 / (geo_dist_v[bone_id_near_to_far[0]] + 1e-10))
this_sample.append(bone_isleaf[bone_id_near_to_far[0]])
this_nn.append(0)
this_mask.append(0)
else:
skel_bone_id = bone_id_near_to_far[i]
this_sample += bones[skel_bone_id].tolist()
this_sample.append(1.0 / (geo_dist_v[skel_bone_id] + 1e-10))
this_sample.append(bone_isleaf[skel_bone_id])
this_nn.append(skel_bone_id)
this_mask.append(1)
input_samples.append(np.array(this_sample)[np.newaxis, :])
skin_nn.append(np.array(this_nn)[np.newaxis, :])
loss_mask.append(np.array(this_mask)[np.newaxis, :])
skin_input = np.concatenate(input_samples, axis=0)
loss_mask = np.concatenate(loss_mask, axis=0)
skin_nn = np.concatenate(skin_nn, axis=0)
skin_input = torch.from_numpy(skin_input).float()
input_data.skin_input = skin_input
input_data.to(device)
motion_all, motion_aggr, skin_pred = skin_pred_net(input_data, input_data.flow)
skin_pred = skin_pred * torch.from_numpy(loss_mask).to(skin_pred.device)
skin_pred = torch.softmax(skin_pred, dim=1)
skin_pred = skin_pred.data.cpu().numpy()
motion_aggr = motion_aggr.detach().cpu().numpy()
skin_nn = skin_nn[:, 0:num_nearest_bone]
skin_pred_full = np.zeros((len(skin_pred), len(bone_names)))
for v in range(len(skin_pred)):
for nn_id in range(len(skin_nn[v, :])):
if loss_mask[v, nn_id] == 1:
skin_pred_full[v, skin_nn[v, nn_id]] = skin_pred[v, nn_id]
print(" filtering skinning prediction")
tpl_e = input_data.tpl_edge_index.data.cpu().numpy()
skin_pred_full = post_filter(skin_pred_full, tpl_e, num_ring=1)
skin_pred_full[skin_pred_full < np.max(skin_pred_full, axis=1, keepdims=True) * 0.35] = 0.0
skin_pred_full = skin_pred_full / (skin_pred_full.sum(axis=1, keepdims=True) + 1e-10)
skel_res = assemble_skel_skin(pred_skel, skin_pred_full)
return skel_res
def pred_rig_func(res_folder):
mesh_folder = "/mnt/DATA_LINUX2/zhan/morig/ModelsResources/obj_remesh/"
vox_folder = "/mnt/DATA_LINUX2/zhan/morig/ModelsResources/vox/"
testset_folder = "/mnt/DATA_LINUX2/zhan/morig/ModelsResources/test/"
surface_geodesic_folder = "/mnt/DATA_LINUX2/zhan/morig/ModelsResources/surface_geodesic/"
skinNet = SkinMotion(nearest_bone=5, use_motion=True, use_Dg=False, use_Lf=False, motion_dim=32, num_keyframes=5)
skinNet_checkpoint = torch.load('checkpoints/skin_motion/model_best.pth.tar')
skinNet.load_state_dict(skinNet_checkpoint['state_dict'])
skinNet.to(device)
skinNet.eval()
print(" skinning prediction network loaded.")
skel_filelist = glob.glob(os.path.join(res_folder, "*_skel.txt"))
for skel_filename in tqdm(skel_filelist):
model_id = skel_filename.split("/")[-1].split("_")[0]
if os.path.exists(os.path.join(res_folder, f"{model_id}_rig.txt")):
continue
mesh_filename = os.path.join(mesh_folder, f"{model_id}.obj")
mesh = o3d.io.read_triangle_mesh(mesh_filename)
mesh.compute_vertex_normals()
pred_skeleton = Rig(skel_filename)
#show_obj_rig(mesh, pred_skeleton)
v = np.asarray(mesh.vertices)
tpl_e = np.loadtxt(os.path.join(testset_folder, f"{model_id}_tpl_e.txt")).T
geo_e = np.loadtxt(os.path.join(testset_folder, f"{model_id}_geo_e.txt")).T
with open(os.path.join(vox_folder, f"{model_id}.binvox"), 'rb') as fvox:
vox = binvox_rw.read_as_3d_array(fvox)
joints = np.load(skel_filename.replace("_skel.txt", "_joint.npy"))
pred_flow = []
for t in range(1, 6):
pred_flow.append(np.load(os.path.join(testset_folder, f"pred_flow/{model_id}_{t}_pred_flow.npy")))
pred_flow = np.concatenate(pred_flow, axis=1)
data = create_one_data(v, joints, tpl_e, geo_e, vox, flow=pred_flow)
data.to(device)
if os.path.exists(os.path.join(surface_geodesic_folder, f"{model_id}.npy")):
surface_geodesic = np.load(os.path.join(surface_geodesic_folder, f"{model_id}.npy"))
else:
surface_geodesic = calc_surface_geodesic(mesh, number_of_points=4000)
np.save(os.path.join(surface_geodesic_folder, f"{model_id}.npy"), surface_geodesic)
pred_rig = predict_skinning(data, pred_skeleton, skinNet, surface_geodesic, mesh_filename, subsampling=True)
# remove duplicate joints
pred_rig = remove_dup_joints(pred_rig)
pred_rig.save(os.path.join(res_folder, f"{model_id}_rig.txt"))
if __name__ == "__main__":
res_folder_ours = "results/our_results/"
#pred_skel_func(res_folder_ours) # step 1
pred_rig_func(res_folder_ours) # step 2
| 23,841 | 45.116054 | 121 | py |
MoRig | MoRig-master/evaluate/eval_rigging.py | import sys
sys.path.append("./")
import glob, os, numpy as np, cv2, sys, math, scipy
import open3d as o3d
from tqdm import tqdm
from scipy.optimize import linear_sum_assignment
from sklearn.cluster import estimate_bandwidth
from utils import binvox_rw
from utils.os_utils import mkdir_p
from utils.io_utils import readPly
from utils.vis_utils import draw_shifted_pts, draw_joints, visualize_seg, visualize_seg_joints, show_obj_rig, drawSphere
from utils.mst_utils import flip, inside_check
from utils.rig_parser import Info
from utils.eval_utils import chamfer_dist
from utils.cluster_utils import meanshift_cluster, nms_meanshift
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def load_featuresize(filename):
with open(filename, 'r') as fin:
lines = fin.readlines()
fs_dict = {}
for li in lines:
words = li.strip().split()
fs_dict[words[0]] = float(words[1])
return fs_dict
def get_joint_with_name(skel):
joints = []
names = []
this_level = [skel.root]
while this_level:
next_level = []
for p_node in this_level:
joint_ = np.array(p_node.pos)
joint_ = joint_[np.newaxis, :]
joints.append(joint_)
names.append(p_node.name)
next_level += p_node.children
this_level = next_level
joints = np.concatenate(joints, axis=0)
return joints, names
def eval_rig(bandwidth_quantile=0.04, threshold1=0.1, threshold2=0.02):
mesh_folder = "/mnt/DATA_LINUX2/zhan/morig/ModelsResources/obj_remesh/"
info_folder = "/mnt/DATA_LINUX2/zhan/morig/ModelsResources/rig_info_remesh/"
vox_folder = "/mnt/DATA_LINUX2/zhan/morig/ModelsResources/vox/"
featuresize_folder = '/mnt/DATA_LINUX2/zhan/morig/ModelsResources/joint_featuresize/'
ply_folder = "results/our_results/"
attn_folder = "results/our_results/"
output_folder = f"results/our_results/"
mkdir_p(output_folder)
ply_list = glob.glob(os.path.join(ply_folder, '*.ply'))
chamfer_j2j_total = 0.0
joint_IoU_total = 0.0
joint_precision_total = 0.0
joint_recall_total = 0.0
num_invalid = 0
for ply_filename in tqdm(ply_list):
model_id = ply_filename.split('/')[-1].split('.')[0]
attn_filename = os.path.join(attn_folder, f"{model_id}_attn.npy")
mesh_filename = os.path.join(mesh_folder, '{:s}.obj'.format(model_id))
mesh = o3d.io.read_triangle_mesh(mesh_filename)
vtx = np.asarray(mesh.vertices)
attn = np.load(attn_filename)
attn = (attn - np.min(attn)) / (np.max(attn) - np.min(attn))
vox_file = os.path.join(vox_folder, '{:s}.binvox'.format(model_id))
with open(vox_file, 'rb') as fvox:
vox = binvox_rw.read_as_3d_array(fvox)
shifted_pts = readPly(ply_filename)
#img = draw_shifted_pts(mesh, shifted_pts, weights=attn)
#cv2.imwrite(os.path.join(res_folder, "{:s}_pts.png".format(model_id)), img[:,:,::-1])
shifted_pts, index_inside = inside_check(shifted_pts, vox)
attn = attn[index_inside, :]
shifted_pts = shifted_pts[attn.squeeze() > threshold1]
attn = attn[attn.squeeze() > threshold1]
# symmetrize points by reflecting
shifted_pts_reflect = shifted_pts * np.array([[-1, 1, 1]])
shifted_pts = np.concatenate((shifted_pts, shifted_pts_reflect), axis=0)
attn = np.tile(attn, (2, 1))
bandwidth = estimate_bandwidth(shifted_pts, quantile=bandwidth_quantile)
#print(f"bandwidth: {bandwidth}")
shifted_pts = meanshift_cluster(shifted_pts, bandwidth, attn, max_iter=30)
#img = draw_shifted_pts(mesh, shifted_pts, weights=attn)
pred_joints = nms_meanshift(shifted_pts, attn=attn, bandwidth=bandwidth, thrd_density=threshold2)
pred_joints, _ = flip(pred_joints)
#img = draw_joints(mesh_filename, pred_joints)
#cv2.imwrite(os.path.join(res_folder, "{:s}_joint.png".format(model_id)), img[:,:,::-1])
np.save(os.path.join(output_folder, "{:s}_joint.npy".format(model_id)), pred_joints)
fs_file = os.path.join(featuresize_folder, f'{model_id}.txt')
fs_dict = load_featuresize(fs_file)
gt_skel = Info(os.path.join(info_folder, f'{model_id}.txt'))
gt_joint, gt_joint_name = get_joint_with_name(gt_skel)
fs = [fs_dict[i] for i in gt_joint_name]
fs = np.array(fs)
# print(len(gt_joint), len(pred_joint))
if len(pred_joints) == 0:
num_invalid += 1
continue
chamfer_j2j = chamfer_dist(pred_joints, gt_joint)
chamfer_j2j_total += chamfer_j2j
dist_matrix = np.sqrt(np.sum((pred_joints[np.newaxis, ...] - gt_joint[:, np.newaxis, :]) ** 2, axis=2))
row_ind, col_ind = linear_sum_assignment(dist_matrix)
fs_threshod = fs[row_ind]
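        # a matched prediction counts as correct if it lies within the local feature size of its matched GT joint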
joint_IoU = 2 * np.sum(dist_matrix[row_ind, col_ind] < fs_threshod) / (len(pred_joints) + len(gt_joint))
joint_IoU_total += joint_IoU
joint_precision = np.sum(dist_matrix[row_ind, col_ind] < fs_threshod) / len(pred_joints)
joint_precision_total += joint_precision
joint_recall = np.sum(dist_matrix[row_ind, col_ind] < fs_threshod) / len(gt_joint)
joint_recall_total += joint_recall
print("num_invalid:", num_invalid)
chamfer_j2j_total /= (len(ply_list) - num_invalid)
joint_precision_total /= (len(ply_list) - num_invalid)
joint_recall_total /= (len(ply_list) - num_invalid)
joint_IoU_total /= (len(ply_list) - num_invalid)
print('{:s}\n'.format(attn_folder),
'\tJ2J_chamfer_distance {:.03f}%\n'.format(chamfer_j2j_total * 100),
'\tjoint_IoU {:.03f}%\n'.format(joint_IoU_total * 100),
'\tjoint_precision {:.03f}%\n'.format(joint_precision_total * 100),
'\tjoint_recall {:.03f}%\n'.format(joint_recall_total * 100))
if __name__ == '__main__':
#bandwidth_quantile, threshold1, threshold_density = float(sys.argv[1]), float(sys.argv[2]), float(sys.argv[3])
#print(f"bandwidth_quantile: {bandwidth_quantile}, threshold1: {threshold1}, threshold_density: {threshold_density}")
eval_rig() #bandwidth_quantile, threshold1, threshold_density | 6,230 | 44.481752 | 121 | py |
MoRig | MoRig-master/utils/deform_ik.py | import numpy as np
import torch
import torch.nn as nn
import math
class Deform_IK:
def __init__(self, vismask_thrd=0.35):
self.vismask_thrd = vismask_thrd
self.crit = nn.MSELoss(reduction='none')
@staticmethod
def transform_from_euler(rotation, order='xyz'):
#rotation = rotation / 180 * math.pi
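        # Compose a rotation matrix from per-axis Euler angles (in radians);
        # for the default order 'xyz' the result is R_x @ R_y @ R_z.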
transform = torch.matmul(Deform_IK.transform_from_axis(rotation[..., 1], order[1]),
Deform_IK.transform_from_axis(rotation[..., 2], order[2]))
transform = torch.matmul(Deform_IK.transform_from_axis(rotation[..., 0], order[0]), transform)
return transform
@staticmethod
def transform_from_axis(euler, axis):
transform = torch.empty(euler.shape[0:3] + (3, 3), device=euler.device)
cos = torch.cos(euler)
sin = torch.sin(euler)
cord = ord(axis) - ord('x')
transform[..., cord, :] = transform[..., :, cord] = 0
transform[..., cord, cord] = 1
if axis == 'x':
transform[..., 1, 1] = transform[..., 2, 2] = cos
transform[..., 1, 2] = -sin
transform[..., 2, 1] = sin
if axis == 'y':
transform[..., 0, 0] = transform[..., 2, 2] = cos
transform[..., 0, 2] = sin
transform[..., 2, 0] = -sin
if axis == 'z':
transform[..., 0, 0] = transform[..., 1, 1] = cos
transform[..., 0, 1] = -sin
transform[..., 1, 0] = sin
return transform
def FK(self, locals, offsets, root_id, parent, root_translation):
globals = list(torch.chunk(locals, int(locals.shape[0]), dim=0))
jpos_res = torch.zeros_like(offsets)
jpos_res[root_id] = offsets[root_id] + root_translation
this_level = [root_id]
while this_level:
next_level = []
for p_id in this_level:
ch_list = np.argwhere(parent==p_id).squeeze(axis=1)
for ch_id in ch_list:
globals[ch_id] = torch.matmul(globals[p_id], locals[ch_id])
jpos_res[ch_id] = torch.matmul(globals[p_id], offsets[ch_id][:, None]).squeeze() + jpos_res[p_id]
next_level+=ch_list.tolist()
this_level = next_level
globals = torch.cat(globals, dim=0)
return globals, jpos_res
def run(self, locals_in, offsets, parent, root_id, vert_local, skinning, constraints, vismask, iter_time=100, lr=5e-2, w_invis=0.0):
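        """
        Fit per-joint Euler-angle rotations and a root translation with Adam so that the
        linear-blend-skinned vertices best match `constraints`, weighted by the visibility mask.
        Returns the local transforms, global transforms and joint positions of the final iteration.
        """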
self.locals = locals_in
self.offsets = offsets
self.parent = parent
self.root_id = root_id
self.vert_local = vert_local
self.skinning = skinning
self.constraints = constraints.clone()
self.vismask = (vismask > self.vismask_thrd).float().detach()
self.vismask[self.vismask==0] = w_invis
self.rotation_angles = torch.ones((len(self.locals), 3), dtype=torch.float32, device=self.locals.device) * 0.01
# add contrain if necessary
# self.mask = torch.ones_like(self.rotation_angles)
# self.mask[:, 2] = 0.05
# self.mask[:, 1] = 0.1
#self.mask = torch.ones_like(self.rotation_angles)
#self.mask[:, 1] = 0.2
# self.mask = torch.ones_like(self.rotation_angles)
# self.mask[11] = 0.0
# self.mask[14] = 0.01
# self.mask[16] = 0.01
# self.mask[2] = 0.01
# self.mask[8] = 0.0
# self.mask = torch.ones_like(self.rotation_angles)
# self.mask[2] = 0.0
# self.mask[3, 1] = 0.2
# self.mask[6] = 0.01
# self.mask[13:16] = 0.0
self.translation = torch.ones(3, dtype=torch.float32, device=self.locals.device) * 0.01
self.rotation_angles.requires_grad_(True)
self.translation.requires_grad_(True)
self.optimizer = torch.optim.Adam([{'params': self.rotation_angles, 'lr': lr*math.pi},
{'params': self.translation, 'lr': lr}],
lr=0.1, betas=(0.9, 0.999), weight_decay=1e-4)
for i in range(iter_time):
self.optimizer.zero_grad()
self.rotations = Deform_IK.transform_from_euler(self.rotation_angles.clone())
#self.rotations = Deform_IK.transform_from_euler(self.rotation_angles.clone() * self.mask)
locals = torch.matmul(self.rotations, self.locals)
globals, jpos = self.FK(locals, self.offsets, self.root_id, self.parent, self.translation)
vert_src_update = torch.matmul(globals, self.vert_local[:,0:3,:]) + jpos[..., None]
vert_src_update = torch.sum(vert_src_update * self.skinning.T[:, None, :], dim=0).T
loss = self.crit(vert_src_update, self.constraints)
loss = (loss * self.vismask[:, None]).mean()
loss.backward(retain_graph=True)
self.optimizer.step()
#print(f"iter {i}, loss: {loss.item()}")
return locals, globals, jpos
| 4,992 | 40.957983 | 136 | py |
MoRig | MoRig-master/utils/rot_utils.py | import numpy as np
import math
import torch
def isRotationMatrix(R):
Rt = np.transpose(R, axes=(0, 2, 1))
shouldBeIdentity = np.matmul(Rt, R)
I = np.identity(3, dtype=R.dtype)[None, ...]
n = (np.linalg.norm((I - shouldBeIdentity).reshape(-1, 9), axis=-1)) < 1e-6
return n.sum() == len(n)
def normalize_vector( v, return_mag =False):
batch=v.shape[0]
v_mag = np.sqrt(np.sum(v**2, axis=1))# batch
v_mag = np.maximum(v_mag, 1e-8)
v_mag = v_mag.reshape(batch,1)
v_mag = np.repeat(v_mag, v.shape[1], axis=1)
v = v/v_mag
if(return_mag==True):
return v, v_mag[:,0]
else:
return v
def cross_product(u, v):
# u, v batch*n
batch = u.shape[0]
i = u[:,1]*v[:,2] - u[:,2]*v[:,1]
j = u[:,2]*v[:,0] - u[:,0]*v[:,2]
k = u[:,0]*v[:,1] - u[:,1]*v[:,0]
out = np.concatenate((i.reshape(batch,1), j.reshape(batch,1), k.reshape(batch,1)),1)#batch*3
return out
def mat2continuous6d(R):
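    # continuous 6D rotation representation (Zhou et al.): simply the first two columns of R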
return np.concatenate([R[:, :, 0], R[:, :, 1]], axis=-1)
def mat2continuous6d_torch(R):
return torch.cat([R[:, :, 0], R[:, :, 1]], dim=-1)
def continuous6d2mat(ortho6d):
# batchx6
x_raw = ortho6d[:,0:3]#batch*3
y_raw = ortho6d[:,3:6]#batch*3
x = normalize_vector(x_raw) #batch*3
z = cross_product(x,y_raw) #batch*3
z = normalize_vector(z)#batch*3
y = cross_product(z,x)#batch*3
x = x.reshape(-1,3,1)
y = y.reshape(-1,3,1)
z = z.reshape(-1,3,1)
matrix = np.concatenate((x,y,z), 2) #batch*3*3
return matrix
def eular2mat(theta):
batch = theta.shape[0]
cos_0 = np.cos(theta[:, 0])
cos_1 = np.cos(theta[:, 1])
cos_2 = np.cos(theta[:, 2])
sin_0 = np.sin(theta[:, 0])
sin_1 = np.sin(theta[:, 1])
sin_2 = np.sin(theta[:, 2])
R_x = np.eye(3)[None, ...].repeat(batch, axis=0)
R_y = np.eye(3)[None, ...].repeat(batch, axis=0)
R_z = np.eye(3)[None, ...].repeat(batch, axis=0)
R_x[:, 1, 1] = cos_0
R_x[:, 1, 2] = -sin_0
R_x[:, 2, 1] = sin_0
R_x[:, 2, 2] = cos_0
R_y[:, 0, 0] = cos_1
R_y[:, 0, 2] = sin_1
R_y[:, 2, 0] = -sin_1
R_y[:, 2, 2] = cos_1
R_z[:, 0, 0] = cos_2
R_z[:, 0, 1] = -sin_2
R_z[:, 1, 0] = sin_2
R_z[:, 1, 1] = cos_2
R = np.matmul(R_z, np.matmul( R_y, R_x ))
return R
def mat2eular(R) :
batch = R.shape[0]
assert(isRotationMatrix(R))
sy = np.sqrt(R[:, 0, 0] * R[:, 0, 0] + R[:, 1, 0] * R[:, 1, 0])
singular = sy < 1e-6
    nonsingular_ids = np.argwhere(~singular).squeeze(axis=1)
    singular_ids = np.argwhere(singular).squeeze(axis=1)
    eulars = np.zeros((batch, 3))
    if len(nonsingular_ids) > 0:
        x_nonsin = np.arctan2(R[nonsingular_ids, 2, 1], R[nonsingular_ids, 2, 2])
        y_nonsin = np.arctan2(-R[nonsingular_ids, 2, 0], sy[nonsingular_ids])
        z_nonsin = np.arctan2(R[nonsingular_ids, 1, 0], R[nonsingular_ids, 0, 0])
        eulars[nonsingular_ids] = np.stack((x_nonsin, y_nonsin, z_nonsin), axis=1)
    if len(singular_ids) > 0:
        # gimbal-lock case: z is set to zero and x is recovered from the remaining entries
        x_sin = np.arctan2(-R[singular_ids, 1, 2], R[singular_ids, 1, 1])
        y_sin = np.arctan2(-R[singular_ids, 2, 0], sy[singular_ids])
        z_sin = np.zeros(len(singular_ids))
        eulars[singular_ids] = np.stack((x_sin, y_sin, z_sin), axis=1)
return eulars
def continuous6d2eular(ortho6d):
# batchx6
mat = continuous6d2mat(ortho6d)
eulars = mat2eular(mat)
return eulars
if __name__ == '__main__':
#numeric value between -π and π
alpha = 3.1
beta = -1.2
gamma = -3.1
mat = eular2mat(np.array([[alpha, beta, gamma], [1.0, -0.5, 1.2]]))
con6d = mat2continuous6d(mat)
mat2 = continuous6d2mat(con6d)
eulars = mat2eular(mat2)
print(eulars)
| 3,572 | 28.04878 | 106 | py |
MoRig | MoRig-master/utils/io_utils.py | #-------------------------------------------------------------------------------
# Name: io_utils.py
# Purpose: utilize functions for file IO
# RigNet Copyright 2020 University of Massachusetts
# RigNet is made available under General Public License Version 3 (GPLv3), or under a Commercial License.
# Please see the LICENSE README.txt file in the main directory for more information and instruction on using and licensing RigNet.
#-------------------------------------------------------------------------------
import os
import numpy as np
from utils import binvox_rw
import torch
from utils.os_utils import mkdir_p
import shutil
from utils.rig_parser import Rig, TreeNode, Info
#from data_proc.gen_skin_data import get_bones
def readPly(filename):
with open(filename, 'r') as fin:
lines = fin.readlines()
pts = []
for li in lines[7:]:
words = li.split()
pts.append(np.array([[float(words[0]), float(words[1]), float(words[2])]]))
pts = np.concatenate(pts, axis=0)
return pts
def writePly(pts, filename):
with open(filename, 'w') as f:
pn = pts.shape[0]
f.write('ply\n')
f.write('format ascii 1.0\n')
f.write('element vertex %d\n' % (pn) )
f.write('property float x\n')
f.write('property float y\n')
f.write('property float z\n')
f.write('end_header\n')
for i in range(pn):
f.write('%f %f %f\n' % (pts[i, 0], pts[i, 1], pts[i, 2]) )
def output_point_cloud_ply(xyzs, name, output_folder):
if not os.path.exists( output_folder ):
mkdir_p( output_folder )
print('write: ' + os.path.join(output_folder, name + '.ply'))
with open(os.path.join(output_folder, name + '.ply'), 'w') as f:
pn = xyzs.shape[0]
f.write('ply\n')
f.write('format ascii 1.0\n')
f.write('element vertex %d\n' % (pn) )
f.write('property float x\n')
f.write('property float y\n')
f.write('property float z\n')
f.write('end_header\n')
for i in range(pn):
f.write('%f %f %f\n' % (xyzs[i][0], xyzs[i][1], xyzs[i][2]) )
def readVox(vox_filename):
with open(vox_filename, 'rb') as fvox:
vox = binvox_rw.read_as_3d_array(fvox)
return vox
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar', snapshot=None):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
if snapshot and state['epoch'] % snapshot == 0:
shutil.copyfile(filepath, os.path.join(checkpoint, 'checkpoint_{}.pth.tar'.format(state['epoch'])))
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))
'''
def add_duplicate_joints(rig):
new_names = [rig.names[rig.root_id]]
new_pos = [rig.pos[rig.root_id]]
new_hierarchy = [-1]
this_level = [rig.root_id]
while this_level:
next_level = []
for p_id in this_level:
ch_ids = np.argwhere(rig.hierarchy == p_id).squeeze(axis=1)
if len(ch_ids) > 1:
for dup_id, ch_id in enumerate(ch_ids):
new_names.append(rig.names[p_id] + f"_dup_{dup_id}")
new_pos.append(rig.pos[p_id])
new_hierarchy.append(new_names.index(rig.names[p_id]))
new_names.append(rig.names[ch_id])
new_pos.append(rig.pos[ch_id])
new_hierarchy.append(new_names.index(rig.names[p_id] + f"_dup_{dup_id}"))
elif len(ch_ids) == 1:
ch_id = ch_ids[0]
new_names.append(rig.names[ch_id])
new_pos.append(rig.pos[ch_id])
new_hierarchy.append(new_names.index(rig.names[p_id]))
else:
continue
next_level += ch_ids.tolist()
this_level = next_level
rig_new = Rig()
rig_new.hierarchy = np.array(new_hierarchy)
rig_new.names = new_names
rig_new.root_id = rig.root_id
rig_new.root_name = rig.root_name
rig_new.pos = np.stack(new_pos, axis=0)
rig_new.calc_frames_and_offsets()
return rig_new
def mapping_bone_index(bones_old, bones_new):
bone_map = {}
for i in range(len(bones_old)):
bone_old = bones_old[i][np.newaxis, :]
dist = np.linalg.norm(bones_new - bone_old, axis=1)
ni = np.argmin(dist)
bone_map[i] = ni
return bone_map
def assemble_skel_skin(skel, attachment):
bones_old, bone_names_old, _ = get_bones(skel)
skel_new = add_duplicate_joints(skel)
bones_new, bone_names_new, _ = get_bones(skel_new)
bone_map = mapping_bone_index(bones_old, bones_new)
attachment_new = np.zeros((len(attachment), len(skel_new.names)))
for col_id in range(attachment.shape[1]):
new_bone_id = bone_map[col_id]
new_joint_name = bone_names_new[new_bone_id][0]
new_col_id = skel_new.names.index(new_joint_name)
attachment_new[:, new_col_id] = attachment[:, col_id]
skel_new.skins = attachment_new
return skel_new
'''
def get_bones(skel):
"""
    extract bones from the skeleton structure
    :param skel: input skeleton
    :return: bones is a B*6 array where each row consists of the starting and ending points of a bone
             bone_name is a list of B elements, where each element consists of the starting and ending joint names
             leaf_bones indicates whether a bone is a virtual "leaf" bone.
             We add virtual "leaf" bones to the leaf joints since they always have skinning weights as well
"""
bones = []
bone_name = []
leaf_bones = []
this_level = [skel.root]
while this_level:
next_level = []
for p_node in this_level:
p_pos = np.array(p_node.pos)
next_level += p_node.children
for c_node in p_node.children:
c_pos = np.array(c_node.pos)
bones.append(np.concatenate((p_pos, c_pos))[np.newaxis, :])
bone_name.append([p_node.name, c_node.name])
leaf_bones.append(False)
if len(c_node.children) == 0:
bones.append(np.concatenate((c_pos, c_pos))[np.newaxis, :])
bone_name.append([c_node.name, c_node.name+'_leaf'])
leaf_bones.append(True)
this_level = next_level
bones = np.concatenate(bones, axis=0)
return bones, bone_name, leaf_bones
def mapping_bone_index(bones_old, bones_new):
bone_map = {}
for i in range(len(bones_old)):
bone_old = bones_old[i][np.newaxis, :]
dist = np.linalg.norm(bones_new - bone_old, axis=1)
ni = np.argmin(dist)
bone_map[i] = ni
return bone_map
def add_duplicate_joints(skel):
this_level = [skel.root]
while this_level:
next_level = []
for p_node in this_level:
if len(p_node.children) > 1:
new_children = []
for dup_id in range(len(p_node.children)):
p_node_new = TreeNode(p_node.name + '_dup_{:d}'.format(dup_id), p_node.pos)
p_node_new.overlap=True
p_node_new.parent = p_node
p_node_new.children = [p_node.children[dup_id]]
# for user interaction, we move overlapping joints a bit to its children
p_node_new.pos = np.array(p_node_new.pos) + 0.03 * np.linalg.norm(np.array(p_node.children[dup_id].pos) - np.array(p_node_new.pos))
p_node_new.pos = (p_node_new.pos[0], p_node_new.pos[1], p_node_new.pos[2])
p_node.children[dup_id].parent = p_node_new
new_children.append(p_node_new)
p_node.children = new_children
p_node.overlap = False
next_level += p_node.children
this_level = next_level
return skel
def assemble_skel_skin(skel, attachment):
bones_old, bone_names_old, _ = get_bones(skel)
skel_new = add_duplicate_joints(skel)
bones_new, bone_names_new, _ = get_bones(skel_new)
bone_map = mapping_bone_index(bones_old, bones_new)
skel_new.joint_pos = skel_new.get_joint_dict()
skel_new.joint_skin = []
for v in range(len(attachment)):
vi_skin = [str(v)]
skw = attachment[v]
skw = skw / (np.sum(skw) + 1e-10)
for i in range(len(skw)):
if i == len(bones_old):
break
if skw[i] > 1e-5:
bind_joint_name = bone_names_new[bone_map[i]][0]
bind_weight = skw[i]
vi_skin.append(bind_joint_name)
vi_skin.append(str(bind_weight))
skel_new.joint_skin.append(vi_skin)
return skel_new
def output_rigging(skel_name, attachment, output_folder, name):
skel = Info(skel_name)
skel_new = assemble_skel_skin(skel, attachment)
skel_new.save(os.path.join(output_folder, str(name) + '_rig.txt')) | 8,949 | 37.913043 | 151 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/con_training_sts.py | from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import os
import gzip
import csv
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
model_name = "output/training_nli"
train_batch_size = 16
num_epochs = 4
model_save_path = 'output/training_stsbenchmark_continue_training-'+model_name+'-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
model = SentenceTransformer(model_name)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with open('./KorNLUDatasets/KorSTS/tune_dev.tsv', 'rt', encoding='utf-8') as fIn:
lines = fIn.readlines()
for line in lines:
s1, s2, score = line.split('\t')
score = score.strip()
score = float(score) / 5.0
dev_samples.append(InputExample(texts= [s1,s2], label=score))
with open('./KorNLUDatasets/KorSTS/tune_test.tsv', 'rt', encoding='utf-8') as fIn:
lines = fIn.readlines()
for line in lines:
s1, s2, score = line.split('\t')
score = score.strip()
score = float(score) / 5.0
test_samples.append(InputExample(texts= [s1,s2], label=score))
with open('./KorNLUDatasets/KorSTS/tune_train.tsv', 'rt', encoding='utf-8') as fIn:
lines = fIn.readlines()
for line in lines:
s1, s2, score = line.split('\t')
score = score.strip()
score = float(score) / 5.0
train_samples.append(InputExample(texts= [s1,s2], label=score))
train_dataset = SentencesDataset(train_samples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
# Development set: Measure correlation between cosine score and gold labels
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')
warmup_steps = math.ceil(len(train_dataset) * num_epochs / train_batch_size * 0.1) #10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
test_evaluator(model, output_path=model_save_path)
| 3,065 | 35.5 | 127 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/SemanticSearch.py | from sentence_transformers import SentenceTransformer, util
import numpy as np
model_path = './output/training_sts'
embedder = SentenceTransformer(model_path)
# Corpus with example sentences
corpus = ['한 남자가 음식을 먹는다.',
'한 남자가 빵 한 조각을 먹는다.',
'그 여자가 아이를 돌본다.',
'한 남자가 말을 탄다.',
'한 여자가 바이올린을 연주한다.',
'두 남자가 수레를 숲 솦으로 밀었다.',
'한 남자가 담으로 싸인 땅에서 백마를 타고 있다.',
'원숭이 한 마리가 드럼을 연주한다.',
'치타 한 마리가 먹이 뒤에서 달리고 있다.']
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = ['한 남자가 파스타를 먹는다.',
'고릴라 의상을 입은 누군가가 드럼을 연주하고 있다.',
'치타가 들판을 가로 질러 먹이를 쫓는다.']
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = 5
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
cos_scores = util.pytorch_cos_sim(query_embedding, corpus_embeddings)[0]
cos_scores = cos_scores.cpu()
#We use np.argpartition, to only partially sort the top_k results
top_results = np.argpartition(-cos_scores, range(top_k))[0:top_k]
print("\n\n======================\n\n")
print("Query:", query)
print("\nTop 5 most similar sentences in corpus:")
for idx in top_results[0:top_k]:
print(corpus[idx].strip(), "(Score: %.4f)" % (cos_scores[idx]))
| 1,372 | 30.204545 | 95 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/training_nli.py | from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses
from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
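# Configure logging (mirrors the other training scripts in this repo) so that logging.info output is shown
logging.basicConfig(format='%(asctime)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO,
                    handlers=[LoggingHandler()])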
model_name = "skt_kobert_model_"
train_batch_size = 16
model_save_path = 'output/training_nli_'
word_embedding_model = models.Transformer(model_name, isKor=True)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
logging.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
with open('./KorNLUDatasets/KorNLI/snli_1.0_train.ko.tsv', "rt", encoding="utf-8") as fIn:
lines = fIn.readlines()
for line in lines:
s1, s2, label = line.split('\t')
label = label2int[label.strip()]
train_samples.append(InputExample(texts=[s1, s2], label=label))
train_dataset = SentencesDataset(train_samples, model=model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.SoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=len(label2int))
#Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
with open('./KorNLUDatasets/KorSTS/tune_dev.tsv', 'rt', encoding='utf-8') as fIn:
lines = fIn.readlines()
for line in lines:
s1, s2, score = line.split('\t')
score = score.strip()
score = float(score) / 5.0
dev_samples.append(InputExample(texts= [s1,s2], label=score))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, batch_size=train_batch_size, name='sts-dev')
num_epochs = 1
warmup_steps = math.ceil(len(train_dataset) * num_epochs / train_batch_size * 0.1) #10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
test_samples = []
with open('./KorNLUDatasets/KorSTS/tune_test.tsv', 'rt', encoding='utf-8') as fIn:
lines = fIn.readlines()
for line in lines:
s1, s2, score = line.split('\t')
score = score.strip()
score = float(score) / 5.0
test_samples.append(InputExample(texts=[s1,s2], label=score))
print("\n\n\n")
print("======================TEST===================")
print("\n\n\n")
model = SentenceTransformer(model_save_path)
print(f"model save path > {model_save_path}")
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, batch_size=train_batch_size, name='sts-test')
test_evaluator(model, output_path=model_save_path)
| 3,595 | 35.323232 | 142 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/training_sts.py | from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import STSBenchmarkDataReader, InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
model_name = "skt_kobert_model_"
train_batch_size = 16
num_epochs = 4
model_save_path = 'output/training_stsbenchmark_'+model_name.replace("/", "-")+'-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
word_embedding_model = models.Transformer(model_name, isKor=True)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with open('./KorNLUDatasets/KorSTS/tune_dev.tsv', 'rt', encoding='utf-8') as fIn:
lines = fIn.readlines()
for line in lines:
s1, s2, score = line.split('\t')
score = score.strip()
score = float(score) / 5.0
dev_samples.append(InputExample(texts= [s1,s2], label=score))
with open('./KorNLUDatasets/KorSTS/tune_test.tsv', 'rt', encoding='utf-8') as fIn:
lines = fIn.readlines()
for line in lines:
s1, s2, score = line.split('\t')
score = score.strip()
score = float(score) / 5.0
test_samples.append(InputExample(texts= [s1,s2], label=score))
with open('./KorNLUDatasets/KorSTS/tune_train.tsv', 'rt', encoding='utf-8') as fIn:
lines = fIn.readlines()
for line in lines:
s1, s2, score = line.split('\t')
score = score.strip()
score = float(score) / 5.0
train_samples.append(InputExample(texts= [s1,s2], label=score))
train_dataset = SentencesDataset(train_samples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')
# Configure the training. We skip evaluation in this example
warmup_steps = math.ceil(len(train_dataset) * num_epochs / train_batch_size * 0.1) #10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
test_evaluator(model, output_path=model_save_path)
| 3,565 | 36.536842 | 127 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_encoder_decoder.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support Encoder-Decoder architectures """
import logging
import os
from torch import nn
from .modeling_auto import AutoModel, AutoModelWithLMHead
logger = logging.getLogger(__name__)
class PreTrainedEncoderDecoder(nn.Module):
r"""
:class:`~transformers.PreTrainedEncoderDecoder` is a generic model class that will be
instantiated as a transformer architecture with one of the base model
classes of the library as encoder and (optionally) another one as
decoder when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)`
class method.
"""
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
@classmethod
def from_pretrained(
cls,
encoder_pretrained_model_name_or_path=None,
decoder_pretrained_model_name_or_path=None,
*model_args,
**kwargs
):
r""" Instantiates an encoder and a decoder from one or two base classes of the library from pre-trained model checkpoints.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you need to first set it back in training mode with `model.train()`
Params:
encoder_pretrained_model_name_or_path: information necessary to initiate the encoder. Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/encoder``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
decoder_pretrained_model_name_or_path: information necessary to initiate the decoder. Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/decoder``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                    - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                    - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments.
                Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g. ``output_attention=True``). Behaves differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
                You can specify kwargs specific to the encoder and decoder by prefixing the key with `encoder_` and `decoder_` respectively (e.g. ``decoder_output_attention=True``). The remaining kwargs will be passed to both the encoder and the decoder.
Examples::
# For example purposes. Not runnable.
model = PreTrainedEncoderDecoder.from_pretrained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert
"""
# keyword arguments come in 3 flavors: encoder-specific (prefixed by
# `encoder_`), decoder-specific (prefixed by `decoder_`) and those
# that apply to the model as a whole.
# We let the specific kwargs override the common ones in case of conflict.
kwargs_common = {
argument: value
for argument, value in kwargs.items()
if not argument.startswith("encoder_") and not argument.startswith("decoder_")
}
kwargs_decoder = kwargs_common.copy()
kwargs_encoder = kwargs_common.copy()
kwargs_encoder.update(
{
argument[len("encoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("encoder_")
}
)
kwargs_decoder.update(
{
argument[len("decoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("decoder_")
}
)
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
encoder = kwargs_encoder.pop("model", None)
if encoder is None:
encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
encoder.config.is_decoder = False
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
decoder = AutoModelWithLMHead.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
decoder.config.is_decoder = True
model = cls(encoder, decoder)
return model
def save_pretrained(self, save_directory):
""" Save a Seq2Seq model and its configuration file in a format such
that it can be loaded using `:func:`~transformers.PreTrainedEncoderDecoder.from_pretrained`
        We save the encoder's and decoder's parameters in two separate directories.
"""
# If the root output directory does not exist, create it
if not os.path.exists(save_directory):
os.mkdir(save_directory)
# Check whether the output directory is empty or not
sub_directories = [
directory
for directory in os.listdir(save_directory)
if os.path.isdir(os.path.join(save_directory, directory))
]
if len(sub_directories) > 0:
if "encoder" in sub_directories and "decoder" in sub_directories:
print(
"WARNING: there is an older version of encoder-decoder saved in"
+ " the output directory. The default behaviour is to overwrite them."
)
# Empty the output directory
for directory_to_remove in sub_directories:
# Remove all files into the subdirectory
files_to_remove = os.listdir(os.path.join(save_directory, directory_to_remove))
for file_to_remove in files_to_remove:
os.remove(os.path.join(save_directory, directory_to_remove, file_to_remove))
# Remove the subdirectory itself
os.rmdir(os.path.join(save_directory, directory_to_remove))
assert len(os.listdir(save_directory)) == 0 # sanity check
# Create the "encoder" directory inside the output directory and save the encoder into it
if not os.path.exists(os.path.join(save_directory, "encoder")):
os.mkdir(os.path.join(save_directory, "encoder"))
self.encoder.save_pretrained(os.path.join(save_directory, "encoder"))
# Create the "encoder" directory inside the output directory and save the decoder into it
if not os.path.exists(os.path.join(save_directory, "decoder")):
os.mkdir(os.path.join(save_directory, "decoder"))
self.decoder.save_pretrained(os.path.join(save_directory, "decoder"))
def forward(self, encoder_input_ids, decoder_input_ids, **kwargs):
""" The forward pass on a seq2eq depends what we are performing:
- During training we perform one forward pass through both the encoder
and decoder;
- During prediction, we perform one forward pass through the encoder,
and then perform several forward passes with the encoder's hidden
state through the decoder to decode a full sequence.
Therefore, we skip the forward pass on the encoder if an argument named
        `encoder_hidden_states` is passed to this function.
Params:
encoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``
Indices of encoder input sequence tokens in the vocabulary.
decoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``
Indices of decoder input sequence tokens in the vocabulary.
kwargs: (`optional`) Remaining dictionary of keyword arguments.
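        Examples::

            # For example purposes. Not runnable.
            # Illustrative sketch; `encoder_ids`, `decoder_ids` and `enc_states`
            # are placeholder tensors, not names defined in this file.
            outputs = model(encoder_ids, decoder_ids)  # training: one pass through encoder and decoder
            # At prediction time, reuse a cached encoder state: the `encoder_` prefix routes it to
            # the encoder kwargs as `hidden_states`, so the encoder forward pass is skipped.
            outputs = model(encoder_ids, decoder_ids, encoder_hidden_states=enc_states)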
"""
kwargs_encoder, kwargs_decoder = self.prepare_model_kwargs(**kwargs)
# Encode if needed (training, first prediction pass)
encoder_hidden_states = kwargs_encoder.pop("hidden_states", None)
if encoder_hidden_states is None:
encoder_outputs = self.encoder(encoder_input_ids, **kwargs_encoder)
encoder_hidden_states = encoder_outputs[0]
else:
encoder_outputs = ()
kwargs_decoder["encoder_hidden_states"] = encoder_hidden_states
decoder_outputs = self.decoder(decoder_input_ids, **kwargs_decoder)
return decoder_outputs + encoder_outputs
| 13,316 | 55.189873 | 472 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/filep.py | from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained('gpt2')
generated = tokenizer.encode("The Manhattan bridge")
context = torch.tensor([generated])
past = None
for i in range(15):
output, past = model(context, past=past)
distribution = output[0, :]
# Get the top 10 values' indices and cast them to a list
top_values = distribution[-1].topk(10).indices.tolist()
# Decode those into words
    top_words = [tokenizer.decode([x]) for x in top_values]
    # select words (only arbitrarily select the first three)
    words = top_words[0:3]
    # Cast them back to tokens which can be used as an added token
    selected_tokens = [tokenizer.encode(word) for word in words]
    # greedy decoding: continue the sequence with the most likely next token
    argmax_token = torch.argmax(distribution[-1])
    generated += [argmax_token.tolist()]
context = argmax_token.unsqueeze(0)
print(tokenizer.decode([argmax_token.tolist()]))
sequence = tokenizer.decode(generated)
print(sequence) | 1,009 | 27.857143 | 76 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_albert.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 ALBERT model. """
import logging
import tensorflow as tf
from .configuration_albert import AlbertConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_bert import ACT2FN, TFBertSelfAttention
from .modeling_tf_utils import TFPreTrainedModel, get_initializer, keras_serializable, shape_list
logger = logging.getLogger(__name__)
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-with-prefix-tf_model.h5",
"albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v1-with-prefix-tf_model.h5",
"albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v1-with-prefix-tf_model.h5",
"albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v1-with-prefix-tf_model.h5",
"albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-with-prefix-tf_model.h5",
"albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-with-prefix-tf_model.h5",
"albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-with-prefix-tf_model.h5",
"albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-with-prefix-tf_model.h5",
}
class TFAlbertEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.config = config
self.position_embeddings = tf.keras.layers.Embedding(
config.max_position_embeddings,
config.embedding_size,
embeddings_initializer=get_initializer(self.config.initializer_range),
name="position_embeddings",
)
self.token_type_embeddings = tf.keras.layers.Embedding(
config.type_vocab_size,
config.embedding_size,
embeddings_initializer=get_initializer(self.config.initializer_range),
name="token_type_embeddings",
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def build(self, input_shape):
"""Build shared word embedding layer """
with tf.name_scope("word_embeddings"):
# Create and initialize weights. The random normal initializer was chosen
# arbitrarily, and works well.
self.word_embeddings = self.add_weight(
"weight",
shape=[self.config.vocab_size, self.config.embedding_size],
initializer=get_initializer(self.config.initializer_range),
)
super().build(input_shape)
def call(self, inputs, mode="embedding", training=False):
"""Get token embeddings of inputs.
Args:
            inputs: list of four tensors (input_ids, position_ids, token_type_ids, inputs_embeds); the id tensors are int64 with shape [batch_size, length]
mode: string, a valid value is one of "embedding" and "linear".
Returns:
outputs: (1) If mode == "embedding", output embedding tensor, float32 with
shape [batch_size, length, embedding_size]; (2) mode == "linear", output
linear tensor, float32 with shape [batch_size, length, vocab_size].
Raises:
ValueError: if mode is not valid.
Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
if mode == "embedding":
return self._embedding(inputs, training=training)
elif mode == "linear":
return self._linear(inputs)
else:
raise ValueError("mode {} is not valid.".format(mode))
def _embedding(self, inputs, training=False):
"""Applies embedding based on inputs tensor."""
input_ids, position_ids, token_type_ids, inputs_embeds = inputs
if input_ids is not None:
input_shape = shape_list(input_ids)
else:
input_shape = shape_list(inputs_embeds)[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :]
if token_type_ids is None:
token_type_ids = tf.fill(input_shape, 0)
if inputs_embeds is None:
inputs_embeds = tf.gather(self.word_embeddings, input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings, training=training)
return embeddings
def _linear(self, inputs):
"""Computes logits by running inputs through a linear layer.
Args:
inputs: A float32 tensor with shape [batch_size, length, embedding_size]
Returns:
float32 tensor with shape [batch_size, length, vocab_size].
"""
batch_size = shape_list(inputs)[0]
length = shape_list(inputs)[1]
x = tf.reshape(inputs, [-1, self.config.embedding_size])
logits = tf.matmul(x, self.word_embeddings, transpose_b=True)
return tf.reshape(logits, [batch_size, length, self.config.vocab_size])
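# Illustrative sketch (not part of the original file): how the two modes of
# TFAlbertEmbeddings relate. The config values and dummy ids below are assumptions
# chosen only to make the shapes concrete; the layer reuses the shared word embedding
# matrix both to embed ids and, transposed, to produce vocabulary logits.
def _albert_embeddings_mode_sketch():
    config = AlbertConfig(vocab_size=30000, embedding_size=128)
    embeddings = TFAlbertEmbeddings(config, name="embeddings_sketch")
    input_ids = tf.constant([[7, 11, 13]])  # [batch_size=1, length=3]
    hidden = embeddings([input_ids, None, None, None], mode="embedding")  # [1, 3, 128]
    logits = embeddings(hidden, mode="linear")  # [1, 3, 30000]
    return hidden, logits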
class TFAlbertSelfAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
assert config.hidden_size % config.num_attention_heads == 0
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x, batch_size):
x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, inputs, training=False):
hidden_states, attention_mask, head_mask = inputs
batch_size = shape_list(hidden_states)[0]
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
# Take the dot product between "query" and "key" to get the raw attention scores.
# (batch size, num_heads, seq_len_q, seq_len_k)
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
# scale attention_scores
dk = tf.cast(shape_list(key_layer)[-1], tf.float32)
attention_scores = attention_scores / tf.math.sqrt(dk)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the TFAlbertModel call() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
context_layer = tf.reshape(
context_layer, (batch_size, -1, self.all_head_size)
) # (batch_size, seq_len_q, all_head_size)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
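# Worked example (not part of the original file) of the shapes flowing through the
# self-attention above, assuming batch=2, seq_len=4, hidden_size=8, num_attention_heads=2
# (so attention_head_size=4):
#   query/key/value after transpose_for_scores: [2, 2, 4, 4]  (batch, heads, seq, head_size)
#   attention_scores = Q @ K^T / sqrt(4):       [2, 2, 4, 4]  (batch, heads, seq_q, seq_k)
#   attention_probs  = softmax(scores, axis=-1): each row over seq_k sums to 1
#   context_layer    = probs @ V, transposed and reshaped back to [2, 4, 8]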
class TFAlbertSelfOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, inputs, training=False):
hidden_states, input_tensor = inputs
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFAlbertAttention(TFBertSelfAttention):
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
self.hidden_size = config.hidden_size
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.pruned_heads = set()
def prune_heads(self, heads):
raise NotImplementedError
def call(self, inputs, training=False):
input_tensor, attention_mask, head_mask = inputs
batch_size = shape_list(input_tensor)[0]
mixed_query_layer = self.query(input_tensor)
mixed_key_layer = self.key(input_tensor)
mixed_value_layer = self.value(input_tensor)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
# Take the dot product between "query" and "key" to get the raw attention scores.
# (batch size, num_heads, seq_len_q, seq_len_k)
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
# scale attention_scores
dk = tf.cast(shape_list(key_layer)[-1], tf.float32)
attention_scores = attention_scores / tf.math.sqrt(dk)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the TFAlbertModel call() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
context_layer = tf.reshape(
context_layer, (batch_size, -1, self.all_head_size)
) # (batch_size, seq_len_q, all_head_size)
self_outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
hidden_states = self_outputs[0]
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
attention_output = self.LayerNorm(hidden_states + input_tensor)
# add attentions if we output them
outputs = (attention_output,) + self_outputs[1:]
return outputs
class TFAlbertLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.attention = TFAlbertAttention(config, name="attention")
self.ffn = tf.keras.layers.Dense(
config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn"
)
if isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
self.ffn_output = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn_output"
)
self.full_layer_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="full_layer_layer_norm"
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, inputs, training=False):
hidden_states, attention_mask, head_mask = inputs
attention_outputs = self.attention([hidden_states, attention_mask, head_mask], training=training)
ffn_output = self.ffn(attention_outputs[0])
ffn_output = self.activation(ffn_output)
ffn_output = self.ffn_output(ffn_output)
        # apply dropout to the ffn output before the residual LayerNorm; dropping out
        # hidden_states here had no effect because it was overwritten on the next line
        ffn_output = self.dropout(ffn_output, training=training)
hidden_states = self.full_layer_layer_norm(ffn_output + attention_outputs[0])
# add attentions if we output them
outputs = (hidden_states,) + attention_outputs[1:]
return outputs
class TFAlbertLayerGroup(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.albert_layers = [
TFAlbertLayer(config, name="albert_layers_._{}".format(i)) for i in range(config.inner_group_num)
]
def call(self, inputs, training=False):
hidden_states, attention_mask, head_mask = inputs
layer_hidden_states = ()
layer_attentions = ()
for layer_index, albert_layer in enumerate(self.albert_layers):
layer_output = albert_layer([hidden_states, attention_mask, head_mask[layer_index]], training=training)
hidden_states = layer_output[0]
if self.output_attentions:
layer_attentions = layer_attentions + (layer_output[1],)
if self.output_hidden_states:
layer_hidden_states = layer_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (layer_hidden_states,)
if self.output_attentions:
outputs = outputs + (layer_attentions,)
# last-layer hidden state, (layer hidden states), (layer attentions)
return outputs
class TFAlbertTransformer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.config = config
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.embedding_hidden_mapping_in = tf.keras.layers.Dense(
config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
name="embedding_hidden_mapping_in",
)
self.albert_layer_groups = [
TFAlbertLayerGroup(config, name="albert_layer_groups_._{}".format(i))
for i in range(config.num_hidden_groups)
]
def call(self, inputs, training=False):
hidden_states, attention_mask, head_mask = inputs
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
all_attentions = ()
if self.output_hidden_states:
all_hidden_states = (hidden_states,)
for i in range(self.config.num_hidden_layers):
# Number of layers in a hidden group
layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
# Index of the hidden group
group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
layer_group_output = self.albert_layer_groups[group_idx](
[
hidden_states,
attention_mask,
head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
],
training=training,
)
hidden_states = layer_group_output[0]
if self.output_attentions:
all_attentions = all_attentions + layer_group_output[-1]
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
# last-layer hidden state, (all hidden states), (all attentions)
return outputs
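# Worked example (not part of the original file) of the group-index arithmetic above,
# assuming config.num_hidden_layers = 12 and config.num_hidden_groups = 1 (the released
# ALBERT checkpoints):
#   layers_per_group = int(12 / 1) = 12
#   group_idx        = int(i / 12) = 0 for every i in range(12)
# so the single TFAlbertLayerGroup is applied 12 times with shared weights, which is the
# source of ALBERT's parameter savings over BERT. With num_hidden_groups = 4 instead,
# layers 0-2 would use group 0, layers 3-5 group 1, and so on.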
class TFAlbertPreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = AlbertConfig
pretrained_model_archive_map = TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "albert"
class TFAlbertMLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.dense = tf.keras.layers.Dense(
config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
self.decoder_bias = self.add_weight(
shape=(self.vocab_size,), initializer="zeros", trainable=True, name="decoder/bias"
)
super().build(input_shape)
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.decoder(hidden_states, mode="linear") + self.decoder_bias
hidden_states = hidden_states + self.bias
return hidden_states
@keras_serializable
class TFAlbertMainLayer(tf.keras.layers.Layer):
config_class = AlbertConfig
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.num_hidden_layers = config.num_hidden_layers
self.embeddings = TFAlbertEmbeddings(config, name="embeddings")
self.encoder = TFAlbertTransformer(config, name="encoder")
self.pooler = tf.keras.layers.Dense(
config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
activation="tanh",
name="pooler",
)
def get_input_embeddings(self):
return self.embeddings
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
raise NotImplementedError
def call(
self,
inputs,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
training=False,
):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
position_ids = inputs[3] if len(inputs) > 3 else position_ids
head_mask = inputs[4] if len(inputs) > 4 else head_mask
inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds
assert len(inputs) <= 6, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
attention_mask = inputs.get("attention_mask", attention_mask)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
assert len(inputs) <= 6, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if attention_mask is None:
attention_mask = tf.fill(input_shape, 1)
if token_type_ids is None:
token_type_ids = tf.fill(input_shape, 0)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, tf.float32)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.num_hidden_layers
# head_mask = tf.constant([0] * self.num_hidden_layers)
embedding_output = self.embeddings([input_ids, position_ids, token_type_ids, inputs_embeds], training=training)
encoder_outputs = self.encoder([embedding_output, extended_attention_mask, head_mask], training=training)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output[:, 0])
# add hidden_states and attentions if they are here
outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]
# sequence_output, pooled_output, (hidden_states), (attentions)
return outputs
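# Worked example (not part of the original file) of the additive attention mask built
# above, for a single sequence of length 4 whose last position is padding:
#   attention_mask          = [[1, 1, 1, 0]]
#   extended_attention_mask = [[[[1., 1., 1., 0.]]]]          # [batch, 1, 1, seq_len]
#   (1.0 - mask) * -10000.0 = [[[[0., 0., 0., -10000.]]]]
# Adding -10000.0 to the raw scores drives the padded position's softmax weight to ~0,
# so every head ignores it without needing a boolean mask inside the attention layers.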
ALBERT_START_DOCSTRING = r"""
This model is a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ sub-class.
Use it as a regular TF 2.0 Keras Model and
refer to the TF 2.0 documentation for all matter related to general usage and behavior.
.. _`ALBERT: A Lite BERT for Self-supervised Learning of Language Representations`:
https://arxiv.org/abs/1909.11942
.. _`tf.keras.Model`:
https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model
.. note::
    TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
    This second option is useful when using the :obj:`tf.keras.Model.fit()` method, which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
        - a single Tensor with input_ids only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Args:
config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
ALBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.AlbertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
        inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
(if set to :obj:`False`) for evaluation.
"""
@add_start_docstrings(
"The bare Albert Model transformer outputing raw hidden-states without any specific head on top.",
ALBERT_START_DOCSTRING,
)
class TFAlbertModel(TFAlbertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.albert = TFAlbertMainLayer(config, name="albert")
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Returns:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`tf.Tensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the sentence order prediction (classification)
            objective during Albert pretraining. This output is usually *not* a good summary
            of the semantic content of the input; you're often better off averaging or pooling
the sequence of hidden-states for the whole input sequence.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import AlbertTokenizer, TFAlbertModel
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = TFAlbertModel.from_pretrained('albert-base-v2')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
outputs = self.albert(inputs, **kwargs)
return outputs
@add_start_docstrings("""Albert Model with a `language modeling` head on top. """, ALBERT_START_DOCSTRING)
class TFAlbertForMaskedLM(TFAlbertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super(TFAlbertForMaskedLM, self).__init__(config, *inputs, **kwargs)
self.albert = TFAlbertMainLayer(config, name="albert")
self.predictions = TFAlbertMLMHead(config, self.albert.embeddings, name="predictions")
def get_output_embeddings(self):
return self.albert.embeddings
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Returns:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
prediction_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import AlbertTokenizer, TFAlbertForMaskedLM
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = TFAlbertForMaskedLM.from_pretrained('albert-base-v2')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
prediction_scores = outputs[0]
"""
outputs = self.albert(inputs, **kwargs)
sequence_output = outputs[0]
prediction_scores = self.predictions(sequence_output, training=kwargs.get("training", False))
# Add hidden states and attention if they are here
outputs = (prediction_scores,) + outputs[2:]
return outputs # prediction_scores, (hidden_states), (attentions)
@add_start_docstrings(
"""Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
ALBERT_START_DOCSTRING,
)
class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super(TFAlbertForSequenceClassification, self).__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.albert = TFAlbertMainLayer(config, name="albert")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Returns:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
logits (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`)
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import AlbertTokenizer, TFAlbertForSequenceClassification
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = TFAlbertForSequenceClassification.from_pretrained('albert-base-v2')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
logits = outputs[0]
"""
outputs = self.albert(inputs, **kwargs)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output, training=kwargs.get("training", False))
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
return outputs # logits, (hidden_states), (attentions)
| 39,034 | 46.14372 | 159 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import logging
import math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
logger = logging.getLogger(__name__)
def get_constant_schedule(optimizer, last_epoch=-1):
""" Create a schedule with a constant learning rate.
"""
return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
""" Create a schedule with a constant learning rate preceded by a warmup
period during which the learning rate increases linearly between 0 and 1.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1.0, num_warmup_steps))
return 1.0
return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
""" Create a schedule with a learning rate that decreases linearly after
linearly increasing during a warmup period.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(
0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
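# Illustrative sketch (not part of the original file): the usual pairing of an optimizer
# with the warmup + linear decay schedule above. The model, learning rate and step counts
# are assumptions, and the forward/backward pass is elided.
def _linear_schedule_usage_sketch(model, num_training_steps=1000, num_warmup_steps=100):
    optimizer = AdamW(model.parameters(), lr=5e-5)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps)
    for _ in range(num_training_steps):
        # ... compute the loss and call loss.backward() here ...
        optimizer.step()
        scheduler.step()  # the learning rate is updated once per optimizer step
        optimizer.zero_grad()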
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
""" Create a schedule with a learning rate that decreases following the
values of the cosine function between 0 and `pi * cycles` after a warmup
period during which it increases linearly between 0 and 1.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer, num_warmup_steps, num_training_steps, num_cycles=1.0, last_epoch=-1
):
""" Create a schedule with a learning rate that decreases following the
values of the cosine function with several hard restarts, after a warmup
period during which it increases linearly between 0 and 1.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
return LambdaLR(optimizer, lr_lambda, last_epoch)
class AdamW(Optimizer):
""" Implements Adam algorithm with weight decay fix.
Parameters:
lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adam's beta parameters (b1, b2). Default: (0.9, 0.999)
        eps (float): Adam's epsilon. Default: 1e-6
weight_decay (float): Weight decay. Default: 0.0
correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):
if lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
super().__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(1.0 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group["eps"])
step_size = group["lr"]
if group["correct_bias"]: # No bias correction for Bert
bias_correction1 = 1.0 - beta1 ** state["step"]
bias_correction2 = 1.0 - beta2 ** state["step"]
step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group["weight_decay"] > 0.0:
p.data.add_(-group["lr"] * group["weight_decay"], p.data)
return loss
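# Illustrative sketch (not part of the original file): the parameter grouping commonly
# used with this AdamW implementation, excluding biases and LayerNorm weights from weight
# decay. The decay value 0.01 and the learning rate are assumptions, not values from this file.
def _adamw_grouping_sketch(model):
    no_decay = ["bias", "LayerNorm.weight"]
    grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": 0.01,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    return AdamW(grouped_parameters, lr=2e-5, eps=1e-6, correct_bias=True)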
| 7,667 | 41.837989 | 130 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_mmbt.py | # coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MMBT model. """
import logging
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
class ModalEmbeddings(nn.Module):
"""Generic Modal Embeddings which takes in an encoder, and a transformer embedding.
"""
def __init__(self, config, encoder, embeddings):
super().__init__()
self.config = config
self.encoder = encoder
self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size)
self.position_embeddings = embeddings.position_embeddings
self.token_type_embeddings = embeddings.token_type_embeddings
self.word_embeddings = embeddings.word_embeddings
self.LayerNorm = embeddings.LayerNorm
self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None):
token_embeddings = self.proj_embeddings(self.encoder(input_modal))
seq_length = token_embeddings.size(1)
if start_token is not None:
start_token_embeds = self.word_embeddings(start_token)
seq_length += 1
token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1)
if end_token is not None:
end_token_embeds = self.word_embeddings(end_token)
seq_length += 1
token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device)
position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length)
if token_type_ids is None:
token_type_ids = torch.zeros(
(input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device
)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = token_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
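# Worked example (not part of the original file) of the shapes above, assuming an image
# encoder that returns (batch, 3, modal_hidden_size) region features and a text
# transformer with hidden_size = 768:
#   encoder(input_modal)              -> (batch, 3, modal_hidden_size)
#   proj_embeddings(...)              -> (batch, 3, 768)
#   prepend start_token (e.g. [CLS])  -> (batch, 4, 768)
#   append end_token (e.g. [SEP])     -> (batch, 5, 768)
# so the non-text modality enters the transformer as five ordinary token positions, with
# its own position and token-type embeddings added before LayerNorm and dropout.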
MMBT_START_DOCSTRING = r""" MMBT model was proposed in
`Supervised Multimodal Bitransformers for Classifying Images and Text`_
by Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Davide Testuggine.
    It's a supervised multimodal bitransformer model that fuses information from text and image encoders,
    and obtains state-of-the-art performance on various multimodal classification benchmark tasks.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`Supervised Multimodal Bitransformers for Classifying Images and Text`:
https://github.com/facebookresearch/mmbt
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~transformers.MMBTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
transformer (:class: `~nn.Module`): A text transformer that is used by MMBT.
It should have embeddings, encoder, and pooler attributes.
encoder (:class: `~nn.Module`): Encoder for the second modality.
It should take in a batch of modal inputs and return k, n dimension embeddings.
"""
MMBT_INPUTS_DOCSTRING = r""" Inputs:
**input_modal**: ``torch.FloatTensor`` of shape ``(batch_size, ***)``:
The other modality data. It will be the shape that the encoder for that type expects.
e.g. With an Image Encoder, the shape would be (batch_size, channels, height, width)
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
It does not expect [CLS] token to be added as it's appended to the end of other modality embeddings.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**modal_start_tokens**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Optional start token to be added to the other modality embedding ([CLS] is most commonly used for classification tasks).
        **modal_end_tokens**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Optional end token to be added to the other modality embedding ([SEP] is most commonly used).
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Segment token indices to indicate different portions of the inputs.
**modal_token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, modal_sequence_length)``:
Segment token indices to indicate different portions of the non-text modality.
The embeddings from these tokens will be summed with the respective token embeddings for the non-text modality.
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
**modal_position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, modal_sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings for the non-text modality.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``:
Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
**encoder_hidden_states**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``:
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model
is configured as a decoder.
**encoder_attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
"""
@add_start_docstrings(
"The bare MMBT Model outputting raw hidden-states without any specific head on top.",
MMBT_START_DOCSTRING,
MMBT_INPUTS_DOCSTRING,
)
class MMBTModel(nn.Module):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
# For example purposes. Not runnable.
transformer = BertModel.from_pretrained('bert-base-uncased')
encoder = ImageEncoder(args)
mmbt = MMBTModel(config, transformer, encoder)
"""
def __init__(self, config, transformer, encoder):
super().__init__()
self.config = config
self.transformer = transformer
self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings)
def forward(
self,
input_modal,
input_ids=None,
modal_start_tokens=None,
modal_end_tokens=None,
attention_mask=None,
token_type_ids=None,
modal_token_type_ids=None,
position_ids=None,
modal_position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_txt_shape = input_ids.size()
elif inputs_embeds is not None:
input_txt_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
modal_embeddings = self.modal_encoder(
input_modal,
start_token=modal_start_tokens,
end_token=modal_end_tokens,
position_ids=modal_position_ids,
token_type_ids=modal_token_type_ids,
)
input_modal_shape = modal_embeddings.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device)
txt_embeddings = self.transformer.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1)
input_shape = embedding_output.size()[:-1]
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
else:
attention_mask = torch.cat(
[torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1
)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(input_shape, device=device)
else:
encoder_attention_mask = torch.cat(
[torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1
)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if attention_mask.dim() == 2:
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
encoder_extended_attention_mask = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
encoder_outputs = self.transformer.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.transformer.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
    def get_input_embeddings(self):
        # MMBTModel has no self.embeddings attribute; the shared word embeddings live on
        # the wrapped text transformer (and are reused by the modal encoder)
        return self.transformer.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        # keep the text transformer and the modal encoder pointing at the same matrix
        self.transformer.embeddings.word_embeddings = value
        self.modal_encoder.word_embeddings = value
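# Worked example (not part of the original file) of the causal mask built in forward()
# above when config.is_decoder is True, for seq_length = 3 (before it is combined with
# the padding mask):
#   seq_ids = [0, 1, 2]
#   causal_mask[b, i, j] = (j <= i):
#       [[1, 0, 0],
#        [1, 1, 0],
#        [1, 1, 1]]
# position i may attend only to positions j <= i; multiplying by the padding mask then
# also removes padded key positions.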
@add_start_docstrings(
"""MMBT Model with a sequence classification/regression head on top (a linear layer on top of
the pooled output)""",
MMBT_START_DOCSTRING,
MMBT_INPUTS_DOCSTRING,
)
class MMBTForClassification(nn.Module):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
**logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
# For example purposes. Not runnable.
transformer = BertModel.from_pretrained('bert-base-uncased')
encoder = ImageEncoder(args)
model = MMBTForClassification(config, transformer, encoder)
outputs = model(input_modal, input_ids, labels=labels)
loss, logits = outputs[:2]
"""
def __init__(self, config, transformer, encoder):
super().__init__()
self.num_labels = config.num_labels
self.mmbt = MMBTModel(config, transformer, encoder)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def forward(
self,
input_modal,
input_ids=None,
modal_start_tokens=None,
modal_end_tokens=None,
attention_mask=None,
token_type_ids=None,
modal_token_type_ids=None,
position_ids=None,
modal_position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
outputs = self.mmbt(
input_modal=input_modal,
input_ids=input_ids,
modal_start_tokens=modal_start_tokens,
modal_end_tokens=modal_end_tokens,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
modal_token_type_ids=modal_token_type_ids,
position_ids=position_ids,
modal_position_ids=modal_position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
| 21,362 | 49.864286 | 138 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/__main__.py | # coding: utf8
def main():
import sys
if (len(sys.argv) < 4 or len(sys.argv) > 6) or sys.argv[1] not in ["bert", "gpt", "transfo_xl", "gpt2", "xlnet", "xlm"]:
print(
"This command line utility let you convert original (author released) model checkpoint to pytorch.\n"
"It should be used as one of: \n"
">> transformers bert TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT, \n"
">> transformers gpt OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG], \n"
">> transformers transfo_xl TF_CHECKPOINT_OR_DATASET PYTORCH_DUMP_OUTPUT [TF_CONFIG] or \n"
">> transformers gpt2 TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [GPT2_CONFIG] or \n"
">> transformers xlnet TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT [FINETUNING_TASK_NAME] or \n"
">> transformers xlm XLM_CHECKPOINT_PATH PYTORCH_DUMP_OUTPUT")
else:
if sys.argv[1] == "bert":
try:
from .convert_bert_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
print("transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if len(sys.argv) != 5:
# pylint: disable=line-too-long
print("Should be used as `transformers bert TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
else:
PYTORCH_DUMP_OUTPUT = sys.argv.pop()
TF_CONFIG = sys.argv.pop()
TF_CHECKPOINT = sys.argv.pop()
convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
elif sys.argv[1] == "gpt":
from .convert_openai_original_tf_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch
if len(sys.argv) < 4 or len(sys.argv) > 5:
# pylint: disable=line-too-long
print("Should be used as `transformers gpt OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`")
else:
OPENAI_GPT_CHECKPOINT_FOLDER_PATH = sys.argv[2]
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
OPENAI_GPT_CONFIG = sys.argv[4]
else:
OPENAI_GPT_CONFIG = ""
convert_openai_checkpoint_to_pytorch(OPENAI_GPT_CHECKPOINT_FOLDER_PATH,
OPENAI_GPT_CONFIG,
PYTORCH_DUMP_OUTPUT)
elif sys.argv[1] == "transfo_xl":
try:
from .convert_transfo_xl_original_tf_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch
except ImportError:
print("transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if len(sys.argv) < 4 or len(sys.argv) > 5:
# pylint: disable=line-too-long
print("Should be used as `transformers transfo_xl TF_CHECKPOINT/TF_DATASET_FILE PYTORCH_DUMP_OUTPUT [TF_CONFIG]`")
else:
if 'ckpt' in sys.argv[2].lower():
TF_CHECKPOINT = sys.argv[2]
TF_DATASET_FILE = ""
else:
TF_DATASET_FILE = sys.argv[2]
TF_CHECKPOINT = ""
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
TF_CONFIG = sys.argv[4]
else:
TF_CONFIG = ""
convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, TF_DATASET_FILE)
elif sys.argv[1] == "gpt2":
try:
from .convert_gpt2_original_tf_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch
except ImportError:
print("transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if len(sys.argv) < 4 or len(sys.argv) > 5:
# pylint: disable=line-too-long
print("Should be used as `transformers gpt2 TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [TF_CONFIG]`")
else:
TF_CHECKPOINT = sys.argv[2]
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
TF_CONFIG = sys.argv[4]
else:
TF_CONFIG = ""
convert_gpt2_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
elif sys.argv[1] == "xlnet":
try:
from .convert_xlnet_original_tf_checkpoint_to_pytorch import convert_xlnet_checkpoint_to_pytorch
except ImportError:
print("transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if len(sys.argv) < 5 or len(sys.argv) > 6:
# pylint: disable=line-too-long
print("Should be used as `transformers xlnet TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT [FINETUNING_TASK_NAME]`")
else:
TF_CHECKPOINT = sys.argv[2]
TF_CONFIG = sys.argv[3]
PYTORCH_DUMP_OUTPUT = sys.argv[4]
if len(sys.argv) == 6:
FINETUNING_TASK = sys.argv[5]
else:
FINETUNING_TASK = None
convert_xlnet_checkpoint_to_pytorch(TF_CHECKPOINT,
TF_CONFIG,
PYTORCH_DUMP_OUTPUT,
FINETUNING_TASK)
elif sys.argv[1] == "xlm":
from .convert_xlm_original_pytorch_checkpoint_to_pytorch import convert_xlm_checkpoint_to_pytorch
if len(sys.argv) != 4:
# pylint: disable=line-too-long
print("Should be used as `transformers xlm XLM_CHECKPOINT_PATH PYTORCH_DUMP_OUTPUT`")
else:
XLM_CHECKPOINT_PATH = sys.argv[2]
PYTORCH_DUMP_OUTPUT = sys.argv[3]
convert_xlm_checkpoint_to_pytorch(XLM_CHECKPOINT_PATH, PYTORCH_DUMP_OUTPUT)
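# Illustrative invocations (hypothetical paths; not part of the original module). Since this
# file is the package's __main__, the converters above can also be reached with `python -m`:
#   python -m transformers bert /tmp/bert_model.ckpt /tmp/bert_config.json /tmp/pytorch_model.bin
#   python -m transformers xlm /tmp/xlm_checkpoint.pth /tmp/pytorch_dump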
if __name__ == '__main__':
main()
| 7,085 | 53.507692 | 135 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/configuration_utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import logging
import os
from typing import Dict, Optional, Tuple
from .file_utils import CONFIG_NAME, cached_path, hf_bucket_url, is_remote_url
logger = logging.getLogger(__name__)
class PretrainedConfig(object):
r""" Base class for all configuration classes.
Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations.
Note:
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights.
It only affects the model's configuration.
Class attributes (overridden by derived classes):
- ``pretrained_config_archive_map``: a python ``dict`` with `shortcut names` (string) as keys and `url` (string) of associated pretrained model configurations as values.
- ``model_type``: a string that identifies the model type, that we serialize into the JSON file, and that we use to recreate the correct object in :class:`~transformers.AutoConfig`.
Args:
finetuning_task (:obj:`string` or :obj:`None`, `optional`, defaults to :obj:`None`):
Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint.
num_labels (:obj:`int`, `optional`, defaults to `2`):
Number of classes to use when the model is a classification model (sequences/tokens)
        output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether the model should return attention weights.
        output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether the model should return all hidden states.
torchscript (:obj:`bool`, `optional`, defaults to :obj:`False`):
Is the model used with Torchscript (for PyTorch models).
"""
pretrained_config_archive_map = {} # type: Dict[str, str]
model_type = "" # type: str
def __init__(self, **kwargs):
# Attributes with defaults
self.output_attentions = kwargs.pop("output_attentions", False)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_past = kwargs.pop("output_past", True) # Not used by all models
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
# Is decoder is used in encoder-decoder models to differentiate encoder from decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.num_labels = kwargs.pop("num_labels", 2)
self.id2label = kwargs.pop("id2label", {i: "LABEL_{}".format(i) for i in range(self.num_labels)})
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
self.label2id = kwargs.pop("label2id", dict(zip(self.id2label.values(), self.id2label.keys())))
self.label2id = dict((key, int(value)) for key, value in self.label2id.items())
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error("Can't set {} with value {} for {}".format(key, value, self))
raise err
@property
def num_labels(self):
return self._num_labels
@num_labels.setter
def num_labels(self, num_labels):
self._num_labels = num_labels
self.id2label = {i: "LABEL_{}".format(i) for i in range(self.num_labels)}
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
self.label2id = dict((key, int(value)) for key, value in self.label2id.items())
def save_pretrained(self, save_directory):
"""
Save a configuration object to the directory `save_directory`, so that it
can be re-loaded using the :func:`~transformers.PretrainedConfig.from_pretrained` class method.
Args:
save_directory (:obj:`string`):
Directory where the configuration JSON file will be saved.
"""
assert os.path.isdir(
save_directory
), "Saving path should be a directory where the model and configuration can be saved"
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file)
logger.info("Configuration saved in {}".format(output_config_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
r"""
Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pre-trained model configuration.
Args:
pretrained_model_name_or_path (:obj:`string`):
either:
- a string with the `shortcut name` of a pre-trained model configuration to load from cache or
download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model configuration that was user-uploaded to
our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing a configuration file saved using the
:func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``.
- a path or url to a saved configuration JSON `file`, e.g.:
``./my_model_directory/configuration.json``.
cache_dir (:obj:`string`, `optional`):
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
kwargs (:obj:`Dict[str, any]`, `optional`):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is
controlled by the `return_unused_kwargs` keyword parameter.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies (:obj:`Dict`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g.:
:obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.`
The proxies are used on each request.
return_unused_kwargs: (`optional`) bool:
If False, then this function returns just the final configuration object.
If True, then this functions returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs` is a
                dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e. the part
of kwargs which has not been used to update `config` and is otherwise ignored.
Returns:
:class:`PretrainedConfig`: An instance of a configuration object
Examples::
# We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
            config = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True, foo=False)
            assert config.output_attentions == True
            config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True,
                                                               foo=False, return_unused_kwargs=True)
            assert config.output_attentions == True
            assert unused_kwargs == {'foo': False}
"""
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(
cls, pretrained_model_name_or_path: str, pretrained_config_archive_map: Optional[Dict] = None, **kwargs
) -> Tuple[Dict, Dict]:
"""
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used
for instantiating a Config using `from_dict`.
Parameters:
pretrained_model_name_or_path (:obj:`string`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
pretrained_config_archive_map: (:obj:`Dict[str, str]`, `optional`) Dict:
A map of `shortcut names` to `url`. By default, will use the current class attribute.
Returns:
:obj:`Tuple[Dict, Dict]`: The dictionary that will be used to instantiate the configuration object.
"""
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
if pretrained_config_archive_map is None:
pretrained_config_archive_map = cls.pretrained_config_archive_map
if pretrained_model_name_or_path in pretrained_config_archive_map:
config_file = pretrained_config_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
config_file = hf_bucket_url(pretrained_model_name_or_path, postfix=CONFIG_NAME)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
config_dict = cls._dict_from_json_file(resolved_config_file)
except EnvironmentError:
if pretrained_model_name_or_path in pretrained_config_archive_map:
msg = "Couldn't reach server at '{}' to download pretrained model configuration file.".format(
config_file
)
else:
msg = (
"Can't load '{}'. Make sure that:\n\n"
"- '{}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
"- or '{}' is the correct path to a directory containing a '{}' file\n\n".format(
pretrained_model_name_or_path,
pretrained_model_name_or_path,
pretrained_model_name_or_path,
CONFIG_NAME,
)
)
raise EnvironmentError(msg)
except json.JSONDecodeError:
msg = (
"Couldn't reach server at '{}' to download configuration file or "
"configuration file is not a valid JSON file. "
"Please check network or file content here: {}.".format(config_file, resolved_config_file)
)
raise EnvironmentError(msg)
if resolved_config_file == config_file:
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading configuration file {} from cache at {}".format(config_file, resolved_config_file))
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict, **kwargs) -> "PretrainedConfig":
"""
Constructs a `Config` from a Python dictionary of parameters.
Args:
config_dict (:obj:`Dict[str, any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be retrieved
from a pre-trained checkpoint by leveraging the :func:`~transformers.PretrainedConfig.get_config_dict`
method.
kwargs (:obj:`Dict[str, any]`):
Additional parameters from which to initialize the configuration object.
Returns:
:class:`PretrainedConfig`: An instance of a configuration object
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info("Model config %s", str(config))
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: str) -> "PretrainedConfig":
"""
Constructs a `Config` from the path to a json file of parameters.
Args:
json_file (:obj:`string`):
Path to the JSON file containing the parameters.
Returns:
:class:`PretrainedConfig`: An instance of a configuration object
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: str):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return "{} {}".format(self.__class__.__name__, self.to_json_string())
def to_dict(self):
"""
Serializes this instance to a Python dictionary.
Returns:
:obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
return output
def to_json_string(self):
"""
Serializes this instance to a JSON string.
Returns:
:obj:`string`: String containing all the attributes that make up this configuration instance in JSON format.
"""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
"""
Save this instance to a json file.
Args:
json_file_path (:obj:`string`):
Path to the JSON file in which this configuration instance's parameters will be saved.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string())
def update(self, config_dict: Dict):
"""
Updates attributes of this class
with attributes from `config_dict`.
Args:
:obj:`Dict[str, any]`: Dictionary of attributes that shall be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
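# Illustrative sketch (hypothetical attribute name `my_custom_flag`; not part of the original
# module): `update` and the **kwargs handling in `from_dict` both overwrite attributes in
# place, so unknown keys simply become new attributes on the configuration object. The base
# class can be built programmatically even though `from_pretrained` expects a derived class.
def _demo_config_update():
    config = PretrainedConfig(num_labels=3)
    config.update({"output_attentions": True, "my_custom_flag": "demo"})
    assert config.num_labels == 3 and config.output_attentions is True
    assert config.my_custom_flag == "demo"
    return config.to_dict()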
| 18,955 | 46.989873 | 193 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/optimization_tf.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions and classes related to optimization (weight updates)."""
import re
import tensorflow as tf
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
"""Applys a warmup schedule on a given learning rate decay schedule."""
def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.warmup_steps = warmup_steps
self.power = power
self.decay_schedule_fn = decay_schedule_fn
self.name = name
def __call__(self, step):
with tf.name_scope(self.name or "WarmUp") as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
global_step_float = tf.cast(step, tf.float32)
warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
warmup_percent_done = global_step_float / warmup_steps_float
warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
return tf.cond(
global_step_float < warmup_steps_float,
lambda: warmup_learning_rate,
lambda: self.decay_schedule_fn(step),
name=name,
)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
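# Illustrative sketch (hypothetical hyper-parameters; not part of the original module): wrap a
# linear decay in the WarmUp schedule above and probe it at a few steps.
def _demo_warmup_schedule():
    decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=5e-5, decay_steps=10_000, end_learning_rate=0.0
    )
    schedule = WarmUp(initial_learning_rate=5e-5, decay_schedule_fn=decay_fn, warmup_steps=1_000)
    lr_during_warmup = schedule(500)   # 500/1000 of the way through warmup -> ~2.5e-5
    lr_after_warmup = schedule(5_500)  # past warmup -> on the linear decay curve (~2.25e-5)
    return lr_during_warmup, lr_after_warmup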
def create_optimizer(init_lr, num_train_steps, num_warmup_steps):
"""Creates an optimizer with learning rate schedule."""
# Implements linear decay of the learning rate.
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=init_lr, decay_steps=num_train_steps, end_learning_rate=0.0
)
if num_warmup_steps:
learning_rate_fn = WarmUp(
initial_learning_rate=init_lr, decay_schedule_fn=learning_rate_fn, warmup_steps=num_warmup_steps
)
optimizer = AdamWeightDecay(
learning_rate=learning_rate_fn,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["layer_norm", "bias"],
)
return optimizer
class AdamWeightDecay(tf.keras.optimizers.Adam):
"""Adam enables L2 weight decay and clip_by_global_norm on gradients.
Just adding the square of the weights to the loss function is *not* the
correct way of using L2 regularization/weight decay with Adam, since that will
interact with the m and v parameters in strange ways.
    Instead we want to decay the weights in a manner that doesn't interact with
the m/v parameters. This is equivalent to adding the square of the weights to
the loss with plain (non-momentum) SGD.
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
weight_decay_rate=0.0,
include_in_weight_decay=None,
exclude_from_weight_decay=None,
name="AdamWeightDecay",
**kwargs
):
super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
self.weight_decay_rate = weight_decay_rate
self._include_in_weight_decay = include_in_weight_decay
self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
def from_config(cls, config):
"""Creates an optimizer from its config with WarmUp custom object."""
custom_objects = {"WarmUp": WarmUp}
return super().from_config(config, custom_objects=custom_objects)
def _prepare_local(self, var_device, var_dtype, apply_state):
super()._prepare_local(var_device, var_dtype, apply_state)
apply_state["weight_decay_rate"] = tf.constant(self.weight_decay_rate, name="adam_weight_decay_rate")
def _decay_weights_op(self, var, learning_rate, apply_state):
do_decay = self._do_use_weight_decay(var.name)
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state["weight_decay_rate"], use_locking=self._use_locking
)
return tf.no_op()
def apply_gradients(self, grads_and_vars, clip_norm, name=None):
grads, tvars = list(zip(*grads_and_vars))
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=clip_norm)
return super().apply_gradients(zip(grads, tvars))
def _get_lr(self, var_device, var_dtype, apply_state):
"""Retrieves the learning rate with the given state."""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
apply_state = apply_state or {}
coefficients = apply_state.get((var_device, var_dtype))
if coefficients is None:
coefficients = self._fallback_apply_state(var_device, var_dtype)
apply_state[(var_device, var_dtype)] = coefficients
return coefficients["lr_t"], dict(apply_state=apply_state)
def _resource_apply_dense(self, grad, var, apply_state=None):
lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
decay = self._decay_weights_op(var, lr_t, apply_state)
with tf.control_dependencies([decay]):
return super()._resource_apply_dense(grad, var, **kwargs)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
decay = self._decay_weights_op(var, lr_t, apply_state)
with tf.control_dependencies([decay]):
return super()._resource_apply_sparse(grad, var, indices, **kwargs)
def get_config(self):
config = super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate})
return config
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(r, param_name) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
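# Illustrative sketch (hypothetical variable names; not part of the original module): the
# include/exclude patterns above are plain regular expressions matched against variable names.
def _demo_weight_decay_filter():
    optimizer = AdamWeightDecay(
        learning_rate=1e-3, weight_decay_rate=0.01, exclude_from_weight_decay=["layer_norm", "bias"]
    )
    assert optimizer._do_use_weight_decay("encoder/dense/kernel:0")
    assert not optimizer._do_use_weight_decay("encoder/layer_norm/gamma:0")
    assert not optimizer._do_use_weight_decay("pooler/dense/bias:0")
    return optimizer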
# Inspired from https://github.com/OpenNMT/OpenNMT-tf/blob/master/opennmt/optimizers/utils.py
class GradientAccumulator(object):
"""Distribution strategies-aware gradient accumulation utility."""
def __init__(self):
"""Initializes the accumulator."""
self._gradients = []
self._accum_steps = tf.Variable(
initial_value=0, dtype=tf.int64, trainable=False, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA
)
@property
def step(self):
"""Number of accumulated steps."""
return self._accum_steps.value()
@property
def gradients(self):
"""The accumulated gradients."""
return list(
gradient.value() if gradient is not None else gradient for gradient in self._get_replica_gradients()
)
def __call__(self, gradients):
"""Accumulates :obj:`gradients`."""
if not self._gradients:
self._gradients.extend(
[
tf.Variable(tf.zeros_like(gradient), trainable=False) if gradient is not None else gradient
for gradient in gradients
]
)
if len(gradients) != len(self._gradients):
raise ValueError("Expected %s gradients, but got %d" % (len(self._gradients), len(gradients)))
for accum_gradient, gradient in zip(self._get_replica_gradients(), gradients):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(gradient)
self._accum_steps.assign_add(1)
def reset(self):
"""Resets the accumulated gradients."""
if self._gradients:
self._accum_steps.assign(0)
for gradient in self._get_replica_gradients():
if gradient is not None:
gradient.assign(tf.zeros_like(gradient))
def _get_replica_gradients(self):
if tf.distribute.has_strategy():
# In a replica context, we want to accumulate gradients on each replica
# without synchronization, so we directly assign the value of the
# current replica.
replica_context = tf.distribute.get_replica_context()
if replica_context is None or tf.distribute.get_strategy().num_replicas_in_sync == 1:
return self._gradients
return (
gradient.device_map.select_for_current_replica(gradient.values, replica_context)
for gradient in self._gradients
if gradient is not None
)
else:
return self._gradients
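# Illustrative sketch (eager mode, hypothetical gradient values; not part of the original
# module): accumulate two "micro-batch" gradients, read the summed value back, then reset.
def _demo_gradient_accumulator():
    accumulator = GradientAccumulator()
    accumulator([tf.constant([1.0, 2.0])])
    accumulator([tf.constant([3.0, 4.0])])
    summed = accumulator.gradients[0]  # -> [4.0, 6.0]
    steps = int(accumulator.step)      # -> 2
    accumulator.reset()
    return summed, steps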
| 9,893 | 38.895161 | 115 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_pytorch_utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch - TF 2.0 general utilities."""
import logging
import os
import re
import numpy
logger = logging.getLogger(__name__)
def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove=""):
""" Convert a TF 2.0 model variable name in a pytorch model weight name.
Conventions for TF2.0 scopes -> PyTorch attribute names conversions:
- '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
- '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
return tuple with:
- pytorch model weight name
            - transpose: boolean indicating whether TF 2.0 and PyTorch weight matrices are transposed with regard to each other
"""
tf_name = tf_name.replace(":0", "") # device ids
tf_name = re.sub(
r"/[^/]*___([^/]*)/", r"/\1/", tf_name
) # '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
tf_name = tf_name.replace(
"_._", "/"
) # '_._' is replaced by a level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
tf_name = re.sub(r"//+", "/", tf_name) # Remove empty levels at the end
tf_name = tf_name.split("/") # Convert from TF2.0 '/' separators to PyTorch '.' separators
tf_name = tf_name[1:] # Remove level zero
# When should we transpose the weights
transpose = bool(tf_name[-1] == "kernel" or "emb_projs" in tf_name or "out_projs" in tf_name)
# Convert standard TF2.0 names in PyTorch names
if tf_name[-1] == "kernel" or tf_name[-1] == "embeddings" or tf_name[-1] == "gamma":
tf_name[-1] = "weight"
if tf_name[-1] == "beta":
tf_name[-1] = "bias"
# Remove prefix if needed
tf_name = ".".join(tf_name)
if start_prefix_to_remove:
tf_name = tf_name.replace(start_prefix_to_remove, "", 1)
return tf_name, transpose
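# Illustrative sketch (hypothetical variable name; not part of the original module): the rules
# above turn a TF 2.0 scope name into the matching PyTorch parameter name.
def _demo_weight_name_conversion():
    name, transpose = convert_tf_weight_name_to_pt_weight_name(
        "tf_bert_model/bert/encoder/layer_._0/attention/self/query/kernel:0",
        start_prefix_to_remove="bert.",
    )
    # '_._' becomes a module-list level and a trailing 'kernel' maps to a transposed 'weight'
    assert name == "encoder.layer.0.attention.self.query.weight"
    assert transpose is True
    return name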
#####################
# PyTorch => TF 2.0 #
#####################
def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
pt_path = os.path.abspath(pytorch_checkpoint_path)
logger.info("Loading PyTorch weights from {}".format(pt_path))
pt_state_dict = torch.load(pt_path, map_location="cpu")
logger.info("PyTorch checkpoint contains {:,} parameters".format(sum(t.numel() for t in pt_state_dict.values())))
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
pt_state_dict = pt_model.state_dict()
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch state_dict in a TF 2.0 model.
"""
try:
import torch # noqa: F401
import tensorflow as tf # noqa: F401
from tensorflow.python.keras import backend as K
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
# Adapt state dict - TODO remove this and update the AWS weights files instead
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in pt_state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
pt_state_dict[new_key] = pt_state_dict.pop(old_key)
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()):
start_prefix_to_remove = tf_model.base_model_prefix + "."
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights
tf_loaded_numel = 0
weight_value_tuples = []
all_pytorch_weights = set(list(pt_state_dict.keys()))
for symbolic_weight in symbolic_weights:
sw_name = symbolic_weight.name
name, transpose = convert_tf_weight_name_to_pt_weight_name(
sw_name, start_prefix_to_remove=start_prefix_to_remove
)
# Find associated numpy array in pytorch model state dict
if name not in pt_state_dict:
if allow_missing_keys:
continue
raise AttributeError("{} not found in PyTorch model".format(name))
array = pt_state_dict[name].numpy()
if transpose:
array = numpy.transpose(array)
if len(symbolic_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(symbolic_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(symbolic_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (symbolic_weight.shape, array.shape)
raise e
tf_loaded_numel += array.size
# logger.warning("Initialize TF weight {}".format(symbolic_weight.name))
weight_value_tuples.append((symbolic_weight, array))
all_pytorch_weights.discard(name)
K.batch_set_value(weight_value_tuples)
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure restore ops are run
logger.info("Loaded {:,} parameters in the TF 2.0 model.".format(tf_loaded_numel))
logger.info("Weights or buffers not loaded from PyTorch model: {}".format(all_pytorch_weights))
return tf_model
#####################
# TF 2.0 => PyTorch #
#####################
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load TF 2.0 HDF5 checkpoint in a PyTorch model
We use HDF5 to easily do transfer learning
(see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
import transformers
logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path))
# Instantiate and load the associated TF 2.0 model
tf_model_class_name = "TF" + pt_model.__class__.__name__ # Add "TF" at the beggining
tf_model_class = getattr(transformers, tf_model_class_name)
tf_model = tf_model_class(pt_model.config)
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
tf_model.load_weights(tf_checkpoint_path, by_name=True)
return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys)
def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False):
""" Load TF 2.0 model in a pytorch model
"""
weights = tf_model.weights
return load_tf2_weights_in_pytorch_model(pt_model, weights, allow_missing_keys=allow_missing_keys)
def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False):
""" Load TF2.0 symbolic weights in a PyTorch model
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
new_pt_params_dict = {}
current_pt_params_dict = dict(pt_model.named_parameters())
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()):
start_prefix_to_remove = pt_model.base_model_prefix + "."
# Build a map from potential PyTorch weight names to TF 2.0 Variables
tf_weights_map = {}
for tf_weight in tf_weights:
pt_name, transpose = convert_tf_weight_name_to_pt_weight_name(
tf_weight.name, start_prefix_to_remove=start_prefix_to_remove
)
tf_weights_map[pt_name] = (tf_weight.numpy(), transpose)
all_tf_weights = set(list(tf_weights_map.keys()))
loaded_pt_weights_data_ptr = {}
missing_keys_pt = []
for pt_weight_name, pt_weight in current_pt_params_dict.items():
        # Handle PyTorch shared weights (not duplicated in TF 2.0)
if pt_weight.data_ptr() in loaded_pt_weights_data_ptr:
new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()]
continue
        # Find the associated numpy array in the TF 2.0 weights map
if pt_weight_name not in tf_weights_map:
if allow_missing_keys:
missing_keys_pt.append(pt_weight_name)
continue
raise AttributeError("{} not found in TF 2.0 model".format(pt_weight_name))
array, transpose = tf_weights_map[pt_weight_name]
if transpose:
array = numpy.transpose(array)
if len(pt_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(pt_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(pt_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (pt_weight.shape, array.shape)
raise e
# logger.warning("Initialize PyTorch weight {}".format(pt_weight_name))
new_pt_params_dict[pt_weight_name] = torch.from_numpy(array)
loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = torch.from_numpy(array)
all_tf_weights.discard(pt_weight_name)
missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False)
missing_keys += missing_keys_pt
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from TF 2.0 model: {}".format(pt_model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from TF 2.0 model not used in {}: {}".format(pt_model.__class__.__name__, unexpected_keys)
)
logger.info("Weights or buffers not loaded from TF 2.0 model: {}".format(all_tf_weights))
return pt_model
| 12,952 | 38.01506 | 155 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_distilbert.py | # coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch DistilBERT model
adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
and in part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)
"""
import copy
import logging
import math
import numpy as np
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from .activations import gelu
from .configuration_distilbert import DistilBertConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import PreTrainedModel, prune_linear_layer
logger = logging.getLogger(__name__)
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-pytorch_model.bin",
"distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-pytorch_model.bin",
"distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-cased-pytorch_model.bin",
"distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-cased-distilled-squad-pytorch_model.bin",
"distilbert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-pytorch_model.bin",
"distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-multilingual-cased-pytorch_model.bin",
"distilbert-base-uncased-finetuned-sst-2-english": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-finetuned-sst-2-english-pytorch_model.bin",
}
# UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE #
def create_sinusoidal_embeddings(n_pos, dim, out):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
out.requires_grad = False
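# Illustrative sketch (hypothetical sizes; not part of the original module): fill a small
# position table in place; the table ends up detached and frozen (requires_grad is False).
def _demo_sinusoidal_embeddings():
    table = torch.zeros(8, 4)
    create_sinusoidal_embeddings(n_pos=8, dim=4, out=table)
    assert table.requires_grad is False
    return table  # row `pos` holds sin/cos of pos / 10000^(2i/dim) in alternating columns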
class Embeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)
if config.sinusoidal_pos_embds:
create_sinusoidal_embeddings(
n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight
)
self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
self.dropout = nn.Dropout(config.dropout)
def forward(self, input_ids):
"""
Parameters
----------
input_ids: torch.tensor(bs, max_seq_length)
The token ids to embed.
Outputs
-------
embeddings: torch.tensor(bs, max_seq_length, dim)
The embedded tokens (plus position embeddings, no token_type embeddings)
"""
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length)
word_embeddings = self.word_embeddings(input_ids) # (bs, max_seq_length, dim)
position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)
embeddings = word_embeddings + position_embeddings # (bs, max_seq_length, dim)
embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)
embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)
return embeddings
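# Illustrative sketch (hypothetical, deliberately small configuration values; not part of the
# original module): embed a short batch and check the (bs, max_seq_length, dim) output shape.
def _demo_embeddings_module():
    config = DistilBertConfig(vocab_size=100, dim=16, max_position_embeddings=32, dropout=0.1)
    embeddings = Embeddings(config)
    output = embeddings(torch.tensor([[5, 6, 7]]))
    assert output.shape == (1, 3, 16)
    return output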
class MultiHeadSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.n_heads = config.n_heads
self.dim = config.dim
self.dropout = nn.Dropout(p=config.attention_dropout)
self.output_attentions = config.output_attentions
assert self.dim % self.n_heads == 0
self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.pruned_heads = set()
def prune_heads(self, heads):
attention_head_size = self.dim // self.n_heads
if len(heads) == 0:
return
mask = torch.ones(self.n_heads, attention_head_size)
heads = set(heads) - self.pruned_heads
for head in heads:
head -= sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.q_lin = prune_linear_layer(self.q_lin, index)
self.k_lin = prune_linear_layer(self.k_lin, index)
self.v_lin = prune_linear_layer(self.v_lin, index)
self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.dim = attention_head_size * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, query, key, value, mask, head_mask=None):
"""
Parameters
----------
query: torch.tensor(bs, seq_length, dim)
key: torch.tensor(bs, seq_length, dim)
value: torch.tensor(bs, seq_length, dim)
mask: torch.tensor(bs, seq_length)
Outputs
-------
weights: torch.tensor(bs, n_heads, seq_length, seq_length)
            Attention weights. Optional: only returned if `output_attentions=True`
        context: torch.tensor(bs, seq_length, dim)
            Contextualized layer
"""
bs, q_length, dim = query.size()
k_length = key.size(1)
# assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
# assert key.size() == value.size()
dim_per_head = self.dim // self.n_heads
mask_reshp = (bs, 1, 1, k_length)
def shape(x):
""" separate heads """
return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
def unshape(x):
""" group heads """
return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)
scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length)
mask = (mask == 0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length)
scores.masked_fill_(mask, -float("inf")) # (bs, n_heads, q_length, k_length)
weights = nn.Softmax(dim=-1)(scores) # (bs, n_heads, q_length, k_length)
weights = self.dropout(weights) # (bs, n_heads, q_length, k_length)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head)
context = unshape(context) # (bs, q_length, dim)
context = self.out_lin(context) # (bs, q_length, dim)
if self.output_attentions:
return (context, weights)
else:
return (context,)
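# Illustrative sketch (hypothetical, deliberately small configuration values; not part of the
# original module): run self-attention over a random batch; with the default
# output_attentions=False only the contextualized tensor is returned.
def _demo_self_attention():
    config = DistilBertConfig(dim=16, n_heads=4)
    attention = MultiHeadSelfAttention(config)
    x = torch.rand(2, 5, 16)
    mask = torch.ones(2, 5)  # 1 = attend to the position, 0 = mask it out
    (context,) = attention(query=x, key=x, value=x, mask=mask)
    assert context.shape == (2, 5, 16)
    return context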
class FFN(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = nn.Dropout(p=config.dropout)
self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
assert config.activation in ["relu", "gelu"], "activation ({}) must be in ['relu', 'gelu']".format(
config.activation
)
self.activation = gelu if config.activation == "gelu" else nn.ReLU()
def forward(self, input):
x = self.lin1(input)
x = self.activation(x)
x = self.lin2(x)
x = self.dropout(x)
return x
class TransformerBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.output_attentions = config.output_attentions
assert config.dim % config.n_heads == 0
self.attention = MultiHeadSelfAttention(config)
self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
self.ffn = FFN(config)
self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
def forward(self, x, attn_mask=None, head_mask=None):
"""
Parameters
----------
x: torch.tensor(bs, seq_length, dim)
attn_mask: torch.tensor(bs, seq_length)
Outputs
-------
sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length)
            The attention weights. Optional: only returned if `output_attentions=True`
ffn_output: torch.tensor(bs, seq_length, dim)
The output of the transformer block contextualization.
"""
# Self-Attention
sa_output = self.attention(query=x, key=x, value=x, mask=attn_mask, head_mask=head_mask)
if self.output_attentions:
sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
        else:  # To handle these `output_attentions` or `output_hidden_states` cases returning tuples
assert type(sa_output) == tuple
sa_output = sa_output[0]
sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
# Feed Forward Network
ffn_output = self.ffn(sa_output) # (bs, seq_length, dim)
ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
output = (ffn_output,)
if self.output_attentions:
output = (sa_weights,) + output
return output
class Transformer(nn.Module):
def __init__(self, config):
super().__init__()
self.n_layers = config.n_layers
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
layer = TransformerBlock(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.n_layers)])
def forward(self, x, attn_mask=None, head_mask=None):
"""
Parameters
----------
x: torch.tensor(bs, seq_length, dim)
Input sequence embedded.
attn_mask: torch.tensor(bs, seq_length)
Attention mask on the sequence.
Outputs
-------
hidden_state: torch.tensor(bs, seq_length, dim)
            Sequence of hidden states in the last (top) layer
all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]
Tuple of length n_layers with the hidden states from each layer.
Optional: only if output_hidden_states=True
all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
Tuple of length n_layers with the attention weights from each layer
Optional: only if output_attentions=True
"""
all_hidden_states = ()
all_attentions = ()
hidden_state = x
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
layer_outputs = layer_module(x=hidden_state, attn_mask=attn_mask, head_mask=head_mask[i])
hidden_state = layer_outputs[-1]
if self.output_attentions:
assert len(layer_outputs) == 2
attentions = layer_outputs[0]
all_attentions = all_attentions + (attentions,)
else:
assert len(layer_outputs) == 1
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
outputs = (hidden_state,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
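# Illustrative sketch (hypothetical, deliberately small configuration values; not part of the
# original module): stack two blocks and pass an explicit per-layer head mask of None values,
# mirroring what DistilBertModel.forward builds when no head_mask is supplied.
def _demo_transformer_stack():
    config = DistilBertConfig(dim=16, hidden_dim=32, n_heads=4, n_layers=2)
    encoder = Transformer(config)
    x = torch.rand(1, 6, 16)
    attn_mask = torch.ones(1, 6)
    (hidden_state,) = encoder(x=x, attn_mask=attn_mask, head_mask=[None] * config.n_layers)
    assert hidden_state.shape == (1, 6, 16)
    return hidden_state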
# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
class DistilBertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = DistilBertConfig
pretrained_model_archive_map = DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = None
base_model_prefix = "distilbert"
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, nn.Embedding):
if module.weight.requires_grad:
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
DISTILBERT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general
usage and behavior.
Parameters:
config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
DISTILBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.DistilBertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
DISTILBERT_START_DOCSTRING,
)
class DistilBertModel(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = Embeddings(config) # Embeddings
self.transformer = Transformer(config) # Encoder
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.transformer.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING)
def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.DistilBertConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import DistilBertTokenizer, DistilBertModel
import torch
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
model = DistilBertModel.from_pretrained('distilbert-base-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids) # (bs, seq_length, dim)
tfmr_output = self.transformer(x=inputs_embeds, attn_mask=attention_mask, head_mask=head_mask)
hidden_state = tfmr_output[0]
output = (hidden_state,) + tfmr_output[1:]
return output # last-layer hidden-state, (all hidden_states), (all attentions)
@add_start_docstrings(
"""DistilBert Model with a `masked language modeling` head on top. """, DISTILBERT_START_DOCSTRING,
)
class DistilBertForMaskedLM(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.distilbert = DistilBertModel(config)
self.vocab_transform = nn.Linear(config.dim, config.dim)
self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
self.vocab_projector = nn.Linear(config.dim, config.vocab_size)
self.init_weights()
self.mlm_loss_fct = nn.CrossEntropyLoss()
def get_output_embeddings(self):
return self.vocab_projector
@add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING)
def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None):
r"""
masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.DistilBertConfig`) and inputs:
loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import DistilBertTokenizer, DistilBertForMaskedLM
import torch
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
model = DistilBertForMaskedLM.from_pretrained('distilbert-base-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, masked_lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
"""
dlbrt_output = self.distilbert(
input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds
)
hidden_states = dlbrt_output[0] # (bs, seq_length, dim)
prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
prediction_logits = gelu(prediction_logits) # (bs, seq_length, dim)
prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
prediction_logits = self.vocab_projector(prediction_logits) # (bs, seq_length, vocab_size)
outputs = (prediction_logits,) + dlbrt_output[1:]
if masked_lm_labels is not None:
mlm_loss = self.mlm_loss_fct(
prediction_logits.view(-1, prediction_logits.size(-1)), masked_lm_labels.view(-1)
)
outputs = (mlm_loss,) + outputs
return outputs # (mlm_loss), prediction_logits, (all hidden_states), (all attentions)
@add_start_docstrings(
"""DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
DISTILBERT_START_DOCSTRING,
)
class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.distilbert = DistilBertModel(config)
self.pre_classifier = nn.Linear(config.dim, config.dim)
self.classifier = nn.Linear(config.dim, config.num_labels)
self.dropout = nn.Dropout(config.seq_classif_dropout)
self.init_weights()
@add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING)
def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, labels=None):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.DistilBertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
import torch
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
distilbert_output = self.distilbert(
input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds
)
hidden_state = distilbert_output[0] # (bs, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs, dim)
pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
pooled_output = nn.ReLU()(pooled_output) # (bs, dim)
pooled_output = self.dropout(pooled_output) # (bs, dim)
logits = self.classifier(pooled_output) # (bs, num_labels)
outputs = (logits,) + distilbert_output[1:]
if labels is not None:
if self.num_labels == 1:
loss_fct = nn.MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
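# A minimal sketch of the regression branch above: when ``config.num_labels == 1`` the head
# switches from cross-entropy classification to mean-squared-error regression. The helper name,
# token ids and target value are hypothetical and illustrative only.
def _sketch_regression_head():
    import torch
    config = DistilBertConfig(num_labels=1)  # single regression target
    model = DistilBertForSequenceClassification(config)  # randomly initialised, for illustration
    input_ids = torch.tensor([[101, 7592, 102]])  # hypothetical token ids
    labels = torch.tensor([0.5])  # float target -> MSELoss branch
    loss, logits = model(input_ids, labels=labels)[:2]
    return loss, logits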
@add_start_docstrings(
"""DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
DISTILBERT_START_DOCSTRING,
)
class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.distilbert = DistilBertModel(config)
self.qa_outputs = nn.Linear(config.dim, config.num_labels)
assert config.num_labels == 2
self.dropout = nn.Dropout(config.qa_dropout)
self.init_weights()
@add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Positions outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Positions outside of the sequence are not taken into account for computing the loss.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.DistilBertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-start scores (before SoftMax).
end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import DistilBertTokenizer, DistilBertForQuestionAnswering
import torch
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
model = DistilBertForQuestionAnswering.from_pretrained('distilbert-base-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
start_positions = torch.tensor([1])
end_positions = torch.tensor([3])
outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
loss, start_scores, end_scores = outputs[:3]
"""
distilbert_output = self.distilbert(
input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds
)
hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
hidden_states = self.dropout(hidden_states) # (bs, max_query_len, dim)
logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1) # (bs, max_query_len)
end_logits = end_logits.squeeze(-1) # (bs, max_query_len)
outputs = (start_logits, end_logits,) + distilbert_output[1:]
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, splitting adds a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
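# A minimal sketch of how out-of-range answer positions are neutralised above: positions are
# clamped to ``ignored_index`` (the sequence length) and CrossEntropyLoss then excludes them from
# the mean, so an unanswerable example cannot pollute the gradient. The helper name and values
# are hypothetical and illustrative only.
def _sketch_position_clamping():
    import torch
    start_logits = torch.randn(2, 8)  # (bs=2, seq_len=8)
    start_positions = torch.tensor([3, 42])  # second target lies outside the 8-token input
    ignored_index = start_logits.size(1)  # 8
    start_positions.clamp_(0, ignored_index)  # -> tensor([3, 8])
    loss_fct = torch.nn.CrossEntropyLoss(ignore_index=ignored_index)
    return loss_fct(start_logits, start_positions)  # averaged over the first example only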
@add_start_docstrings(
"""DistilBert Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
DISTILBERT_START_DOCSTRING,
)
class DistilBertForTokenClassification(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.distilbert = DistilBertModel(config)
self.dropout = nn.Dropout(config.dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING)
def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, labels=None):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the token classification loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.DistilBertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :
Classification loss.
scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import DistilBertTokenizer, DistilBertForTokenClassification
import torch
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
model = DistilBertForTokenClassification.from_pretrained('distilbert-base-cased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, scores = outputs[:2]
"""
outputs = self.distilbert(
input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
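# A minimal sketch of the ``active_loss`` masking above: labels at padded positions are replaced
# with ``ignore_index`` so padding never contributes to the token-classification loss. The helper
# name and values are hypothetical and illustrative only.
def _sketch_active_loss_masking():
    import torch
    loss_fct = torch.nn.CrossEntropyLoss()
    num_labels = 3
    logits = torch.randn(1, 4, num_labels)  # (bs=1, seq_len=4, num_labels)
    labels = torch.tensor([[0, 2, 1, 1]])
    attention_mask = torch.tensor([[1, 1, 1, 0]])  # last token is padding
    active_loss = attention_mask.view(-1) == 1
    active_labels = torch.where(
        active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
    )  # -> tensor([0, 2, 1, -100])
    return loss_fct(logits.view(-1, num_labels), active_labels)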
| 39,945 | 47.069795 | 175 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_xlm_roberta.py | # coding=utf-8
# Copyright 2019 Facebook AI Research and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 XLM-RoBERTa model. """
import logging
from .configuration_xlm_roberta import XLMRobertaConfig
from .file_utils import add_start_docstrings
from .modeling_tf_roberta import (
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaModel,
)
logger = logging.getLogger(__name__)
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = {}
XLM_ROBERTA_START_DOCSTRING = r"""
.. note::
TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
This second option is useful when using the :obj:`tf.keras.Model.fit()` method, which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument:
- a single Tensor with input_ids only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.XLMRobertaConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
@add_start_docstrings(
"The bare XLM-RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
XLM_ROBERTA_START_DOCSTRING,
)
class TFXLMRobertaModel(TFRobertaModel):
"""
This class overrides :class:`~transformers.TFRobertaModel`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = XLMRobertaConfig
pretrained_model_archive_map = TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
"""XLM-RoBERTa Model with a `language modeling` head on top. """, XLM_ROBERTA_START_DOCSTRING,
)
class TFXLMRobertaForMaskedLM(TFRobertaForMaskedLM):
"""
This class overrides :class:`~transformers.TFRobertaForMaskedLM`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = XLMRobertaConfig
pretrained_model_archive_map = TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
"""XLM-RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer
on top of the pooled output) e.g. for GLUE tasks. """,
XLM_ROBERTA_START_DOCSTRING,
)
class TFXLMRobertaForSequenceClassification(TFRobertaForSequenceClassification):
"""
This class overrides :class:`~transformers.TFRobertaForSequenceClassification`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = XLMRobertaConfig
pretrained_model_archive_map = TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
"""XLM-RoBERTa Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
XLM_ROBERTA_START_DOCSTRING,
)
class TFXLMRobertaForTokenClassification(TFRobertaForTokenClassification):
"""
This class overrides :class:`~transformers.TFRobertaForTokenClassification`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = XLMRobertaConfig
pretrained_model_archive_map = TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
| 4,827 | 39.571429 | 127 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_gpt2.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 OpenAI GPT-2 model. """
import logging
import numpy as np
import tensorflow as tf
from .configuration_gpt2 import GPT2Config
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import (
TFConv1D,
TFPreTrainedModel,
TFSequenceSummary,
TFSharedEmbeddings,
get_initializer,
keras_serializable,
shape_list,
)
logger = logging.getLogger(__name__)
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {
"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-tf_model.h5",
"gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-tf_model.h5",
"gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-tf_model.h5",
"gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-tf_model.h5",
"distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-tf_model.h5",
}
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
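# A minimal sketch checking the tanh formulation above against the exact GELU, x * Phi(x); the
# two curves agree to roughly 1e-3. The helper name and sample points are illustrative only.
def _sketch_gelu_approximation_check():
    x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
    approx = gelu(x)
    exact = x * 0.5 * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))
    tf.debugging.assert_near(approx, exact, atol=1e-2)
    return approx, exact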
class TFAttention(tf.keras.layers.Layer):
def __init__(self, nx, n_ctx, config, scale=False, **kwargs):
super().__init__(**kwargs)
self.output_attentions = config.output_attentions
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.n_ctx = n_ctx
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn")
self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj")
self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)
self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
pass
@staticmethod
def causal_attention_mask(nd, ns, dtype):
"""1's in the lower triangle, counting from the lower right corner.
Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range(nd)[:, None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
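# Worked example: for nd=3 and ns=5 the mask above evaluates to
# [[1., 1., 1., 0., 0.],
#  [1., 1., 1., 1., 0.],
#  [1., 1., 1., 1., 1.]]
# i.e. each query token may attend to every cached position (the first ns - nd columns) plus
# itself and the earlier tokens of the current segment, which is the "lower triangle counting
# from the lower right corner" described in the docstring.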
def _attn(self, inputs, training=False):
q, k, v, attention_mask, head_mask = inputs
# q, k, v have shape [batch, heads, sequence, features]
w = tf.matmul(q, k, transpose_b=True)
if self.scale:
dk = tf.cast(shape_list(k)[-1], tf.float32) # scale attention_scores
w = w / tf.math.sqrt(dk)
# w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w)
b = self.causal_attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
w = w * b - 1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = tf.nn.softmax(w, axis=-1)
w = self.attn_dropout(w, training=training)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [tf.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = tf.transpose(x, [0, 2, 1, 3])
x_shape = shape_list(x)
new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
return tf.reshape(x, new_x_shape)
def split_heads(self, x):
x_shape = shape_list(x)
new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
x = tf.reshape(x, new_x_shape)
return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
def call(self, inputs, training=False):
x, layer_past, attention_mask, head_mask = inputs
x = self.c_attn(x)
query, key, value = tf.split(x, 3, axis=2)
query = self.split_heads(query)
key = self.split_heads(key)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = tf.unstack(layer_past, axis=0)
key = tf.concat([past_key, key], axis=-2)
value = tf.concat([past_value, value], axis=-2)
present = tf.stack([key, value], axis=0)
attn_outputs = self._attn([query, key, value, attention_mask, head_mask], training=training)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a, training=training)
outputs = [a, present] + attn_outputs[1:]
return outputs # a, present, (attentions)
class TFMLP(tf.keras.layers.Layer):
def __init__(self, n_state, config, **kwargs):
super().__init__(**kwargs)
nx = config.n_embd
self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc")
self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj")
self.act = gelu
self.dropout = tf.keras.layers.Dropout(config.resid_pdrop)
def call(self, x, training=False):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
h2 = self.dropout(h2, training=training)
return h2
class TFBlock(tf.keras.layers.Layer):
def __init__(self, n_ctx, config, scale=False, **kwargs):
super().__init__(**kwargs)
nx = config.n_embd
self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
self.attn = TFAttention(nx, n_ctx, config, scale, name="attn")
self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2")
self.mlp = TFMLP(4 * nx, config, name="mlp")
def call(self, inputs, training=False):
x, layer_past, attention_mask, head_mask = inputs
a = self.ln_1(x)
output_attn = self.attn([a, layer_past, attention_mask, head_mask], training=training)
a = output_attn[0] # output_attn: a, present, (attentions)
x = x + a
m = self.ln_2(x)
m = self.mlp(m, training=training)
x = x + m
outputs = [x] + output_attn[1:]
return outputs # x, present, (attentions)
@keras_serializable
class TFGPT2MainLayer(tf.keras.layers.Layer):
config_class = GPT2Config
def __init__(self, config, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.num_hidden_layers = config.n_layer
self.vocab_size = config.vocab_size
self.n_embd = config.n_embd
self.wte = TFSharedEmbeddings(
config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name="wte"
)
self.wpe = tf.keras.layers.Embedding(
config.n_positions,
config.n_embd,
embeddings_initializer=get_initializer(config.initializer_range),
name="wpe",
)
self.drop = tf.keras.layers.Dropout(config.embd_pdrop)
self.h = [TFBlock(config.n_ctx, config, scale=True, name="h_._{}".format(i)) for i in range(config.n_layer)]
self.ln_f = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f")
def get_input_embeddings(self):
return self.wte
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
raise NotImplementedError
def call(
self,
inputs,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
training=False,
):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
past = inputs[1] if len(inputs) > 1 else past
attention_mask = inputs[2] if len(inputs) > 2 else attention_mask
token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids
position_ids = inputs[4] if len(inputs) > 4 else position_ids
head_mask = inputs[5] if len(inputs) > 5 else head_mask
inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds
assert len(inputs) <= 7, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
past = inputs.get("past", past)
attention_mask = inputs.get("attention_mask", attention_mask)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
assert len(inputs) <= 7, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = shape_list(past[0][0])[-2]
if position_ids is None:
position_ids = tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32)[tf.newaxis, :]
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
else:
attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.num_hidden_layers
# head_mask = tf.constant([0] * self.num_hidden_layers)
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids, mode="embedding")
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.wte(token_type_ids, mode="embedding")
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
presents = ()
all_attentions = []
all_hidden_states = ()
for i, (block, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block([hidden_states, layer_past, attention_mask, head_mask[i]], training=training)
hidden_states, present = outputs[:2]
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.ln_f(hidden_states)
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states, presents)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# leave the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
return outputs # last hidden state, presents, (all hidden_states), (attentions)
class TFGPT2PreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = GPT2Config
pretrained_model_archive_map = TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "transformer"
GPT2_START_DOCSTRING = r"""
.. note::
TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
This second option is useful when using the :obj:`tf.keras.Model.fit()` method, which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument:
- a single Tensor with input_ids only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
GPT2_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.GPT2Tokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
(if set to :obj:`False`) for evaluation.
"""
@add_start_docstrings(
"The bare GPT2 Model transformer outputing raw hidden-states without any specific head on top.",
GPT2_START_DOCSTRING,
)
class TFGPT2Model(TFGPT2PreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFGPT2MainLayer(config, name="transformer")
@add_start_docstrings_to_callable(GPT2_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.GPT2Config`) and inputs:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(tf.Tensor)` `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2Model
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = TFGPT2Model.from_pretrained('gpt2')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
outputs = self.transformer(inputs, **kwargs)
return outputs
@add_start_docstrings(
"""The GPT2 Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
GPT2_START_DOCSTRING,
)
class TFGPT2LMHeadModel(TFGPT2PreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFGPT2MainLayer(config, name="transformer")
def get_output_embeddings(self):
return self.transformer.wte
def prepare_inputs_for_generation(self, inputs, past, **kwargs):
# only last token for inputs_ids if past is defined in kwargs
if past:
inputs = tf.expand_dims(inputs[:, -1], -1)
return {"inputs": inputs, "past": past}
@add_start_docstrings_to_callable(GPT2_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.GPT2Config`) and inputs:
prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = TFGPT2LMHeadModel.from_pretrained('gpt2')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
logits = outputs[0]
"""
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.transformer.wte(hidden_states, mode="linear")
outputs = (lm_logits,) + transformer_outputs[1:]
return outputs # lm_logits, presents, (all hidden_states), (attentions)
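# A minimal sketch of incremental decoding with the cache: ``prepare_inputs_for_generation``
# above feeds only the newest token once ``past`` is available. The helper name, checkpoint and
# prompt are hypothetical and illustrative only.
def _sketch_greedy_decoding_with_past():
    from transformers import GPT2Tokenizer
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    model = TFGPT2LMHeadModel.from_pretrained("gpt2")
    generated = tf.constant(tokenizer.encode("Hello, my dog is"))[None, :]
    inputs, past = generated, None
    for _ in range(5):
        logits, past = model(inputs, past=past)[:2]
        next_token = tf.argmax(logits[:, -1, :], axis=-1, output_type=tf.int32)[:, None]
        generated = tf.concat([generated, next_token], axis=1)
        inputs = next_token  # with a cache, only the newly generated token is fed back in
    return tokenizer.decode(generated[0].numpy().tolist())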
@add_start_docstrings(
"""The GPT2 Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the hidden state at a specified classification token index in the input sequence).
""",
GPT2_START_DOCSTRING,
)
class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
config.num_labels = 1
self.transformer = TFGPT2MainLayer(config, name="transformer")
self.multiple_choice_head = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="multiple_choice_head"
)
def get_output_embeddings(self):
return self.transformer.wte
@add_start_docstrings_to_callable(GPT2_INPUTS_DOCSTRING)
def call(
self,
inputs,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
training=False,
):
r"""
mc_token_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, num_choices)`, `optional`, defaults to index of the last token of the input)
Index of the classification token in each input sequence.
Selected in the range ``[0, input_ids.size(-1) - 1]``.
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.GPT2Config`) and inputs:
lm_prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
# For example purposes. Not runnable.
import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2DoubleHeadsModel
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = TFGPT2DoubleHeadsModel.from_pretrained('gpt2')
# Add a [CLS] to the vocabulary (we should train it also!)
# This option is currently not implemented in TF 2.0
raise NotImplementedError
tokenizer.add_special_tokens({'cls_token': '[CLS]'})
model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
print(tokenizer.cls_token_id, len(tokenizer)) # The newly added token is the last token of the vocabulary
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
encoded_choices = [tokenizer.encode(s) for s in choices]
cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
input_ids = tf.constant(encoded_choices)[None, :] # Batch size: 1, number of choices: 2
mc_token_ids = tf.constant([cls_token_location]) # Batch size: 1
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
past = inputs[1] if len(inputs) > 1 else past
attention_mask = inputs[2] if len(inputs) > 2 else attention_mask
token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids
position_ids = inputs[4] if len(inputs) > 4 else position_ids
head_mask = inputs[5] if len(inputs) > 5 else head_mask
inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds
mc_token_ids = inputs[7] if len(inputs) > 7 else mc_token_ids
assert len(inputs) <= 8, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
past = inputs.get("past", past)
attention_mask = inputs.get("attention_mask", attention_mask)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
mc_token_ids = inputs.get("mc_token_ids", mc_token_ids)
assert len(inputs) <= 8, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None:
input_shapes = shape_list(input_ids)
else:
input_shapes = shape_list(inputs_embeds)[:-1]
seq_length = input_shapes[-1]
flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
flat_inputs = [
flat_input_ids,
past,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
head_mask,
inputs_embeds,
]
transformer_outputs = self.transformer(flat_inputs, training=training)
hidden_states = transformer_outputs[0]
hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])
lm_logits = self.transformer.wte(hidden_states, mode="linear")
mc_logits = self.multiple_choice_head([hidden_states, mc_token_ids], training=training)
mc_logits = tf.squeeze(mc_logits, axis=-1)
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
return outputs # lm logits, mc logits, presents, (all hidden_states), (attentions)
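# Note on shapes: the flattening above folds the choice dimension into the batch, so the
# transformer sees (batch_size * num_choices, seq_length); the hidden states are unfolded back to
# (batch_size, num_choices, seq_length, hidden) before the two heads are applied. For example,
# ``input_ids`` of shape (1, 2, 7) is flattened to (2, 7) for the forward pass, and ``mc_logits``
# comes out with shape (1, 2) after the final squeeze.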
| 33,342 | 46.161245 | 169 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 Transformer XL model.
"""
import logging
import tensorflow as tf
from .configuration_transfo_xl import TransfoXLConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_transfo_xl_utilities import TFAdaptiveSoftmaxMask
from .modeling_tf_utils import TFPreTrainedModel, get_initializer, keras_serializable, shape_list
logger = logging.getLogger(__name__)
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP = {
"transfo-xl-wt103": "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-tf_model.h5",
}
class TFPositionalEmbedding(tf.keras.layers.Layer):
def __init__(self, demb, **kwargs):
super().__init__(**kwargs)
self.inv_freq = 1 / (10000 ** (tf.range(0, demb, 2.0) / demb))
def call(self, pos_seq, bsz=None):
sinusoid_inp = tf.einsum("i,j->ij", pos_seq, self.inv_freq)
pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
if bsz is not None:
return tf.tile(pos_emb[:, None, :], [1, bsz, 1])
else:
return pos_emb[:, None, :]
class TFPositionwiseFF(tf.keras.layers.Layer):
def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5, init_std=0.02, **kwargs):
super().__init__(**kwargs)
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.layer_1 = tf.keras.layers.Dense(
d_inner, kernel_initializer=get_initializer(init_std), activation=tf.nn.relu, name="CoreNet_._0"
)
self.drop_1 = tf.keras.layers.Dropout(dropout)
self.layer_2 = tf.keras.layers.Dense(d_model, kernel_initializer=get_initializer(init_std), name="CoreNet_._3")
self.drop_2 = tf.keras.layers.Dropout(dropout)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layer_norm")
self.pre_lnorm = pre_lnorm
def call(self, inp, training=False):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = self.layer_norm(inp)
core_out = self.layer_1(core_out)
core_out = self.drop_1(core_out, training=training)
core_out = self.layer_2(core_out)
core_out = self.drop_2(core_out, training=training)
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = self.layer_1(inp)
core_out = self.drop_1(core_out, training=training)
core_out = self.layer_2(core_out)
core_out = self.drop_2(core_out, training=training)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class TFRelPartialLearnableMultiHeadAttn(tf.keras.layers.Layer):
def __init__(
self,
n_head,
d_model,
d_head,
dropout,
dropatt=0,
tgt_len=None,
ext_len=None,
mem_len=None,
pre_lnorm=False,
r_r_bias=None,
r_w_bias=None,
output_attentions=False,
layer_norm_epsilon=1e-5,
init_std=0.02,
**kwargs
):
super().__init__(**kwargs)
self.output_attentions = output_attentions
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.qkv_net = tf.keras.layers.Dense(
3 * n_head * d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name="qkv_net"
)
self.drop = tf.keras.layers.Dropout(dropout)
self.dropatt = tf.keras.layers.Dropout(dropatt)
self.o_net = tf.keras.layers.Dense(
d_model, kernel_initializer=get_initializer(init_std), use_bias=False, name="o_net"
)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layer_norm")
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
if r_r_bias is not None and r_w_bias is not None: # Biases are shared
self.r_r_bias = r_r_bias
self.r_w_bias = r_w_bias
else:
self.r_r_bias = None
self.r_w_bias = None
self.r_net = tf.keras.layers.Dense(
self.n_head * self.d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name="r_net"
)
def build(self, input_shape):
if self.r_r_bias is None or self.r_w_bias is None: # Biases are not shared
self.r_r_bias = self.add_weight(
shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
)
self.r_w_bias = self.add_weight(
shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
)
super().build(input_shape)
def _rel_shift(self, x):
x_size = shape_list(x)
x = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])
x = tf.reshape(x, [x_size[1] + 1, x_size[0], x_size[2], x_size[3]])
x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])
x = tf.reshape(x, x_size)
return x
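# Worked example of the relative shift: with bsz = n_head = 1, qlen = 2 and klen = 3, a slice
# x[:, :, 0, 0] equal to
# [[a0, a1, a2],
#  [b0, b1, b2]]
# becomes, after the pad / reshape / slice / reshape sequence above,
# [[a1, a2, 0],
#  [b0, b1, b2]]
# i.e. row i is shifted left by (qlen - 1 - i) positions. Because the positional embeddings r are
# ordered from the largest relative distance down to zero, entry (i, j) of the shifted tensor
# scores the correct relative distance (klen - qlen + i - j) between query i and key j.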
def call(self, inputs, training=False):
w, r, attn_mask, mems, head_mask = inputs
qlen, rlen, bsz = shape_list(w)[0], shape_list(r)[0], shape_list(w)[1]
if mems is not None:
cat = tf.concat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)
klen = shape_list(w_head_k)[0]
w_head_q = tf.reshape(w_head_q, (qlen, bsz, self.n_head, self.d_head)) # qlen x bsz x n_head x d_head
w_head_k = tf.reshape(w_head_k, (klen, bsz, self.n_head, self.d_head)) # klen x bsz x n_head x d_head
w_head_v = tf.reshape(w_head_v, (klen, bsz, self.n_head, self.d_head)) # klen x bsz x n_head x d_head
r_head_k = tf.reshape(r_head_k, (rlen, self.n_head, self.d_head)) # rlen x n_head x d_head
# compute attention score
rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
AC = tf.einsum("ibnd,jbnd->ijbn", rw_head_q, w_head_k) # qlen x klen x bsz x n_head
rr_head_q = w_head_q + self.r_r_bias
BD = tf.einsum("ibnd,jnd->ijbn", rr_head_q, r_head_k) # qlen x klen x bsz x n_head
BD = self._rel_shift(BD)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score = attn_score * self.scale
# compute attention probability
if attn_mask is not None:
attn_mask_t = attn_mask[:, :, None, None]
attn_score = attn_score * (1 - attn_mask_t) - 1e30 * attn_mask_t
# [qlen x klen x bsz x n_head]
attn_prob = tf.nn.softmax(attn_score, axis=1)
attn_prob = self.dropatt(attn_prob, training=training)
# Mask heads if we want to
if head_mask is not None:
attn_prob = attn_prob * head_mask
# compute attention vector
attn_vec = tf.einsum("ijbn,jbnd->ibnd", attn_prob, w_head_v)
# [qlen x bsz x n_head x d_head]
attn_vec_sizes = shape_list(attn_vec)
attn_vec = tf.reshape(attn_vec, (attn_vec_sizes[0], attn_vec_sizes[1], self.n_head * self.d_head))
# linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out, training=training)
if self.pre_lnorm:
# residual connection
outputs = [w + attn_out]
else:
# residual connection + layer normalization
outputs = [self.layer_norm(w + attn_out)]
if self.output_attentions:
outputs.append(attn_prob)
return outputs
class TFRelPartialLearnableDecoderLayer(tf.keras.layers.Layer):
def __init__(
self,
n_head,
d_model,
d_head,
d_inner,
dropout,
tgt_len=None,
ext_len=None,
mem_len=None,
dropatt=0.0,
pre_lnorm=False,
r_w_bias=None,
r_r_bias=None,
output_attentions=False,
layer_norm_epsilon=1e-5,
init_std=0.02,
**kwargs
):
super().__init__(**kwargs)
self.dec_attn = TFRelPartialLearnableMultiHeadAttn(
n_head,
d_model,
d_head,
dropout,
tgt_len=tgt_len,
ext_len=ext_len,
mem_len=mem_len,
dropatt=dropatt,
pre_lnorm=pre_lnorm,
r_w_bias=r_w_bias,
r_r_bias=r_r_bias,
init_std=init_std,
output_attentions=output_attentions,
layer_norm_epsilon=layer_norm_epsilon,
name="dec_attn",
)
self.pos_ff = TFPositionwiseFF(
d_model,
d_inner,
dropout,
pre_lnorm=pre_lnorm,
init_std=init_std,
layer_norm_epsilon=layer_norm_epsilon,
name="pos_ff",
)
def call(self, inputs, training=False):
dec_inp, r, dec_attn_mask, mems, head_mask = inputs
attn_outputs = self.dec_attn([dec_inp, r, dec_attn_mask, mems, head_mask], training=training)
ff_output = self.pos_ff(attn_outputs[0], training=training)
outputs = [ff_output] + attn_outputs[1:]
return outputs
class TFAdaptiveEmbedding(tf.keras.layers.Layer):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, init_std=0.02, sample_softmax=False, **kwargs):
super().__init__(**kwargs)
self.n_token = n_token
self.d_embed = d_embed
self.init_std = init_std
self.cutoffs = cutoffs + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = []
self.emb_projs = []
if div_val == 1:
raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(
tf.keras.layers.Embedding(
r_idx - l_idx,
d_emb_i,
embeddings_initializer=get_initializer(init_std),
name="emb_layers_._{}".format(i),
)
)
def build(self, input_shape):
for i in range(len(self.cutoffs)):
d_emb_i = self.d_embed // (self.div_val ** i)
self.emb_projs.append(
self.add_weight(
shape=(d_emb_i, self.d_proj),
initializer=get_initializer(self.init_std),
trainable=True,
name="emb_projs_._{}".format(i),
)
)
super().build(input_shape)
def call(self, inp):
if self.div_val == 1:
raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
else:
inp_flat = tf.reshape(inp, (-1,))
emb_flat = tf.zeros([shape_list(inp_flat)[0], self.d_proj])
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
inp_i = tf.boolean_mask(inp_flat, mask_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = tf.einsum("id,de->ie", emb_i, self.emb_projs[i])
mask_idx = tf.cast(tf.where(mask_i), dtype=tf.int64)
emb_flat += tf.scatter_nd(mask_idx, emb_i, tf.cast(shape_list(emb_flat), dtype=tf.int64))
embed_shape = shape_list(inp) + [self.d_proj]
embed = tf.reshape(emb_flat, embed_shape)
embed *= self.emb_scale
return embed
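# Illustrative example of the adaptive sizing above: assuming cutoffs = [20000, 40000],
# d_embed = 1024 and div_val = 4, the three vocabulary bands get 1024-, 256- and
# 64-dimensional embeddings (d_embed // div_val**i), each projected up to d_proj by its own
# entry in `emb_projs` before being scattered back into a single [*, d_proj] output.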
@keras_serializable
class TFTransfoXLMainLayer(tf.keras.layers.Layer):
config_class = TransfoXLConfig
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.n_token = config.vocab_size
self.d_embed = config.d_embed
self.d_model = config.d_model
self.n_head = config.n_head
self.d_head = config.d_head
self.untie_r = config.untie_r
self.word_emb = TFAdaptiveEmbedding(
config.vocab_size,
config.d_embed,
config.d_model,
config.cutoffs,
div_val=config.div_val,
init_std=config.init_std,
name="word_emb",
)
self.drop = tf.keras.layers.Dropout(config.dropout)
self.n_layer = config.n_layer
self.tgt_len = config.tgt_len
self.mem_len = config.mem_len
self.ext_len = config.ext_len
self.max_klen = config.tgt_len + config.ext_len + config.mem_len
self.attn_type = config.attn_type
self.layers = []
if config.attn_type == 0: # the default attention
for i in range(config.n_layer):
self.layers.append(
TFRelPartialLearnableDecoderLayer(
config.n_head,
config.d_model,
config.d_head,
config.d_inner,
config.dropout,
tgt_len=config.tgt_len,
ext_len=config.ext_len,
mem_len=config.mem_len,
dropatt=config.dropatt,
pre_lnorm=config.pre_lnorm,
r_w_bias=None if self.untie_r else self.r_w_bias,
r_r_bias=None if self.untie_r else self.r_r_bias,
output_attentions=self.output_attentions,
layer_norm_epsilon=config.layer_norm_epsilon,
init_std=config.init_std,
name="layers_._{}".format(i),
)
)
else: # learnable embeddings and absolute embeddings
raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
self.same_length = config.same_length
self.clamp_len = config.clamp_len
if self.attn_type == 0: # default attention
self.pos_emb = TFPositionalEmbedding(self.d_model, name="pos_emb")
else: # learnable embeddings and absolute embeddings
raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
def build(self, input_shape):
if not self.untie_r:
self.r_w_bias = self.add_weight(
shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
)
self.r_r_bias = self.add_weight(
shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
)
super().build(input_shape)
def get_input_embeddings(self):
return self.word_emb
def _resize_token_embeddings(self, new_num_tokens):
return self.word_emb
def backward_compatible(self):
self.sample_softmax = -1
def reset_length(self, tgt_len, ext_len, mem_len):
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_len = ext_len
def _prune_heads(self, heads):
raise NotImplementedError
def init_mems(self, bsz):
if self.mem_len > 0:
mems = []
for i in range(self.n_layer):
empty = tf.zeros([self.mem_len, bsz, self.d_model])
mems.append(empty)
return mems
else:
return None
def _update_mems(self, hids, mems, mlen, qlen):
# does not deal with None
if mems is None:
return None
# mems is not None
assert len(hids) == len(mems), "len(hids) != len(mems)"
# There are `mlen + qlen` steps that can be cached into mems
# For the next step, the last `ext_len` of the `qlen` tokens
# will be used as the extended context. Hence, we only cache
# the tokens from `mlen + qlen - self.ext_len - self.mem_len`
# to `mlen + qlen - self.ext_len`.
new_mems = []
end_idx = mlen + max(0, qlen - 0 - self.ext_len)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = tf.concat([mems[i], hids[i]], axis=0)
tf.stop_gradient(cat)
new_mems.append(cat[beg_idx:end_idx])
return new_mems
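    # Worked example (for intuition): with mem_len = 4, ext_len = 0, cached mlen = 4 and new
    # qlen = 3, the concatenated states have length 7, end_idx = 4 + max(0, 3) = 7 and
    # beg_idx = max(0, 7 - 4) = 3, so the new memory keeps positions 3..6 -- the four most
    # recent hidden states.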
def call(self, inputs, mems=None, head_mask=None, inputs_embeds=None, training=False):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
mems = inputs[1] if len(inputs) > 1 else mems
head_mask = inputs[2] if len(inputs) > 2 else head_mask
inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds
assert len(inputs) <= 4, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
mems = inputs.get("mems", mems)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
assert len(inputs) <= 4, "Too many inputs."
else:
input_ids = inputs
# the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
# so we transpose here from shape [bsz, len] to shape [len, bsz]
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_ids = tf.transpose(input_ids, perm=(1, 0))
qlen, bsz = shape_list(input_ids)
elif inputs_embeds is not None:
inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2))
qlen, bsz = shape_list(inputs_embeds)[:2]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if mems is None:
mems = self.init_mems(bsz)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
# and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.n_layer
if inputs_embeds is not None:
word_emb = inputs_embeds
else:
word_emb = self.word_emb(input_ids)
mlen = shape_list(mems[0])[0] if mems is not None else 0
klen = mlen + qlen
attn_mask = tf.ones([qlen, qlen])
mask_u = tf.linalg.band_part(attn_mask, 0, -1)
mask_dia = tf.linalg.band_part(attn_mask, 0, 0)
attn_mask_pad = tf.zeros([qlen, mlen])
dec_attn_mask = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)
if self.same_length:
mask_l = tf.linalg.band_part(attn_mask, -1, 0)
dec_attn_mask = tf.concat([dec_attn_mask[:, :qlen] + mask_l - mask_dia, dec_attn_mask[:, qlen:]], 1)
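        # Worked example (for intuition): with qlen = 3, mlen = 2 and same_length = False,
        # dec_attn_mask is the [qlen, klen] matrix
        #   [[0, 0, 0, 1, 1],
        #    [0, 0, 0, 0, 1],
        #    [0, 0, 0, 0, 0]]
        # where 1 marks positions that must not be attended to (strictly future tokens);
        # memory columns and current/past tokens remain visible.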
# ::: PyTorch masking code for reference :::
# if self.same_length:
# all_ones = word_emb.new_ones((qlen, klen), dtype=torch.uint8)
# mask_len = klen - self.mem_len
# if mask_len > 0:
# mask_shift_len = qlen - mask_len
# else:
# mask_shift_len = qlen
# dec_attn_mask = (torch.triu(all_ones, 1+mlen)
# + torch.tril(all_ones, -mask_shift_len))[:, :, None] # -1
# else:
# dec_attn_mask = torch.triu(
# word_emb.new_ones((qlen, klen), dtype=torch.uint8), diagonal=1+mlen)[:,:,None]
hids = []
attentions = []
if self.attn_type == 0: # default
pos_seq = tf.range(klen - 1, -1, -1.0)
if self.clamp_len > 0:
pos_seq = tf.minimum(pos_seq, self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb, training=training)
pos_emb = self.drop(pos_emb, training=training)
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
layer_outputs = layer([core_out, pos_emb, dec_attn_mask, mems_i, head_mask[i]], training=training)
core_out = layer_outputs[0]
if self.output_attentions:
attentions.append(layer_outputs[1])
else: # learnable embeddings and absolute embeddings
raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
core_out = self.drop(core_out, training=training)
new_mems = self._update_mems(hids, mems, mlen, qlen)
# We transpose back here to shape [bsz, len, hidden_dim]
outputs = [tf.transpose(core_out, perm=(1, 0, 2)), new_mems]
if self.output_hidden_states:
# Add last layer and transpose to library standard shape [bsz, len, hidden_dim]
hids.append(core_out)
hids = list(tf.transpose(t, perm=(1, 0, 2)) for t in hids)
outputs.append(hids)
if self.output_attentions:
# Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
attentions = list(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions)
outputs.append(attentions)
return outputs # last hidden state, new_mems, (all hidden states), (all attentions)
class TFTransfoXLPreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = TransfoXLConfig
pretrained_model_archive_map = TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "transformer"
TRANSFO_XL_START_DOCSTRING = r"""
.. note::
        TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
        This second option is useful when using the :obj:`tf.keras.Model.fit()` method, which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
        - a single Tensor with input_ids only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.TransfoXLConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
TRANSFO_XL_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.TransfoXLTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
given to this model should not be passed as input ids as they have already been computed.
head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare Bert Model transformer outputing raw hidden-states without any specific head on top.",
TRANSFO_XL_START_DOCSTRING,
)
class TFTransfoXLModel(TFTransfoXLPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFTransfoXLMainLayer(config, name="transformer")
@add_start_docstrings_to_callable(TRANSFO_XL_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.TransfoXLConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import TransfoXLTokenizer, TFTransfoXLModel
tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
model = TFTransfoXLModel.from_pretrained('transfo-xl-wt103')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states, mems = outputs[:2]
"""
outputs = self.transformer(inputs, **kwargs)
return outputs
class TFTransfoXLLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def call(self, hidden_states):
hidden_states = self.input_embeddings(hidden_states, mode="linear")
hidden_states = hidden_states + self.bias
return hidden_states
@add_start_docstrings(
"""The Transformer-XL Model with a language modeling head on top
(adaptive softmax with weights tied to the adaptive input embeddings)""",
TRANSFO_XL_START_DOCSTRING,
)
class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = TFTransfoXLMainLayer(config, name="transformer")
self.sample_softmax = config.sample_softmax
assert (
self.sample_softmax <= 0
), "Sampling from the softmax is not implemented yet. Please look at issue: #3310: https://github.com/huggingface/transformers/issues/3310"
self.crit = TFAdaptiveSoftmaxMask(
config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val, name="crit"
)
def get_output_embeddings(self):
""" Double-check if you are using adaptive softmax.
"""
if len(self.crit.out_layers) > 0:
return self.crit.out_layers[-1]
return None
def reset_length(self, tgt_len, ext_len, mem_len):
self.transformer.reset_length(tgt_len, ext_len, mem_len)
def init_mems(self, bsz):
return self.transformer.init_mems(bsz)
@add_start_docstrings_to_callable(TRANSFO_XL_INPUTS_DOCSTRING)
def call(self, inputs, mems=None, head_mask=None, inputs_embeds=None, labels=None, training=False):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.TransfoXLConfig`) and inputs:
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import TransfoXLTokenizer, TFTransfoXLLMHeadModel
tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
model = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
prediction_scores, mems = outputs[:2]
"""
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
mems = inputs[1] if len(inputs) > 1 else mems
head_mask = inputs[2] if len(inputs) > 2 else head_mask
inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds
labels = inputs[4] if len(inputs) > 4 else labels
assert len(inputs) <= 5, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
mems = inputs.get("mems", mems)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
labels = inputs.get("labels", labels)
assert len(inputs) <= 5, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None:
bsz, tgt_len = shape_list(input_ids)[:2]
else:
bsz, tgt_len = shape_list(inputs_embeds)[:2]
transformer_outputs = self.transformer([input_ids, mems, head_mask, inputs_embeds], training=training)
last_hidden = transformer_outputs[0]
pred_hid = last_hidden[:, -tgt_len:]
outputs = transformer_outputs[1:]
softmax_output = self.crit([pred_hid, labels], training=training)
outputs = [softmax_output] + outputs
return outputs # logits, new_mems, (all hidden states), (all attentions)
def prepare_inputs_for_generation(self, inputs, past, **model_kwargs):
inputs = {"inputs": inputs}
# if past is defined in model kwargs then use it for faster decoding
if past:
inputs["mems"] = past
return inputs
| 35,811 | 40.545244 | 159 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_t5.py | # coding=utf-8
# Copyright 2018 T5 Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 T5 model. """
import copy
import itertools
import logging
import math
import tensorflow as tf
from .configuration_t5 import T5Config
from .file_utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, shape_list
logger = logging.getLogger(__name__)
TF_T5_PRETRAINED_MODEL_ARCHIVE_MAP = {
"t5-small": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-small-tf_model.h5",
"t5-base": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-base-tf_model.h5",
"t5-large": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-large-tf_model.h5",
"t5-3b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-3b-tf_model.h5",
"t5-11b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-11b-tf_model.h5",
}
####################################################
# TF 2.0 Models are constructed using Keras imperative API by sub-classing
# - tf.keras.layers.Layer for the layers and
# - TFPreTrainedModel for the models (it-self a sub-class of tf.keras.Model)
####################################################
class TFT5LayerNorm(tf.keras.layers.Layer):
def __init__(self, epsilon=1e-6, **kwargs):
""" Construct a layernorm module in the T5 style
            No bias and no subtraction of mean.
"""
super().__init__(**kwargs)
self.variance_epsilon = epsilon
def build(self, input_shape):
"""Build shared word embedding layer """
self.weight = self.add_weight("weight", shape=(input_shape[-1],), initializer="ones")
super().build(input_shape)
def call(self, x):
variance = tf.math.reduce_mean(tf.math.square(x), axis=-1, keepdims=True)
x = x * tf.math.rsqrt(variance + self.variance_epsilon)
return self.weight * x
class TFT5DenseReluDense(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.wi = tf.keras.layers.Dense(config.d_ff, use_bias=False, name="wi")
self.wo = tf.keras.layers.Dense(config.d_model, use_bias=False, name="wo")
self.dropout = tf.keras.layers.Dropout(config.dropout_rate)
self.act = tf.keras.activations.relu
def call(self, hidden_states, training=False):
h = self.wi(hidden_states)
h = self.act(h)
h = self.dropout(h, training=training)
h = self.wo(h)
return h
class TFT5LayerFF(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.DenseReluDense = TFT5DenseReluDense(config, name="DenseReluDense")
self.layer_norm = TFT5LayerNorm(epsilon=config.layer_norm_epsilon, name="layer_norm")
self.dropout = tf.keras.layers.Dropout(config.dropout_rate)
def call(self, hidden_states, training=False):
norm_x = self.layer_norm(hidden_states)
y = self.DenseReluDense(norm_x, training=training)
layer_output = hidden_states + self.dropout(y, training=training)
return layer_output
class TFT5Attention(tf.keras.layers.Layer):
NEW_ID = itertools.count()
def __init__(self, config, has_relative_attention_bias=False, **kwargs):
super().__init__(**kwargs)
self.layer_id = next(TFT5Attention.NEW_ID)
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.output_attentions = config.output_attentions
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.d_model = config.d_model
self.d_kv = config.d_kv
self.n_heads = config.num_heads
self.inner_dim = self.n_heads * self.d_kv
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = tf.keras.layers.Dense(self.inner_dim, use_bias=False, name="q")
self.k = tf.keras.layers.Dense(self.inner_dim, use_bias=False, name="k")
self.v = tf.keras.layers.Dense(self.inner_dim, use_bias=False, name="v")
self.o = tf.keras.layers.Dense(self.d_model, use_bias=False, name="o")
self.dropout = tf.keras.layers.Dropout(config.dropout_rate)
if self.has_relative_attention_bias:
self.relative_attention_bias = tf.keras.layers.Embedding(
self.relative_attention_num_buckets, self.n_heads, name="relative_attention_bias",
)
self.pruned_heads = set()
def prune_heads(self, heads):
raise NotImplementedError
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are
invalid.
We use smaller buckets for small absolute relative_position and larger buckets
for larger absolute relative_positions. All relative positions >=max_distance
map to the same bucket. All relative positions <=-max_distance map to the
same bucket. This should allow for more graceful generalization to longer
sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += tf.dtypes.cast(tf.math.less(n, 0), tf.int32) * num_buckets
n = tf.math.abs(n)
else:
n = tf.math.maximum(n, 0)
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = tf.math.less(n, max_exact)
val_if_large = max_exact + tf.dtypes.cast(
tf.math.log(tf.dtypes.cast(n, tf.float32) / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact),
tf.int32,
)
val_if_large = tf.math.minimum(val_if_large, num_buckets - 1)
ret += tf.where(is_small, n, val_if_large)
return ret
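    # Worked example (for intuition), using the defaults num_buckets = 32 and
    # max_distance = 128: in the causal case (bidirectional=False) past distances 0..15 each
    # get their own bucket, distances 16..127 share log-spaced buckets 16..31, anything
    # >= 128 lands in bucket 31, and "future" keys (negative distance) are clamped to
    # bucket 0. With bidirectional=True the buckets are split into two halves of 16, one per
    # direction, with exact buckets for distances 0..7.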
def compute_bias(self, qlen, klen):
""" Compute binned relative position bias """
context_position = tf.range(qlen)[:, None]
memory_position = tf.range(klen)[None, :]
relative_position = memory_position - context_position # shape (qlen, klen)
rp_bucket = self._relative_position_bucket(
relative_position, bidirectional=not self.is_decoder, num_buckets=self.relative_attention_num_buckets,
)
values = self.relative_attention_bias(rp_bucket) # shape (qlen, klen, num_heads)
values = tf.expand_dims(tf.transpose(values, [2, 0, 1]), axis=0) # shape (1, num_heads, qlen, klen)
return values
def call(
self, input, mask=None, kv=None, position_bias=None, cache=None, head_mask=None, training=False,
):
"""
Self-attention (if kv is None) or attention over source sentence (provided by kv).
"""
# Input is (bs, qlen, dim)
# Mask is (bs, klen) (non-causal) or (bs, klen, klen)
bs, qlen, dim = shape_list(input)
if kv is None:
klen = qlen if cache is None else cache["slen"] + qlen
else:
klen = shape_list(kv)[1]
def shape(x):
""" projection """
return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, self.d_kv)), perm=(0, 2, 1, 3))
def unshape(x):
""" compute context """
return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.inner_dim))
q = shape(self.q(input)) # (bs, n_heads, qlen, dim_per_head)
if kv is None:
k = shape(self.k(input)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v(input)) # (bs, n_heads, qlen, dim_per_head)
elif cache is None or self.layer_id not in cache:
k = v = kv
k = shape(self.k(k)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v(v)) # (bs, n_heads, qlen, dim_per_head)
if cache is not None:
if self.layer_id in cache:
if kv is None:
k_, v_ = cache[self.layer_id]
k = tf.concat([k_, k], axis=2) # (bs, n_heads, klen, dim_per_head)
v = tf.concat([v_, v], axis=2) # (bs, n_heads, klen, dim_per_head)
else:
k, v = cache[self.layer_id]
cache[self.layer_id] = (k, v)
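        # Note on the cache logic above: during incremental decoding, self-attention (kv is
        # None) grows the cached keys/values by one step per call via tf.concat, whereas
        # cross-attention (kv is the encoder output) projects the encoder states once and
        # simply reuses them from the cache on subsequent steps.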
# q = q / math.sqrt(dim_per_head) # No scaling in T5
# scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, qlen, klen)
scores = tf.einsum("bnqd,bnkd->bnqk", q, k) # (bs, n_heads, qlen, klen)
if position_bias is None:
if not self.has_relative_attention_bias:
raise ValueError("No position_bias provided and no weights to compute position_bias")
position_bias = self.compute_bias(qlen, klen)
if mask is not None:
position_bias = position_bias + mask
# mask = (mask == 0).expand_as(scores) # (bs, n_heads, qlen, klen)
# scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen)
scores += position_bias
weights = tf.nn.softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)
weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
context = unshape(context) # (bs, qlen, dim)
context = self.o(context)
outputs = (context,)
if self.output_attentions:
outputs = outputs + (weights,)
if self.has_relative_attention_bias:
outputs = outputs + (position_bias,)
return outputs
class TFT5LayerSelfAttention(tf.keras.layers.Layer):
def __init__(self, config, has_relative_attention_bias=False, **kwargs):
super().__init__(**kwargs)
self.SelfAttention = TFT5Attention(
config, has_relative_attention_bias=has_relative_attention_bias, name="SelfAttention",
)
self.layer_norm = TFT5LayerNorm(epsilon=config.layer_norm_epsilon, name="layer_norm")
self.dropout = tf.keras.layers.Dropout(config.dropout_rate)
def call(
self, hidden_states, attention_mask=None, position_bias=None, head_mask=None, training=False,
):
norm_x = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
norm_x, mask=attention_mask, position_bias=position_bias, head_mask=head_mask, training=training,
)
y = attention_output[0]
layer_output = hidden_states + self.dropout(y, training=training)
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
class TFT5LayerCrossAttention(tf.keras.layers.Layer):
def __init__(self, config, has_relative_attention_bias=False, **kwargs):
super().__init__(**kwargs)
self.EncDecAttention = TFT5Attention(
config, has_relative_attention_bias=has_relative_attention_bias, name="EncDecAttention",
)
self.layer_norm = TFT5LayerNorm(epsilon=config.layer_norm_epsilon, name="layer_norm")
self.dropout = tf.keras.layers.Dropout(config.dropout_rate)
def call(
self, hidden_states, kv, attention_mask=None, position_bias=None, head_mask=None, training=False,
):
norm_x = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
norm_x, mask=attention_mask, kv=kv, position_bias=position_bias, head_mask=head_mask, training=training,
)
y = attention_output[0]
layer_output = hidden_states + self.dropout(y, training=training)
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
class TFT5Block(tf.keras.layers.Layer):
def __init__(self, config, has_relative_attention_bias=False, **kwargs):
super().__init__(**kwargs)
self.is_decoder = config.is_decoder
self.layer = []
self.layer.append(
TFT5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias, name="layer_._0",)
)
if self.is_decoder:
self.layer.append(
TFT5LayerCrossAttention(
config, has_relative_attention_bias=has_relative_attention_bias, name="layer_._1",
)
)
self.layer.append(TFT5LayerFF(config, name="layer_._2"))
else:
self.layer.append(TFT5LayerFF(config, name="layer_._1"))
def call(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
head_mask=None,
training=False,
):
self_attention_outputs = self.layer[0](
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
head_mask=head_mask,
training=training,
)
hidden_states = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
if not self.is_decoder:
hidden_states = self.layer[1](hidden_states, training=training)
else:
cross_attention_outputs = self.layer[1](
hidden_states,
kv=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
head_mask=head_mask,
training=training,
)
hidden_states = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:]
hidden_states = self.layer[2](hidden_states, training=training)
outputs = (hidden_states,) + outputs # add attentions if we output them
return outputs # hidden-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
class _NoLayerEmbedTokens(object):
"""
    This class wraps the TFSharedEmbeddings layer into a plain Python (non-Keras-layer)
    class to avoid problems with weight restoring. It also makes sure that the layer is
    called from the correct scope, to avoid problems with saving/storing the correct weights.
"""
def __init__(self, layer, abs_scope_name=None):
self._layer = layer
self._abs_scope_name = abs_scope_name
def call(self, inputs, mode="embedding"):
if self._abs_scope_name is None:
return self._layer.call(inputs, mode)
# if an abs scope name is given to the embedding variable, call variable from absolute scope
with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
with tf.name_scope(abs_scope_name.original_name_scope):
return self._layer.call(inputs, mode)
def __call__(self, inputs, mode="embedding"):
if self._abs_scope_name is None:
return self._layer(inputs, mode)
# if an abs scope name is given to the embedding variable, call variable from absolute scope
with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
with tf.name_scope(abs_scope_name.original_name_scope):
return self._layer(inputs, mode)
####################################################
# The full model without a specific pretrained or finetuning head is
# provided as a tf.keras.layers.Layer usually called "TFT5MainLayer"
####################################################
class TFT5MainLayer(tf.keras.layers.Layer):
def __init__(self, config, embed_tokens=None, **kwargs):
super().__init__(**kwargs)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
self.config = config
self.num_hidden_layers = config.num_layers
self.block = [
TFT5Block(config, has_relative_attention_bias=bool(i == 0), name="block_._{}".format(i),)
for i in range(config.num_layers)
]
self.final_layer_norm = TFT5LayerNorm(epsilon=config.layer_norm_epsilon, name="final_layer_norm")
self.dropout = tf.keras.layers.Dropout(config.dropout_rate)
def get_input_embeddings(self):
return self.embed_tokens
def get_output_embeddings(self):
return self.embed_tokens
def set_embed_tokens(self, embed_tokens):
self.embed_tokens = embed_tokens
def _resize_token_embeddings(self, new_num_tokens):
        raise NotImplementedError  # Not implemented yet in the library for TF 2.0 models
def _prune_heads(self, heads_to_prune):
        raise NotImplementedError  # Not implemented yet in the library for TF 2.0 models
def call(
self,
input_ids,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
training=False,
):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
input_ids = tf.reshape(input_ids, (-1, input_shape[-1]))
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
            assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
if attention_mask is None:
attention_mask = tf.fill((batch_size, seq_length), 1)
if self.is_decoder and encoder_attention_mask is None:
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = tf.fill((batch_size, encoder_seq_length), 1)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
attention_mask = tf.cast(attention_mask, dtype=tf.float32)
num_dims_attention_mask = len(shape_list(attention_mask))
if num_dims_attention_mask == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif num_dims_attention_mask == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
seq_ids = tf.range(seq_length)
causal_mask = tf.less_equal(
tf.tile(seq_ids[None, None, :], (batch_size, seq_length, 1)), seq_ids[None, :, None],
)
causal_mask = tf.cast(causal_mask, dtype=tf.float32)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
        # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
# extended_attention_mask = tf.math.equal(extended_attention_mask,
# tf.transpose(extended_attention_mask, perm=(-1, -2)))
extended_attention_mask = (1.0 - extended_attention_mask) * -1e9
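        # Worked example (for intuition): for a decoder with seq_length = 3 and no padding,
        # causal_mask[b] is
        #   [[1, 0, 0],
        #    [1, 1, 0],
        #    [1, 1, 1]]
        # so after the line above the extended mask is 0.0 where attention is allowed and
        # -1e9 where it is blocked, ready to be added to the raw attention scores.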
if self.is_decoder:
            # If a 2D or 3D attention mask is provided for the cross-attention
            # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=tf.float32)
num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
if num_dims_encoder_attention_mask == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if num_dims_encoder_attention_mask == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
            # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
# tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.num_hidden_layers
# head_mask = tf.constant([0] * self.num_hidden_layers)
all_hidden_states = ()
all_attentions = ()
position_bias = None
encoder_decoder_position_bias = None
hidden_states = inputs_embeds
for i, layer_module in enumerate(self.block):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
head_mask=head_mask[i],
training=training,
)
hidden_states = layer_outputs[0]
if i == 0:
# We share the position biases between the layers - the first layer store them
# layer_outputs = hidden-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
position_bias = layer_outputs[2 if self.output_attentions else 1]
if self.is_decoder:
encoder_decoder_position_bias = layer_outputs[4 if self.output_attentions else 2]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
####################################################
# TFT5PreTrainedModel is a sub-class of tf.keras.Model
# which take care of loading and saving pretrained weights
# and various common utilities.
# Here you just need to specify a few (self-explanatory)
# pointers for your model.
####################################################
class TFT5PreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = T5Config
pretrained_model_archive_map = TF_T5_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "transformer"
@property
def dummy_inputs(self):
input_ids = tf.constant(DUMMY_INPUTS)
input_mask = tf.constant(DUMMY_MASK)
dummy_inputs = {
"inputs": input_ids,
"decoder_input_ids": input_ids,
"decoder_attention_mask": input_mask,
}
return dummy_inputs
T5_START_DOCSTRING = r""" The T5 model was proposed in
`Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer`_
by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.
    It's an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting.
This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and
refer to the TF 2.0 documentation for all matter related to general usage and behavior.
.. _`Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer`:
https://arxiv.org/abs/1910.10683
.. _`tf.keras.Model`:
https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model
Note on the model inputs:
        TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
        This second option is useful when using the `tf.keras.Model.fit()` method, which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :
        - a single Tensor with input_ids only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
        - a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
T5_INPUTS_DOCSTRING = r"""
Args:
inputs are usually used as a `dict` (see T5 description above for more information) containing all the following.
inputs (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
T5 is a model with relative position embeddings so you should be able to pad the inputs on
the right or the left.
Indices can be obtained using :class:`transformers.T5Tokenizer`.
To know more on how to prepare :obj:`input_ids` for pre-training take a look at
`T5 Training <./t5.html#training>`_ .
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
decoder_input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`, defaults to :obj:`None`):
Provide for sequence to sequence training. T5 uses the pad_token_id as the starting token for decoder_input_ids generation.
attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
encoder_outputs (:obj:`tuple(tuple(tf.FloatTensor)`, `optional`, defaults to :obj:`None`):
Tuple consists of (`last_hidden_state`, `optional`: `hidden_states`, `optional`: `attentions`)
`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`) is a sequence of hidden-states at the output of the last layer of the encoder.
Used in the cross-attention of the decoder.
decoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`, defaults to :obj:`None`):
Default behavior: generate a tensor that ignores pad tokens in decoder_input_ids. Causal mask will also be used by default.
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
To know more on how to prepare :obj:`decoder_input_ids` for pre-training take a look at
`T5 Training <./t5.html#training>`_ .
head_mask: (:obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings(
"The bare T5 Model transformer outputting raw hidden-states" "without any specific head on top.",
T5_START_DOCSTRING,
)
class TFT5Model(TFT5PreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.shared = TFSharedEmbeddings(config.vocab_size, config.d_model, name="shared")
# retrieve correct absolute scope for embed token wrapper
with tf.compat.v1.variable_scope("shared") as shared_abs_scope_name:
pass
embed_tokens = _NoLayerEmbedTokens(self.shared, abs_scope_name=shared_abs_scope_name)
encoder_config = copy.deepcopy(config)
self.encoder = TFT5MainLayer(encoder_config, embed_tokens, name="encoder")
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
self.decoder = TFT5MainLayer(decoder_config, embed_tokens, name="decoder")
def get_input_embeddings(self):
return self.shared
def get_output_embeddings(self):
return self.shared
@add_start_docstrings_to_callable(T5_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.T5Config`) and inputs.
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import T5Tokenizer, TFT5Model
tokenizer = T5Tokenizer.from_pretrained('t5-small')
model = TFT5Model.from_pretrained('t5-small')
input_ids = tokenizer.encode("Hello, my dog is cute", return_tensors="tf") # Batch size 1
outputs = model(input_ids, decoder_input_ids=input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if isinstance(inputs, dict):
kwargs.update(inputs)
else:
kwargs["inputs"] = inputs
# retrieve arguments
input_ids = kwargs.get("inputs", None)
decoder_input_ids = kwargs.get("decoder_input_ids", None)
attention_mask = kwargs.get("attention_mask", None)
encoder_outputs = kwargs.get("encoder_outputs", None)
decoder_attention_mask = kwargs.get("decoder_attention_mask", None)
inputs_embeds = kwargs.get("inputs_embeds", None)
decoder_inputs_embeds = kwargs.get("decoder_inputs_embeds", None)
head_mask = kwargs.get("head_mask", None)
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask,
)
hidden_states = encoder_outputs[0]
# Decode
decoder_outputs = self.decoder(
decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=head_mask,
)
return decoder_outputs + encoder_outputs
@add_start_docstrings("""T5 Model with a `language modeling` head on top. """, T5_START_DOCSTRING)
class TFT5ForConditionalGeneration(TFT5PreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.model_dim = config.d_model
self.shared = TFSharedEmbeddings(config.vocab_size, config.d_model, name="shared")
# retrieve correct absolute scope for embed token wrapper
with tf.compat.v1.variable_scope("shared") as shared_abs_scope_name:
pass
embed_tokens = _NoLayerEmbedTokens(self.shared, abs_scope_name=shared_abs_scope_name)
encoder_config = copy.deepcopy(config)
self.encoder = TFT5MainLayer(encoder_config, embed_tokens, name="encoder")
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
self.decoder = TFT5MainLayer(decoder_config, embed_tokens, name="decoder")
def get_input_embeddings(self):
return self.shared
def get_output_embeddings(self):
return self.shared
def get_encoder(self):
return self.encoder
@add_start_docstrings_to_callable(T5_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.T5Config`) and inputs.
loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`lm_label` is provided):
Classification loss (cross entropy).
prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention.
Examples::
from transformers import T5Tokenizer, TFT5ForConditionalGeneration
tokenizer = T5Tokenizer.from_pretrained('t5-small')
model = TFT5ForConditionalGeneration.from_pretrained('t5-small')
input_ids = tokenizer.encode("Hello, my dog is cute", return_tensors="tf") # Batch size 1
outputs = model(input_ids, decoder_input_ids=input_ids)
prediction_scores = outputs[0]
tokenizer = T5Tokenizer.from_pretrained('t5-small')
model = TFT5ForConditionalGeneration.from_pretrained('t5-small')
input_ids = tokenizer.encode("summarize: Hello, my dog is cute", return_tensors="tf") # Batch size 1
model.generate(input_ids)
"""
if isinstance(inputs, dict):
kwargs.update(inputs)
else:
kwargs["inputs"] = inputs
# retrieve arguments
input_ids = kwargs.get("inputs", None)
decoder_input_ids = kwargs.get("decoder_input_ids", None)
attention_mask = kwargs.get("attention_mask", None)
encoder_outputs = kwargs.get("encoder_outputs", None)
decoder_attention_mask = kwargs.get("decoder_attention_mask", None)
inputs_embeds = kwargs.get("inputs_embeds", None)
decoder_inputs_embeds = kwargs.get("decoder_inputs_embeds", None)
head_mask = kwargs.get("head_mask", None)
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask,
)
hidden_states = encoder_outputs[0]
# Decode
decoder_outputs = self.decoder(
decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=head_mask,
)
sequence_output = decoder_outputs[0] * (self.model_dim ** -0.5)
embed_tokens = self.get_output_embeddings()
lm_logits = embed_tokens(sequence_output, mode="linear")
decoder_outputs = (lm_logits,) + decoder_outputs[1:]
return decoder_outputs + encoder_outputs
def prepare_inputs_for_generation(self, input_ids, past, attention_mask, **kwargs):
assert past is not None, "past has to be defined for encoder_outputs"
# first step
if type(past) is tuple:
encoder_outputs = past
else:
encoder_outputs = (past,)
return {
"inputs": None, # inputs don't have to be defined, but still need to be passed to make Keras.layer.__call__ happy
"decoder_input_ids": input_ids, # input_ids are the decoder_input_ids
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
}
def _reorder_cache(self, past, beam_idx):
# past does not have to be re-ordered for T5.
return past
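# Illustrative sketch of how `generate()` is expected to interact with
# `prepare_inputs_for_generation` above: the encoder outputs are computed once, threaded back in
# as `past` on every decoding step, and `_reorder_cache` is a no-op because nothing step-dependent
# is cached. The 't5-small' checkpoint mirrors the docstring examples; this helper is only an
# illustration, not part of the public API.
def _example_t5_generation_flow():
    from transformers import T5Tokenizer
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
    input_ids = tokenizer.encode("summarize: Hello, my dog is cute", return_tensors="tf")
    # generate() repeatedly calls prepare_inputs_for_generation(), passing the cached
    # encoder outputs back in as `past` so the encoder only runs once.
    return model.generate(input_ids)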
| 42,692 | 46.331486 | 207 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_auto.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
import logging
from collections import OrderedDict
from .configuration_auto import (
AlbertConfig,
AutoConfig,
BertConfig,
CTRLConfig,
DistilBertConfig,
GPT2Config,
OpenAIGPTConfig,
RobertaConfig,
T5Config,
TransfoXLConfig,
XLMConfig,
XLNetConfig,
)
from .configuration_utils import PretrainedConfig
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TFAlbertForMaskedLM,
TFAlbertForSequenceClassification,
TFAlbertModel,
)
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertModel,
)
from .modeling_tf_ctrl import TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, TFCTRLLMHeadModel, TFCTRLModel
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
from .modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP, TFGPT2LMHeadModel, TFGPT2Model
from .modeling_tf_openai import TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP, TFOpenAIGPTLMHeadModel, TFOpenAIGPTModel
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaModel,
)
from .modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_MAP, TFT5ForConditionalGeneration, TFT5Model
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMModel,
TFXLMWithLMHeadModel,
)
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetModel,
)
logger = logging.getLogger(__name__)
TF_ALL_PRETRAINED_MODEL_ARCHIVE_MAP = dict(
(key, value)
for pretrained_map in [
TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_T5_PRETRAINED_MODEL_ARCHIVE_MAP,
]
    for key, value in pretrained_map.items()
)
TF_MODEL_MAPPING = OrderedDict(
[
(T5Config, TFT5Model),
(DistilBertConfig, TFDistilBertModel),
(AlbertConfig, TFAlbertModel),
(RobertaConfig, TFRobertaModel),
(BertConfig, TFBertModel),
(OpenAIGPTConfig, TFOpenAIGPTModel),
(GPT2Config, TFGPT2Model),
(TransfoXLConfig, TFTransfoXLModel),
(XLNetConfig, TFXLNetModel),
(XLMConfig, TFXLMModel),
(CTRLConfig, TFCTRLModel),
]
)
TF_MODEL_FOR_PRETRAINING_MAPPING = OrderedDict(
[
(T5Config, TFT5ForConditionalGeneration),
(DistilBertConfig, TFDistilBertForMaskedLM),
(AlbertConfig, TFAlbertForMaskedLM),
(RobertaConfig, TFRobertaForMaskedLM),
(BertConfig, TFBertForPreTraining),
(OpenAIGPTConfig, TFOpenAIGPTLMHeadModel),
(GPT2Config, TFGPT2LMHeadModel),
(TransfoXLConfig, TFTransfoXLLMHeadModel),
(XLNetConfig, TFXLNetLMHeadModel),
(XLMConfig, TFXLMWithLMHeadModel),
(CTRLConfig, TFCTRLLMHeadModel),
]
)
TF_MODEL_WITH_LM_HEAD_MAPPING = OrderedDict(
[
(T5Config, TFT5ForConditionalGeneration),
(DistilBertConfig, TFDistilBertForMaskedLM),
(AlbertConfig, TFAlbertForMaskedLM),
(RobertaConfig, TFRobertaForMaskedLM),
(BertConfig, TFBertForMaskedLM),
(OpenAIGPTConfig, TFOpenAIGPTLMHeadModel),
(GPT2Config, TFGPT2LMHeadModel),
(TransfoXLConfig, TFTransfoXLLMHeadModel),
(XLNetConfig, TFXLNetLMHeadModel),
(XLMConfig, TFXLMWithLMHeadModel),
(CTRLConfig, TFCTRLLMHeadModel),
]
)
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = OrderedDict(
[
(DistilBertConfig, TFDistilBertForSequenceClassification),
(AlbertConfig, TFAlbertForSequenceClassification),
(RobertaConfig, TFRobertaForSequenceClassification),
(BertConfig, TFBertForSequenceClassification),
(XLNetConfig, TFXLNetForSequenceClassification),
(XLMConfig, TFXLMForSequenceClassification),
]
)
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = OrderedDict(
[
(DistilBertConfig, TFDistilBertForQuestionAnswering),
(BertConfig, TFBertForQuestionAnswering),
(XLNetConfig, TFXLNetForQuestionAnsweringSimple),
(XLMConfig, TFXLMForQuestionAnsweringSimple),
]
)
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = OrderedDict(
[
(DistilBertConfig, TFDistilBertForTokenClassification),
(RobertaConfig, TFRobertaForTokenClassification),
(BertConfig, TFBertForTokenClassification),
(XLNetConfig, TFXLNetForTokenClassification),
]
)
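# Minimal sketch of the lookup pattern shared by every TFAutoModel* class below: walk one of the
# ordered mappings and return the first entry whose config class matches. Ordering matters because
# some configuration classes subclass others (for instance, RobertaConfig derives from BertConfig),
# so the more specific entry has to be checked first. `_lookup_tf_model_class` is only an
# illustration, not part of this module's public API.
def _lookup_tf_model_class(config, mapping=TF_MODEL_MAPPING):
    for config_class, model_class in mapping.items():
        if isinstance(config, config_class):
            return model_class
    raise ValueError("No TF model class registered for {}".format(config.__class__.__name__))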
class TFAutoModel(object):
r"""
:class:`~transformers.TFAutoModel` is a generic model class
that will be instantiated as one of the base model classes of the library
when created with the `TFAutoModel.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The base model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: TFT5Model (T5 model)
- contains `distilbert`: TFDistilBertModel (DistilBERT model)
- contains `roberta`: TFRobertaModel (RoBERTa model)
- contains `bert`: TFBertModel (Bert model)
- contains `openai-gpt`: TFOpenAIGPTModel (OpenAI GPT model)
- contains `gpt2`: TFGPT2Model (OpenAI GPT-2 model)
- contains `transfo-xl`: TFTransfoXLModel (Transformer-XL model)
- contains `xlnet`: TFXLNetModel (XLNet model)
- contains `xlm`: TFXLMModel (XLM model)
- contains `ctrl`: TFCTRLModel (CTRL model)
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"TFAutoModel is designed to be instantiated "
"using the `TFAutoModel.from_pretrained(pretrained_model_name_or_path)` or "
"`TFAutoModel.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: TFDistilBertModel (DistilBERT model)
- isInstance of `roberta` configuration class: TFRobertaModel (RoBERTa model)
- isInstance of `bert` configuration class: TFBertModel (Bert model)
- isInstance of `openai-gpt` configuration class: TFOpenAIGPTModel (OpenAI GPT model)
- isInstance of `gpt2` configuration class: TFGPT2Model (OpenAI GPT-2 model)
- isInstance of `ctrl` configuration class: TFCTRLModel (Salesforce CTRL model)
- isInstance of `transfo-xl` configuration class: TFTransfoXLModel (Transformer-XL model)
- isInstance of `xlnet` configuration class: TFXLNetModel (XLNet model)
- isInstance of `xlm` configuration class: TFXLMModel (XLM model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = TFAutoModel.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
for config_class, model_class in TF_MODEL_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of TFAutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in TF_MODEL_MAPPING.keys())
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the base model classes of the library
from a pre-trained model configuration.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: TFT5Model (T5 model)
- contains `distilbert`: TFDistilBertModel (DistilBERT model)
- contains `roberta`: TFRobertaModel (RoBERTa model)
            - contains `bert`: TFBertModel (Bert model)
- contains `openai-gpt`: TFOpenAIGPTModel (OpenAI GPT model)
- contains `gpt2`: TFGPT2Model (OpenAI GPT-2 model)
- contains `transfo-xl`: TFTransfoXLModel (Transformer-XL model)
- contains `xlnet`: TFXLNetModel (XLNet model)
            - contains `xlm`: TFXLMModel (XLM model)
            - contains `ctrl`: TFCTRLModel (CTRL model)
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument.
from_pt: (`Optional`) Boolean
Set to True if the Checkpoint is a PyTorch checkpoint.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                    - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                    - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
                Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = TFAutoModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = TFAutoModel.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = TFAutoModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
            # Loading from a PyTorch checkpoint file instead of a TF model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = TFAutoModel.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in TF_MODEL_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of TFAutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in TF_MODEL_MAPPING.keys())
)
)
class TFAutoModelForPreTraining(object):
r"""
:class:`~transformers.TFAutoModelForPreTraining` is a generic model class
    that will be instantiated as one of the model classes of the library (with the architecture used for pretraining this model) when created with the `TFAutoModelForPreTraining.from_pretrained(pretrained_model_name_or_path)`
class method.
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"TFAutoModelForPreTraining is designed to be instantiated "
"using the `TFAutoModelForPreTraining.from_pretrained(pretrained_model_name_or_path)` or "
"`TFAutoModelForPreTraining.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
                - isInstance of `distilbert` configuration class: :class:`~transformers.TFDistilBertForMaskedLM` (DistilBERT model)
                - isInstance of `roberta` configuration class: :class:`~transformers.TFRobertaForMaskedLM` (RoBERTa model)
- isInstance of `bert` configuration class: :class:`~transformers.TFBertForPreTraining` (Bert model)
- isInstance of `openai-gpt` configuration class: :class:`~transformers.TFOpenAIGPTLMHeadModel` (OpenAI GPT model)
                - isInstance of `gpt2` configuration class: :class:`~transformers.TFGPT2LMHeadModel` (OpenAI GPT-2 model)
                - isInstance of `ctrl` configuration class: :class:`~transformers.TFCTRLLMHeadModel` (Salesforce CTRL model)
- isInstance of `transfo-xl` configuration class: :class:`~transformers.TFTransfoXLLMHeadModel` (Transformer-XL model)
- isInstance of `xlnet` configuration class: :class:`~transformers.TFXLNetLMHeadModel` (XLNet model)
- isInstance of `xlm` configuration class: :class:`~transformers.TFXLMWithLMHeadModel` (XLM model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = TFAutoModelForPreTraining.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
for config_class, model_class in TF_MODEL_FOR_PRETRAINING_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in TF_MODEL_FOR_PRETRAINING_MAPPING.keys())
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the model classes of the library -with the architecture used for pretraining this model– from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
            - contains `t5`: :class:`~transformers.TFT5ForConditionalGeneration` (T5 model)
- contains `distilbert`: :class:`~transformers.TFDistilBertForMaskedLM` (DistilBERT model)
- contains `albert`: :class:`~transformers.TFAlbertForMaskedLM` (ALBERT model)
- contains `roberta`: :class:`~transformers.TFRobertaForMaskedLM` (RoBERTa model)
- contains `bert`: :class:`~transformers.TFBertForPreTraining` (Bert model)
- contains `openai-gpt`: :class:`~transformers.TFOpenAIGPTLMHeadModel` (OpenAI GPT model)
- contains `gpt2`: :class:`~transformers.TFGPT2LMHeadModel` (OpenAI GPT-2 model)
- contains `transfo-xl`: :class:`~transformers.TFTransfoXLLMHeadModel` (Transformer-XL model)
- contains `xlnet`: :class:`~transformers.TFXLNetLMHeadModel` (XLNet model)
- contains `xlm`: :class:`~transformers.TFXLMWithLMHeadModel` (XLM model)
- contains `ctrl`: :class:`~transformers.TFCTRLLMHeadModel` (Salesforce CTRL model)
        These are TensorFlow (Keras) models, so there is no `model.eval()` / `model.train()` switch:
        dropout and similar layers are only active when the model is called with ``training=True``.
Args:
pretrained_model_name_or_path:
Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                    - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                    - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model.
(e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = TFAutoModelForPreTraining.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = TFAutoModelForPreTraining.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = TFAutoModelForPreTraining.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = TFAutoModelForPreTraining.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in TF_MODEL_FOR_PRETRAINING_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in TF_MODEL_FOR_PRETRAINING_MAPPING.keys())
)
)
class TFAutoModelWithLMHead(object):
r"""
:class:`~transformers.TFAutoModelWithLMHead` is a generic model class
that will be instantiated as one of the language modeling model classes of the library
when created with the `TFAutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: TFT5ForConditionalGeneration (T5 model)
- contains `distilbert`: TFDistilBertForMaskedLM (DistilBERT model)
- contains `roberta`: TFRobertaForMaskedLM (RoBERTa model)
- contains `bert`: TFBertForMaskedLM (Bert model)
- contains `openai-gpt`: TFOpenAIGPTLMHeadModel (OpenAI GPT model)
- contains `gpt2`: TFGPT2LMHeadModel (OpenAI GPT-2 model)
- contains `transfo-xl`: TFTransfoXLLMHeadModel (Transformer-XL model)
- contains `xlnet`: TFXLNetLMHeadModel (XLNet model)
- contains `xlm`: TFXLMWithLMHeadModel (XLM model)
- contains `ctrl`: TFCTRLLMHeadModel (CTRL model)
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"TFAutoModelWithLMHead is designed to be instantiated "
"using the `TFAutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` or "
"`TFAutoModelWithLMHead.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: DistilBertModel (DistilBERT model)
- isInstance of `roberta` configuration class: RobertaModel (RoBERTa model)
- isInstance of `bert` configuration class: BertModel (Bert model)
- isInstance of `openai-gpt` configuration class: OpenAIGPTModel (OpenAI GPT model)
- isInstance of `gpt2` configuration class: GPT2Model (OpenAI GPT-2 model)
- isInstance of `ctrl` configuration class: CTRLModel (Salesforce CTRL model)
- isInstance of `transfo-xl` configuration class: TransfoXLModel (Transformer-XL model)
- isInstance of `xlnet` configuration class: XLNetModel (XLNet model)
- isInstance of `xlm` configuration class: XLMModel (XLM model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = TFAutoModelWithLMHead.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
for config_class, model_class in TF_MODEL_WITH_LM_HEAD_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of TFAutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in TF_MODEL_WITH_LM_HEAD_MAPPING.keys())
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the language modeling model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: TFT5ForConditionalGeneration (T5 model)
- contains `distilbert`: TFDistilBertForMaskedLM (DistilBERT model)
- contains `roberta`: TFRobertaForMaskedLM (RoBERTa model)
- contains `bert`: TFBertForMaskedLM (Bert model)
- contains `openai-gpt`: TFOpenAIGPTLMHeadModel (OpenAI GPT model)
- contains `gpt2`: TFGPT2LMHeadModel (OpenAI GPT-2 model)
- contains `transfo-xl`: TFTransfoXLLMHeadModel (Transformer-XL model)
- contains `xlnet`: TFXLNetLMHeadModel (XLNet model)
- contains `xlm`: TFXLMWithLMHeadModel (XLM model)
- contains `ctrl`: TFCTRLLMHeadModel (CTRL model)
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument.
from_pt: (`Optional`) Boolean
Set to True if the Checkpoint is a PyTorch checkpoint.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                    - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                    - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
                Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = TFAutoModelWithLMHead.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = TFAutoModelWithLMHead.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = TFAutoModelWithLMHead.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
            # Loading from a PyTorch checkpoint file instead of a TF model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = TFAutoModelWithLMHead.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in TF_MODEL_WITH_LM_HEAD_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of TFAutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in TF_MODEL_WITH_LM_HEAD_MAPPING.keys())
)
)
class TFAutoModelForSequenceClassification(object):
r"""
:class:`~transformers.TFAutoModelForSequenceClassification` is a generic model class
that will be instantiated as one of the sequence classification model classes of the library
when created with the `TFAutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: TFDistilBertForSequenceClassification (DistilBERT model)
- contains `roberta`: TFRobertaForSequenceClassification (RoBERTa model)
- contains `bert`: TFBertForSequenceClassification (Bert model)
- contains `xlnet`: TFXLNetForSequenceClassification (XLNet model)
- contains `xlm`: TFXLMForSequenceClassification (XLM model)
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"TFAutoModelForSequenceClassification is designed to be instantiated "
"using the `TFAutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or "
"`TFAutoModelForSequenceClassification.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: DistilBertModel (DistilBERT model)
- isInstance of `roberta` configuration class: RobertaModel (RoBERTa model)
- isInstance of `bert` configuration class: BertModel (Bert model)
- isInstance of `xlnet` configuration class: XLNetModel (XLNet model)
- isInstance of `xlm` configuration class: XLMModel (XLM model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
            model = TFAutoModelForSequenceClassification.from_config(config)  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
for config_class, model_class in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of TFAutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the sequence classification model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: TFDistilBertForSequenceClassification (DistilBERT model)
- contains `roberta`: TFRobertaForSequenceClassification (RoBERTa model)
- contains `bert`: TFBertForSequenceClassification (Bert model)
- contains `xlnet`: TFXLNetForSequenceClassification (XLNet model)
- contains `xlm`: TFXLMForSequenceClassification (XLM model)
        These are TensorFlow (Keras) models, so there is no `model.eval()` / `model.train()` switch:
        dropout and similar layers are only active when the model is called with ``training=True``.
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument.
from_pt: (`Optional`) Boolean
Set to True if the Checkpoint is a PyTorch checkpoint.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                    - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                    - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
                Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = TFAutoModelForSequenceClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = TFAutoModelForSequenceClassification.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = TFAutoModelForSequenceClassification.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
            # Loading from a PyTorch checkpoint file instead of a TF model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = TFAutoModelForSequenceClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of TFAutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
)
)
class TFAutoModelForQuestionAnswering(object):
r"""
:class:`~transformers.TFAutoModelForQuestionAnswering` is a generic model class
that will be instantiated as one of the question answering model classes of the library
when created with the `TFAutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: TFDistilBertForQuestionAnswering (DistilBERT model)
- contains `bert`: TFBertForQuestionAnswering (Bert model)
            - contains `xlnet`: TFXLNetForQuestionAnsweringSimple (XLNet model)
            - contains `xlm`: TFXLMForQuestionAnsweringSimple (XLM model)
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"TFAutoModelForQuestionAnswering is designed to be instantiated "
"using the `TFAutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` or "
"`TFAutoModelForQuestionAnswering.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: DistilBertModel (DistilBERT model)
- isInstance of `bert` configuration class: BertModel (Bert model)
- isInstance of `xlnet` configuration class: XLNetModel (XLNet model)
- isInstance of `xlm` configuration class: XLMModel (XLM model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
            model = TFAutoModelForQuestionAnswering.from_config(config)  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
for config_class, model_class in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of TFAutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the question answering model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: TFDistilBertForQuestionAnswering (DistilBERT model)
- contains `bert`: TFBertForQuestionAnswering (Bert model)
            - contains `xlnet`: TFXLNetForQuestionAnsweringSimple (XLNet model)
            - contains `xlm`: TFXLMForQuestionAnsweringSimple (XLM model)
        These are TensorFlow (Keras) models, so there is no `model.eval()` / `model.train()` switch:
        dropout and similar layers are only active when the model is called with ``training=True``.
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument.
from_pt: (`Optional`) Boolean
Set to True if the Checkpoint is a PyTorch checkpoint.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                    - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                    - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
                Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = TFAutoModelForQuestionAnswering.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = TFAutoModelForQuestionAnswering.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = TFAutoModelForQuestionAnswering.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
            # Loading from a PyTorch checkpoint file instead of a TF model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = TFAutoModelForQuestionAnswering.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of TFAutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
)
)
class TFAutoModelForTokenClassification:
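    r"""
        :class:`~transformers.TFAutoModelForTokenClassification` is a generic model class
        that will be instantiated as one of the token classification model classes of the library
        when created with the `TFAutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path)`
        class method.
        The model class to instantiate is selected as the first pattern matching
        in the `pretrained_model_name_or_path` string (in the following order):
            - contains `distilbert`: TFDistilBertForTokenClassification (DistilBERT model)
            - contains `roberta`: TFRobertaForTokenClassification (RoBERTa model)
            - contains `bert`: TFBertForTokenClassification (Bert model)
            - contains `xlnet`: TFXLNetForTokenClassification (XLNet model)
        This class cannot be instantiated using `__init__()` (throws an error).
    """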
def __init__(self):
raise EnvironmentError(
"TFAutoModelForTokenClassification is designed to be instantiated "
"using the `TFAutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForTokenClassification.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
The model class to instantiate is selected based on the configuration class:
- isInstance of `bert` configuration class: BertModel (Bert model)
- isInstance of `xlnet` configuration class: XLNetModel (XLNet model)
- isInstance of `distilbert` configuration class: DistilBertModel (DistilBert model)
                - isInstance of `roberta` configuration class: RobertaModel (Roberta model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = TFAutoModelForTokenClassification.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
for config_class, model_class in TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of TFAutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the question answering model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
            - contains `distilbert`: TFDistilBertForTokenClassification (DistilBERT model)
            - contains `roberta`: TFRobertaForTokenClassification (RoBERTa model)
            - contains `bert`: TFBertForTokenClassification (Bert model)
            - contains `xlnet`: TFXLNetForTokenClassification (XLNet model)
        These are TensorFlow (Keras) models, so there is no `model.eval()` / `model.train()` switch:
        dropout and similar layers are only active when the model is called with ``training=True``.
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                    - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                    - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = TFAutoModelForTokenClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = TFAutoModelForTokenClassification.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = TFAutoModelForTokenClassification.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = TFAutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of TFAutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
)
)
| 68,490 | 61.663312 | 472 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import logging
import os
import typing
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
WEIGHTS_NAME,
cached_path,
hf_bucket_url,
is_remote_url,
)
logger = logging.getLogger(__name__)
try:
from torch.nn import Identity
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
"""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
class ModuleUtilsMixin:
"""
A few utilities for torch.nn.Modules, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get number of (optionally, trainable) parameters in the module.
"""
params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters()
return sum(p.numel() for p in params)
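# Illustrative usage sketch for `num_parameters` (the model name below is only an
# example; any concrete subclass such as `BertModel` works the same way):
#
#     from transformers import BertModel
#     model = BertModel.from_pretrained('bert-base-uncased')
#     total = model.num_parameters()
#     trainable = model.num_parameters(only_trainable=True)
#     print('trainable / total parameters: {} / {}'.format(trainable, total))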
@staticmethod
def _hook_rss_memory_pre_forward(module, *args, **kwargs):
try:
import psutil
except (ImportError):
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_pre_forward = mem.rss
return None
@staticmethod
def _hook_rss_memory_post_forward(module, *args, **kwargs):
try:
import psutil
except (ImportError):
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_post_forward = mem.rss
mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
return None
def add_memory_hooks(self):
""" Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.
Increase in memory consumption is stored in a `mem_rss_diff` attribute for each module and can be reset to zero with `model.reset_memory_hooks_state()`
"""
for module in self.modules():
module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
module.register_forward_hook(self._hook_rss_memory_post_forward)
self.reset_memory_hooks_state()
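# Rough usage sketch for the memory hooks above (assumes `psutil` is installed and
# `model` is an already-instantiated subclass of PreTrainedModel):
#
#     model.add_memory_hooks()
#     model(**model.dummy_inputs)                              # one forward pass
#     for name, module in model.named_modules():
#         print(name, getattr(module, 'mem_rss_diff', 0))      # per-module RSS increase in bytes
#     model.reset_memory_hooks_state()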
def reset_memory_hooks_state(self):
for module in self.modules():
module.mem_rss_diff = 0
module.mem_rss_post_forward = 0
module.mem_rss_pre_forward = 0
@property
def device(self):
return next(self.parameters()).device
class PreTrainedModel(nn.Module, ModuleUtilsMixin):
r""" Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models
as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.
- ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
- ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,
- ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,
- ``path``: a path (string) to the TensorFlow checkpoint.
- ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
pretrained_model_archive_map = {}
base_model_prefix = ""
@property
def dummy_inputs(self):
""" Dummy inputs to do a forward pass in the network.
Returns:
torch.Tensor with dummy inputs
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config, *inputs, **kwargs):
super().__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
# Save config in model
self.config = config
@property
def base_model(self):
return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self):
"""
Returns the model's input embeddings.
Returns:
:obj:`nn.Module`:
A torch module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value):
"""
Set model's input embeddings
Args:
value (:obj:`nn.Module`):
A module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
def get_output_embeddings(self):
"""
Returns the model's output embeddings.
Returns:
:obj:`nn.Module`:
A torch module mapping hidden states to vocabulary.
"""
return None # Overwrite for models with output embeddings
def tie_weights(self):
"""
Tie the weights between the input embeddings and the output embeddings.
If the `torchscript` flag is set in the configuration, TorchScript can't handle parameter sharing, so we clone
the weights instead.
"""
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None:
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
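# Quick sanity-check sketch for weight tying (assumes a model with an LM head, e.g.
# BertForMaskedLM, and `config.torchscript == False`, so weights are shared rather than cloned):
#
#     model.tie_weights()
#     assert model.get_output_embeddings().weight is model.get_input_embeddings().weight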
def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
""" Tie or clone module weights depending of weither we are using TorchScript or not
"""
if self.config.torchscript:
output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
else:
output_embeddings.weight = input_embeddings.weight
if getattr(output_embeddings, "bias", None) is not None:
output_embeddings.bias.data = torch.nn.functional.pad(
output_embeddings.bias.data,
(0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],),
"constant",
0,
)
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
output_embeddings.out_features = input_embeddings.num_embeddings
def resize_token_embeddings(self, new_num_tokens=None):
""" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Arguments:
new_num_tokens: (`optional`) int:
New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end.
If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model.
Return: ``torch.nn.Embeddings``
Pointer to the input tokens Embeddings Module of the model
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
self.tie_weights()
return model_embeds
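# Hedged example of resizing the input embeddings after adding tokens to a tokenizer
# (tokenizer/model names are placeholders; the added tokens are arbitrary):
#
#     tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#     model = BertModel.from_pretrained('bert-base-uncased')
#     tokenizer.add_tokens(['[NEW_TOKEN_1]', '[NEW_TOKEN_2]'])
#     model.resize_token_embeddings(len(tokenizer))   # grows the embedding matrix by 2 rows
#     assert model.config.vocab_size == len(tokenizer)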
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
""" Build a resized Embedding Module from a provided token Embedding Module.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: return the provided token Embedding Module.
Return: ``torch.nn.Embeddings``
Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy token embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def init_weights(self):
""" Initialize and prunes weights if needed. """
# Initialize weights
self.apply(self._init_weights)
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
# Tie weights if needed
self.tie_weights()
def prune_heads(self, heads_to_prune):
""" Prunes heads of the base model.
Arguments:
heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).
E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune)
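# Minimal sketch of head pruning (layer/head indices are arbitrary; `model` is assumed
# to be a loaded BERT-style model whose base model implements `_prune_heads`):
#
#     heads_to_prune = {1: [0, 2], 2: [2, 3]}   # layer index -> list of heads to prune
#     model.prune_heads(heads_to_prune)
#     print(model.config.pruned_heads)          # pruned heads are recorded in the config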
def save_pretrained(self, save_directory):
""" Save a model and its configuration file to a directory, so that it
can be re-loaded using the :func:`~transformers.PreTrainedModel.from_pretrained` class method.
"""
assert os.path.isdir(
save_directory
), "Saving path should be a directory where the model and configuration can be saved"
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
logger.info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with ``model.train()``
The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.
It is up to you to train those weights with a downstream fine-tuning task.
The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.
Parameters:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``)
model_args: (`optional`) Sequence of positional arguments:
All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) one of:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`, or
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`
Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
- the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it has been loaded) and to initialize the model (e.g. ``output_attention=True``). Behaves differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
# For example purposes. Not runnable.
model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_tf` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"],
pretrained_model_name_or_path,
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path, postfix=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME),
)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
except EnvironmentError:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
msg = "Couldn't reach server at '{}' to download pretrained weights.".format(archive_file)
else:
msg = (
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url to model weight files named one of {} but "
"couldn't find any such file at this path or url.".format(
pretrained_model_name_or_path,
", ".join(cls.pretrained_model_archive_map.keys()),
archive_file,
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME],
)
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
else:
resolved_archive_file = None
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if state_dict is None and not from_tf:
try:
state_dict = torch.load(resolved_archive_file, map_location="cpu")
except Exception:
raise OSError(
"Unable to load weights from pytorch checkpoint file. "
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
)
missing_keys = []
unexpected_keys = []
error_msgs = []
if from_tf:
if resolved_archive_file.endswith(".index"):
# Load from a TensorFlow 1.X checkpoint - provided by original authors
model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'
else:
# Load from our TensorFlow 2.0 checkpoints
try:
from transformers import load_tf2_checkpoint_in_pytorch_model
model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
else:
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: nn.Module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ""
model_to_load = model
if not hasattr(model, cls.base_model_prefix) and any(
s.startswith(cls.base_model_prefix) for s in state_dict.keys()
):
start_prefix = cls.base_model_prefix + "."
if hasattr(model, cls.base_model_prefix) and not any(
s.startswith(cls.base_model_prefix) for s in state_dict.keys()
):
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if model.__class__.__name__ != model_to_load.__class__.__name__:
base_model_state_dict = model_to_load.state_dict().keys()
head_model_state_dict_without_base_prefix = [
key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
]
missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys
)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys
)
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(error_msgs)
)
)
model.tie_weights() # make sure token embedding weights are still tied if needed
# Set model in evaluation mode to deactivate Dropout modules by default
model.eval()
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"error_msgs": error_msgs,
}
return model, loading_info
return model
def prepare_inputs_for_generation(self, input_ids, **kwargs):
return {"input_ids": input_ids}
def prepare_scores_for_generation(self, scores, **kwargs):
return scores
def _do_output_past(self, outputs):
"""During generation, decide whether to pass the `past` variable to the next forward pass."""
has_output_past = getattr(self.config, "output_past", False)
mem_len = getattr(self.config, "mem_len", 0)
if len(outputs) <= 1:
return False
if mem_len > 0 or has_output_past:
return True
return False
def enforce_repetition_penalty_(self, lprobs, batch_size, num_beams, prev_output_tokens, repetition_penalty):
"""repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858). """
for i in range(batch_size * num_beams):
for previous_token in set(prev_output_tokens[i].tolist()):
# if score < 0 then the repetition penalty has to be multiplied to reduce the previous token probability
if lprobs[i, previous_token] < 0:
lprobs[i, previous_token] *= repetition_penalty
else:
lprobs[i, previous_token] /= repetition_penalty
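# Worked sketch of the repetition penalty above: with a penalty of 1.2, a previously
# generated token's score is divided by 1.2 when positive and multiplied by 1.2 when
# negative, so the token always becomes less likely (values below are arbitrary):
#
#     lprobs = torch.tensor([[2.0, -1.0, 0.5]])     # scores over a toy vocabulary of 3 tokens
#     prev_output_tokens = torch.tensor([[0, 1]])   # tokens 0 and 1 were already generated
#     model.enforce_repetition_penalty_(lprobs, 1, 1, prev_output_tokens, 1.2)
#     # lprobs is modified in place to [[2.0 / 1.2, -1.0 * 1.2, 0.5]]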
@torch.no_grad()
def generate(
self,
input_ids=None,
max_length=None,
min_length=None,
do_sample=None,
early_stopping=None,
num_beams=None,
temperature=None,
top_k=None,
top_p=None,
repetition_penalty=None,
bad_words_ids=None,
bos_token_id=None,
pad_token_id=None,
eos_token_id=None,
length_penalty=None,
no_repeat_ngram_size=None,
num_return_sequences=None,
attention_mask=None,
decoder_start_token_id=None,
):
r""" Generates sequences for models with a LM head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
Adapted in part from `Facebook's XLM beam search code`_.
.. _`Facebook's XLM beam search code`:
https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529
Parameters:
input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`
The sequence used as a prompt for the generation. If `None` the method initializes
it with `bos_token_id` and a batch size of 1.
max_length: (`optional`) int
The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20.
min_length: (`optional`) int
The min length of the sequence to be generated. Between 0 and infinity. Default to 0.
do_sample: (`optional`) bool
If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
early_stopping: (`optional`) bool
if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
num_beams: (`optional`) int
Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.
temperature: (`optional`) float
The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.
top_k: (`optional`) int
The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
top_p: (`optional`) float
The cumulative probability threshold for nucleus sampling: only the most probable vocabulary tokens with probabilities that add up to `top_p` or higher are kept for generation. Must be between 0 and 1. Default to 1.
repetition_penalty: (`optional`) float
The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.
pad_token_id: (`optional`) int
Padding token. Defaults to the model-specific pad_token_id or None if it does not exist.
bos_token_id: (`optional`) int
BOS token. Defaults to `bos_token_id` as defined in the models config.
eos_token_id: (`optional`) int
EOS token. Defaults to `eos_token_id` as defined in the models config.
length_penalty: (`optional`) float
Exponential penalty to the length. Default to 1.
no_repeat_ngram_size: (`optional`) int
If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.
bad_words_ids: (`optional`) list of lists of int
`bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences: (`optional`) int
The number of independently computed returned sequences for each element in the batch. Default to 1.
attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
Defaults to `None`.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_start_token_id: (`optional`) int
If an encoder-decoder model starts decoding with a different token than BOS.
Defaults to `None` and is changed to `BOS` later.
Return:
output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`
sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`
Examples::
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
outputs = model.generate(max_length=40) # do greedy decoding
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, do_sample=True, num_return_sequences=3)  # generate 3 independent sequences by sampling
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache.
input_context = 'My cute dog'
bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated
"""
# We cannot generate if the model does not have a LM head
if self.get_output_embeddings() is None:
raise AttributeError(
"You tried to generate sequences with a model that does not have a LM Head."
"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
)
max_length = max_length if max_length is not None else self.config.max_length
min_length = min_length if min_length is not None else self.config.min_length
do_sample = do_sample if do_sample is not None else self.config.do_sample
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
num_beams = num_beams if num_beams is not None else self.config.num_beams
temperature = temperature if temperature is not None else self.config.temperature
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
no_repeat_ngram_size = (
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
)
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
decoder_start_token_id = (
decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
)
if input_ids is not None:
batch_size = input_ids.shape[0]  # overridden by the input batch_size
else:
batch_size = 1
assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
assert temperature > 0, "`temperature` should be strictly positive."
assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert input_ids is not None or (
isinstance(bos_token_id, int) and bos_token_id >= 0
), "If input_ids is not defined, `bos_token_id` should be a positive integer."
assert pad_token_id is None or (
isinstance(pad_token_id, int) and (pad_token_id >= 0)
), "`pad_token_id` should be a positive integer."
assert (eos_token_id is None) or (
isinstance(eos_token_id, int) and (eos_token_id >= 0)
), "`eos_token_id` should be a positive integer."
assert length_penalty > 0, "`length_penalty` should be strictly positive."
assert (
isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
), "`no_repeat_ngram_size` should be a positive integer."
assert (
isinstance(num_return_sequences, int) and num_return_sequences > 0
), "`num_return_sequences` should be a strictly positive integer."
assert (
bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)
), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
if input_ids is None:
assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
"you should either supply a context to complete as `input_ids` input "
"or a `bos_token_id` (integer >= 0) as a first token to start the generation."
)
input_ids = torch.full(
(batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,
)
else:
assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
# do not allow duplicate outputs when doing greedy decoding
if do_sample is False:
if num_beams == 1:
# no_beam_search greedy generation conditions
assert (
num_return_sequences == 1
), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
else:
# beam_search greedy generation conditions
assert (
num_beams >= num_return_sequences
), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
# create attention mask if necessary
# TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):
attention_mask = input_ids.ne(pad_token_id).long()
elif attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
# set pad_token_id to eos_token_id if not set. Important that this is done after
# attention_mask is created
if pad_token_id is None and eos_token_id is not None:
logger.warning(
"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id)
)
pad_token_id = eos_token_id
# current position and vocab size
vocab_size = self.config.vocab_size
# set effective batch size and effective batch multiplier according to do_sample
if do_sample:
effective_batch_size = batch_size * num_return_sequences
effective_batch_mult = num_return_sequences
else:
effective_batch_size = batch_size
effective_batch_mult = 1
if self.config.is_encoder_decoder:
if decoder_start_token_id is None:
decoder_start_token_id = bos_token_id
assert (
decoder_start_token_id is not None
), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self)
assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder)
# get encoder and store encoder outputs
encoder = self.get_encoder()
encoder_outputs = encoder(input_ids, attention_mask=attention_mask)
# Expand input ids if num_beams > 1 or num_return_sequences > 1
if num_return_sequences > 1 or num_beams > 1:
input_ids_len = input_ids.shape[-1]
input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)
attention_mask = attention_mask.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, input_ids_len
)
input_ids = input_ids.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
attention_mask = attention_mask.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
if self.config.is_encoder_decoder:
# create empty decoder_input_ids
input_ids = torch.full(
(effective_batch_size * num_beams, 1),
decoder_start_token_id,
dtype=torch.long,
device=next(self.parameters()).device,
)
cur_len = 1
assert (
batch_size == encoder_outputs[0].shape[0]
), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
# expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
expanded_batch_idxs = (
torch.arange(batch_size)
.view(-1, 1)
.repeat(1, num_beams * effective_batch_mult)
.view(-1)
.to(input_ids.device)
)
# expand encoder_outputs
encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])
else:
encoder_outputs = None
cur_len = input_ids.shape[-1]
if num_beams > 1:
output = self._generate_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
decoder_start_token_id=decoder_start_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
vocab_size=vocab_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
)
else:
output = self._generate_no_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
decoder_start_token_id=decoder_start_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
)
return output
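# Additional hedged example for `generate`: combining beam search with n-gram blocking
# and a length penalty (model/tokenizer are placeholders for any LM-head model exposed
# by the library; parameter values are illustrative only):
#
#     input_ids = tokenizer.encode('The quick brown fox', return_tensors='pt')
#     outputs = model.generate(input_ids=input_ids, max_length=60, num_beams=4,
#                              no_repeat_ngram_size=3, length_penalty=2.0, early_stopping=True)
#     print(tokenizer.decode(outputs[0], skip_special_tokens=True))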
def _generate_no_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
bos_token_id,
pad_token_id,
eos_token_id,
decoder_start_token_id,
batch_size,
encoder_outputs,
attention_mask,
):
""" Generate sequences for each example without beam search (num_beams == 1).
All returned sequences are generated independently.
"""
# length of generated sentences / unfinished sentences
unfinished_sents = input_ids.new(batch_size).fill_(1)
sent_lengths = input_ids.new(batch_size).fill_(max_length)
past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(input_ids, past=past, attention_mask=attention_mask)
outputs = self(**model_inputs)
next_token_logits = outputs[0][:, -1, :]
# if model has past, then set the past variable to speed up decoding
if self._do_output_past(outputs):
past = outputs[1]
# repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
self.enforce_repetition_penalty_(next_token_logits, batch_size, 1, input_ids, repetition_penalty)
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_tokens = calc_banned_ngram_tokens(input_ids, batch_size, no_repeat_ngram_size, cur_len)
for batch_idx in range(batch_size):
next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float("inf")
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
for batch_idx in range(batch_size):
next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float("inf")
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
next_token_logits[:, eos_token_id] = -float("inf")
if do_sample:
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
# Top-p/top-k filtering
next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
# Sample
probs = F.softmax(next_token_logits, dim=-1)
next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
else:
# Greedy decoding
next_token = torch.argmax(next_token_logits, dim=-1)
# update generations and finished sentences
if eos_token_id is not None:
# pad finished sentences if eos_token_id exist
tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)
else:
tokens_to_add = next_token
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
if eos_token_id is not None:
eos_in_sents = tokens_to_add == eos_token_id
# if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool()
sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len + 1)
# unfinished_sents is set to zero if eos in sentence
unfinished_sents.mul_((~eos_in_sents).long())
# stop when there is a </s> in each sentence, or if we exceed the maximum length
if unfinished_sents.max() == 0:
break
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
cur_len = cur_len + 1
# if there are different sentence lengths in the batch, some sentences have to be padded
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`Pad_token_id` has to be defined if batches have different lengths"
# finished sents are filled with pad_token
decoded = input_ids.new(batch_size, sent_lengths.max().item()).fill_(pad_token_id)
else:
decoded = input_ids
for hypo_idx, hypo in enumerate(input_ids):
decoded[hypo_idx, : sent_lengths[hypo_idx]] = hypo[: sent_lengths[hypo_idx]]
return decoded
def _generate_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
bos_token_id,
pad_token_id,
eos_token_id,
decoder_start_token_id,
batch_size,
num_return_sequences,
length_penalty,
num_beams,
vocab_size,
encoder_outputs,
attention_mask,
):
""" Generate sequences for each example with beam search.
"""
# generated hypotheses
generated_hyps = [
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)
for _ in range(batch_size)
]
# scores for each sentence in the beam
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
# for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens in every beam
if do_sample is False:
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
# cache compute states
past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models
# done sentences
done = [False for _ in range(batch_size)]
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(input_ids, past=past, attention_mask=attention_mask)
outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)
next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)
# if model has past, then set the past variable to speed up decoding
if self._do_output_past(outputs):
past = outputs[1]
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
self.enforce_repetition_penalty_(
next_token_logits, batch_size, num_beams, input_ids, repetition_penalty,
)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
if self.config.is_encoder_decoder and do_sample is False:
# TODO (PVP) still a bit hacky here - there might be a better solution
scores = self.prepare_scores_for_generation(scores, cur_len=cur_len, max_length=max_length)
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
scores[:, eos_token_id] = -float("inf")
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
num_batch_hypotheses = batch_size * num_beams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_batch_tokens = calc_banned_ngram_tokens(
input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len
)
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -float("inf")
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
for i, banned_tokens in enumerate(banned_tokens):
scores[i, banned_tokens] = -float("inf")
assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format(
scores.shape, (batch_size * num_beams, vocab_size)
)
if do_sample:
_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# Top-p/top-k filtering
_scores = top_k_top_p_filtering(
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together to sample from all beam_idxs
_scores = _scores.contiguous().view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
probs = F.softmax(_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)
# Compute next scores
next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)
# sort the sampled vector to make sure that the first num_beams samples are the best
next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)
else:
next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together (we are keeping top hypothesis across beams)
next_scores = next_scores.view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)
assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
# next batch beam content
next_batch_beam = []
# for each sentence
for batch_idx in range(batch_size):
# if we are done with this sentence
if done[batch_idx]:
assert (
len(generated_hyps[batch_idx]) >= num_beams
), "Batch can only be done if at least {} beams have been generated".format(num_beams)
assert (
eos_token_id is not None and pad_token_id is not None
), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch
continue
# next sentence beam content
next_sent_beam = []
# next tokens for this sentence
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx])
):
# get beam and token IDs
beam_id = beam_token_id // vocab_size
token_id = beam_token_id % vocab_size
effective_beam_id = batch_idx * num_beams + beam_id
# add to generated hypotheses if end of sentence or last iteration
if (eos_token_id is not None) and (token_id.item() == eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams
if is_beam_token_worse_than_top_num_beams:
continue
generated_hyps[batch_idx].add(
input_ids[effective_beam_id].clone(), beam_token_score.item(),
)
else:
# add next predicted token if it is not eos_token
next_sent_beam.append((beam_token_score, token_id, effective_beam_id))
# the beam for next step is full
if len(next_sent_beam) == num_beams:
break
# Check if we're done so that we can save a pad step if all(done)
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
next_scores[batch_idx].max().item(), cur_len=cur_len
)
# update next beam content
assert len(next_sent_beam) == num_beams, "Beam should always be full"
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == num_beams * (batch_idx + 1)
# stop when we are done with each sentence
if all(done):
break
# sanity check / prepare next batch
assert len(next_batch_beam) == batch_size * num_beams
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_tokens = input_ids.new([x[1] for x in next_batch_beam])
beam_idx = input_ids.new([x[2] for x in next_batch_beam])
# re-order batch
input_ids = input_ids[beam_idx, :]
input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)
# re-order internal states
if past is not None:
past = self._reorder_cache(past, beam_idx)
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
# update current length
cur_len = cur_len + 1
# finalize all open beam hypotheses and add them to generated hypotheses
for batch_idx in range(batch_size):
if done[batch_idx]:
continue
# test that beam scores match previously calculated scores if not eos and batch_idx not done
if eos_token_id is not None and all(
(token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx]
):
assert torch.all(
next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]
), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format(
next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx],
)
# need to add best num_beams hypotheses to generated hyps
for beam_id in range(num_beams):
effective_beam_id = batch_idx * num_beams + beam_id
final_score = beam_scores[effective_beam_id].item()
final_tokens = input_ids[effective_beam_id]
generated_hyps[batch_idx].add(final_tokens, final_score)
# depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
output_batch_size = batch_size if do_sample else batch_size * num_return_sequences
output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
# select the best hypotheses
sent_lengths = input_ids.new(output_batch_size)
best = []
# retrieve best hypotheses
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
for j in range(output_num_return_sequences_per_batch):
effective_batch_idx = output_num_return_sequences_per_batch * i + j
best_hyp = sorted_hyps.pop()[1]
sent_lengths[effective_batch_idx] = len(best_hyp)
best.append(best_hyp)
# shorter batches are filled with pad_token
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`Pad_token_id` has to be defined"
sent_max_len = min(sent_lengths.max().item() + 1, max_length)
decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id)
# fill with hypothesis and eos_token_id if necessary
for i, hypo in enumerate(best):
decoded[i, : sent_lengths[i]] = hypo
if sent_lengths[i] < max_length:
decoded[i, sent_lengths[i]] = eos_token_id
else:
# none of the hypotheses have an eos_token
assert all(len(hypo) == max_length for hypo in best)
decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device)
return decoded
# force one of token_ids to be generated by setting prob of all other tokens to 0.
def _force_token_ids_generation(self, scores, token_ids):
if isinstance(token_ids, int):
token_ids = [token_ids]
all_but_token_ids_mask = torch.tensor(
[x for x in range(self.config.vocab_size) if x not in token_ids],
dtype=torch.long,
device=next(self.parameters()).device,
)
assert len(scores.shape) == 2, "scores should be of rank 2 with shape: [batch_size, vocab_size]"
scores[:, all_but_token_ids_mask] = -float("inf")
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = []
for layer_past in past:
# get the correct batch idx from layer past batch dim
# batch dim of `past` and `mems` is at 2nd position
reordered_layer_past = [layer_past[:, i].unsqueeze(1).clone().detach() for i in beam_idx]
reordered_layer_past = torch.cat(reordered_layer_past, dim=1)
# check that shape matches
assert reordered_layer_past.shape == layer_past.shape
reordered_past.append(reordered_layer_past)
past = tuple(reordered_past)
return past
def calc_banned_ngram_tokens(prev_input_ids, num_hypos, no_repeat_ngram_size, cur_len):
# Copied from fairseq for no_repeat_ngram in beam_search
if cur_len + 1 < no_repeat_ngram_size:
# return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
return [[] for _ in range(num_hypos)]
generated_ngrams = [{} for _ in range(num_hypos)]
for idx in range(num_hypos):
gen_tokens = prev_input_ids[idx].tolist()
generated_ngram = generated_ngrams[idx]
for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]):
prev_ngram_tuple = tuple(ngram[:-1])
generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
def _get_generated_ngrams(hypo_idx):
# Before decoding the next token, prevent decoding of ngrams that have already appeared
start_idx = cur_len + 1 - no_repeat_ngram_size
ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].tolist())
return generated_ngrams[hypo_idx].get(ngram_idx, [])
banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)]
return banned_tokens
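# Minimal usage sketch (hypothetical `_example_*` helper and sample values, not part of the
# upstream API): the no-repeat-ngram constraint bans any token that would recreate an
# already generated bigram.
def _example_calc_banned_ngram_tokens():
    import torch

    # The hypothesis already produced [5, 6, 5]; the bigram (5, 6) exists, so after the
    # trailing 5 the token 6 must be banned.
    prev_input_ids = torch.tensor([[5, 6, 5]])
    banned = calc_banned_ngram_tokens(prev_input_ids, num_hypos=1, no_repeat_ngram_size=2, cur_len=3)
    assert banned == [[6]]
    return banned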
def calc_banned_bad_words_ids(prev_input_ids, bad_words_ids):
banned_tokens = []
def _tokens_match(prev_tokens, tokens):
if len(tokens) == 0:
# if the bad word sequence is a single token, always ban it
return True
if len(tokens) > len(prev_tokens):
# if bad word tokens are longer than the previous tokens they can't be equal
return False
if prev_tokens[-len(tokens) :] == tokens:
# if tokens match
return True
else:
return False
for prev_input_ids_slice in prev_input_ids:
banned_tokens_slice = []
for banned_token_seq in bad_words_ids:
assert len(banned_token_seq) > 0, "Banned words token sequences {} cannot have an empty list".format(
bad_words_ids
)
if _tokens_match(prev_input_ids_slice.tolist(), banned_token_seq[:-1]) is False:
# if tokens do not match continue
continue
banned_tokens_slice.append(banned_token_seq[-1])
banned_tokens.append(banned_tokens_slice)
return banned_tokens
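# Minimal usage sketch (hypothetical `_example_*` helper and sample values, not part of the
# upstream API): single-token bad words are always banned, multi-token ones only when their
# prefix was just generated.
def _example_calc_banned_bad_words_ids():
    import torch

    prev_input_ids = torch.tensor([[1, 2, 3], [1, 2, 7]])
    bad_words_ids = [[9], [3, 4]]  # ban token 9 everywhere, ban 4 only right after a 3
    banned = calc_banned_bad_words_ids(prev_input_ids, bad_words_ids)
    assert banned == [[9, 4], [9]]
    return banned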
def top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size, vocabulary size)
if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
Make sure we keep at least min_tokens_to_keep per batch example in the output
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
if top_k > 0:
top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p < 1.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold (tokens marked 0 are kept)
sorted_indices_to_remove = cumulative_probs > top_p
if min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
logits[indices_to_remove] = filter_value
return logits
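# Minimal usage sketch (hypothetical `_example_*` helper and sample values, not part of the
# upstream API): filter logits down to the two most likely tokens, renormalize, and sample.
def _example_top_k_top_p_filtering():
    import torch
    import torch.nn.functional as F

    logits = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
    filtered = top_k_top_p_filtering(logits.clone(), top_k=2)  # positions 0 and 1 become -inf
    probs = F.softmax(filtered, dim=-1)
    next_token = torch.multinomial(probs, num_samples=1)
    return next_token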
class BeamHypotheses(object):
def __init__(self, num_beams, max_length, length_penalty, early_stopping):
"""
Initialize n-best list of hypotheses.
"""
self.max_length = max_length - 1 # ignoring bos_token
self.length_penalty = length_penalty
self.early_stopping = early_stopping
self.num_beams = num_beams
self.beams = []
self.worst_score = 1e9
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.beams)
def add(self, hyp, sum_logprobs):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / len(hyp) ** self.length_penalty
if len(self) < self.num_beams or score > self.worst_score:
self.beams.append((score, hyp))
if len(self) > self.num_beams:
sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)])
del self.beams[sorted_scores[0][1]]
self.worst_score = sorted_scores[1][0]
else:
self.worst_score = min(score, self.worst_score)
def is_done(self, best_sum_logprobs, cur_len=None):
"""
If there are enough hypotheses and none of the hypotheses being generated
can become better than the worst one in the heap, then we are done with this sentence.
"""
if len(self) < self.num_beams:
return False
elif self.early_stopping:
return True
else:
if cur_len is None:
cur_len = self.max_length
cur_score = best_sum_logprobs / cur_len ** self.length_penalty
ret = self.worst_score >= cur_score
return ret
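# Minimal usage sketch (hypothetical `_example_*` helper and sample values, not part of the
# upstream API): the n-best list keeps at most `num_beams` hypotheses and silently drops
# candidates that score worse than the current worst kept beam.
def _example_beam_hypotheses():
    import torch

    hyps = BeamHypotheses(num_beams=2, max_length=10, length_penalty=1.0, early_stopping=False)
    hyps.add(torch.tensor([1, 2, 3]), sum_logprobs=-1.0)
    hyps.add(torch.tensor([1, 2, 4]), sum_logprobs=-2.0)
    hyps.add(torch.tensor([1, 2, 5]), sum_logprobs=-5.0)  # worse than both kept beams, discarded
    assert len(hyps) == 2
    return hyps.beams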
class Conv1D(nn.Module):
def __init__(self, nf, nx):
""" Conv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)
Basically works like a Linear layer but the weights are transposed
"""
super().__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = nn.Parameter(w)
self.bias = nn.Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
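# Minimal usage sketch (hypothetical `_example_*` helper and sample values, not part of the
# upstream API): Conv1D(nf, nx) behaves like nn.Linear(nx, nf) with a transposed weight layout.
def _example_conv1d():
    import torch

    layer = Conv1D(nf=6, nx=4)
    x = torch.randn(2, 3, 4)      # (batch, seq_len, nx)
    y = layer(x)
    assert y.shape == (2, 3, 6)   # (batch, seq_len, nf)
    return y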
class PoolerStartLogits(nn.Module):
""" Compute SQuAD start_logits from sequence hidden states. """
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states, p_mask=None):
""" Args:
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)`
invalid position mask such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
"""
x = self.dense(hidden_states).squeeze(-1)
if p_mask is not None:
if next(self.parameters()).dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
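# Minimal usage sketch (hypothetical `_example_*` helper; the bare-bones SimpleNamespace config
# stand-in is an assumption, any object exposing `hidden_size` works): project hidden states to
# per-token start logits and mask out padding positions.
def _example_pooler_start_logits():
    import torch
    from types import SimpleNamespace

    config = SimpleNamespace(hidden_size=8)
    pooler = PoolerStartLogits(config)
    hidden_states = torch.randn(2, 5, 8)                  # (batch, seq_len, hidden)
    p_mask = torch.zeros(2, 5)
    p_mask[:, -1] = 1.0                                   # mask out the last position
    start_logits = pooler(hidden_states, p_mask=p_mask)   # (batch, seq_len)
    return start_logits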
class PoolerEndLogits(nn.Module):
""" Compute SQuAD end_logits from sequence hidden states and start token hidden state.
"""
def __init__(self, config):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense_1 = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None):
""" Args:
One of ``start_states``, ``start_positions`` should be not None.
If both are set, ``start_positions`` overrides ``start_states``.
**start_states**: ``torch.FloatTensor`` of shape identical to hidden_states
hidden states of the first tokens for the labeled span.
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span:
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
"""
assert (
start_states is not None or start_positions is not None
), "One of start_states, start_positions should be not None"
if start_positions is not None:
slen, hsz = hidden_states.shape[-2:]
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz)
start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz)
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x).squeeze(-1)
if p_mask is not None:
if next(self.parameters()).dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerAnswerClass(nn.Module):
""" Compute SQuAD 2.0 answer class from classification and start tokens hidden states. """
def __init__(self, config):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None):
"""
Args:
One of ``start_states``, ``start_positions`` should be not None.
If both are set, ``start_positions`` overrides ``start_states``.
**start_states**: ``torch.FloatTensor`` of shape identical to ``hidden_states``.
hidden states of the first tokens for the labeled span.
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span.
**cls_index**: torch.LongTensor of shape ``(batch_size,)``
position of the CLS token. If None, take the last token.
note(Original repo):
no dependency on end_feature so that we can obtain one single `cls_logits`
for each sample
"""
hsz = hidden_states.shape[-1]
assert (
start_states is not None or start_positions is not None
), "One of start_states, start_positions should be not None"
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz)
else:
cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz)
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x
class SQuADHead(nn.Module):
r""" A SQuAD head inspired by XLNet.
Parameters:
config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Inputs:
**hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)``
hidden states of sequence tokens
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span.
**end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the last token for the labeled span.
**cls_index**: torch.LongTensor of shape ``(batch_size,)``
position of the CLS token. If None, take the last token.
**is_impossible**: ``torch.LongTensor`` of shape ``(batch_size,)``
Whether the question has a possible answer in the paragraph or not.
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
**start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
**start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
Indices for the top config.start_n_top start token possibilities (beam-search).
**end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
**end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
**cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size,)``
Log probabilities for the ``is_impossible`` label of the answers.
"""
def __init__(self, config):
super().__init__()
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
def forward(
self, hidden_states, start_positions=None, end_positions=None, cls_index=None, is_impossible=None, p_mask=None,
):
outputs = ()
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
outputs = (total_loss,) + outputs
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(
start_log_probs, self.start_n_top, dim=-1
) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
start_states
) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(
end_log_probs, self.end_n_top, dim=1
) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)
outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits,) + outputs
# return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
# or (if labels are provided) (total_loss,)
return outputs
class SequenceSummary(nn.Module):
r""" Compute a single vector summary of a sequence's hidden states according to various possibilities:
Args of the config class:
summary_type:
- 'last' => [default] take the last token hidden state (like XLNet)
- 'first' => take the first token hidden state (like Bert)
- 'mean' => take the mean of all tokens hidden states
- 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)
- 'attn' => Not implemented now, use multi-head attention
summary_use_proj: Add a projection after the vector extraction
summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.
summary_activation: 'tanh' or another activation string => add the corresponding activation to the output, any other value => no activation. Default: no activation.
summary_first_dropout: Add a dropout before the projection and activation
summary_last_dropout: Add a dropout after the projection and activation
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.summary_type = getattr(config, "summary_type", "last")
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = Identity()
if hasattr(config, "summary_use_proj") and config.summary_use_proj:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
activation_string = getattr(config, "summary_activation", None)
self.activation = (
get_activation(activation_string) if activation_string else Identity()
) # type: typing.Callable
self.first_dropout = Identity()
if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = Identity()
if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(self, hidden_states, cls_index=None):
""" hidden_states: float Tensor in shape [bsz, ..., seq_len, hidden_size], the hidden-states of the last layer.
cls_index: [optional] position of the classification token if summary_type == 'cls_index',
shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
if summary_type == 'cls_index' and cls_index is None:
we take the last token of the sequence as classification token
"""
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = hidden_states.mean(dim=1)
elif self.summary_type == "cls_index":
if cls_index is None:
cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long,)
else:
cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
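# Minimal usage sketch (hypothetical `_example_*` helper; the bare-bones SimpleNamespace config
# stand-in is an assumption): with no summary options set, the default summary_type "last"
# simply picks the hidden state of the final token.
def _example_sequence_summary():
    import torch
    from types import SimpleNamespace

    config = SimpleNamespace(hidden_size=8)
    summary = SequenceSummary(config)
    hidden_states = torch.randn(2, 5, 8)
    pooled = summary(hidden_states)
    assert torch.equal(pooled, hidden_states[:, -1])
    return pooled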
def create_position_ids_from_input_ids(input_ids, padding_idx):
""" Replace non-padding symbols with their position numbers. Position numbers begin at
padding_idx+1. Padding symbols are ignored. This is modified from fairseq's
`utils.make_positions`.
:param torch.Tensor input_ids:
:param int padding_idx:
:return torch.Tensor:
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
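# Minimal usage sketch (hypothetical `_example_*` helper and sample values, not part of the
# upstream API): real tokens get positions padding_idx + 1, padding_idx + 2, ... while padding
# positions keep padding_idx itself.
def _example_create_position_ids():
    import torch

    input_ids = torch.tensor([[5, 6, 7, 1, 1]])  # 1 is the padding token here
    position_ids = create_position_ids_from_input_ids(input_ids, padding_idx=1)
    assert position_ids.tolist() == [[2, 3, 4, 1, 1]]
    return position_ids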
def prune_linear_layer(layer, index, dim=0):
""" Prune a linear layer (a model parameter) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
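# Minimal usage sketch (hypothetical `_example_*` helper and sample values, not part of the
# upstream API): keeping output units 0 and 2 of a Linear(4, 3) reproduces exactly those rows
# of the original layer's output.
def _example_prune_linear_layer():
    import torch
    from torch import nn

    layer = nn.Linear(4, 3)
    index = torch.tensor([0, 2])
    pruned = prune_linear_layer(layer, index, dim=0)
    x = torch.randn(1, 4)
    assert torch.allclose(pruned(x), layer(x)[:, index])
    return pruned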
def prune_conv1d_layer(layer, index, dim=1):
""" Prune a Conv1D layer (a model parameter) to keep only entries in index.
A Conv1D works as a Linear layer (see e.g. BERT) but the weights are transposed.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if dim == 0:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
def prune_layer(layer, index, dim=None):
""" Prune a Conv1D or nn.Linear layer (a model parameter) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
if isinstance(layer, nn.Linear):
return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
elif isinstance(layer, Conv1D):
return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
else:
raise ValueError("Can't prune layer of class {}".format(layer.__class__))
| 97,902 | 48.34627 | 472 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/benchmark_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import linecache
import logging
import os
import sys
from collections import defaultdict
from typing import Iterable, List, NamedTuple, Optional, Union
from .file_utils import is_tf_available, is_torch_available
if is_torch_available():
from torch.cuda import empty_cache as torch_empty_cache
if is_tf_available():
from tensorflow.python.eager import context as tf_context
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
_is_memory_tracing_enabled = False
def is_memory_tracing_enabled():
global _is_memory_tracing_enabled
return _is_memory_tracing_enabled
class Frame(NamedTuple):
""" `Frame` is a NamedTuple used to gather the current frame state.
`Frame` has the following fields:
- 'filename' (string): Name of the file currently executed
- 'module' (string): Name of the module currently executed
- 'line_number' (int): Number of the line currently executed
- 'event' (string): Event that triggered the tracing (default will be "line")
- 'line_text' (string): Text of the line in the python script
"""
filename: str
module: str
line_number: int
event: str
line_text: str
class UsedMemoryState(NamedTuple):
""" `UsedMemoryState` are named tuples with the following fields:
- 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file, location in current file)
- 'cpu_memory': CPU RSS memory state *before* executing the line
- 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if provided)
"""
frame: Frame
cpu_memory: int
gpu_memory: int
class Memory(NamedTuple):
""" `Memory` NamedTuple has a single field `bytes`;
you can get a human readable string of the number of bytes by calling `__repr__`
- `bytes` (integer): number of bytes,
"""
bytes: int
def __repr__(self) -> str:
return bytes_to_human_readable(self.bytes)
class MemoryState(NamedTuple):
""" `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
- `frame` (`Frame`): the current frame (see above)
- `cpu`: CPU memory consumed during the current frame as a `Memory` named tuple
- `gpu`: GPU memory consumed during the current frame as a `Memory` named tuple
- `cpu_gpu`: CPU + GPU memory consumed during the current frame as a `Memory` named tuple
"""
frame: Frame
cpu: Memory
gpu: Memory
cpu_gpu: Memory
class MemorySummary(NamedTuple):
""" `MemorySummary` namedtuple with the following fields:
- `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace`
by subtracting the memory before executing each line from the memory after executing it.
- `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line
obtained by summing repeated memory increases for a line if it's executed several times.
The list is sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory is released)
- `total`: total memory increase during the full tracing as a `Memory` named tuple (see below).
Lines with memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
"""
sequential: List[MemoryState]
cumulative: List[MemoryState]
total: Memory
MemoryTrace = List[UsedMemoryState]
def start_memory_tracing(
modules_to_trace: Optional[Union[str, Iterable[str]]] = None,
modules_not_to_trace: Optional[Union[str, Iterable[str]]] = None,
events_to_trace: str = "line",
gpus_to_trace: Optional[List[int]] = None,
) -> MemoryTrace:
""" Setup line-by-line tracing to record rss mem (RAM) at each line of a module or sub-module.
See `../../examples/benchmarks.py for a usage example.
Current memory consumption is returned using psutil and in particular is the RSS memory
"Resident Set Size" (the non-swapped physical memory the process is using).
See https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info
Args:
- `modules_to_trace`: (None, string, list/tuple of string)
if None, all events are recorded
if string or list of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or 'transformers.modeling_gpt2')
- `modules_not_to_trace`: (None, string, list/tuple of string)
if None, no module is avoided
if string or list of strings: events from the listed module/sub-module will not be recorded (e.g. 'torch')
- `events_to_trace`: string or list of string of events to be recorded (see official python doc for `sys.settrace` for the list of events)
default to line
- `gpus_to_trace`: (optional list, default None) list of GPUs to trace. Default to tracing all GPUs
Return:
- `memory_trace` is a list of `UsedMemoryState` for each event (default each line of the traced script).
- `UsedMemoryState` are named tuples with the following fields:
- 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file, location in current file)
- 'cpu_memory': CPU RSS memory state *before* executing the line
- 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if provided)
`Frame` is a namedtuple used by `UsedMemoryState` to list the current frame state.
`Frame` has the following fields:
- 'filename' (string): Name of the file currently executed
- 'module' (string): Name of the module currently executed
- 'line_number' (int): Number of the line currently executed
- 'event' (string): Event that triggered the tracing (default will be "line")
- 'line_text' (string): Text of the line in the python script
"""
try:
import psutil
except (ImportError):
logger.warning(
"Psutil not installed, we won't log CPU memory usage. "
"Install psutil (pip install psutil) to use CPU memory tracing."
)
process = None
else:
process = psutil.Process(os.getpid())
try:
from py3nvml import py3nvml
py3nvml.nvmlInit()
devices = list(range(py3nvml.nvmlDeviceGetCount())) if gpus_to_trace is None else gpus_to_trace
py3nvml.nvmlShutdown()
except ImportError:
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to use GPU memory tracing."
)
log_gpu = False
except (OSError, py3nvml.NVMLError):
logger.warning("Error while initializing communication with GPU. " "We won't perform GPU memory tracing.")
log_gpu = False
else:
log_gpu = is_torch_available() or is_tf_available()
memory_trace = []
def traceit(frame, event, args):
""" Tracing method executed before running each line in a module or sub-module
Record memory allocated in a list with debugging information
"""
global _is_memory_tracing_enabled
if not _is_memory_tracing_enabled:
return traceit
# Filter events
if events_to_trace is not None:
if isinstance(events_to_trace, str) and event != events_to_trace:
return traceit
elif isinstance(events_to_trace, (list, tuple)) and event not in events_to_trace:
return traceit
# Filter modules
name = frame.f_globals["__name__"]
if not isinstance(name, str):
return traceit
else:
# Filter whitelist of modules to trace
if modules_to_trace is not None:
if isinstance(modules_to_trace, str) and modules_to_trace not in name:
return traceit
elif isinstance(modules_to_trace, (list, tuple)) and all(m not in name for m in modules_to_trace):
return traceit
# Filter blacklist of modules not to trace
if modules_not_to_trace is not None:
if isinstance(modules_not_to_trace, str) and modules_not_to_trace in name:
return traceit
elif isinstance(modules_not_to_trace, (list, tuple)) and any(m in name for m in modules_not_to_trace):
return traceit
# Record current tracing state (file, location in file...)
lineno = frame.f_lineno
filename = frame.f_globals["__file__"]
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
line = linecache.getline(filename, lineno).rstrip()
traced_state = Frame(filename, name, lineno, event, line)
# Record current memory state (rss memory) and compute difference with previous memory state
cpu_mem = 0
if process is not None:
mem = process.memory_info()
cpu_mem = mem.rss
gpu_mem = 0
if log_gpu:
# Clear GPU caches
if is_torch_available():
torch_empty_cache()
if is_tf_available():
tf_context.context()._clear_caches() # See https://github.com/tensorflow/tensorflow/issues/20218#issuecomment-416771802
# Sum used memory for all GPUs
py3nvml.nvmlInit()
for i in devices:
handle = py3nvml.nvmlDeviceGetHandleByIndex(i)
meminfo = py3nvml.nvmlDeviceGetMemoryInfo(handle)
gpu_mem += meminfo.used
py3nvml.nvmlShutdown()
mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem)
memory_trace.append(mem_state)
return traceit
sys.settrace(traceit)
global _is_memory_tracing_enabled
_is_memory_tracing_enabled = True
return memory_trace
def stop_memory_tracing(
memory_trace: Optional[MemoryTrace] = None, ignore_released_memory: bool = True
) -> Optional[MemorySummary]:
""" Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.
Args:
- `memory_trace` (optional output of start_memory_tracing, default: None): memory trace to convert in summary
- `ignore_released_memory` (boolean, default: True): if True we only sum memory increase to compute total memory
Return:
- None if `memory_trace` is None
- `MemorySummary` namedtuple otherwise with the fields:
- `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace`
by subtracting the memory before executing each line from the memory after executing it.
- `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line
obtained by summing repeated memory increases for a line if it's executed several times.
The list is sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory is released)
- `total`: total memory increase during the full tracing as a `Memory` named tuple (see below).
Lines with memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
`Memory` named tuple has a single field
- `bytes` (integer): number of bytes; its `__repr__` returns a human readable string (ex: "3.5MB")
`Frame` are namedtuple used to list the current frame state and have the following fields:
- 'filename' (string): Name of the file currently executed
- 'module' (string): Name of the module currently executed
- 'line_number' (int): Number of the line currently executed
- 'event' (string): Event that triggered the tracing (default will be "line")
- 'line_text' (string): Text of the line in the python script
`MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
- `frame` (`Frame`): the current frame (see above)
- `cpu`: CPU memory consumed during the current frame as a `Memory` named tuple
- `gpu`: GPU memory consumed during the current frame as a `Memory` named tuple
- `cpu_gpu`: CPU + GPU memory consumed during the current frame as a `Memory` named tuple
"""
global _is_memory_tracing_enabled
_is_memory_tracing_enabled = False
if memory_trace is not None and len(memory_trace) > 1:
memory_diff_trace = []
cumulative_memory_dict = defaultdict(lambda: [0, 0, 0])
for (frame, cpu_mem, gpu_mem), (next_frame, next_cpu_mem, next_gpu_mem) in zip(
memory_trace[:-1], memory_trace[1:]
):
cpu_mem_inc = next_cpu_mem - cpu_mem
gpu_mem_inc = next_gpu_mem - gpu_mem
cpu_gpu_mem_inc = cpu_mem_inc + gpu_mem_inc
memory_diff_trace.append(
MemoryState(
frame=frame, cpu=Memory(cpu_mem_inc), gpu=Memory(gpu_mem_inc), cpu_gpu=Memory(cpu_gpu_mem_inc),
)
)
cumulative_memory_dict[frame][0] += cpu_mem_inc
cumulative_memory_dict[frame][1] += gpu_mem_inc
cumulative_memory_dict[frame][2] += cpu_gpu_mem_inc
cumulative_memory = sorted(
list(cumulative_memory_dict.items()), key=lambda x: x[1][2], reverse=True
) # order by the total CPU + GPU memory increase
cumulative_memory = list(
MemoryState(
frame=frame, cpu=Memory(cpu_mem_inc), gpu=Memory(gpu_mem_inc), cpu_gpu=Memory(cpu_gpu_mem_inc),
)
for frame, (cpu_mem_inc, gpu_mem_inc, cpu_gpu_mem_inc) in cumulative_memory
)
if ignore_released_memory:
total_memory = sum(max(0, step_trace.cpu_gpu.bytes) for step_trace in memory_diff_trace)
else:
total_memory = sum(step_trace.cpu_gpu.bytes for step_trace in memory_diff_trace)
total_memory = Memory(total_memory)
return MemorySummary(sequential=memory_diff_trace, cumulative=cumulative_memory, total=total_memory)
return None
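# Minimal usage sketch (hypothetical `_example_*` helper, not part of the upstream API): trace
# memory line by line around the code of interest, then summarize; psutil / py3nvml are
# optional and the corresponding numbers simply stay at zero when they are missing.
def _example_memory_tracing():
    trace = start_memory_tracing("transformers")
    # ... run the code to profile here ...
    summary = stop_memory_tracing(trace)
    if summary is not None:
        print("total memory increase:", summary.total)
    return summary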
def bytes_to_human_readable(memory_amount: int) -> str:
""" Utility to convert a number of bytes (int) in a human readable string (with units)
"""
for unit in ["B", "KB", "MB", "GB"]:
if memory_amount > -1024.0 and memory_amount < 1024.0:
return "{:.3f}{}".format(memory_amount, unit)
memory_amount /= 1024.0
return "{:.3f}TB".format(memory_amount)
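# Minimal usage sketch (hypothetical `_example_*` helper, not part of the upstream API).
def _example_bytes_to_human_readable():
    assert bytes_to_human_readable(512) == "512.000B"
    assert bytes_to_human_readable(1500000) == "1.431MB"
    return bytes_to_human_readable(1500000)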
| 15,354 | 43.897661 | 160 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 OpenAI GPT model."""
import logging
import numpy as np
import tensorflow as tf
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import (
TFConv1D,
TFPreTrainedModel,
TFSequenceSummary,
TFSharedEmbeddings,
get_initializer,
shape_list,
)
logger = logging.getLogger(__name__)
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-tf_model.h5"
}
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def swish(x):
return x * tf.math.sigmoid(x)
ACT_FNS = {
"gelu": tf.keras.layers.Activation(gelu),
"relu": tf.keras.activations.relu,
"swish": tf.keras.layers.Activation(swish),
}
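# Minimal usage sketch (hypothetical `_example_*` helper and sample values, not part of the
# upstream API): gelu gates its input smoothly around zero, so gelu(0) == 0 and gelu(3) is
# already very close to 3.
def _example_act_fns():
    x = tf.constant([-3.0, 0.0, 3.0])
    return ACT_FNS["gelu"](x)  # roughly [-0.004, 0.0, 2.996]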
class TFAttention(tf.keras.layers.Layer):
def __init__(self, nx, n_ctx, config, scale=False, **kwargs):
super().__init__(**kwargs)
self.output_attentions = config.output_attentions
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.n_ctx = n_ctx
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn")
self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj")
self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)
self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
pass
@staticmethod
def causal_attention_mask(nd, ns, dtype):
"""1's in the lower triangle, counting from the lower right corner.
Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range(nd)[:, None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
def _attn(self, inputs, training=False):
q, k, v, attention_mask, head_mask = inputs
# q, k, v have shape [batch, heads, sequence, features]
w = tf.matmul(q, k, transpose_b=True)
if self.scale:
dk = tf.cast(shape_list(k)[-1], tf.float32) # scale attention_scores
w = w / tf.math.sqrt(dk)
# w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w)
b = self.causal_attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
w = w * b - 1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = tf.nn.softmax(w, axis=-1)
w = self.attn_dropout(w, training=training)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [tf.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = tf.transpose(x, [0, 2, 1, 3])
x_shape = shape_list(x)
new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
return tf.reshape(x, new_x_shape)
def split_heads(self, x):
x_shape = shape_list(x)
new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
x = tf.reshape(x, new_x_shape)
return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
def call(self, inputs, training=False):
x, attention_mask, head_mask = inputs
x = self.c_attn(x)
query, key, value = tf.split(x, 3, axis=2)
query = self.split_heads(query)
key = self.split_heads(key)
value = self.split_heads(value)
attn_outputs = self._attn([query, key, value, attention_mask, head_mask], training=training)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a, training=training)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
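# Minimal usage sketch (hypothetical `_example_*` helper and sample values, not part of the
# upstream API): for 3 destination and 5 source positions, each destination token may attend
# to itself and to everything before it.
def _example_causal_attention_mask():
    mask = TFAttention.causal_attention_mask(3, 5, dtype=tf.float32)
    # [[1. 1. 1. 0. 0.]
    #  [1. 1. 1. 1. 0.]
    #  [1. 1. 1. 1. 1.]]
    return mask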
class TFMLP(tf.keras.layers.Layer):
def __init__(self, n_state, config, **kwargs):
super().__init__(**kwargs)
nx = config.n_embd
self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc")
self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj")
self.act = gelu
self.dropout = tf.keras.layers.Dropout(config.resid_pdrop)
def call(self, x, training=False):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
h2 = self.dropout(h2, training=training)
return h2
class TFBlock(tf.keras.layers.Layer):
def __init__(self, n_ctx, config, scale=False, **kwargs):
super().__init__(**kwargs)
nx = config.n_embd
self.attn = TFAttention(nx, n_ctx, config, scale, name="attn")
self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
self.mlp = TFMLP(4 * nx, config, name="mlp")
self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2")
def call(self, inputs, training=False):
x, attention_mask, head_mask = inputs
output_attn = self.attn([x, attention_mask, head_mask], training=training)
a = output_attn[0] # output_attn: a, (attentions)
n = self.ln_1(x + a)
m = self.mlp(n, training=training)
h = self.ln_2(n + m)
outputs = [h] + output_attn[1:]
return outputs # x, (attentions)
class TFOpenAIGPTMainLayer(tf.keras.layers.Layer):
def __init__(self, config, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.num_hidden_layers = config.n_layer
self.vocab_size = config.vocab_size
self.n_embd = config.n_embd
self.tokens_embed = TFSharedEmbeddings(
config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="tokens_embed"
)
self.positions_embed = tf.keras.layers.Embedding(
config.n_positions,
config.n_embd,
embeddings_initializer=get_initializer(config.initializer_range),
name="positions_embed",
)
self.drop = tf.keras.layers.Dropout(config.embd_pdrop)
self.h = [TFBlock(config.n_ctx, config, scale=True, name="h_._{}".format(i)) for i in range(config.n_layer)]
def get_input_embeddings(self):
return self.tokens_embed
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
raise NotImplementedError
def call(
self,
inputs,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
training=False,
):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
position_ids = inputs[3] if len(inputs) > 3 else position_ids
head_mask = inputs[4] if len(inputs) > 4 else head_mask
inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds
assert len(inputs) <= 6, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
attention_mask = inputs.get("attention_mask", attention_mask)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
assert len(inputs) <= 6, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if position_ids is None:
position_ids = tf.range(input_shape[-1], dtype=tf.int32)[tf.newaxis, :]
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
else:
attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.num_hidden_layers
# head_mask = tf.constant([0] * self.num_hidden_layers)
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
if inputs_embeds is None:
inputs_embeds = self.tokens_embed(input_ids, mode="embedding")
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.tokens_embed(token_type_ids, mode="embedding")
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
all_attentions = []
all_hidden_states = ()
for i, block in enumerate(self.h):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block([hidden_states, attention_mask, head_mask[i]], training=training)
hidden_states = outputs[0]
if self.output_attentions:
all_attentions.append(outputs[1])
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
return outputs # last hidden state, (all hidden_states), (attentions)
class TFOpenAIGPTPreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = OpenAIGPTConfig
pretrained_model_archive_map = TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "transformer"
OPENAI_GPT_START_DOCSTRING = r"""
.. note::
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
- a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
OPENAI_GPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.GPT2Tokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
(if set to :obj:`False`) for evaluation.
"""
@add_start_docstrings(
"The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
hidden_states (:obj:`tuple(tf.Tensor)` `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTModel.from_pretrained('openai-gpt')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
outputs = self.transformer(inputs, **kwargs)
return outputs
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
def get_output_embeddings(self):
return self.transformer.tokens_embed
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTLMHeadModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
logits = outputs[0]
"""
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
outputs = (lm_logits,) + transformer_outputs[1:]
return outputs # lm_logits, (all hidden_states), (attentions)
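# Note on weight tying (illustrative sketch, not a verbatim excerpt of TFSharedEmbeddings):
# the LM head reuses the input token embedding matrix instead of a separate output projection.
# Calling tokens_embed in "linear" mode is roughly
#
#     # hidden_states: (batch, seq_len, hidden_size), embedding_weight: (vocab_size, hidden_size)
#     lm_logits = tf.matmul(hidden_states, embedding_weight, transpose_b=True)
#
# (embedding_weight here stands for the shared embedding matrix), which is why
# get_output_embeddings() simply returns self.transformer.tokens_embed.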
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
    the classification head takes as input the hidden state at a specified classification token index in the input sequence.
""",
OPENAI_GPT_START_DOCSTRING,
)
class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
config.num_labels = 1
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
self.multiple_choice_head = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="multiple_choice_head"
)
def get_output_embeddings(self):
return self.transformer.tokens_embed
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def call(
self,
inputs,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
training=False,
):
r"""
        mc_token_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, num_choices)`, `optional`, defaults to index of the last token of the input):
Index of the classification token in each input sequence.
            Selected in the range ``[0, input_ids.size(-1) - 1]``.
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
lm_prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
                Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
# For example purposes. Not runnable.
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTDoubleHeadsModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
# Add a [CLS] to the vocabulary (we should train it also!)
# This option is currently not implemented in TF 2.0
raise NotImplementedError
tokenizer.add_special_tokens({'cls_token': '[CLS]'})
model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer)) # The newly added token is the last token of the vocabulary
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
input_ids = tf.constant([tokenizer.encode(s) for s in choices])[None, :] # Batch size 1, 2 choices
        mc_token_ids = tf.constant([input_ids.shape[-1] - 1, input_ids.shape[-1] - 1])[None, :] # Batch size 1, index of the last token
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
position_ids = inputs[3] if len(inputs) > 3 else position_ids
head_mask = inputs[4] if len(inputs) > 4 else head_mask
inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds
mc_token_ids = inputs[6] if len(inputs) > 6 else mc_token_ids
assert len(inputs) <= 7, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
attention_mask = inputs.get("attention_mask", attention_mask)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
mc_token_ids = inputs.get("mc_token_ids", mc_token_ids)
assert len(inputs) <= 7, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None:
input_shapes = shape_list(input_ids)
else:
input_shapes = shape_list(inputs_embeds)[:-1]
seq_length = input_shapes[-1]
flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
flat_inputs = [
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
head_mask,
inputs_embeds,
]
transformer_outputs = self.transformer(flat_inputs, training=training)
hidden_states = transformer_outputs[0]
hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])
lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
mc_logits = self.multiple_choice_head([hidden_states, mc_token_ids], training=training)
mc_logits = tf.squeeze(mc_logits, axis=-1)
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
return outputs # lm logits, mc logits, (all hidden_states), (attentions)
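    # Shape walkthrough of the flattening above (illustrative sizes only):
    #   input_ids:        (batch=2, num_choices=3, seq_len=10)
    #   flat_input_ids:   (6, 10) -> transformer -> hidden_states: (6, 10, hidden)
    #   reshaped back to  (2, 3, 10, hidden); lm_logits: (2, 3, 10, vocab_size)
    #   mc_logits:        (2, 3) after TFSequenceSummary and the squeeze on the last axis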
| 30,602 | 45.228097 | 169 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_flaubert.py | # coding=utf-8
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 Flaubert model.
"""
import logging
import random
import tensorflow as tf
from .configuration_flaubert import FlaubertConfig
from .file_utils import add_start_docstrings
from .modeling_tf_xlm import (
TFXLMForSequenceClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMWithLMHeadModel,
get_masks,
shape_list,
)
logger = logging.getLogger(__name__)
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {}
FLAUBERT_START_DOCSTRING = r"""
This model is a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ sub-class.
Use it as a regular TF 2.0 Keras Model and
refer to the TF 2.0 documentation for all matter related to general usage and behavior.
Parameters:
config (:class:`~transformers.FlaubertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
FLAUBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
langs (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
A parallel sequence of tokens to be used to indicate the language of each token in the input.
Indices are languages ids which can be obtained from the language names by using two conversion mappings
provided in the configuration of the model (only provided for multilingual models).
More precisely, the `language name -> language id` mapping is in `model.config.lang2id` (dict str -> int) and
the `language id -> language name` mapping is `model.config.id2lang` (dict int -> str).
See usage examples detailed in the `multilingual documentation <https://huggingface.co/transformers/multilingual.html>`__.
token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
lengths (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Length of each sentence that can be used to avoid performing attention on padding token indices.
            You can also use `attention_mask` for the same result (see above), kept here for compatibility.
Indices selected in ``[0, ..., input_ids.size(-1)]``:
cache (:obj:`Dict[str, tf.Tensor]`, `optional`, defaults to :obj:`None`):
dictionary with ``tf.Tensor`` that contains pre-computed
hidden-states (key and values in the attention blocks) as computed by the model
(see `cache` output below). Can be used to speed up sequential decoding.
The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.
head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
        inputs_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.",
FLAUBERT_START_DOCSTRING,
)
class TFFlaubertModel(TFXLMModel):
config_class = FlaubertConfig
pretrained_model_archive_map = TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP
def __init__(self, config, *inputs, **kwargs):
super(TFFlaubertModel, self).__init__(config, *inputs, **kwargs)
self.transformer = TFFlaubertMainLayer(config, name="transformer")
class TFFlaubertMainLayer(TFXLMMainLayer):
def __init__(self, config, *inputs, **kwargs):
super(TFFlaubertMainLayer, self).__init__(config, *inputs, **kwargs)
self.layerdrop = getattr(config, "layerdrop", 0.0)
self.pre_norm = getattr(config, "pre_norm", False)
def call(
self,
inputs,
attention_mask=None,
langs=None,
token_type_ids=None,
position_ids=None,
lengths=None,
cache=None,
head_mask=None,
inputs_embeds=None,
training=False,
):
# removed: src_enc=None, src_len=None
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
langs = inputs[2] if len(inputs) > 2 else langs
token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids
position_ids = inputs[4] if len(inputs) > 4 else position_ids
lengths = inputs[5] if len(inputs) > 5 else lengths
cache = inputs[6] if len(inputs) > 6 else cache
head_mask = inputs[7] if len(inputs) > 7 else head_mask
inputs_embeds = inputs[8] if len(inputs) > 8 else inputs_embeds
assert len(inputs) <= 9, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
attention_mask = inputs.get("attention_mask", attention_mask)
langs = inputs.get("langs", langs)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
lengths = inputs.get("lengths", lengths)
cache = inputs.get("cache", cache)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
assert len(inputs) <= 9, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
bs, slen = shape_list(input_ids)
elif inputs_embeds is not None:
bs, slen = shape_list(inputs_embeds)[:2]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if lengths is None:
if input_ids is not None:
lengths = tf.reduce_sum(tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=tf.int32), axis=1)
else:
lengths = tf.convert_to_tensor([slen] * bs, tf.int32)
# mask = input_ids != self.pad_index
# check inputs
# assert shape_list(lengths)[0] == bs
tf.debugging.assert_equal(shape_list(lengths)[0], bs)
# assert lengths.max().item() <= slen
# input_ids = input_ids.transpose(0, 1) # batch size as dimension 0
# assert (src_enc is None) == (src_len is None)
# if src_enc is not None:
# assert self.is_decoder
# assert src_enc.size(0) == bs
# generate masks
mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
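        # Sketch of what get_masks returns (see modeling_tf_xlm):
        #   mask:      (bs, slen)       1 for real tokens, 0 for padding
        #   attn_mask: equal to mask when self.causal is False, or a (bs, slen, slen)
        #              lower-triangular causal mask when self.causal is True,
        #              e.g. for slen=3: [[1,0,0], [1,1,0], [1,1,1]] (query i attends to j <= i)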
# if self.is_decoder and src_enc is not None:
# src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
# position_ids
if position_ids is None:
position_ids = tf.expand_dims(tf.range(slen), axis=0)
else:
# assert shape_list(position_ids) == [bs, slen] # (slen, bs)
tf.debugging.assert_equal(shape_list(position_ids), [bs, slen])
# position_ids = position_ids.transpose(0, 1)
# langs
if langs is not None:
# assert shape_list(langs) == [bs, slen] # (slen, bs)
tf.debugging.assert_equal(shape_list(langs), [bs, slen])
# langs = langs.transpose(0, 1)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen]
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.n_layers
# do not recompute cached elements
if cache is not None and input_ids is not None:
_slen = slen - cache["slen"]
input_ids = input_ids[:, -_slen:]
position_ids = position_ids[:, -_slen:]
if langs is not None:
langs = langs[:, -_slen:]
mask = mask[:, -_slen:]
attn_mask = attn_mask[:, -_slen:]
# embeddings
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids)
tensor = inputs_embeds + self.position_embeddings(position_ids)
if langs is not None and self.use_lang_emb:
tensor = tensor + self.lang_embeddings(langs)
if token_type_ids is not None:
tensor = tensor + self.embeddings(token_type_ids)
tensor = self.layer_norm_emb(tensor)
tensor = self.dropout(tensor, training=training)
tensor = tensor * mask[..., tf.newaxis]
# transformer layers
hidden_states = ()
attentions = ()
for i in range(self.n_layers):
# LayerDrop
dropout_probability = random.uniform(0, 1)
if training and (dropout_probability < self.layerdrop):
continue
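            # This is LayerDrop (Fan et al., 2019): during training each transformer layer is
            # skipped independently with probability self.layerdrop, e.g. layerdrop=0.2 drops
            # roughly one layer in five per forward pass; at inference all layers are kept.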
if self.output_hidden_states:
hidden_states = hidden_states + (tensor,)
# self attention
if not self.pre_norm:
attn_outputs = self.attentions[i]([tensor, attn_mask, None, cache, head_mask[i]], training=training)
attn = attn_outputs[0]
if self.output_attentions:
attentions = attentions + (attn_outputs[1],)
attn = self.dropout(attn, training=training)
tensor = tensor + attn
tensor = self.layer_norm1[i](tensor)
else:
tensor_normalized = self.layer_norm1[i](tensor)
attn_outputs = self.attentions[i](
[tensor_normalized, attn_mask, None, cache, head_mask[i]], training=training
)
attn = attn_outputs[0]
if self.output_attentions:
attentions = attentions + (attn_outputs[1],)
attn = self.dropout(attn, training=training)
tensor = tensor + attn
# encoder attention (for decoder only)
# if self.is_decoder and src_enc is not None:
# attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
# attn = F.dropout(attn, p=self.dropout, training=self.training)
# tensor = tensor + attn
# tensor = self.layer_norm15[i](tensor)
# FFN
if not self.pre_norm:
tensor = tensor + self.ffns[i](tensor)
tensor = self.layer_norm2[i](tensor)
else:
tensor_normalized = self.layer_norm2[i](tensor)
tensor = tensor + self.ffns[i](tensor_normalized)
tensor = tensor * mask[..., tf.newaxis]
# Add last hidden state
if self.output_hidden_states:
hidden_states = hidden_states + (tensor,)
# update cache length
if cache is not None:
cache["slen"] += tensor.size(1)
# move back sequence length to dimension 0
# tensor = tensor.transpose(0, 1)
outputs = (tensor,)
if self.output_hidden_states:
outputs = outputs + (hidden_states,)
if self.output_attentions:
outputs = outputs + (attentions,)
return outputs # outputs, (hidden_states), (attentions)
@add_start_docstrings(
"""The Flaubert Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
FLAUBERT_START_DOCSTRING,
)
class TFFlaubertWithLMHeadModel(TFXLMWithLMHeadModel):
config_class = FlaubertConfig
pretrained_model_archive_map = TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP
def __init__(self, config, *inputs, **kwargs):
super(TFFlaubertWithLMHeadModel, self).__init__(config, *inputs, **kwargs)
self.transformer = TFFlaubertMainLayer(config, name="transformer")
@add_start_docstrings(
"""Flaubert Model with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
FLAUBERT_START_DOCSTRING,
)
class TFFlaubertForSequenceClassification(TFXLMForSequenceClassification):
config_class = FlaubertConfig
pretrained_model_archive_map = TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP
def __init__(self, config, *inputs, **kwargs):
super(TFFlaubertForSequenceClassification, self).__init__(config, *inputs, **kwargs)
self.transformer = TFFlaubertMainLayer(config, name="transformer")
| 15,824 | 46.954545 | 159 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import logging
import math
import os
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .activations import gelu, gelu_new, swish
from .configuration_bert import BertConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import PreTrainedModel, prune_linear_layer
logger = logging.getLogger(__name__)
BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
"bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
"bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
"bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
"bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
"bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
"bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
"bert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
"bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
"bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
"bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
"bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
"bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
"bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-pytorch_model.bin",
"bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-pytorch_model.bin",
"bert-base-japanese": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-pytorch_model.bin",
"bert-base-japanese-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking-pytorch_model.bin",
"bert-base-japanese-char": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-pytorch_model.bin",
"bert-base-japanese-char-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking-pytorch_model.bin",
"bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/pytorch_model.bin",
"bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/pytorch_model.bin",
"bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/pytorch_model.bin",
}
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model.
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def mish(x):
return x * torch.tanh(nn.functional.softplus(x))
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new, "mish": mish}
BertLayerNorm = torch.nn.LayerNorm
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
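    # Shape sketch for transpose_for_scores, assuming bert-base sizes (768 hidden, 12 heads):
    #   x: (batch, seq_len, 768) -> view: (batch, seq_len, 12, 64) -> permute: (batch, 12, seq_len, 64)
    # so the dot-product attention below is computed independently per head.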
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
attention_mask = encoder_attention_mask
else:
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
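# The forward pass above is standard scaled dot-product attention,
#   Attention(Q, K, V) = softmax(Q K^T / sqrt(attention_head_size) + additive_mask) V,
# evaluated independently for each of the num_attention_heads heads.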
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
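    # Pruning sketch with illustrative sizes (12 heads of size 64, hidden 768): pruning head 2
    # keeps indices [0..127] and [192..767], i.e. the 64-wide slice of head 2 is removed from the
    # output dimension of query/key/value and from the input dimension of output.dense.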
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
self_outputs = self.self(
hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
if self.is_decoder:
self.crossattention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.is_decoder and encoder_hidden_states is not None:
cross_attention_outputs = self.crossattention(
attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + outputs
return outputs
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
)
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = BertConfig
pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
BERT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
"""
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well
as a decoder, in which case a layer of cross-attention is added between
the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,
Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the
:obj:`is_decoder` argument of the configuration set to :obj:`True`; an
:obj:`encoder_hidden_states` is expected as an input to the forward pass.
.. _`Attention is all you need`:
https://arxiv.org/abs/1706.03762
"""
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during pre-training.
                This output is usually *not* a good summary of the semantic content of the input;
                you're often better off averaging or pooling the sequence of hidden-states for the
                whole input sequence.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
                Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertModel, BertTokenizer
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
causal_mask = causal_mask.to(
attention_mask.dtype
) # causal and attention masks must have same type with pytorch version < 1.3
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
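                # Illustrative causal mask for seq_length = 3 (before combining with padding):
                #   [[1, 0, 0],
                #    [1, 1, 0],
                #    [1, 1, 1]]
                # i.e. position i may only attend to positions j <= i that are not padding.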
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
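        # Worked example: a padding mask [1, 1, 0] becomes the additive mask [0.0, 0.0, -10000.0];
        # added to the raw scores before the softmax, the masked position gets (near-)zero weight.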
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
elif encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for encoder_hidden_shape (shape {}) or encoder_attention_mask (shape {})".format(
encoder_hidden_shape, encoder_attention_mask.shape
)
)
encoder_extended_attention_mask = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
@add_start_docstrings(
"""Bert Model with two heads on top as done during the pre-training: a `masked language modeling` head and
a `next sentence prediction (classification)` head. """,
BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
next_sentence_label=None,
):
r"""
masked_lm_labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False
continuation before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
                Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForPreTraining
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForPreTraining.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
prediction_scores, seq_relationship_scores = outputs[:2]
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
outputs = (prediction_scores, seq_relationship_score,) + outputs[
2:
] # add hidden states and attention if they are here
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
outputs = (total_loss,) + outputs
return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
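# Note: the pre-training loss above is the unweighted sum of the masked-LM cross-entropy
# (positions labelled -100 are ignored, the default ignore_index of CrossEntropyLoss) and
# the binary next-sentence-prediction cross-entropy.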
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
lm_labels=None,
):
r"""
masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the left-to-right language modeling loss (next word prediction).
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
masked_lm_loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
ltr_lm_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`lm_labels` is provided):
Next token prediction loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForMaskedLM
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMaskedLM.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, masked_lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
# Although this may seem awkward, BertForMaskedLM supports two scenarios:
# 1. If a tensor that contains the indices of masked labels is provided,
# the cross-entropy is the MLM cross-entropy that measures the likelihood
# of predictions for masked words.
# 2. If `lm_labels` is provided we are in a causal scenario where we
# try to predict the next token for each input in the decoder.
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
if lm_labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
prediction_scores = prediction_scores[:, :-1, :].contiguous()
lm_labels = lm_labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
ltr_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), lm_labels.view(-1))
outputs = (ltr_lm_loss,) + outputs
return outputs # (masked_lm_loss), (ltr_lm_loss), prediction_scores, (hidden_states), (attentions)
@add_start_docstrings(
"""Bert Model with a `next sentence prediction (classification)` head on top. """, BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.init_weights()
@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
next_sentence_label=None,
):
r"""
next_sentence_label (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`next_sentence_label` is provided):
Next sequence prediction (classification) loss.
seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForNextSentencePrediction
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
seq_relationship_scores = outputs[0]
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
seq_relationship_score = self.cls(pooled_output)
outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
outputs = (next_sentence_loss,) + outputs
return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings(
"""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForSequenceClassification
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings(
"""Bert Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
`num_choices` is the second dimension of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForMultipleChoice
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
labels = torch.tensor(1).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, classification_scores = outputs[:2]
"""
num_choices = input_ids.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1))
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
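# Multiple-choice inputs arrive as (batch_size, num_choices, seq_len); the views above flatten
# them to (batch_size * num_choices, seq_len) so BERT scores each choice as a separate sequence,
# and the per-choice logits are reshaped back to (batch_size, num_choices) below before the
# cross-entropy over choices.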
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings(
"""Bert Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the token classification loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :
Classification loss.
scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForTokenClassification
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForTokenClassification.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, scores = outputs[:2]
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
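# torch.where above keeps the real label wherever attention_mask == 1 and substitutes
# CrossEntropyLoss's default ignore_index (-100) at padded positions, so padding tokens
# contribute nothing to the token-classification loss.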
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
@add_start_docstrings(
"""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Positions outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Positions outside of the sequence are not taken into account for computing the loss.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`start_positions` and :obj:`end_positions` are provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-start scores (before SoftMax).
end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForQuestionAnswering
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
input_ids = tokenizer.encode(question, text)
token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))]
start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))
all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
answer = ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])
assert answer == "a nice puppet"
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
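# Clamping out-of-range answer positions to ignored_index (the sequence length, an invalid
# class id) and passing the same value as ignore_index to CrossEntropyLoss below makes those
# examples drop out of the span loss instead of indexing past the logits.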
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
| 71,014 | 46.060968 | 187 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_gpt2.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
import logging
import math
import os
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from .activations import ACT2FN
from .configuration_gpt2 import GPT2Config
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import Conv1D, PreTrainedModel, SequenceSummary, prune_conv1d_layer
logger = logging.getLogger(__name__)
GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {
"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin",
"gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin",
"gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-pytorch_model.bin",
"gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-pytorch_model.bin",
"distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-pytorch_model.bin",
}
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(gpt2_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
for name, array in zip(names, arrays):
name = name[6:] # skip "model/"
name = name.split("/")
pointer = model
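# The loop below walks the '/'-separated TF variable name and resolves each piece to the
# matching PyTorch submodule or parameter; roughly, and assuming the standard OpenAI
# checkpoint naming, a variable such as "model/h0/attn/c_attn/w" ends up at
# model.h[0].attn.c_attn.weight.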
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "w" or scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "wpe" or scope_names[0] == "wte":
pointer = getattr(pointer, scope_names[0])
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super().__init__()
self.output_attentions = config.output_attentions
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.n_head, self.split_size // self.n_head)
heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
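# c_attn packs the query, key and value projections along its output dimension, so the kept
# feature indices are repeated at offsets 0, split_size and 2 * split_size to prune all three
# projections consistently; c_proj only needs the plain index.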
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
nd, ns = w.size(-2), w.size(-1)
b = self.bias[:, :, ns - nd : ns, :ns]
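# b is the lower-triangular causal mask cropped to the current query/key window; the next
# line keeps scores for past and current positions and pushes future positions to a large
# negative value so they effectively vanish after the softmax.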
w = w * b - 1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
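# Incremental decoding: layer_past caches the keys/values of previously processed tokens;
# they are concatenated with the projections of the new token(s) so attention covers the
# whole history, and the updated pair is returned as `present` for the next step.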
if layer_past is not None:
past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
present = torch.stack((key.transpose(-2, -1), value)) # transpose to have same shapes for stacking
attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a, present] + attn_outputs[1:]
return outputs # a, present, (attentions)
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super().__init__()
nx = config.n_embd
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
output_attn = self.attn(
self.ln_1(x), layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask
)
a = output_attn[0] # output_attn: a, present, (attentions)
x = x + a
m = self.mlp(self.ln_2(x))
x = x + m
outputs = [x] + output_attn[1:]
return outputs # x, present, (attentions)
class GPT2PreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = GPT2Config
pretrained_model_archive_map = GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_gpt2
base_model_prefix = "transformer"
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
GPT2_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
GPT2_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past` is None else 1
Indices of input sequence tokens in the vocabulary.
If using `past` as an input make sure that `input_ids` are those of the last position.
Indices can be obtained using :class:`transformers.GPT2Tokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`, defaults to :obj:`None`):
`input_ids_length` = `sequence_length` if `past` is None else 1
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
If using `past` as an input make sure that `token_type_ids` correspond to the `input_ids` of the last position.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
GPT2_START_DOCSTRING,
)
class GPT2Model(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.output_past = config.output_past
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.init_weights()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
@add_start_docstrings_to_callable(GPT2_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.GPT2Config`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import GPT2Tokenizer, GPT2Model
import torch
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = past[0][0].size(-2)
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
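# When a `past` cache is used, position ids start at past_length rather than 0, so the new
# tokens get position embeddings that continue the original sequence.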
# Attention mask.
if attention_mask is not None:
assert batch_size > 0, "batch_size has to be defined and > 0"
attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
)  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.n_layer
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = ()
all_attentions = []
all_hidden_states = ()
for i, (block, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
outputs = block(
hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i]
)
hidden_states, present = outputs[:2]
if self.output_past:
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_past:
outputs = outputs + (presents,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
return outputs # last hidden state, (presents), (all hidden_states), (attentions)
@add_start_docstrings(
"""The GPT2 Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
GPT2_START_DOCSTRING,
)
class GPT2LMHeadModel(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
def prepare_inputs_for_generation(self, input_ids, past, **kwargs):
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
return {"input_ids": input_ids, "past": past}
@add_start_docstrings_to_callable(GPT2_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.GPT2Config`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
Language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=input_ids)
loss, logits = outputs[:2]
"""
transformer_outputs = self.transformer(
input_ids,
past=past,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
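# e.g. for tokens [t0, t1, t2, t3] the logits at positions 0..2 are scored against labels
# t1..t3, i.e. every position is trained to predict the following token.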
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions)
@add_start_docstrings(
"""The GPT2 Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the hidden state at a specified classification token index in the input sequence.
""",
GPT2_START_DOCSTRING,
)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = SequenceSummary(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
@add_start_docstrings_to_callable(GPT2_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
lm_labels=None,
mc_labels=None,
):
r"""
mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, defaults to index of the last token of the input)
Index of the classification token in each input sequence.
Selected in the range ``[0, input_ids.size(-1) - 1]``.
lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`)
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`, defaults to :obj:`None`)
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.GPT2Config`) and inputs:
lm_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``lm_labels`` is provided):
Language modeling loss.
mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`multiple_choice_labels` is provided):
Multiple choice classification loss.
lm_prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import torch
from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2DoubleHeadsModel.from_pretrained('gpt2')
# Add a [CLS] to the vocabulary (we should train it also!)
tokenizer.add_special_tokens({'cls_token': '[CLS]'})
model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
print(tokenizer.cls_token_id, len(tokenizer))  # The newly added token is the last token of the vocabulary
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
encoded_choices = [tokenizer.encode(s) for s in choices]
cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2
mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
transformer_outputs = self.transformer(
input_ids,
past=past,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
outputs = (loss,) + outputs
if lm_labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (lm loss), (mc loss), lm logits, mc logits, presents, (all hidden_states), (attentions)
| 35,231 | 46.227882 | 177 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_flaubert.py | # coding=utf-8
# Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Flaubert model, based on XLM. """
import logging
import random
import torch
from torch.nn import functional as F
from .configuration_flaubert import FlaubertConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_xlm import (
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMModel,
XLMWithLMHeadModel,
get_masks,
)
logger = logging.getLogger(__name__)
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"flaubert-small-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/flaubert/flaubert_small_cased/pytorch_model.bin",
"flaubert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/flaubert/flaubert_base_uncased/pytorch_model.bin",
"flaubert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/flaubert/flaubert_base_cased/pytorch_model.bin",
"flaubert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/flaubert/flaubert_large_cased/pytorch_model.bin",
}
FLAUBERT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.FlaubertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
FLAUBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
lengths (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Length of each sentence that can be used to avoid performing attention on padding token indices.
            You can also use `attention_mask` for the same result (see above), kept here for compatibility.
Indices selected in ``[0, ..., input_ids.size(-1)]``:
cache (:obj:`Dict[str, torch.FloatTensor]`, `optional`, defaults to :obj:`None`):
dictionary with ``torch.FloatTensor`` that contains pre-computed
hidden-states (key and values in the attention blocks) as computed by the model
(see `cache` output below). Can be used to speed up sequential decoding.
The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.",
FLAUBERT_START_DOCSTRING,
)
class FlaubertModel(XLMModel):
config_class = FlaubertConfig
pretrained_model_archive_map = FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP
def __init__(self, config): # , dico, is_encoder, with_output):
super(FlaubertModel, self).__init__(config)
self.layerdrop = getattr(config, "layerdrop", 0.0)
self.pre_norm = getattr(config, "pre_norm", False)
@add_start_docstrings_to_callable(FLAUBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
langs=None,
token_type_ids=None,
position_ids=None,
lengths=None,
cache=None,
head_mask=None,
inputs_embeds=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import FlaubertTokenizer, FlaubertModel
import torch
tokenizer = FlaubertTokenizer.from_pretrained('flaubert-base-cased')
model = FlaubertModel.from_pretrained('flaubert-base-cased')
input_ids = torch.tensor(tokenizer.encode("Le chat mange une pomme.", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
# removed: src_enc=None, src_len=None
if input_ids is not None:
bs, slen = input_ids.size()
else:
bs, slen = inputs_embeds.size()[:-1]
if lengths is None:
if input_ids is not None:
lengths = (input_ids != self.pad_index).sum(dim=1).long()
else:
lengths = torch.LongTensor([slen] * bs)
# mask = input_ids != self.pad_index
# check inputs
assert lengths.size(0) == bs
assert lengths.max().item() <= slen
# input_ids = input_ids.transpose(0, 1) # batch size as dimension 0
# assert (src_enc is None) == (src_len is None)
# if src_enc is not None:
# assert self.is_decoder
# assert src_enc.size(0) == bs
# generate masks
mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
# if self.is_decoder and src_enc is not None:
# src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
device = input_ids.device if input_ids is not None else inputs_embeds.device
# position_ids
if position_ids is None:
position_ids = torch.arange(slen, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand((bs, slen))
else:
assert position_ids.size() == (bs, slen) # (slen, bs)
# position_ids = position_ids.transpose(0, 1)
# langs
if langs is not None:
assert langs.size() == (bs, slen) # (slen, bs)
# langs = langs.transpose(0, 1)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.n_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.n_layers
# do not recompute cached elements
if cache is not None and input_ids is not None:
_slen = slen - cache["slen"]
input_ids = input_ids[:, -_slen:]
position_ids = position_ids[:, -_slen:]
if langs is not None:
langs = langs[:, -_slen:]
mask = mask[:, -_slen:]
attn_mask = attn_mask[:, -_slen:]
# embeddings
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids)
tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds)
if langs is not None and self.use_lang_emb and self.config.n_langs > 1:
tensor = tensor + self.lang_embeddings(langs)
if token_type_ids is not None:
tensor = tensor + self.embeddings(token_type_ids)
tensor = self.layer_norm_emb(tensor)
tensor = F.dropout(tensor, p=self.dropout, training=self.training)
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
# transformer layers
hidden_states = ()
attentions = ()
for i in range(self.n_layers):
# LayerDrop
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
if self.output_hidden_states:
hidden_states = hidden_states + (tensor,)
# self attention
if not self.pre_norm:
attn_outputs = self.attentions[i](tensor, attn_mask, cache=cache, head_mask=head_mask[i])
attn = attn_outputs[0]
if self.output_attentions:
attentions = attentions + (attn_outputs[1],)
attn = F.dropout(attn, p=self.dropout, training=self.training)
tensor = tensor + attn
tensor = self.layer_norm1[i](tensor)
else:
tensor_normalized = self.layer_norm1[i](tensor)
attn_outputs = self.attentions[i](tensor_normalized, attn_mask, cache=cache, head_mask=head_mask[i])
attn = attn_outputs[0]
if self.output_attentions:
attentions = attentions + (attn_outputs[1],)
attn = F.dropout(attn, p=self.dropout, training=self.training)
tensor = tensor + attn
# encoder attention (for decoder only)
# if self.is_decoder and src_enc is not None:
# attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
# attn = F.dropout(attn, p=self.dropout, training=self.training)
# tensor = tensor + attn
# tensor = self.layer_norm15[i](tensor)
# FFN
if not self.pre_norm:
tensor = tensor + self.ffns[i](tensor)
tensor = self.layer_norm2[i](tensor)
else:
tensor_normalized = self.layer_norm2[i](tensor)
tensor = tensor + self.ffns[i](tensor_normalized)
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
# Add last hidden state
if self.output_hidden_states:
hidden_states = hidden_states + (tensor,)
# update cache length
if cache is not None:
cache["slen"] += tensor.size(1)
# move back sequence length to dimension 0
# tensor = tensor.transpose(0, 1)
outputs = (tensor,)
if self.output_hidden_states:
outputs = outputs + (hidden_states,)
if self.output_attentions:
outputs = outputs + (attentions,)
return outputs # outputs, (hidden_states), (attentions)
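# --- Hedged illustration (not part of the original file) ---
# FlaubertModel.forward above adds LayerDrop on top of XLMModel: during training,
# each transformer layer is skipped with probability ``self.layerdrop``. The
# standalone function below is a minimal sketch of that pattern; ``layers`` and
# ``hidden`` are hypothetical placeholders, not names used elsewhere in this file.
def _layerdrop_sketch(layers, hidden, layerdrop=0.0, training=True):
    for layer in layers:
        # During training, skip this layer with probability ``layerdrop``,
        # exactly as done in the loop of FlaubertModel.forward.
        if training and random.uniform(0, 1) < layerdrop:
            continue
        hidden = layer(hidden)
    return hidden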
@add_start_docstrings(
"""The Flaubert Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
FLAUBERT_START_DOCSTRING,
)
class FlaubertWithLMHeadModel(XLMWithLMHeadModel):
"""
This class overrides :class:`~transformers.XLMWithLMHeadModel`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = FlaubertConfig
pretrained_model_archive_map = FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP
def __init__(self, config):
super(FlaubertWithLMHeadModel, self).__init__(config)
self.transformer = FlaubertModel(config)
self.init_weights()
@add_start_docstrings(
"""Flaubert Model with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
FLAUBERT_START_DOCSTRING,
)
class FlaubertForSequenceClassification(XLMForSequenceClassification):
"""
This class overrides :class:`~transformers.XLMForSequenceClassification`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = FlaubertConfig
pretrained_model_archive_map = FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP
def __init__(self, config):
super(FlaubertForSequenceClassification, self).__init__(config)
self.transformer = FlaubertModel(config)
self.init_weights()
@add_start_docstrings(
"""Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
FLAUBERT_START_DOCSTRING,
)
class FlaubertForQuestionAnsweringSimple(XLMForQuestionAnsweringSimple):
"""
This class overrides :class:`~transformers.XLMForQuestionAnsweringSimple`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = FlaubertConfig
pretrained_model_archive_map = FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP
def __init__(self, config):
super(FlaubertForQuestionAnsweringSimple, self).__init__(config)
self.transformer = FlaubertModel(config)
self.init_weights()
@add_start_docstrings(
"""Flaubert Model with a beam-search span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
FLAUBERT_START_DOCSTRING,
)
class FlaubertForQuestionAnswering(XLMForQuestionAnswering):
"""
This class overrides :class:`~transformers.XLMForQuestionAnswering`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = FlaubertConfig
pretrained_model_archive_map = FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP
def __init__(self, config):
super(FlaubertForQuestionAnswering, self).__init__(config)
self.transformer = FlaubertModel(config)
self.init_weights()
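# --- Hedged usage sketch (not part of the original file) ---
# Each head class above only swaps the XLM backbone for ``FlaubertModel`` and
# re-initialises the weights. The function below is a hypothetical example for
# the sequence-classification head; the checkpoint name and sentence are
# placeholders and loading them requires network access.
def _flaubert_sequence_classification_demo():
    from transformers import FlaubertTokenizer  # local import, avoids import cycles
    tokenizer = FlaubertTokenizer.from_pretrained("flaubert-base-cased")
    model = FlaubertForSequenceClassification.from_pretrained("flaubert-base-cased")
    input_ids = torch.tensor([tokenizer.encode("Le chat mange une pomme.", add_special_tokens=True)])
    logits = model(input_ids)[0]  # shape (1, config.num_labels)
    return logits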
| 17,805 | 44.773779 | 150 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/tokenization_utils_base.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Base classes common to both the slow and the fast tokenization classes:
    PreTrainedTokenizerBase (hosts all the user-facing encoding methods)
    Special tokens mixin (hosts the special tokens logic) and
    BatchEncoding (wraps the dictionary of outputs with special methods for the fast tokenizers)
"""
import copy
import json
import logging
import os
import warnings
from collections import OrderedDict, UserDict
from enum import Enum
from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
import numpy as np
from tokenizers import AddedToken
from tokenizers import Encoding as EncodingFast
from .file_utils import (
add_end_docstrings,
cached_path,
hf_bucket_url,
is_remote_url,
is_tf_available,
is_torch_available,
torch_required,
)
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.getLogger(__name__)
VERY_LARGE_INTEGER = int(1e30) # This is used to set the max input length for a model with infinite size input
LARGE_INTEGER = int(1e20) # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER
# Define type aliases and NamedTuples
TextInput = str
PreTokenizedInput = List[str]
EncodedInput = List[int]
TextInputPair = Tuple[str, str]
PreTokenizedInputPair = Tuple[List[str], List[str]]
EncodedInputPair = Tuple[List[int], List[int]]
# Slow tokenizers used to be saved in three separated files
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
# Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file
FULL_TOKENIZER_FILE = "tokenizer.json"
class ExplicitEnum(Enum):
"""
Enum with more explicit error message for missing values.
"""
@classmethod
def _missing_(cls, value):
raise ValueError(
"%r is not a valid %s, please select one of %s"
% (value, cls.__name__, str(list(cls._value2member_map_.keys())))
)
class TruncationStrategy(ExplicitEnum):
"""
Possible values for the ``truncation`` argument in :meth:`PreTrainedTokenizerBase.__call__`.
Useful for tab-completion in an IDE.
"""
ONLY_FIRST = "only_first"
ONLY_SECOND = "only_second"
LONGEST_FIRST = "longest_first"
DO_NOT_TRUNCATE = "do_not_truncate"
class PaddingStrategy(ExplicitEnum):
"""
Possible values for the ``padding`` argument in :meth:`PreTrainedTokenizerBase.__call__`.
Useful for tab-completion in an IDE.
"""
LONGEST = "longest"
MAX_LENGTH = "max_length"
DO_NOT_PAD = "do_not_pad"
class TensorType(ExplicitEnum):
"""
Possible values for the ``return_tensors`` argument in :meth:`PreTrainedTokenizerBase.__call__`.
Useful for tab-completion in an IDE.
"""
PYTORCH = "pt"
TENSORFLOW = "tf"
NUMPY = "np"
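# --- Hedged illustration (not part of the original file) ---
# ``ExplicitEnum._missing_`` above replaces the terse default enum error with a
# ValueError that lists the accepted values. The helper below is a hypothetical
# usage sketch, not an API of this module.
def _padding_strategy_from_user_input(value: str) -> PaddingStrategy:
    # "longest" -> PaddingStrategy.LONGEST; an unknown string such as "left"
    # raises: ValueError("'left' is not a valid PaddingStrategy, please select
    # one of ['longest', 'max_length', 'do_not_pad']")
    return PaddingStrategy(value)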
class CharSpan(NamedTuple):
"""
Character span in the original string.
Args:
start (:obj:`int`): Index of the first character in the original string.
end (:obj:`int`): Index of the character following the last character in the original string.
"""
start: int
end: int
class TokenSpan(NamedTuple):
"""
Token span in an encoded string (list of tokens).
Args:
start (:obj:`int`): Index of the first token in the span.
end (:obj:`int`): Index of the token following the last token in the span.
"""
start: int
end: int
class BatchEncoding(UserDict):
"""
Holds the output of the :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.encode_plus`
and :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.batch_encode` methods (tokens,
attention_masks, etc).
This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes
utility methods to map from word/character space to token space.
Args:
data (:obj:`dict`):
Dictionary of lists/arrays/tensors returned by the encode/batch_encode methods ('input_ids',
'attention_mask', etc.).
encoding (:obj:`tokenizers.Encoding` or :obj:`Sequence[tokenizers.Encoding]`, `optional`):
            If the tokenizer is a fast tokenizer which outputs additional information like the mapping from
            word/character space to token space, the :obj:`tokenizers.Encoding` instance or list of instances
            (for batches) holds this information.
tensor_type (:obj:`Union[None, str, TensorType]`, `optional`):
You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at
initialization.
prepend_batch_axis (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to add a batch axis when converting to tensors (see :obj:`tensor_type` above).
"""
def __init__(
self,
data: Optional[Dict[str, Any]] = None,
encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None,
tensor_type: Union[None, str, TensorType] = None,
prepend_batch_axis: bool = False,
):
super().__init__(data)
if isinstance(encoding, EncodingFast):
encoding = [encoding]
self._encodings = encoding
self.convert_to_tensors(tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis)
@property
def is_fast(self) -> bool:
"""
:obj:`bool`: Indicate whether this :class:`~transformers.BatchEncoding` was generated from the result of a
:class:`~transformers.PreTrainedTokenizerFast` or not.
"""
return self._encodings is not None
def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]:
"""
If the key is a string, returns the value of the dict associated to :obj:`key` ('input_ids',
'attention_mask', etc.).
If the key is an integer, get the :obj:`tokenizers.Encoding` for batch item with index :obj:`key`.
"""
if isinstance(item, str):
return self.data[item]
elif self._encodings is not None:
return self._encodings[item]
else:
raise KeyError(
"Indexing with integers (to access backend Encoding for a given batch index) "
"is not available when using Python based tokenizers"
)
def __getattr__(self, item: str):
try:
return self.data[item]
except KeyError:
raise AttributeError
def __getstate__(self):
return {"data": self.data, "encodings": self._encodings}
def __setstate__(self, state):
if "data" in state:
self.data = state["data"]
if "encodings" in state:
self._encodings = state["encodings"]
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def items(self):
return self.data.items()
# After this point:
# Extended properties and methods only available for fast (Rust-based) tokenizers
# provided by HuggingFace tokenizers library.
@property
def encodings(self) -> Optional[List[EncodingFast]]:
"""
        :obj:`Optional[List[tokenizers.Encoding]]`: The list of all encodings from the tokenization process.
Returns :obj:`None` if the input was tokenized through Python (i.e., not a fast) tokenizer.
"""
return self._encodings
def tokens(self, batch_index: int = 0) -> List[str]:
"""
        Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion
to integer indices) at a given batch index (only works for the output of a fast tokenizer).
Args:
batch_index (:obj:`int`, `optional`, defaults to 0): The index to access in the batch.
Returns:
:obj:`List[str]`: The list of tokens at that index.
"""
if not self._encodings:
raise ValueError("tokens() is not available when using Python-based tokenizers")
return self._encodings[batch_index].tokens
def words(self, batch_index: int = 0) -> List[Optional[int]]:
"""
Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
Args:
batch_index (:obj:`int`, `optional`, defaults to 0): The index to access in the batch.
Returns:
:obj:`List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by
the tokenizer are mapped to :obj:`None` and other tokens are mapped to the index of their corresponding
word (several tokens will be mapped to the same word index if they are parts of that word).
"""
if not self._encodings:
raise ValueError("words() is not available when using Python-based tokenizers")
return self._encodings[batch_index].words
def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
"""
        Get the index of the word corresponding to (i.e. comprising) an encoded token
in a sequence of the batch.
Can be called as:
- ``self.token_to_word(token_index)`` if batch size is 1
- ``self.token_to_word(batch_index, token_index)`` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e., words are defined by the user). In this case it allows
to easily associate encoded tokens with provided tokenized words.
Args:
batch_or_token_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the token in the sequence.
token_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
of the token in the sequence.
Returns:
:obj:`int`: Index of the word in the input sequence.
"""
if not self._encodings:
raise ValueError("token_to_word() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
        if batch_index < 0:
            batch_index = len(self._encodings) + batch_index
        if token_index < 0:
            token_index = len(self._encodings[batch_index].tokens) + token_index
return self._encodings[batch_index].token_to_word(token_index)
def word_to_tokens(self, batch_or_word_index: int, word_index: Optional[int] = None) -> TokenSpan:
"""
Get the encoded token span corresponding to a word in the sequence of the batch.
Token spans are returned as a :class:`~transformers.tokenization_utils_base.TokenSpan` with:
- **start** -- Index of the first token.
- **end** -- Index of the token following the last token.
Can be called as:
- ``self.word_to_tokens(word_index)`` if batch size is 1
- ``self.word_to_tokens(batch_index, word_index)`` if batch size is greater or equal to 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it allows
to easily associate encoded tokens with provided tokenized words.
Args:
batch_or_word_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the word in the sequence.
word_index (:obj:`int`, `optional`):
                If a batch index is provided in `batch_or_word_index`, this can be the index
of the word in the sequence.
Returns:
:class:`~transformers.tokenization_utils_base.TokenSpan`
Span of tokens in the encoded sequence.
"""
if not self._encodings:
raise ValueError("word_to_tokens() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
if batch_index < 0:
batch_index = self._batch_size + batch_index
if word_index < 0:
word_index = self._seq_len + word_index
return TokenSpan(*(self._encodings[batch_index].word_to_tokens(word_index)))
def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan:
"""
Get the character span corresponding to an encoded token in a sequence of the batch.
Character spans are returned as a :class:`~transformers.tokenization_utils_base.CharSpan` with:
- **start** -- Index of the first character in the original string associated to the token.
- **end** -- Index of the character following the last character in the original string associated to the
token.
Can be called as:
- ``self.token_to_chars(token_index)`` if batch size is 1
- ``self.token_to_chars(batch_index, token_index)`` if batch size is greater or equal to 1
Args:
batch_or_token_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the token in the sequence.
token_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
of the token or tokens in the sequence.
Returns:
:class:`~transformers.tokenization_utils_base.CharSpan`:
Span of characters in the original string.
"""
if not self._encodings:
raise ValueError("token_to_chars() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
return CharSpan(*(self._encodings[batch_index].token_to_chars(token_index)))
def char_to_token(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int:
"""
Get the index of the token in the encoded output comprising a character
in the original string for a sequence of the batch.
Can be called as:
- ``self.char_to_token(char_index)`` if batch size is 1
- ``self.char_to_token(batch_index, char_index)`` if batch size is greater or equal to 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it allows
to easily associate encoded tokens with provided tokenized words.
Args:
batch_or_char_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
                this can be the index of the character in the sequence.
            char_index (:obj:`int`, `optional`):
                If a batch index is provided in `batch_or_char_index`, this can be the index
                of the character in the sequence.
Returns:
:obj:`int`: Index of the token.
"""
if not self._encodings:
raise ValueError("char_to_token() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_token(char_index)
def word_to_chars(self, batch_or_word_index: int, word_index: Optional[int] = None) -> CharSpan:
"""
Get the character span in the original string corresponding to given word in a sequence
of the batch.
Character spans are returned as a CharSpan NamedTuple with:
- start: index of the first character in the original string
- end: index of the character following the last character in the original string
Can be called as:
- ``self.word_to_chars(word_index)`` if batch size is 1
- ``self.word_to_chars(batch_index, word_index)`` if batch size is greater or equal to 1
Args:
batch_or_word_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
                this can be the index of the word in the sequence.
            word_index (:obj:`int`, `optional`):
                If a batch index is provided in `batch_or_word_index`, this can be the index
                of the word in the sequence.
Returns:
:obj:`CharSpan` or :obj:`List[CharSpan]`:
Span(s) of the associated character or characters in the string.
CharSpan are NamedTuple with:
- start: index of the first character associated to the token in the original string
- end: index of the character following the last character associated to the token in the original string
"""
if not self._encodings:
raise ValueError("word_to_chars() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index)))
def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int:
"""
Get the word in the original string corresponding to a character in the original string of
a sequence of the batch.
Can be called as:
- ``self.char_to_word(char_index)`` if batch size is 1
- ``self.char_to_word(batch_index, char_index)`` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it allows
to easily associate encoded tokens with provided tokenized words.
Args:
batch_or_char_index (:obj:`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence,
                this can be the index of the character in the original string.
            char_index (:obj:`int`, `optional`):
                If a batch index is provided in `batch_or_char_index`, this can be the index
                of the character in the original string.
        Returns:
            :obj:`int` or :obj:`List[int]`:
                Index or indices of the associated word(s).
"""
if not self._encodings:
raise ValueError("char_to_word() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_word(char_index)
def convert_to_tensors(
self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
):
"""
Convert the inner content to tensors.
Args:
tensor_type (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`):
The type of tensors to use. If :obj:`str`, should be one of the values of the enum
:class:`~transformers.tokenization_utils_base.TensorType`. If :obj:`None`, no modification is done.
            prepend_batch_axis (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to add the batch dimension during the conversion.
"""
if tensor_type is None:
return self
# Convert to TensorType
if not isinstance(tensor_type, TensorType):
tensor_type = TensorType(tensor_type)
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW and is_tf_available():
as_tensor = tf.constant
elif tensor_type == TensorType.PYTORCH and is_torch_available():
as_tensor = torch.tensor
elif tensor_type == TensorType.NUMPY:
as_tensor = np.asarray
else:
raise ImportError(
"Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(
tensor_type
)
)
# Do the tensor conversion in batch
for key, value in self.items():
try:
if prepend_batch_axis:
value = [value]
tensor = as_tensor(value)
                # Removing this for now in favor of controlling the shape with `prepend_batch_axis`
# # at-least2d
# if tensor.ndim > 2:
# tensor = tensor.squeeze(0)
# elif tensor.ndim < 2:
# tensor = tensor[None, :]
self[key] = tensor
except: # noqa E722
if key == "overflowing_tokens":
raise ValueError(
"Unable to create tensor returning overflowing tokens of different lengths. "
"Please see if a fast version of this tokenizer is available to have this feature available."
)
raise ValueError(
"Unable to create tensor, you should probably activate truncation and/or padding "
"with 'padding=True' 'truncation=True' to have batched tensors with the same length."
)
return self
@torch_required
def to(self, device: str) -> "BatchEncoding":
"""
Send all values to device by calling :obj:`v.to(device)` (PyTorch only).
Args:
device (:obj:`str` or :obj:`torch.device`): The device to put the tensors on.
Returns:
:class:`~transformers.BatchEncoding`:
The same instance of :class:`~transformers.BatchEncoding` after modification.
"""
self.data = {k: v.to(device) for k, v in self.data.items()}
return self
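# --- Hedged usage sketch (not part of the original file) ---
# The token/word/character alignment methods of BatchEncoding above are only
# available when the object was produced by a fast (Rust-backed) tokenizer.
# The function below is a hypothetical example; the checkpoint name and the
# sentence are placeholders and loading the tokenizer requires network access.
def _batch_encoding_alignment_demo():
    from transformers import BertTokenizerFast  # local import, avoids import cycles
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    encoding = tokenizer("Hello world")
    assert encoding.is_fast  # backed by tokenizers.Encoding objects
    char_span = encoding.token_to_chars(1)  # CharSpan(0, 5) for the token "hello"
    word_index = encoding.token_to_word(1)  # 0: "hello" belongs to the first word
    token_index = encoding.char_to_token(6)  # index of the token covering the character "w"
    return char_span, word_index, token_index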
class SpecialTokensMixin:
"""
A mixin derived by :class:`~transformers.PreTrainedTokenizer` and :class:`~transformers.PreTrainedTokenizerFast`
    to handle specific behaviors related to special tokens. In particular, this class holds the attributes which can
    be used to directly access these special tokens in a model-independent manner and allows setting and updating the
    special tokens.
Args:
bos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing the beginning of a sentence.
eos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing the end of a sentence.
unk_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing an out-of-vocabulary token.
sep_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token separating two different sentences in the same input (used by BERT for instance).
pad_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
attention mechanisms or loss computation.
cls_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing the class of the input (used by BERT for instance).
mask_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing a masked token (used by masked-language modeling pretraining objectives, like
BERT).
additional_special_tokens (tuple or list of :obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A tuple or a list of additional special tokens.
"""
SPECIAL_TOKENS_ATTRIBUTES = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
"additional_special_tokens",
]
def __init__(self, verbose=True, **kwargs):
self._bos_token = None
self._eos_token = None
self._unk_token = None
self._sep_token = None
self._pad_token = None
self._cls_token = None
self._mask_token = None
self._pad_token_type_id = 0
self._additional_special_tokens = []
self.verbose = verbose
# We directly set the hidden value to allow initialization with special tokens
        # which are not yet in the vocabulary. Necessary for serialization/de-serialization
        # TODO clean this up at some point (probably by switching to fast tokenizers)
for key, value in kwargs.items():
if key in self.SPECIAL_TOKENS_ATTRIBUTES:
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple"
assert all(isinstance(t, str) for t in value), "One of the tokens is not a string"
setattr(self, key, value)
elif isinstance(value, (str, AddedToken)):
setattr(self, key, value)
else:
raise TypeError(
"special token {} has to be either str or AddedToken but got: {}".format(key, type(value))
)
def sanitize_special_tokens(self) -> int:
"""
Make sure that all the special tokens attributes of the tokenizer (:obj:`tokenizer.mask_token`,
:obj:`tokenizer.cls_token`, etc.) are in the vocabulary.
Add the missing ones to the vocabulary if needed.
Return:
            :obj:`int`: The number of tokens added in the vocabulary during the operation.
"""
return self.add_tokens(self.all_special_tokens_extended, special_tokens=True)
def add_special_tokens(self, special_tokens_dict: Dict[str, Union[str, AddedToken]]) -> int:
"""
Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If
special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the
current vocabulary).
        Using :obj:`add_special_tokens` will ensure your special tokens can be used in several ways:
- Special tokens are carefully handled by the tokenizer (they are never split).
- You can easily refer to special tokens using tokenizer class attributes like :obj:`tokenizer.cls_token`. This
makes it easy to develop model-agnostic training and fine-tuning scripts.
When possible, special tokens are already registered for provided pretrained models (for instance
        :class:`~transformers.BertTokenizer` :obj:`cls_token` is already registered to be :obj:`'[CLS]'` and XLM's one
is also registered to be :obj:`'</s>'`).
Args:
special_tokens_dict (dictionary `str` to `str` or :obj:`tokenizers.AddedToken`):
Keys should be in the list of predefined special attributes: [``bos_token``, ``eos_token``,
``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
``additional_special_tokens``].
Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer
assign the index of the ``unk_token`` to them).
Returns:
:obj:`int`: Number of tokens added to the vocabulary.
Examples::
# Let's see how to add a new classification token to GPT-2
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
special_tokens_dict = {'cls_token': '<CLS>'}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens')
# Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
assert tokenizer.cls_token == '<CLS>'
"""
if not special_tokens_dict:
return 0
added_tokens = 0
for key, value in special_tokens_dict.items():
assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token"
if self.verbose:
logger.info("Assigning %s to the %s key of the tokenizer", value, key)
setattr(self, key, value)
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)) and all(
isinstance(t, (str, AddedToken)) for t in value
), f"Tokens {value} for key {key} should all be str or AddedToken instances"
added_tokens += self.add_tokens(value, special_tokens=True)
else:
assert isinstance(
value, (str, AddedToken)
), f"Token {value} for key {key} should be a str or an AddedToken instance"
added_tokens += self.add_tokens([value], special_tokens=True)
return added_tokens
def add_tokens(
self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False
) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
it with indices starting from length of the current vocabulary.
Args:
new_tokens (:obj:`str`, :obj:`tokenizers.AddedToken` or a list of `str` or :obj:`tokenizers.AddedToken`):
Tokens are only added if they are not already in the vocabulary. :obj:`tokenizers.AddedToken` wraps a
string token to let you personalize its behavior: whether this token should only match against a single
word, whether this token should strip all potential whitespaces on the left side, whether this token
should strip all potential whitespaces on the right side, etc.
            special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Can be used to specify if the token is a special token. This mostly changes the normalization behavior
(special tokens like CLS or [MASK] are usually not lower-cased for instance).
See details for :obj:`tokenizers.AddedToken` in HuggingFace tokenizers library.
Returns:
:obj:`int`: Number of tokens added to the vocabulary.
Examples::
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
# Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
"""
if not new_tokens:
return 0
if not isinstance(new_tokens, (list, tuple)):
new_tokens = [new_tokens]
return self._add_tokens(new_tokens, special_tokens=special_tokens)
@property
def bos_token(self) -> str:
"""
:obj:`str`: Beginning of sentence token. Log an error if used while not having been set.
"""
if self._bos_token is None and self.verbose:
logger.error("Using bos_token, but it is not set yet.")
return None
return str(self._bos_token)
@property
def eos_token(self) -> str:
"""
:obj:`str`: End of sentence token. Log an error if used while not having been set.
"""
if self._eos_token is None and self.verbose:
logger.error("Using eos_token, but it is not set yet.")
return None
return str(self._eos_token)
@property
def unk_token(self) -> str:
"""
:obj:`str`: Unknown token. Log an error if used while not having been set.
"""
if self._unk_token is None and self.verbose:
logger.error("Using unk_token, but it is not set yet.")
return None
return str(self._unk_token)
@property
def sep_token(self) -> str:
"""
:obj:`str`: Separation token, to separate context and query in an input sequence.
Log an error if used while not having been set.
"""
if self._sep_token is None and self.verbose:
logger.error("Using sep_token, but it is not set yet.")
return None
return str(self._sep_token)
@property
def pad_token(self) -> str:
"""
:obj:`str`: Padding token. Log an error if used while not having been set.
"""
if self._pad_token is None and self.verbose:
logger.error("Using pad_token, but it is not set yet.")
return None
return str(self._pad_token)
@property
def cls_token(self) -> str:
"""
:obj:`str`: Classification token, to extract a summary of an input sequence leveraging self-attention along
the full depth of the model. Log an error if used while not having been set.
"""
if self._cls_token is None and self.verbose:
logger.error("Using cls_token, but it is not set yet.")
return None
return str(self._cls_token)
@property
def mask_token(self) -> str:
"""
:obj:`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while
not having been set.
"""
if self._mask_token is None and self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@property
def additional_special_tokens(self) -> List[str]:
"""
:obj:`List[str]`: All the additional special tokens you may want to use. Log an error if used while not having
been set.
"""
if self._additional_special_tokens is None and self.verbose:
logger.error("Using additional_special_tokens, but it is not set yet.")
return None
return [str(tok) for tok in self._additional_special_tokens]
@bos_token.setter
def bos_token(self, value):
self._bos_token = value
@eos_token.setter
def eos_token(self, value):
self._eos_token = value
@unk_token.setter
def unk_token(self, value):
self._unk_token = value
@sep_token.setter
def sep_token(self, value):
self._sep_token = value
@pad_token.setter
def pad_token(self, value):
self._pad_token = value
@cls_token.setter
def cls_token(self, value):
self._cls_token = value
@mask_token.setter
def mask_token(self, value):
self._mask_token = value
@additional_special_tokens.setter
def additional_special_tokens(self, value):
self._additional_special_tokens = value
@property
def bos_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the beginning of sentence token in the vocabulary. Returns :obj:`None` if the token
has not been set.
"""
if self._bos_token is None:
return None
return self.convert_tokens_to_ids(self.bos_token)
@property
def eos_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the end of sentence token in the vocabulary. Returns :obj:`None` if the token has
not been set.
"""
if self._eos_token is None:
return None
return self.convert_tokens_to_ids(self.eos_token)
@property
def unk_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the unknown token in the vocabulary. Returns :obj:`None` if the token has not been
set.
"""
if self._unk_token is None:
return None
return self.convert_tokens_to_ids(self.unk_token)
@property
def sep_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the separation token in the vocabulary, to separate context and query in an input
sequence. Returns :obj:`None` if the token has not been set.
"""
if self._sep_token is None:
return None
return self.convert_tokens_to_ids(self.sep_token)
@property
def pad_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the padding token in the vocabulary. Returns :obj:`None` if the token has not been
set.
"""
if self._pad_token is None:
return None
return self.convert_tokens_to_ids(self.pad_token)
@property
def pad_token_type_id(self) -> int:
"""
:obj:`int`: Id of the padding token type in the vocabulary.
"""
return self._pad_token_type_id
@property
def cls_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the classification token in the vocabulary, to extract a summary of an input
sequence leveraging self-attention along the full depth of the model.
Returns :obj:`None` if the token has not been set.
"""
if self._cls_token is None:
return None
return self.convert_tokens_to_ids(self.cls_token)
@property
def mask_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the mask token in the vocabulary, used when training a model with masked-language
modeling. Returns :obj:`None` if the token has not been set.
"""
if self._mask_token is None:
return None
return self.convert_tokens_to_ids(self.mask_token)
@property
def additional_special_tokens_ids(self) -> List[int]:
"""
:obj:`List[int]`: Ids of all the additional special tokens in the vocabulary.
Log an error if used while not having been set.
"""
return self.convert_tokens_to_ids(self.additional_special_tokens)
@property
def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]:
"""
:obj:`Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes
(:obj:`cls_token`, :obj:`unk_token`, etc.) to their values (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.).
Convert potential tokens of :obj:`tokenizers.AddedToken` type to string.
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = str(attr_value)
return set_attr
@property
def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]:
"""
:obj:`Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary
mapping special token class attributes (:obj:`cls_token`, :obj:`unk_token`, etc.) to their values
(:obj:`'<unk>'`, :obj:`'<cls>'`, etc.).
Don't convert tokens of :obj:`tokenizers.AddedToken` type to string so they can be used to control more finely
how special tokens are tokenized.
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = attr_value
return set_attr
@property
def all_special_tokens(self) -> List[str]:
"""
:obj:`List[str]`: All the special tokens (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.) mapped to class attributes.
Convert tokens of :obj:`tokenizers.AddedToken` type to string.
"""
all_toks = [str(s) for s in self.all_special_tokens_extended]
return all_toks
@property
def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]:
"""
:obj:`List[Union[str, tokenizers.AddedToken]]`: All the special tokens (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.)
mapped to class attributes.
Don't convert tokens of :obj:`tokenizers.AddedToken` type to string so they can be used to control more finely
how special tokens are tokenized.
"""
all_toks = []
set_attr = self.special_tokens_map_extended
for attr_value in set_attr.values():
all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
all_toks = list(OrderedDict.fromkeys(all_toks))
return all_toks
@property
def all_special_ids(self) -> List[int]:
"""
        :obj:`List[int]`: List the ids of the special tokens (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.) mapped to class
attributes.
"""
all_toks = self.all_special_tokens
all_ids = self.convert_tokens_to_ids(all_toks)
return all_ids
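# --- Hedged illustration (not part of the original file) ---
# SpecialTokensMixin leaves ``convert_tokens_to_ids`` and ``_add_tokens`` to the
# concrete tokenizer classes. The toy subclass below only sketches that
# contract; its two-entry vocabulary is hypothetical and not how any real
# tokenizer in this library behaves.
class _ToySpecialTokensTokenizer(SpecialTokensMixin):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._toy_vocab = {"<unk>": 0, "<pad>": 1}
    def convert_tokens_to_ids(self, tokens):
        # Map a token (or list of tokens) to ids, falling back to the unk id.
        if isinstance(tokens, str):
            return self._toy_vocab.get(tokens, self._toy_vocab["<unk>"])
        return [self.convert_tokens_to_ids(token) for token in tokens]
    def _add_tokens(self, new_tokens, special_tokens=False):
        # Called by ``add_tokens``/``add_special_tokens`` above; returns the
        # number of tokens that were actually added to the vocabulary.
        added = [str(token) for token in new_tokens if str(token) not in self._toy_vocab]
        for token in added:
            self._toy_vocab[token] = len(self._toy_vocab)
        return len(added)
# Hypothetical usage:
#     toy = _ToySpecialTokensTokenizer(unk_token="<unk>", pad_token="<pad>")
#     toy.add_special_tokens({"cls_token": "<cls>"})  # -> 1, "<cls>" now has id 2
#     toy.pad_token_id                                # -> 1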
ENCODE_KWARGS_DOCSTRING = r"""
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to encode the sequences with the special tokens relative to their model.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`):
Activates and controls padding. Accepts the following values:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a
                  single sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`False`):
Activates and controls truncation. Accepts the following values:
* :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument
:obj:`max_length` or to the maximum acceptable input length for the model if that argument is not
provided. This will truncate token by token, removing a token from the longest sequence in the pair
if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
* :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with
sequence lengths greater than the model maximum admissible input size).
max_length (:obj:`int`, `optional`):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum
length is required by one of the truncation/padding parameters. If the model has no specific maximum
input length (like XLNet) truncation/padding to a maximum length will be deactivated.
stride (:obj:`int`, `optional`, defaults to 0):
If set to a number along with :obj:`max_length`, the overflowing tokens returned when
:obj:`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
returned to provide some overlap between truncated and overflowing sequences. The value of this
argument defines the number of overlapping tokens.
is_pretokenized (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the input is already tokenized.
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`):
If set, will return tensors instead of list of python integers. Acceptable values are:
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
* :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects.
"""
ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
return_token_type_ids (:obj:`bool`, `optional`):
Whether to return token type IDs. If left to the default, will return the token type IDs according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are token type IDs? <../glossary.html#token-type-ids>`__
return_attention_mask (:obj:`bool`, `optional`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are attention masks? <../glossary.html#attention-mask>`__
return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return overflowing token sequences.
return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return special tokens mask information.
return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return :obj:`(char_start, char_end)` for each token.
This is only available on fast tokenizers inheriting from
:class:`~transformers.PreTrainedTokenizerFast`, if using Python's tokenizer, this method will raise
:obj:`NotImplementedError`.
return_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return the lengths of the encoded inputs.
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to print information and warnings.
**kwargs: passed to the :obj:`self.tokenize()` method
Return:
:class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields:
- **input_ids** -- List of token ids to be fed to a model.
`What are input IDs? <../glossary.html#input-ids>`__
- **token_type_ids** -- List of token type ids to be fed to a model (when :obj:`return_token_type_ids=True`
or if `"token_type_ids"` is in :obj:`self.model_input_names`).
`What are token type IDs? <../glossary.html#token-type-ids>`__
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
:obj:`return_attention_mask=True` or if `"attention_mask"` is in :obj:`self.model_input_names`).
`What are attention masks? <../glossary.html#attention-mask>`__
- **overflowing_tokens** -- List of overflowing tokens sequences (when a :obj:`max_length` is specified and
:obj:`return_overflowing_tokens=True`).
- **num_truncated_tokens** -- Number of tokens truncated (when a :obj:`max_length` is specified and
:obj:`return_overflowing_tokens=True`).
- **special_tokens_mask** -- List of 0s and 1s, with 0 specifying added special tokens and 1 specifying
              regular sequence tokens (when :obj:`add_special_tokens=True` and :obj:`return_special_tokens_mask=True`).
- **length** -- The length of the inputs (when :obj:`return_length=True`)
"""
INIT_TOKENIZER_DOCSTRING = r"""
Class attributes (overridden by derived classes)
        - **vocab_files_names** (:obj:`Dict[str, str]`) -- A dictionary with, as keys, the ``__init__`` keyword name of
each vocabulary file required by the model, and as associated values, the filename for saving the associated
file (string).
- **pretrained_vocab_files_map** (:obj:`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the
low-level being the :obj:`short-cut-names` of the pretrained models with, as associated values, the
:obj:`url` to the associated pretrained vocabulary file.
        - **max_model_input_sizes** (:obj:`Dict[str, Optional[int]]`) -- A dictionary with, as keys, the
:obj:`short-cut-names` of the pretrained models, and as associated values, the maximum length of the sequence
inputs of this model, or :obj:`None` if the model has no maximum input size.
- **pretrained_init_configuration** (:obj:`Dict[str, Dict[str, Any]]`) -- A dictionary with, as keys, the
          :obj:`short-cut-names` of the pretrained models, and as associated values, a dictionary of specific
arguments to pass to the ``__init__`` method of the tokenizer class for this pretrained model when loading the
tokenizer with the :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`
method.
- **model_input_names** (:obj:`List[str]`) -- A list of inputs expected in the forward pass of the model.
- **padding_side** (:obj:`str`) -- The default value for the side on which the model should have padding
applied. Should be :obj:`'right'` or :obj:`'left'`.
Args:
model_max_length (:obj:`int`, `optional`):
The maximum length (in number of tokens) for the inputs to the transformer model.
When the tokenizer is loaded with
:meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`, this will be set to
the value stored for the associated model in ``max_model_input_sizes`` (see above). If no value is
provided, will default to VERY_LARGE_INTEGER (:obj:`int(1e30)`).
padding_side: (:obj:`str`, `optional`):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
model_input_names (:obj:`List[string]`, `optional`):
The list of inputs accepted by the forward pass of the model (like :obj:`"token_type_ids"` or
:obj:`"attention_mask"`). Default value is picked from the class attribute of the same name.
bos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing the beginning of a sentence. Will be associated to ``self.bos_token`` and
``self.bos_token_id``.
eos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing the end of a sentence. Will be associated to ``self.eos_token`` and
``self.eos_token_id``.
unk_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing an out-of-vocabulary token. Will be associated to ``self.unk_token`` and
``self.unk_token_id``.
sep_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token separating two different sentences in the same input (used by BERT for instance). Will be
associated to ``self.sep_token`` and ``self.sep_token_id``.
pad_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
attention mechanisms or loss computation. Will be associated to ``self.pad_token`` and
``self.pad_token_id``.
cls_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing the class of the input (used by BERT for instance). Will be associated to
``self.cls_token`` and ``self.cls_token_id``.
mask_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing a masked token (used by masked-language modeling pretraining objectives, like
BERT). Will be associated to ``self.mask_token`` and ``self.mask_token_id``.
additional_special_tokens (tuple or list of :obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A tuple or a list of additional special tokens. Add them here to ensure they won't be split by the
tokenization process. Will be associated to ``self.additional_special_tokens`` and
``self.additional_special_tokens_ids``.
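    A minimal sketch of how these arguments interact when loading a tokenizer (illustrative only; it
    assumes a cached ``bert-base-uncased`` vocabulary is available)::
        from transformers import BertTokenizer
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", padding_side="left")
        # model_max_length is picked up from max_model_input_sizes for this checkpoint (512),
        # while padding_side is overridden by the keyword argument above.
        assert tokenizer.model_max_length == 512 and tokenizer.padding_side == "left"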
"""
PREPARE_SEQ2SEQ_BATCH_DOCSTRING = """
Arguments:
        src_texts (:obj:`list`):
            List of documents to summarize or source language texts.
        tgt_texts (:obj:`list`, `optional`):
            List of target language texts or summaries.
max_length (:obj:`int`, `optional`):
            Controls the maximum length for encoder inputs (documents to summarize or source language texts).
If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum
length is required by one of the truncation/padding parameters. If the model has no specific maximum
input length (like XLNet) truncation/padding to a maximum length will be deactivated.
max_target_length (:obj:`int`, `optional`):
            Controls the maximum length of decoder inputs (target language texts or summaries).
If left unset or set to :obj:`None`, this will use the max_length value.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`):
Activates and controls padding. Accepts the following values:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a
single sequence if provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"):
If set, will return tensors instead of list of python integers. Acceptable values are:
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
* :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects.
truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`):
Activates and controls truncation. Accepts the following values:
* :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument
:obj:`max_length` or to the maximum acceptable input length for the model if that argument is not
provided. This will truncate token by token, removing a token from the longest sequence in the pair
if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
* :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with
sequence lengths greater than the model maximum admissible input size).
Return:
:class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields:
- **input_ids** -- List of token ids to be fed to the encoder.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
- **decoder_input_ids** -- List of token ids to be fed to the decoder.
- **decoder_attention_mask** -- List of indices specifying which tokens should be attended to by the decoder.
            This does not include the causal mask, which is built by the model.
        The full set of keys ``[input_ids, attention_mask, decoder_input_ids, decoder_attention_mask]``
        will only be returned if tgt_texts is passed. Otherwise, only input_ids and attention_mask will be returned.
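    A minimal usage sketch (illustrative; it assumes a seq2seq checkpoint such as
    ``facebook/bart-large-cnn`` is available and exposes ``prepare_seq2seq_batch``)::
        from transformers import BartTokenizer
        tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
        batch = tokenizer.prepare_seq2seq_batch(
            src_texts=["A long article to be summarized."],
            tgt_texts=["A short summary."],
            max_length=512,
            return_tensors="pt",
        )
        # batch now holds input_ids, attention_mask, decoder_input_ids and decoder_attention_mask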
"""
@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class PreTrainedTokenizerBase(SpecialTokensMixin):
"""
Base class for :class:`~transformers.PreTrainedTokenizer` and :class:`~transformers.PreTrainedTokenizerFast`.
    Handles shared (mostly boilerplate) methods for those two classes.
"""
vocab_files_names: Dict[str, str] = {}
pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {}
pretrained_init_configuration: Dict[str, Dict[str, Any]] = {}
max_model_input_sizes: Dict[str, Optional[int]] = {}
model_input_names: List[str] = ["token_type_ids", "attention_mask"]
padding_side: str = "right"
def __init__(self, **kwargs):
# inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
self.init_inputs = ()
self.init_kwargs = kwargs
# For backward compatibility we fallback to set model_max_length from max_len if provided
model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None))
self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
# Padding side is right by default and overridden in subclasses. If specified in the kwargs, it is changed.
self.padding_side = kwargs.pop("padding_side", self.padding_side)
assert self.padding_side in [
"right",
"left",
], f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)
super().__init__(**kwargs)
@property
def max_len(self) -> int:
"""
:obj:`int`: **Deprecated** Kept here for backward compatibility. Now renamed to :obj:`model_max_length` to
avoid ambiguity.
"""
warnings.warn(
"The `max_len` attribute has been deprecated and will be removed in a future version, use `model_max_length` instead.",
FutureWarning,
)
return self.model_max_length
@property
def max_len_single_sentence(self) -> int:
"""
:obj:`int`: The maximum length of a sentence that can be fed to the model.
"""
return self.model_max_length - self.num_special_tokens_to_add(pair=False)
@property
def max_len_sentences_pair(self) -> int:
"""
:obj:`int`: The maximum combined length of a pair of sentences that can be fed to the model.
"""
return self.model_max_length - self.num_special_tokens_to_add(pair=True)
@max_len_single_sentence.setter
def max_len_single_sentence(self, value) -> int:
# For backward compatibility, allow to try to setup 'max_len_single_sentence'.
if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose:
logger.warning(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
else:
raise ValueError(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
@max_len_sentences_pair.setter
def max_len_sentences_pair(self, value) -> int:
# For backward compatibility, allow to try to setup 'max_len_sentences_pair'.
if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose:
logger.warning(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
else:
raise ValueError(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
@classmethod
def from_pretrained(cls, *inputs, **kwargs):
r"""
Instantiate a :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase` (or a derived class) from
a predefined tokenizer.
Args:
pretrained_model_name_or_path (:obj:`str`):
Can be either:
- A string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.,
``bert-base-uncased``.
- A string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.,
``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing vocabulary files required by the tokenizer, for instance saved
using the :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`
method, e.g., ``./my_model_directory/``.
- (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary
file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g.,
``./my_model_directory/vocab.txt``.
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download the vocabulary files and override the cached versions if they
exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Attempt to resume the download if such a file
exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g.,
:obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each
request.
inputs (additional positional arguments, `optional`):
Will be passed along to the Tokenizer ``__init__`` method.
kwargs (additional keyword arguments, `optional`):
Will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like
``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``,
``mask_token``, ``additional_special_tokens``. See parameters in the ``__init__`` for more details.
Examples::
            # We can't directly instantiate the base class `PreTrainedTokenizerBase`, so let's show our examples on a derived class: BertTokenizer
# Download vocabulary from S3 and cache.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Download vocabulary from S3 (user-uploaded) and cache.
tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-german-cased')
# If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')
# If the tokenizer uses a single vocabulary file, you can point directly to this file
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')
# You can link tokens to special vocabulary when instantiating
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
# You should be sure '<unk>' is in the vocabulary when doing that.
            # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead.
assert tokenizer.unk_token == '<unk>'
"""
return cls._from_pretrained(*inputs, **kwargs)
@classmethod
def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
s3_models = list(cls.max_model_input_sizes.keys())
vocab_files = {}
init_configuration = {}
if pretrained_model_name_or_path in s3_models:
# Get the vocabulary from AWS S3 bucket
for file_id, map_list in cls.pretrained_vocab_files_map.items():
vocab_files[file_id] = map_list[pretrained_model_name_or_path]
if (
cls.pretrained_init_configuration
and pretrained_model_name_or_path in cls.pretrained_init_configuration
):
init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path].copy()
else:
# Get the vocabulary from local files
logger.info(
"Model name '{}' not found in model shortcut name list ({}). "
"Assuming '{}' is a path, a model identifier, or url to a directory containing tokenizer files.".format(
pretrained_model_name_or_path, ", ".join(s3_models), pretrained_model_name_or_path
)
)
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
if len(cls.vocab_files_names) > 1:
raise ValueError(
"Calling {}.from_pretrained() with the path to a single file or url is not supported."
"Use a model identifier or the path to a directory instead.".format(cls.__name__)
)
logger.warning(
"Calling {}.from_pretrained() with the path to a single file or url is deprecated".format(
cls.__name__
)
)
file_id = list(cls.vocab_files_names.keys())[0]
vocab_files[file_id] = pretrained_model_name_or_path
else:
# At this point pretrained_model_name_or_path is either a directory or a model identifier name
additional_files_names = {
"added_tokens_file": ADDED_TOKENS_FILE,
"special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,
"tokenizer_config_file": TOKENIZER_CONFIG_FILE,
"full_tokenizer_file": FULL_TOKENIZER_FILE,
}
# Look for the tokenizer files
for file_id, file_name in {**cls.vocab_files_names, **additional_files_names}.items():
if os.path.isdir(pretrained_model_name_or_path):
full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
if not os.path.exists(full_file_name):
logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
full_file_name = None
else:
full_file_name = hf_bucket_url(
pretrained_model_name_or_path, filename=file_name, use_cdn=False
)
vocab_files[file_id] = full_file_name
# Get files from url, cache, or disk depending on the case
try:
resolved_vocab_files = {}
for file_id, file_path in vocab_files.items():
if file_path is None:
resolved_vocab_files[file_id] = None
else:
resolved_vocab_files[file_id] = cached_path(
file_path,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
except EnvironmentError:
if pretrained_model_name_or_path in s3_models:
msg = "Couldn't reach server at '{}' to download vocabulary files."
else:
msg = (
"Model name '{}' was not found in tokenizers model name list ({}). "
"We assumed '{}' was a path or url to a directory containing vocabulary files "
"named {}, but couldn't find such vocabulary files at this path or url.".format(
pretrained_model_name_or_path,
", ".join(s3_models),
pretrained_model_name_or_path,
list(cls.vocab_files_names.values()),
)
)
raise EnvironmentError(msg)
if all(full_file_name is None for full_file_name in resolved_vocab_files.values()):
raise EnvironmentError(
"Model name '{}' was not found in tokenizers model name list ({}). "
"We assumed '{}' was a path, a model identifier, or url to a directory containing vocabulary files "
"named {} but couldn't find such vocabulary files at this path or url.".format(
pretrained_model_name_or_path,
", ".join(s3_models),
pretrained_model_name_or_path,
list(cls.vocab_files_names.values()),
)
)
for file_id, file_path in vocab_files.items():
if file_path == resolved_vocab_files[file_id]:
logger.info("loading file {}".format(file_path))
else:
logger.info("loading file {} from cache at {}".format(file_path, resolved_vocab_files[file_id]))
# Prepare tokenizer initialization kwargs
        # Did we save some inputs and kwargs to reload?
tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
if tokenizer_config_file is not None:
with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
init_kwargs = json.load(tokenizer_config_handle)
saved_init_inputs = init_kwargs.pop("init_inputs", ())
if not init_inputs:
init_inputs = saved_init_inputs
else:
init_kwargs = init_configuration
# Update with newly provided kwargs
init_kwargs.update(kwargs)
# Set max length if needed
if pretrained_model_name_or_path in cls.max_model_input_sizes:
# if we're using a pretrained model, ensure the tokenizer
            # won't index sequences longer than the number of positional embeddings
model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
if model_max_length is not None and isinstance(model_max_length, (int, float)):
init_kwargs["model_max_length"] = min(init_kwargs.get("model_max_length", int(1e30)), model_max_length)
# Merge resolved_vocab_files arguments in init_kwargs.
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
for args_name, file_path in resolved_vocab_files.items():
if args_name not in init_kwargs:
init_kwargs[args_name] = file_path
# Instantiate tokenizer.
try:
tokenizer = cls(*init_inputs, **init_kwargs)
except OSError:
raise OSError(
"Unable to load vocabulary from file. "
"Please check that the provided vocabulary is accessible and not corrupted."
)
# Save inputs and kwargs for saving and re-loading with ``save_pretrained``
tokenizer.init_inputs = init_inputs
tokenizer.init_kwargs = init_kwargs
# If there is a complementary special token map, load it
special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
if special_tokens_map_file is not None:
with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
special_tokens_map = json.load(special_tokens_map_handle)
for key, value in special_tokens_map.items():
if isinstance(value, dict):
value = AddedToken(**value)
elif isinstance(value, list):
value = [AddedToken(**token) if isinstance(token, dict) else token for token in value]
setattr(tokenizer, key, value)
# Add supplementary tokens.
special_tokens = tokenizer.all_special_tokens
if added_tokens_file is not None:
with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
added_tok_encoder = json.load(added_tokens_handle)
# Sort added tokens by index
added_tok_encoder_sorted = list(sorted(added_tok_encoder.items(), key=lambda x: x[1]))
for token, index in added_tok_encoder_sorted:
assert index == len(tokenizer), (
f"Non-consecutive added token '{token}' found. "
f"Should have index {len(tokenizer)} but has index {index} in saved vocabulary."
)
tokenizer.add_tokens(token, special_tokens=bool(token in special_tokens))
        # Check all our special tokens are registered as "no split" tokens (we don't cut them) and are in the vocab
added_tokens = tokenizer.sanitize_special_tokens()
if added_tokens:
logger.warning(
"Special tokens have been added in the vocabulary, make sure the associated word emebedding are fine-tuned or trained."
)
return tokenizer
def save_pretrained(self, save_directory: str) -> Tuple[str]:
"""
Save the tokenizer vocabulary files together with:
- added tokens,
- special tokens to class attributes mapping,
- tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert).
        This method makes sure the full tokenizer can then be re-loaded using the
:meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained` class method.
.. Warning::
This won't save modifications you may have applied to the tokenizer after the instantiation (for instance,
modifying :obj:`tokenizer.do_lower_case` after creation).
Args:
            save_directory (:obj:`str`): The path to a directory where the tokenizer will be saved.
Returns:
A tuple of :obj:`str`: The files saved.
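        A minimal round-trip sketch (assuming ``tokenizer`` is an already instantiated tokenizer)::
            files = tokenizer.save_pretrained("./my_tokenizer/")
            # the same class can then reload the saved files
            reloaded = tokenizer.__class__.from_pretrained("./my_tokenizer/")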
"""
if os.path.isfile(save_directory):
logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)
tokenizer_config = copy.deepcopy(self.init_kwargs)
if len(self.init_inputs) > 0:
tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
for file_id in self.vocab_files_names.keys():
tokenizer_config.pop(file_id, None)
with open(tokenizer_config_file, "w", encoding="utf-8") as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
with open(special_tokens_map_file, "w", encoding="utf-8") as f:
write_dict = {}
for key, value in self.special_tokens_map_extended.items():
if isinstance(value, AddedToken):
write_dict[key] = value.__getstate__()
elif isinstance(value, list):
write_dict[key] = [
token.__getstate__() if isinstance(token, AddedToken) else token for token in value
]
else:
write_dict[key] = value
f.write(json.dumps(write_dict, ensure_ascii=False))
added_vocab = self.get_added_vocab()
if added_vocab:
with open(added_tokens_file, "w", encoding="utf-8") as f:
out_str = json.dumps(added_vocab, ensure_ascii=False)
f.write(out_str)
vocab_files = self.save_vocabulary(save_directory)
return vocab_files + (special_tokens_map_file, added_tokens_file)
@add_end_docstrings(
ENCODE_KWARGS_DOCSTRING,
"""
**kwargs: Passed along to the `.tokenize()` method.
""",
"""
Returns:
:obj:`List[int]`, :obj:`torch.Tensor`, :obj:`tf.Tensor` or :obj:`np.ndarray`:
The tokenized ids of the text.
""",
)
def encode(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs
) -> List[int]:
"""
        Converts a string into a sequence of ids (integers), using the tokenizer and vocabulary.
Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.
Args:
text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the ``tokenize`` method) or a list of integers (tokenized string ids using the
``convert_tokens_to_ids`` method).
text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the ``tokenize`` method) or a list of integers (tokenized string ids using the
``convert_tokens_to_ids`` method).
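        A minimal sketch (assuming ``tokenizer`` is an instantiated tokenizer, e.g. a BERT one)::
            ids = tokenizer.encode("Hello world", add_special_tokens=True)
            # ids is a plain list of ints, e.g. [101, 7592, 2088, 102] for an uncased BERT vocabulary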
"""
encoded_inputs = self.encode_plus(
text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
return_tensors=return_tensors,
**kwargs,
)
return encoded_inputs["input_ids"]
def num_special_tokens_to_add(self, pair: bool = False) -> int:
raise NotImplementedError
def _get_padding_truncation_strategies(
self, padding=False, truncation=False, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs
):
"""
Find the correct padding/truncation strategy with backward compatibility
for old arguments (truncation_strategy and pad_to_max_length) and behaviors.
"""
old_truncation_strategy = kwargs.pop("truncation_strategy", "do_not_truncate")
old_pad_to_max_length = kwargs.pop("pad_to_max_length", False)
# Backward compatibility for previous behavior, maybe we should deprecate it:
# If you only set max_length, it activates truncation for max_length
if max_length is not None and padding is False and truncation is False:
if verbose:
logger.warning(
"Truncation was not explicitely activated but `max_length` is provided a specific value, "
"please use `truncation=True` to explicitely truncate examples to max length. "
"Defaulting to 'longest_first' truncation strategy. "
"If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy "
"more precisely by providing a specific strategy to `truncation`."
)
truncation = "longest_first"
# Get padding strategy
if padding is False and old_pad_to_max_length:
if verbose:
warnings.warn(
"The `pad_to_max_length` argument is deprecated and will be removed in a future version, "
"use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or "
"use `padding='max_length'` to pad to a max length. In this case, you can give a specific "
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the "
"maximal input size of the model (e.g. 512 for Bert).",
FutureWarning,
)
if max_length is None:
padding_strategy = PaddingStrategy.LONGEST
else:
padding_strategy = PaddingStrategy.MAX_LENGTH
elif padding is not False:
if padding is True:
padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(padding, PaddingStrategy):
padding_strategy = PaddingStrategy(padding)
else:
padding_strategy = PaddingStrategy.DO_NOT_PAD
# Get truncation strategy
if truncation is False and old_truncation_strategy != "do_not_truncate":
if verbose:
warnings.warn(
"The `truncation_strategy` argument is deprecated and will be removed in a future version, "
"use `truncation=True` to truncate examples to a max length. You can give a specific "
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the "
"maximal input size of the model (e.g. 512 for Bert). "
" If you have pairs of inputs, you can give a specific truncation strategy selected among "
"`truncation='only_first'` (will only truncate the first sentence in the pairs) "
"`truncation='only_second'` (will only truncate the second sentence in the pairs) "
"or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence in the pairs).",
FutureWarning,
)
truncation_strategy = TruncationStrategy(old_truncation_strategy)
elif truncation is not False:
if truncation is True:
truncation_strategy = (
TruncationStrategy.LONGEST_FIRST
) # Default to truncate the longest sequences in pairs of inputs
elif not isinstance(truncation, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation)
else:
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
if self.model_max_length > LARGE_INTEGER:
if verbose:
logger.warning(
"Asking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. "
"Default to no padding."
)
padding_strategy = PaddingStrategy.DO_NOT_PAD
else:
max_length = self.model_max_length
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE:
if self.model_max_length > LARGE_INTEGER:
if verbose:
logger.warning(
"Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. "
"Default to no truncation."
)
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
else:
max_length = self.model_max_length
# Test if we have a padding token
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (not self.pad_token or self.pad_token_id < 0):
raise ValueError(
"Asking to pad but the tokenizer does not have a padding token. "
"Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` "
"or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`."
)
# Check that we will truncate to a multiple of pad_to_multiple_of if both are provided
if (
truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
and padding_strategy != PaddingStrategy.DO_NOT_PAD
and pad_to_multiple_of is not None
and max_length is not None
and (max_length % pad_to_multiple_of != 0)
):
raise ValueError(
f"Truncation and padding are both activated but "
f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})."
)
return padding_strategy, truncation_strategy, max_length, kwargs
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences.
Args:
text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`):
The sequence or batch of sequences to be encoded.
Each sequence can be a string or a list of strings (pretokenized string).
If the sequences are provided as list of strings (pretokenized), you must set
:obj:`is_pretokenized=True` (to lift the ambiguity with a batch of sequences).
text_pair (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`):
The sequence or batch of sequences to be encoded.
Each sequence can be a string or a list of strings (pretokenized string).
If the sequences are provided as list of strings (pretokenized), you must set
:obj:`is_pretokenized=True` (to lift the ambiguity with a batch of sequences).
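        A minimal sketch of single-sequence and batched calls (assuming ``tokenizer`` is an
        instantiated tokenizer with a padding token defined)::
            single = tokenizer("Hello world", return_tensors="pt")
            batch = tokenizer(["First sentence.", "A second, longer sentence."], padding=True, truncation=True)
            # single["input_ids"] is a tensor of shape (1, sequence_length); batch["input_ids"] is a
            # list of equal-length id lists thanks to padding=True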
"""
# Input type checking for clearer error
assert isinstance(text, str) or (
isinstance(text, (list, tuple))
and (
len(text) == 0
or (
isinstance(text[0], str)
or (isinstance(text[0], (list, tuple)) and (len(text[0]) == 0 or isinstance(text[0][0], str)))
)
)
), (
"text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples)."
)
assert (
text_pair is None
or isinstance(text_pair, str)
or (
isinstance(text_pair, (list, tuple))
and (
len(text_pair) == 0
or (
isinstance(text_pair[0], str)
or (
isinstance(text_pair[0], (list, tuple))
and (len(text_pair[0]) == 0 or isinstance(text_pair[0][0], str))
)
)
)
)
), (
"text_pair input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples)."
)
is_batched = bool(
(not is_pretokenized and isinstance(text, (list, tuple)))
or (is_pretokenized and isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)))
)
if is_batched:
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
return self.batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
is_pretokenized=is_pretokenized,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self.encode_plus(
text=text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
is_pretokenized=is_pretokenized,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences.
.. warning::
This method is deprecated, ``__call__`` should be used instead.
Args:
text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]` (the latter only for not-fast tokenizers)):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the ``tokenize`` method) or a list of integers (tokenized string ids using the
``convert_tokens_to_ids`` method).
text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the ``tokenize`` method) or a list of integers (tokenized string ids using the
``convert_tokens_to_ids`` method).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus(
text=text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_pretokenized=is_pretokenized,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
raise NotImplementedError
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.
.. warning::
This method is deprecated, ``__call__`` should be used instead.
Args:
batch_text_or_text_pairs (:obj:`List[str]`, :obj:`List[Tuple[str, str]]`, :obj:`List[List[str]]`, :obj:`List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also :obj:`List[List[int]]`, :obj:`List[Tuple[List[int], List[int]]]`):
Batch of sequences or pair of sequences to be encoded.
This can be a list of string/string-sequences/int-sequences or a list of pair of
string/string-sequences/int-sequence (see details in ``encode_plus``).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_pretokenized=is_pretokenized,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
raise NotImplementedError
def pad(
self,
encoded_inputs: Union[
BatchEncoding,
List[BatchEncoding],
Dict[str, EncodedInput],
Dict[str, List[EncodedInput]],
List[Dict[str, EncodedInput]],
],
padding: Union[bool, str, PaddingStrategy] = True,
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
verbose: bool = True,
) -> BatchEncoding:
"""
Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
in the batch.
        The padding side (left/right) and the padding token ids are defined at the tokenizer level
        (with ``self.padding_side``, ``self.pad_token_id`` and ``self.pad_token_type_id``).
.. note::
If the ``encoded_inputs`` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the
result will use the same type unless you provide a different tensor type with ``return_tensors``. In the
case of PyTorch tensors, you will lose the specific device of your tensors however.
Args:
            encoded_inputs (:class:`~transformers.BatchEncoding`, list of :class:`~transformers.BatchEncoding`, :obj:`Dict[str, List[int]]`, :obj:`Dict[str, List[List[int]]]` or :obj:`List[Dict[str, List[int]]]`):
Tokenized inputs. Can represent one input (:class:`~transformers.BatchEncoding` or
:obj:`Dict[str, List[int]]`) or a batch of tokenized inputs (list of
:class:`~transformers.BatchEncoding`, `Dict[str, List[List[int]]]` or `List[Dict[str, List[int]]]`) so
you can use this method during preprocessing as well as in a PyTorch Dataloader collate function.
Instead of :obj:`List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors),
see the note above for the return type.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a
single sequence if provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
                If set, will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
>= 7.5 (Volta).
return_attention_mask (:obj:`bool`, `optional`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are attention masks? <../glossary.html#attention-mask>`__
return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`):
If set, will return tensors instead of list of python integers. Acceptable values are:
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
* :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects.
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to print more information and warnings.
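        A minimal collate-style sketch (assuming ``tokenizer`` is an instantiated tokenizer with a
        padding token defined)::
            features = [tokenizer("short text"), tokenizer("a somewhat longer piece of text")]
            batch = tokenizer.pad(features, padding=True)
            # batch["input_ids"] and batch["attention_mask"] now share a common sequence length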
"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], (dict, BatchEncoding)):
encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
assert "input_ids" in encoded_inputs, (
"You should supply an encoding or a list of encodings to this method. "
"An encoding is the output of one the encoding methods of the tokenizer, i.e. "
"__call__/encode_plus/batch_encode_plus. "
)
if not encoded_inputs["input_ids"]:
if return_attention_mask:
encoded_inputs["attention_mask"] = []
return encoded_inputs
# If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
first_element = encoded_inputs["input_ids"][0]
if isinstance(first_element, (list, tuple)) and first_element:
first_element = first_element[0]
if not isinstance(first_element, int):
if is_tf_available() and isinstance(first_element, tf.Tensor):
return_tensors = "tf" if return_tensors is None else return_tensors
elif is_torch_available() and isinstance(first_element, torch.Tensor):
return_tensors = "pt" if return_tensors is None else return_tensors
elif isinstance(first_element, np.ndarray):
return_tensors = "np" if return_tensors is None else return_tensors
else:
raise ValueError(
f"type of {first_element} unknown: {type(first_element)}. "
f"Should be one of a python, numpy, pytorch or tensorflow object."
)
def to_py_obj(obj):
if isinstance(obj, (list, tuple)):
return [to_py_obj(o) for o in obj]
elif is_tf_available() and isinstance(obj, tf.Tensor):
return obj.numpy().tolist()
elif is_torch_available() and isinstance(obj, torch.Tensor):
return obj.cpu().tolist()
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj
for key, value in encoded_inputs.items():
encoded_inputs[key] = to_py_obj(value)
# Convert padding_strategy in PaddingStrategy
padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
padding=padding, max_length=max_length, verbose=verbose
)
if encoded_inputs["input_ids"] and not isinstance(encoded_inputs["input_ids"][0], (list, tuple)):
encoded_inputs = self._pad(
encoded_inputs,
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
batch_size = len(encoded_inputs["input_ids"])
assert all(
len(v) == batch_size for v in encoded_inputs.values()
), "Some items in the output dictionnary have a different batch size than others."
if padding_strategy == PaddingStrategy.LONGEST:
max_length = max(len(inputs) for inputs in encoded_inputs["input_ids"])
padding_strategy = PaddingStrategy.MAX_LENGTH
batch_outputs = {}
for i in range(batch_size):
inputs = dict((k, v[i]) for k, v in encoded_inputs.items())
outputs = self._pad(
inputs,
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
return BatchEncoding(batch_outputs, tensor_type=return_tensors)
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create the token type IDs corresponding to the sequences passed.
`What are token type IDs? <../glossary.html#token-type-ids>`__
        Should be overridden in a subclass if the model has a special way of building those.
Args:
token_ids_0 (:obj:`List[int]`): The first tokenized sequence.
token_ids_1 (:obj:`List[int]`, `optional`): The second tokenized sequence.
Returns:
:obj:`List[int]`: The token type ids.
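        With this base implementation, a pair of id sequences of lengths 3 and 2 yields
        ``[0, 0, 0, 1, 1]`` (illustrative)::
            tokenizer.create_token_type_ids_from_sequences([1, 2, 3], [4, 5])  # -> [0, 0, 0, 1, 1]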
"""
if token_ids_1 is None:
return len(token_ids_0) * [0]
return [0] * len(token_ids_0) + [1] * len(token_ids_1)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks
        by concatenating and adding special tokens.
        This implementation does not add special tokens and this method should be overridden in a subclass.
Args:
token_ids_0 (:obj:`List[int]`): The first tokenized sequence.
token_ids_1 (:obj:`List[int]`, `optional`): The second tokenized sequence.
Returns:
:obj:`List[int]`: The model input with special tokens.
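        In this base implementation the two sequences are simply concatenated; subclasses (e.g. BERT's
        tokenizer) override it to add tokens such as ``[CLS]`` and ``[SEP]`` (illustrative)::
            tokenizer.build_inputs_with_special_tokens([1, 2], [3, 4])  # -> [1, 2, 3, 4] here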
"""
if token_ids_1 is None:
return token_ids_0
return token_ids_0 + token_ids_1
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
prepend_batch_axis: bool = False,
**kwargs
) -> BatchEncoding:
"""
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model.
        It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user-defined stride) for overflowing tokens.
Args:
ids (:obj:`List[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the
``tokenize`` and ``convert_tokens_to_ids`` methods.
pair_ids (:obj:`List[int]`, `optional`):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the
``tokenize`` and ``convert_tokens_to_ids`` methods.
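        A minimal sketch (assuming ``tokenizer`` is an instantiated tokenizer)::
            ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))
            encoded = tokenizer.prepare_for_model(ids, add_special_tokens=True)
            # encoded["input_ids"] now includes the model's special tokens around the original ids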
"""
if "return_lengths" in kwargs:
if verbose:
warnings.warn(
"The PreTrainedTokenizerBase.prepare_for_model `return_lengths` parameter is deprecated. "
"Please use `return_length` instead.",
FutureWarning,
)
return_length = kwargs["return_lengths"]
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
# Load from model defaults
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
encoded_inputs = {}
# Compute the total size of the returned encodings
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
# Truncation: Handle max sequence length
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_length
# Add special tokens
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])
        # Build output dictionary
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
# Check lengths
if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length and verbose:
logger.warning(
"Token indices sequence length is longer than the specified maximum sequence length "
"for this model ({} > {}). Running this sequence through the model will result in "
"indexing errors".format(len(ids), self.model_max_length)
)
# Padding
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(
encoded_inputs,
max_length=max_length,
padding=padding_strategy.value,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
if return_length:
encoded_inputs["length"] = len(encoded_inputs["input_ids"])
batch_outputs = BatchEncoding(
encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
)
return batch_outputs
def truncate_sequences(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
num_tokens_to_remove: int = 0,
truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
stride: int = 0,
) -> Tuple[List[int], List[int], List[int]]:
"""
        Truncates a sequence pair following the strategy and returns the truncated sequences together with the overflowing tokens.
Args:
ids (:obj:`List[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the
``tokenize`` and ``convert_tokens_to_ids`` methods.
pair_ids (:obj:`List[int]`, `optional`):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the
``tokenize`` and ``convert_tokens_to_ids`` methods.
num_tokens_to_remove (:obj:`int`, `optional`, defaults to 0):
Number of tokens to remove using the truncation strategy.
            truncation_strategy (:obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`'longest_first'`):
The strategy to follow for truncation. Can be:
* :obj:`'longest_first'`: Truncate to a maximum length specified with the argument
:obj:`max_length` or to the maximum acceptable input length for the model if that argument is not
provided. This will truncate token by token, removing a token from the longest sequence in the pair
if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                * :obj:`'do_not_truncate'`: No truncation (i.e., can output batch with
sequence lengths greater than the model maximum admissible input size).
stride (:obj:`int`, `optional`, defaults to 0):
If set to a positive number, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
Returns:
:obj:`Tuple[List[int], List[int], List[int]]`:
The truncated ``ids``, the truncated ``pair_ids`` and the list of overflowing tokens.
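        A minimal sketch of the default ``longest_first`` behaviour (illustrative)::
            ids, pair_ids, overflowing = tokenizer.truncate_sequences(
                [1, 2, 3, 4, 5], pair_ids=[6, 7], num_tokens_to_remove=2
            )
            # ids == [1, 2, 3]; pair_ids == [6, 7]; overflowing holds the removed ids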
"""
if num_tokens_to_remove <= 0:
return ids, pair_ids, []
if not isinstance(truncation_strategy, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation_strategy)
overflowing_tokens = []
if truncation_strategy == TruncationStrategy.LONGEST_FIRST:
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
if not overflowing_tokens:
window_len = min(len(ids), stride + 1)
else:
window_len = 1
overflowing_tokens.extend(ids[-window_len:])
ids = ids[:-1]
else:
if not overflowing_tokens:
window_len = min(len(pair_ids), stride + 1)
else:
window_len = 1
overflowing_tokens.extend(pair_ids[-window_len:])
pair_ids = pair_ids[:-1]
elif truncation_strategy == TruncationStrategy.ONLY_FIRST:
if len(ids) > num_tokens_to_remove:
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
ids = ids[:-num_tokens_to_remove]
else:
logger.error(
f"We need to remove {num_tokens_to_remove} to truncate the input"
f"but the first sequence has a length {len(ids)}. "
f"Please select another truncation strategy than {truncation_strategy}, "
f"for instance 'longest_first' or 'only_second'."
)
elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
if len(pair_ids) > num_tokens_to_remove:
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
else:
logger.error(
f"We need to remove {num_tokens_to_remove} to truncate the input"
f"but the second sequence has a length {len(pair_ids)}. "
f"Please select another truncation strategy than {truncation_strategy}, "
f"for instance 'longest_first' or 'only_first'."
)
return (ids, pair_ids, overflowing_tokens)
def _pad(
self,
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
>= 7.5 (Volta).
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(encoded_inputs["input_ids"])
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = (
padding_strategy != PaddingStrategy.DO_NOT_PAD and len(encoded_inputs["input_ids"]) != max_length
)
if needs_to_be_padded:
difference = max_length - len(encoded_inputs["input_ids"])
if self.padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) + [0] * difference
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference
elif self.padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + [1] * len(encoded_inputs["input_ids"])
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
else:
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
return encoded_inputs
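    # Illustrative sketch of the padding logic above (values are made up). With padding_side == "right",
    # max_length == 6, pad_to_multiple_of == 4 and pad_token_id == 0, the target length is rounded up
    # to 8, so input_ids == [5, 6, 7] would come back as:
    #
    #     input_ids      -> [5, 6, 7, 0, 0, 0, 0, 0]
    #     attention_mask -> [1, 1, 1, 0, 0, 0, 0, 0]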
def batch_decode(
self, sequences: List[List[int]], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True
) -> List[str]:
"""
Convert a list of lists of token ids into a list of strings by calling decode.
Args:
sequences (:obj:`List[List[int]]`):
List of tokenized input ids. Can be obtained using the ``__call__`` method.
skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to clean up the tokenization spaces.
Returns:
:obj:`List[str]`: The list of decoded sentences.
"""
return [
self.decode(
seq, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces
)
for seq in sequences
]
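    # Hedged usage sketch: ``batch_decode`` simply maps ``decode`` over a batch, so for ``tok``, an
    # instance of a concrete subclass that implements ``decode`` (token ids below are made up):
    #
    #     texts = tok.batch_decode([[101, 7592, 102], [101, 2088, 102]], skip_special_tokens=True)
    #     # -> one decoded string per inner list of token ids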
def decode(
self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True
) -> str:
"""
        Converts a sequence of ids into a string, using the tokenizer and vocabulary
with options to remove special tokens and clean up tokenization spaces.
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
Args:
token_ids (:obj:`List[int]`):
List of tokenized input ids. Can be obtained using the ``__call__`` method.
skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to clean up the tokenization spaces.
Returns:
:obj:`str`: The decoded sentence.
"""
raise NotImplementedError
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0 (:obj:`List[int]`):
List of ids of the first sequence.
token_ids_1 (:obj:`List[int]`, `optional`):
List of ids of the second sequence.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
assert already_has_special_tokens and token_ids_1 is None, (
"You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
"Please use a slow (full python) tokenizer to activate this argument."
"Or set `return_special_token_mask=True` when calling the encoding method "
"to get the special tokens mask in any tokenizer. "
)
all_special_ids = self.all_special_ids # cache the property
special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0]
return special_tokens_mask
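    # Sketch of the expected output (token ids are made up): if ``all_special_ids`` contained {0, 2},
    # then
    #
    #     get_special_tokens_mask([0, 31, 47, 2], already_has_special_tokens=True)
    #     # -> [1, 0, 0, 1]   (1 marks a special token, 0 marks a regular sequence token)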
@staticmethod
def clean_up_tokenization(out_string: str) -> str:
"""
        Clean up a list of simple English tokenization artifacts like spaces before punctuation and abbreviated forms.
Args:
out_string (:obj:`str`): The text to clean up.
Returns:
:obj:`str`: The cleaned-up string.
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string
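    # Example of the clean-up above (pure string manipulation, safe to run standalone):
    #
    #     clean_up_tokenization("Do n't worry , it 's fine .")
    #     # -> "Don't worry, it's fine."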
| 134,034 | 47.405562 | 252 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ALBERT checkpoint."""
import argparse
import logging
import torch
from transformers import AlbertConfig, AlbertForMaskedLM, load_tf_weights_in_albert
logging.basicConfig(level=logging.INFO)
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
# Initialise PyTorch model
config = AlbertConfig.from_json_file(albert_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = AlbertForMaskedLM(config)
# Load weights from tf checkpoint
load_tf_weights_in_albert(model, config, tf_checkpoint_path)
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
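# Hedged example invocation of this conversion script (all paths below are placeholders):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/albert/model.ckpt-best \
#       --albert_config_file /path/to/albert/albert_config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin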
| 2,157 | 33.806452 | 117 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""
import json
import logging
import math
import os
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from .activations import gelu_new, swish
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import Conv1D, PreTrainedModel, SequenceSummary, prune_conv1d_layer
logger = logging.getLogger(__name__)
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"
}
def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
""" Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
"""
import re
import numpy as np
if ".ckpt" in openai_checkpoint_folder_path:
openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)
logger.info("Loading weights from {}".format(openai_checkpoint_folder_path))
with open(openai_checkpoint_folder_path + "/parameters_names.json", "r", encoding="utf-8") as names_handle:
names = json.load(names_handle)
with open(openai_checkpoint_folder_path + "/params_shapes.json", "r", encoding="utf-8") as shapes_handle:
shapes = json.load(shapes_handle)
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(openai_checkpoint_folder_path + "/params_{}.npy".format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
# This was used when we had a single embedding matrix for positions and tokens
# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
# del init_params[1]
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.tokens_embed.weight.shape == init_params[1].shape
assert model.positions_embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
e.args += (model.positions_embed.weight.shape, init_params[0].shape)
raise
model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
model.positions_embed.weight.data = torch.from_numpy(init_params[0])
names.pop(0)
# Pop position and token embedding arrays
init_params.pop(0)
init_params.pop(0)
for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "w":
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
ACT_FNS = {"relu": nn.ReLU, "swish": swish, "gelu": gelu_new}
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super().__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.output_attentions = config.output_attentions
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.n_head, self.split_size // self.n_head)
heads = set(heads) - self.pruned_heads
for head in heads:
head -= sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.bias + -1e9 * (1 - self.bias) # TF implem method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.bias[:, :, : w.size(-2), : w.size(-1)]
w = w * b + -1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x, attention_mask=None, head_mask=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
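    # Shape sketch for the attention block above (batch size B, sequence length T, n_embd E,
    # n_head H; purely illustrative, not executed):
    #
    #     x: [B, T, E] --c_attn--> [B, T, 3E] --split--> query / key / value: [B, T, E] each
    #     split_heads: query, value -> [B, H, T, E/H]; key (k=True) -> [B, H, E/H, T]
    #     _attn: softmax(mask(q @ k / sqrt(E/H))) -> [B, H, T, T], then @ value -> [B, H, T, E/H]
    #     merge_heads -> [B, T, E] --c_proj / resid_dropout--> [B, T, E]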
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT_FNS[config.afn]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super().__init__()
nx = config.n_embd
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
def forward(self, x, attention_mask=None, head_mask=None):
attn_outputs = self.attn(x, attention_mask=attention_mask, head_mask=head_mask)
a = attn_outputs[0]
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
outputs = [h] + attn_outputs[1:]
return outputs
class OpenAIGPTPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = OpenAIGPTConfig
pretrained_model_archive_map = OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_openai_gpt
base_model_prefix = "transformer"
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
OPENAI_GPT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
OPENAI_GPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.OpenAIGPTTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.init_weights()
def get_input_embeddings(self):
return self.tokens_embed
def set_input_embeddings(self, new_embeddings):
self.tokens_embed = new_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import OpenAIGPTTokenizer, OpenAIGPTModel
import torch
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = OpenAIGPTModel.from_pretrained('openai-gpt')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if position_ids is None:
            # Code is different from when we had a single embedding matrix for position and token embeddings
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(input_shape[-1], dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# Attention mask.
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
                ) # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.n_layer
if inputs_embeds is None:
inputs_embeds = self.tokens_embed(input_ids)
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.tokens_embed(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
all_attentions = ()
all_hidden_states = ()
for i, block in enumerate(self.h):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
outputs = block(hidden_states, attention_mask, head_mask[i])
hidden_states = outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
outputs = (hidden_states.view(*output_shape),)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last hidden state, (all hidden states), (all attentions)
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
Language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel
import torch
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=input_ids)
loss, logits = outputs[:2]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), lm_logits, (all hidden states), (all attentions)
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
    the classification head takes as input the hidden state at a specified classification token index in the input sequence.
""",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = SequenceSummary(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
lm_labels=None,
mc_labels=None,
):
r"""
        mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, defaults to index of the last token of the input)
            Index of the classification token in each input sequence.
            Selected in the range ``[0, input_ids.size(-1) - 1]``.
lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`)
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`, defaults to :obj:`None`)
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
lm_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``lm_labels`` is provided):
Language modeling loss.
mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`multiple_choice_labels` is provided):
Multiple choice classification loss.
lm_prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import OpenAIGPTTokenizer, OpenAIGPTDoubleHeadsModel
import torch
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
tokenizer.add_special_tokens({'cls_token': '[CLS]'}) # Add a [CLS] to the vocabulary (we should train it also!)
model.resize_token_embeddings(len(tokenizer))
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
mc_token_ids = torch.tensor([input_ids.size(-1)-1, input_ids.size(-1)-1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
outputs = (loss,) + outputs
if lm_labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (lm loss), (mc loss), lm logits, mc logits, (all hidden_states), (attentions)
| 32,497 | 45.827089 | 177 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_t5_original_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The T5 authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert T5 checkpoint."""
import argparse
import logging
import torch
from transformers import T5Config, T5Model, load_tf_weights_in_t5
logging.basicConfig(level=logging.INFO)
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
# Initialise PyTorch model
config = T5Config.from_json_file(config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = T5Model(config)
# Load weights from tf checkpoint
load_tf_weights_in_t5(model, config, tf_checkpoint_path)
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model. \n"
"This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
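# Hedged example invocation of this conversion script (all paths below are placeholders):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin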
| 2,100 | 32.887097 | 117 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/pipelines.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import logging
import os
import pickle
import sys
from abc import ABC, abstractmethod
from contextlib import contextmanager
from os.path import abspath, exists
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoConfig
from .configuration_bart import BartConfig
from .configuration_distilbert import DistilBertConfig
from .configuration_roberta import RobertaConfig
from .configuration_t5 import T5Config
from .configuration_utils import PretrainedConfig
from .configuration_xlm import XLMConfig
from .data import SquadExample, squad_convert_examples_to_features
from .file_utils import is_tf_available, is_torch_available
from .modelcard import ModelCard
from .tokenization_auto import AutoTokenizer
from .tokenization_bert import BasicTokenizer
from .tokenization_utils import PreTrainedTokenizer
if is_tf_available():
import tensorflow as tf
from .modeling_tf_auto import (
TFAutoModel,
TFAutoModelForSequenceClassification,
TFAutoModelForQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
)
if is_torch_available():
import torch
from .modeling_auto import (
AutoModel,
AutoModelForSequenceClassification,
AutoModelForQuestionAnswering,
AutoModelForTokenClassification,
AutoModelWithLMHead,
)
logger = logging.getLogger(__name__)
def get_framework(model=None):
""" Select framework (TensorFlow/PyTorch) to use.
If both frameworks are installed and no specific model is provided, defaults to using PyTorch.
"""
if is_tf_available() and is_torch_available() and model is not None and not isinstance(model, str):
# Both framework are available but the user supplied a model class instance.
# Try to guess which framework to use from the model classname
framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
elif not is_tf_available() and not is_torch_available():
raise RuntimeError(
"At least one of TensorFlow 2.0 or PyTorch should be installed. "
"To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
"To install PyTorch, read the instructions at https://pytorch.org/."
)
else:
# framework = 'tf' if is_tf_available() else 'pt'
framework = "pt" if is_torch_available() else "tf"
return framework
class ArgumentHandler(ABC):
"""
Base interface for handling varargs for each Pipeline
"""
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError()
class DefaultArgumentHandler(ArgumentHandler):
"""
Default varargs argument parser handling parameters for each Pipeline
"""
def __call__(self, *args, **kwargs):
if "X" in kwargs:
return kwargs["X"]
elif "data" in kwargs:
return kwargs["data"]
elif len(args) == 1:
if isinstance(args[0], list):
return args[0]
else:
return [args[0]]
elif len(args) > 1:
return list(args)
raise ValueError("Unable to infer the format of the provided data (X=, data=, ...)")
class PipelineDataFormat:
"""
Base class for all the pipeline supported data format both for reading and writing.
Supported data formats currently includes:
- JSON
- CSV
- stdin/stdout (pipe)
    PipelineDataFormat also includes some utilities to work with multi-column data, like mapping from dataset
    columns to pipeline keyword arguments through the `dataset_kwarg_1=dataset_column_1` format.
"""
SUPPORTED_FORMATS = ["json", "csv", "pipe"]
def __init__(
self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False,
):
self.output_path = output_path
self.input_path = input_path
self.column = column.split(",") if column is not None else [""]
self.is_multi_columns = len(self.column) > 1
if self.is_multi_columns:
self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]
if output_path is not None and not overwrite:
if exists(abspath(self.output_path)):
raise OSError("{} already exists on disk".format(self.output_path))
if input_path is not None:
if not exists(abspath(self.input_path)):
raise OSError("{} doesnt exist on disk".format(self.input_path))
@abstractmethod
def __iter__(self):
raise NotImplementedError()
@abstractmethod
def save(self, data: dict):
"""
Save the provided data object with the representation for the current `DataFormat`.
:param data: data to store
:return:
"""
raise NotImplementedError()
def save_binary(self, data: Union[dict, List[dict]]) -> str:
"""
Save the provided data object as a pickle-formatted binary data on the disk.
:param data: data to store
:return: (str) Path where the data has been saved
"""
path, _ = os.path.splitext(self.output_path)
binary_path = os.path.extsep.join((path, "pickle"))
with open(binary_path, "wb+") as f_output:
pickle.dump(data, f_output)
return binary_path
@staticmethod
def from_str(
format: str, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False,
):
if format == "json":
return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "csv":
return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "pipe":
return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
else:
raise KeyError("Unknown reader {} (Available reader are json/csv/pipe)".format(format))
class CsvPipelineDataFormat(PipelineDataFormat):
def __init__(
self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
def __iter__(self):
with open(self.input_path, "r") as f:
reader = csv.DictReader(f)
for row in reader:
if self.is_multi_columns:
yield {k: row[c] for k, c in self.column}
else:
yield row[self.column[0]]
def save(self, data: List[dict]):
with open(self.output_path, "w") as f:
if len(data) > 0:
writer = csv.DictWriter(f, list(data[0].keys()))
writer.writeheader()
writer.writerows(data)
class JsonPipelineDataFormat(PipelineDataFormat):
def __init__(
self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
with open(input_path, "r") as f:
self._entries = json.load(f)
def __iter__(self):
for entry in self._entries:
if self.is_multi_columns:
yield {k: entry[c] for k, c in self.column}
else:
yield entry[self.column[0]]
def save(self, data: dict):
with open(self.output_path, "w") as f:
json.dump(data, f)
class PipedPipelineDataFormat(PipelineDataFormat):
"""
Read data from piped input to the python process.
    For multi-column data, columns should be separated by \t
If columns are provided, then the output will be a dictionary with {column_x: value_x}
"""
def __iter__(self):
for line in sys.stdin:
# Split for multi-columns
if "\t" in line:
line = line.split("\t")
if self.column:
# Dictionary to map arguments
yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
else:
yield tuple(line)
# No dictionary to map arguments
else:
yield line
def save(self, data: dict):
print(data)
def save_binary(self, data: Union[dict, List[dict]]) -> str:
if self.output_path is None:
raise KeyError(
"When using piped input on pipeline outputting large object requires an output file path. "
"Please provide such output path through --output argument."
)
return super().save_binary(data)
class _ScikitCompat(ABC):
"""
Interface layer for the Scikit and Keras compatibility.
"""
@abstractmethod
def transform(self, X):
raise NotImplementedError()
@abstractmethod
def predict(self, X):
raise NotImplementedError()
class Pipeline(_ScikitCompat):
"""
The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across
different pipelines.
Base class implementing pipelined operations.
Pipeline workflow is defined as a sequence of the following operations:
Input -> Tokenization -> Model Inference -> Post-Processing (Task dependent) -> Output
    Pipeline supports running on CPU or GPU through the device argument. Users can specify
    the device argument as an integer, -1 meaning "CPU", >= 0 referring to the CUDA device ordinal.
    Some pipelines, like for instance FeatureExtractionPipeline ('feature-extraction'), output large
    tensor objects as nested lists. In order to avoid dumping such large structures as textual data we
provide the binary_output constructor argument. If set to True, the output will be stored in the
pickle format.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
binary_output (:obj:`bool`, `optional`, defaults to :obj:`False`):
Flag indicating if the output the pipeline should happen in a binary format (i.e. pickle) or as raw text.
Return:
:obj:`List` or :obj:`Dict`:
Pipeline returns list or dictionary depending on:
- Whether the user supplied multiple samples
- Whether the pipeline exposes multiple fields in the output object
"""
default_input_names = None
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
task: str = "",
args_parser: ArgumentHandler = None,
device: int = -1,
binary_output: bool = False,
):
if framework is None:
framework = get_framework()
self.model = model
self.tokenizer = tokenizer
self.modelcard = modelcard
self.framework = framework
self.device = device if framework == "tf" else torch.device("cpu" if device < 0 else "cuda:{}".format(device))
self.binary_output = binary_output
self._args_parser = args_parser or DefaultArgumentHandler()
# Special handling
if self.framework == "pt" and self.device.type == "cuda":
self.model = self.model.to(self.device)
# Update config with task specific parameters
task_specific_params = self.model.config.task_specific_params
if task_specific_params is not None and task in task_specific_params:
self.model.config.update(task_specific_params.get(task))
def save_pretrained(self, save_directory):
"""
Save the pipeline's model and tokenizer to the specified save_directory
"""
if not os.path.isdir(save_directory):
logger.error("Provided path ({}) should be a directory".format(save_directory))
return
self.model.save_pretrained(save_directory)
self.tokenizer.save_pretrained(save_directory)
if self.modelcard is not None:
self.modelcard.save_pretrained(save_directory)
def transform(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
def predict(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
@contextmanager
def device_placement(self):
"""
        Context Manager allowing tensor allocation on the user-specified device in a framework agnostic way.
example:
# Explicitly ask for tensor allocation on CUDA device :0
nlp = pipeline(..., device=0)
with nlp.device_placement():
# Every framework specific tensor allocation will be done on the request device
output = nlp(...)
Returns:
Context manager
"""
if self.framework == "tf":
with tf.device("/CPU:0" if self.device == -1 else "/device:GPU:{}".format(self.device)):
yield
else:
if self.device.type == "cuda":
torch.cuda.set_device(self.device)
yield
def ensure_tensor_on_device(self, **inputs):
"""
Ensure PyTorch tensors are on the specified device.
:param inputs:
:return:
"""
return {name: tensor.to(self.device) for name, tensor in inputs.items()}
def inputs_for_model(self, features: Union[dict, List[dict]]) -> Dict:
"""
Generates the input dictionary with model-specific parameters.
Returns:
dict holding all the required parameters for model's forward
"""
args = ["input_ids", "attention_mask"]
if not isinstance(self.model.config, (DistilBertConfig, XLMConfig, RobertaConfig, BartConfig, T5Config)):
args += ["token_type_ids"]
# PR #1548 (CLI) There is an issue with attention_mask
# if 'xlnet' in model_type or 'xlm' in model_type:
# args += ['cls_index', 'p_mask']
if isinstance(features, dict):
return {k: features[k] for k in args}
else:
return {k: [feature[k] for feature in features] for k in args}
def _parse_and_tokenize(self, *texts, pad_to_max_length=False, **kwargs):
"""
Parse arguments and tokenize
"""
# Parse arguments
inputs = self._args_parser(*texts, **kwargs)
inputs = self.tokenizer.batch_encode_plus(
inputs,
add_special_tokens=True,
return_tensors=self.framework,
max_length=self.tokenizer.max_len,
pad_to_max_length=pad_to_max_length,
)
# Filter out features not available on specific models
inputs = self.inputs_for_model(inputs)
return inputs
def __call__(self, *texts, **kwargs):
inputs = self._parse_and_tokenize(*texts, **kwargs)
return self._forward(inputs)
def _forward(self, inputs, return_tensors=False):
"""
Internal framework specific forward dispatching.
Args:
inputs: dict holding all the keyworded arguments for required by the model forward method.
return_tensors: Whether to return native framework (pt/tf) tensors rather than numpy array.
Returns:
Numpy array
"""
# Encode for forward
with self.device_placement():
if self.framework == "tf":
# TODO trace model
predictions = self.model(inputs, training=False)[0]
else:
with torch.no_grad():
inputs = self.ensure_tensor_on_device(**inputs)
predictions = self.model(**inputs)[0].cpu()
if return_tensors:
return predictions
else:
return predictions.numpy()
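    # End-to-end sketch of the workflow described in the class docstring; ``model`` and ``tokenizer``
    # are assumed to be already-loaded compatible objects, and this mirrors what the concrete
    # subclasses below (e.g. FeatureExtractionPipeline) do:
    #
    #     nlp = FeatureExtractionPipeline(model=model, tokenizer=tokenizer, framework="pt")
    #     features = nlp("some input text")
    #     # __call__ -> _parse_and_tokenize (batch_encode_plus) -> _forward (forward pass under
    #     # torch.no_grad()), with the result returned as nested lists of floats.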
class FeatureExtractionPipeline(Pipeline):
"""
Feature extraction pipeline using Model head. This pipeline extracts the hidden states from the base transformer,
    which can be used as features in downstream tasks.
This feature extraction pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "feature-extraction", for extracting features of a sequence.
All models may be used for this pipeline. See a list of all models, including community-contributed models on
`huggingface.co/models <https://huggingface.co/models>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
task: str = "",
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=True,
task=task,
)
def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs).tolist()
class TextClassificationPipeline(Pipeline):
"""
Text classification pipeline using ModelForSequenceClassification head. See the
`sequence classification usage <../usage.html#sequence-classification>`__ examples for more information.
This text classification pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "sentiment-analysis", for classifying sequences according to positive or negative sentiments.
The models that this pipeline can use are models that have been fine-tuned on a sequence classification task.
See the list of available community models fine-tuned on such a task on
`huggingface.co/models <https://huggingface.co/models?search=&filter=text-classification>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __call__(self, *args, **kwargs):
outputs = super().__call__(*args, **kwargs)
        scores = np.exp(outputs) / np.exp(outputs).sum(-1, keepdims=True)
return [{"label": self.model.config.id2label[item.argmax()], "score": item.max()} for item in scores]
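# Minimal standalone sketch (illustrative only) of the post-processing performed by
# TextClassificationPipeline.__call__ above: a softmax over the logits, then the argmax
# mapped through an id2label table. The logits and label names below are made up.
def _example_text_classification_postprocess():
    logits = np.array([[-1.2, 2.3]])                      # one sequence, two classes
    scores = np.exp(logits) / np.exp(logits).sum(-1, keepdims=True)
    id2label = {0: "NEGATIVE", 1: "POSITIVE"}             # hypothetical label map
    return [{"label": id2label[row.argmax()], "score": row.max()} for row in scores]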
class FillMaskPipeline(Pipeline):
"""
Masked language modeling prediction pipeline using ModelWithLMHead head. See the
`masked language modeling usage <../usage.html#masked-language-modeling>`__ examples for more information.
This mask filling pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "fill-mask", for predicting masked tokens in a sequence.
The models that this pipeline can use are models that have been trained with a masked language modeling objective,
which includes the bi-directional models in the library.
See the list of available community models on
`huggingface.co/models <https://huggingface.co/models?search=&filter=lm-head>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
topk=5,
task: str = "",
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=True,
task=task,
)
self.topk = topk
def __call__(self, *args, **kwargs):
inputs = self._parse_and_tokenize(*args, **kwargs)
outputs = self._forward(inputs, return_tensors=True)
results = []
batch_size = outputs.shape[0] if self.framework == "tf" else outputs.size(0)
for i in range(batch_size):
input_ids = inputs["input_ids"][i]
result = []
if self.framework == "tf":
masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy().item()
logits = outputs[i, masked_index, :]
probs = tf.nn.softmax(logits)
topk = tf.math.top_k(probs, k=self.topk)
values, predictions = topk.values.numpy(), topk.indices.numpy()
else:
masked_index = (input_ids == self.tokenizer.mask_token_id).nonzero().item()
logits = outputs[i, masked_index, :]
probs = logits.softmax(dim=0)
values, predictions = probs.topk(self.topk)
for v, p in zip(values.tolist(), predictions.tolist()):
tokens = input_ids.numpy()
tokens[masked_index] = p
# Filter padding out:
tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
result.append({"sequence": self.tokenizer.decode(tokens), "score": v, "token": p})
# Append
results += [result]
if len(results) == 1:
return results[0]
return results
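# Illustrative usage sketch only (not used anywhere in the library) for FillMaskPipeline
# above. It assumes the default "fill-mask" checkpoint from SUPPORTED_TASKS below
# (a RoBERTa-based model whose mask token is assumed to be "<mask>"); each returned dict
# carries "sequence", "score" and "token", as built in __call__ above.
def _example_fill_mask():
    unmasker = pipeline("fill-mask", topk=3)
    candidates = unmasker("The goal of life is <mask>.")
    return [(c["sequence"], round(c["score"], 3)) for c in candidates]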
class NerPipeline(Pipeline):
"""
Named Entity Recognition pipeline using ModelForTokenClassification head. See the
`named entity recognition usage <../usage.html#named-entity-recognition>`__ examples for more information.
This token recognition pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "ner", for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous.
The models that this pipeline can use are models that have been fine-tuned on a token classification task.
See the list of available community models fine-tuned on such a task on
`huggingface.co/models <https://huggingface.co/models?search=&filter=token-classification>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
default_input_names = "sequences"
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
binary_output: bool = False,
ignore_labels=["O"],
task: str = "",
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=binary_output,
task=task,
)
self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
self.ignore_labels = ignore_labels
def __call__(self, *texts, **kwargs):
inputs = self._args_parser(*texts, **kwargs)
answers = []
for sentence in inputs:
# Manage correct placement of the tensors
with self.device_placement():
tokens = self.tokenizer.encode_plus(
sentence,
return_attention_mask=False,
return_tensors=self.framework,
max_length=self.tokenizer.max_len,
)
# Forward
if self.framework == "tf":
entities = self.model(tokens)[0][0].numpy()
input_ids = tokens["input_ids"].numpy()[0]
else:
with torch.no_grad():
tokens = self.ensure_tensor_on_device(**tokens)
entities = self.model(**tokens)[0][0].cpu().numpy()
input_ids = tokens["input_ids"].cpu().numpy()[0]
score = np.exp(entities) / np.exp(entities).sum(-1, keepdims=True)
labels_idx = score.argmax(axis=-1)
answer = []
for idx, label_idx in enumerate(labels_idx):
if self.model.config.id2label[label_idx] not in self.ignore_labels:
answer += [
{
"word": self.tokenizer.convert_ids_to_tokens(int(input_ids[idx])),
"score": score[idx][label_idx].item(),
"entity": self.model.config.id2label[label_idx],
}
]
# Append
answers += [answer]
if len(answers) == 1:
return answers[0]
return answers
TokenClassificationPipeline = NerPipeline
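# Illustrative usage sketch only (not used anywhere in the library) for NerPipeline above.
# It assumes the default "ner" checkpoint from SUPPORTED_TASKS below; entries tagged "O"
# are dropped by default via ignore_labels, and each remaining dict carries "word",
# "score" and "entity".
def _example_ner():
    ner = pipeline("ner")
    entities = ner("Hugging Face is based in New York City.")
    return [(e["word"], e["entity"]) for e in entities]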
class QuestionAnsweringArgumentHandler(ArgumentHandler):
"""
QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped
to internal SquadExample / SquadFeature structures.
    QuestionAnsweringArgumentHandler manages all the possible ways to create a SquadExample from the command-line
    supplied arguments.
"""
def __call__(self, *args, **kwargs):
        # Positional args: handling is essentially the same as for X and data, so forward to avoid duplicating
if args is not None and len(args) > 0:
if len(args) == 1:
kwargs["X"] = args[0]
else:
kwargs["X"] = list(args)
# Generic compatibility with sklearn and Keras
# Batched data
if "X" in kwargs or "data" in kwargs:
inputs = kwargs["X"] if "X" in kwargs else kwargs["data"]
if isinstance(inputs, dict):
inputs = [inputs]
else:
# Copy to avoid overriding arguments
inputs = [i for i in inputs]
for i, item in enumerate(inputs):
if isinstance(item, dict):
if any(k not in item for k in ["question", "context"]):
raise KeyError("You need to provide a dictionary with keys {question:..., context:...}")
inputs[i] = QuestionAnsweringPipeline.create_sample(**item)
elif not isinstance(item, SquadExample):
raise ValueError(
"{} argument needs to be of type (list[SquadExample | dict], SquadExample, dict)".format(
"X" if "X" in kwargs else "data"
)
)
# Tabular input
elif "question" in kwargs and "context" in kwargs:
if isinstance(kwargs["question"], str):
kwargs["question"] = [kwargs["question"]]
if isinstance(kwargs["context"], str):
kwargs["context"] = [kwargs["context"]]
inputs = [
QuestionAnsweringPipeline.create_sample(q, c) for q, c in zip(kwargs["question"], kwargs["context"])
]
else:
raise ValueError("Unknown arguments {}".format(kwargs))
if not isinstance(inputs, list):
inputs = [inputs]
return inputs
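# Minimal sketch (illustrative only) of the call styles the argument handler above
# normalizes. Every variant below ends up as a list of SquadExample objects; no model is
# involved at this stage, and the questions/contexts are made-up examples.
def _example_question_answering_inputs():
    handler = QuestionAnsweringArgumentHandler()
    as_dict = handler(X={"question": "Where do I live?", "context": "I live in Berlin."})
    as_kwargs = handler(question="Where do I live?", context="I live in Berlin.")
    as_batch = handler(
        question=["Where do I live?", "Who am I?"],
        context=["I live in Berlin.", "I am a sentence."],
    )
    return len(as_dict), len(as_kwargs), len(as_batch)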
class QuestionAnsweringPipeline(Pipeline):
"""
Question Answering pipeline using ModelForQuestionAnswering head. See the
`question answering usage <../usage.html#question-answering>`__ examples for more information.
This question answering can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "question-answering", for answering questions given a context.
The models that this pipeline can use are models that have been fine-tuned on a question answering task.
See the list of available community models fine-tuned on such a task on
`huggingface.co/models <https://huggingface.co/models?search=&filter=question-answering>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
default_input_names = "question,context"
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
device: int = -1,
task: str = "",
**kwargs
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=QuestionAnsweringArgumentHandler(),
device=device,
task=task,
**kwargs,
)
@staticmethod
def create_sample(
question: Union[str, List[str]], context: Union[str, List[str]]
) -> Union[SquadExample, List[SquadExample]]:
"""
QuestionAnsweringPipeline leverages the SquadExample/SquadFeatures internally.
        This helper method encapsulates all the logic for converting question(s) and context(s) to SquadExample(s).
We currently support extractive question answering.
Arguments:
            question: (str, List[str]) The question to be asked for the associated context
context: (str, List[str]) The context in which we will look for the answer.
Returns:
SquadExample initialized with the corresponding question and context.
"""
if isinstance(question, list):
return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]
else:
return SquadExample(None, question, context, None, None, None)
def __call__(self, *texts, **kwargs):
"""
Args:
We support multiple use-cases, the following are exclusive:
X: sequence of SquadExample
data: sequence of SquadExample
question: (str, List[str]), batch of question(s) to map along with context
context: (str, List[str]), batch of context(s) associated with the provided question keyword argument
Returns:
            dict: {'answer': str, 'score': float, 'start': int, 'end': int}
            answer: the textual answer in the initial context
            score: the score the model assigned to the current answer
            start: the character index in the original string corresponding to the beginning of the answer's span
            end: the character index in the original string corresponding to the ending of the answer's span
"""
# Set defaults values
kwargs.setdefault("topk", 1)
kwargs.setdefault("doc_stride", 128)
kwargs.setdefault("max_answer_len", 15)
kwargs.setdefault("max_seq_len", 384)
kwargs.setdefault("max_question_len", 64)
if kwargs["topk"] < 1:
raise ValueError("topk parameter should be >= 1 (got {})".format(kwargs["topk"]))
if kwargs["max_answer_len"] < 1:
raise ValueError("max_answer_len parameter should be >= 1 (got {})".format(kwargs["max_answer_len"]))
# Convert inputs to features
examples = self._args_parser(*texts, **kwargs)
features_list = [
squad_convert_examples_to_features(
[example],
self.tokenizer,
kwargs["max_seq_len"],
kwargs["doc_stride"],
kwargs["max_question_len"],
False,
)
for example in examples
]
all_answers = []
for features, example in zip(features_list, examples):
fw_args = self.inputs_for_model([f.__dict__ for f in features])
# Manage tensor allocation on correct device
with self.device_placement():
if self.framework == "tf":
fw_args = {k: tf.constant(v) for (k, v) in fw_args.items()}
start, end = self.model(fw_args)
start, end = start.numpy(), end.numpy()
else:
with torch.no_grad():
# Retrieve the score for the context tokens only (removing question tokens)
fw_args = {k: torch.tensor(v, device=self.device) for (k, v) in fw_args.items()}
start, end = self.model(**fw_args)
start, end = start.cpu().numpy(), end.cpu().numpy()
answers = []
for (feature, start_, end_) in zip(features, start, end):
# Normalize logits and spans to retrieve the answer
start_ = np.exp(start_) / np.sum(np.exp(start_))
end_ = np.exp(end_) / np.sum(np.exp(end_))
# Mask padding and question
start_, end_ = (
start_ * np.abs(np.array(feature.p_mask) - 1),
end_ * np.abs(np.array(feature.p_mask) - 1),
)
# TODO : What happens if not possible
# Mask CLS
start_[0] = end_[0] = 0
starts, ends, scores = self.decode(start_, end_, kwargs["topk"], kwargs["max_answer_len"])
char_to_word = np.array(example.char_to_word_offset)
# Convert the answer (tokens) back to the original text
answers += [
{
"score": score.item(),
"start": np.where(char_to_word == feature.token_to_orig_map[s])[0][0].item(),
"end": np.where(char_to_word == feature.token_to_orig_map[e])[0][-1].item(),
"answer": " ".join(
example.doc_tokens[feature.token_to_orig_map[s] : feature.token_to_orig_map[e] + 1]
),
}
for s, e, score in zip(starts, ends, scores)
]
answers = sorted(answers, key=lambda x: x["score"], reverse=True)[: kwargs["topk"]]
all_answers += answers
if len(all_answers) == 1:
return all_answers[0]
return all_answers
def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int) -> Tuple:
"""
        Takes the output of any QuestionAnswering head and generates probabilities for each span to be
        the actual answer.
        In addition, it filters out some unwanted/impossible cases, such as the answer length being greater than
        max_answer_len or the answer end position coming before the start position.
        The method supports returning the k-best answers through the topk argument.
Args:
start: numpy array, holding individual start probabilities for each token
end: numpy array, holding individual end probabilities for each token
topk: int, indicates how many possible answer span(s) to extract from the model's output
max_answer_len: int, maximum size of the answer to extract from the model's output
"""
# Ensure we have batch axis
if start.ndim == 1:
start = start[None]
if end.ndim == 1:
end = end[None]
# Compute the score of each tuple(start, end) to be the real answer
outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
# Remove candidate with end < start and end - start > max_answer_len
candidates = np.tril(np.triu(outer), max_answer_len - 1)
# Inspired by Chen & al. (https://github.com/facebookresearch/DrQA)
scores_flat = candidates.flatten()
if topk == 1:
idx_sort = [np.argmax(scores_flat)]
elif len(scores_flat) < topk:
idx_sort = np.argsort(-scores_flat)
else:
idx = np.argpartition(-scores_flat, topk)[0:topk]
idx_sort = idx[np.argsort(-scores_flat[idx])]
start, end = np.unravel_index(idx_sort, candidates.shape)[1:]
return start, end, candidates[0, start, end]
def span_to_answer(self, text: str, start: int, end: int):
"""
        When decoding from token probabilities, this method maps token indexes to the actual words in
        the initial context.
Args:
text: str, the actual context to extract the answer from
start: int, starting answer token index
end: int, ending answer token index
Returns:
dict: {'answer': str, 'start': int, 'end': int}
"""
words = []
token_idx = char_start_idx = char_end_idx = chars_idx = 0
for i, word in enumerate(text.split(" ")):
token = self.tokenizer.tokenize(word)
# Append words if they are in the span
if start <= token_idx <= end:
if token_idx == start:
char_start_idx = chars_idx
if token_idx == end:
char_end_idx = chars_idx + len(word)
words += [word]
# Stop if we went over the end of the answer
if token_idx > end:
break
# Append the subtokenization length to the running index
token_idx += len(token)
chars_idx += len(word) + 1
# Join text with spaces
return {
"answer": " ".join(words),
"start": max(0, char_start_idx),
"end": min(len(text), char_end_idx),
}
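# Small numpy sketch (illustrative only) of the span-scoring idea used in
# QuestionAnsweringPipeline.decode above: candidate scores are the outer product of start
# and end probabilities, masked so that end >= start and the span length stays within
# max_answer_len. The probability vectors below are made up for illustration.
def _example_span_scoring():
    start = np.array([0.05, 0.7, 0.2, 0.05])   # hypothetical start probabilities
    end = np.array([0.05, 0.1, 0.75, 0.1])     # hypothetical end probabilities
    max_answer_len = 2
    outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 0))
    candidates = np.tril(np.triu(outer), max_answer_len - 1)
    best_start, best_end = np.unravel_index(np.argmax(candidates), candidates.shape)
    return best_start, best_end, candidates[best_start, best_end]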
class SummarizationPipeline(Pipeline):
"""
Summarize news articles and other documents
Usage::
# use bart in pytorch
summarizer = pipeline("summarization")
summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
# use t5 in tf
summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")
summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
Supported Models:
The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is currently, '`bart-large-cnn`', '`t5-small`', '`t5-base`', '`t5-large`', '`t5-3b`', '`t5-11b`'.
Arguments:
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`, defaults to :obj:`None`):
The model that will be used by the pipeline to make predictions. This can be :obj:`None`, a string
checkpoint identifier or an actual pre-trained model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
If :obj:`None`, the default of the pipeline will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`, defaults to :obj:`None`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be :obj:`None`,
a string checkpoint identifier or an actual pre-trained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If :obj:`None`, the default of the pipeline will be loaded.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __call__(
self, *documents, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
r"""
Args:
*documents: (list of strings) articles to be summarized
return_text: (bool, default=True) whether to add a decoded "summary_text" to each result
return_tensors: (bool, default=False) whether to return the raw "summary_token_ids" to each result
            clean_up_tokenization_spaces: (`optional`) bool whether to clean up potential extra spaces in the decoded text
**generate_kwargs: extra kwargs passed to `self.model.generate`_
Returns:
list of dicts with 'summary_text' and/or 'summary_token_ids' for each document_to_summarize
.. _`self.model.generate`:
https://huggingface.co/transformers/model_doc/bart.html#transformers.BartForConditionalGeneration.generate
"""
assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
assert len(documents) > 0, "Please provide a document to summarize"
if self.framework == "tf" and "BartForConditionalGeneration" in self.model.__class__.__name__:
raise NotImplementedError(
"Tensorflow is not yet supported for Bart. Please consider using T5, e.g. `t5-base`"
)
prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(documents[0], list):
assert (
self.tokenizer.pad_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
documents = ([prefix + document for document in documents[0]],)
pad_to_max_length = True
elif isinstance(documents[0], str):
documents = (prefix + documents[0],)
pad_to_max_length = False
else:
            raise ValueError(
                " `documents[0]`: {} has the wrong format. It should be either of type `str` or type `list`".format(
documents[0]
)
)
with self.device_placement():
inputs = self._parse_and_tokenize(*documents, pad_to_max_length=pad_to_max_length)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
input_length = inputs["input_ids"].shape[-1]
elif self.framework == "tf":
input_length = tf.shape(inputs["input_ids"])[-1].numpy()
min_length = generate_kwargs.get("min_length", self.model.config.min_length)
if input_length < min_length // 2:
                logger.warning(
                    "Your min_length is set to {}, but your input_length is only {}. You might consider decreasing min_length manually, e.g. summarizer('...', min_length=10)".format(
min_length, input_length
)
)
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if input_length < max_length:
                logger.warning(
                    "Your max_length is set to {}, but your input_length is only {}. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)".format(
max_length, input_length
)
)
summaries = self.model.generate(
inputs["input_ids"], attention_mask=inputs["attention_mask"], **generate_kwargs,
)
results = []
for summary in summaries:
record = {}
if return_tensors:
record["summary_token_ids"] = summary
if return_text:
record["summary_text"] = self.tokenizer.decode(
summary, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
results.append(record)
return results
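# Illustrative usage sketch only (not used anywhere in the library) for
# SummarizationPipeline above. A single string and a batch (a list of strings) are both
# accepted; batching requires the tokenizer to define a pad token, as asserted in
# __call__ above. Downloading the default checkpoint is assumed to work, and
# generate_kwargs such as min_length/max_length are forwarded to self.model.generate.
def _example_summarization():
    summarizer = pipeline("summarization")
    single = summarizer("Sam Shleifer writes the best docstring examples in the whole world.",
                        min_length=5, max_length=20)
    batch = summarizer(["First article to condense.", "Second article to condense."],
                       min_length=5, max_length=20)
    return single[0]["summary_text"], [r["summary_text"] for r in batch]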
class TranslationPipeline(Pipeline):
"""
Translates from one language to another.
Usage::
en_fr_translator = pipeline("translation_en_to_fr")
en_fr_translator("How old are you?")
Supported Models: "t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"
Arguments:
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`, defaults to :obj:`None`):
The model that will be used by the pipeline to make predictions. This can be :obj:`None`, a string
checkpoint identifier or an actual pre-trained model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
If :obj:`None`, the default of the pipeline will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`, defaults to :obj:`None`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be :obj:`None`,
a string checkpoint identifier or an actual pre-trained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If :obj:`None`, the default of the pipeline will be loaded.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __call__(
self, *texts, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
r"""
Args:
*texts: (list of strings) texts to be translated
return_text: (bool, default=True) whether to add a decoded "translation_text" to each result
return_tensors: (bool, default=False) whether to return the raw "translation_token_ids" to each result
**generate_kwargs: extra kwargs passed to `self.model.generate`_
Returns:
list of dicts with 'translation_text' and/or 'translation_token_ids' for each text_to_translate
.. _`self.model.generate`:
https://huggingface.co/transformers/model_doc/bart.html#transformers.BartForConditionalGeneration.generate
"""
assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(texts[0], list):
assert (
self.tokenizer.pad_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
texts = ([prefix + text for text in texts[0]],)
pad_to_max_length = True
elif isinstance(texts[0], str):
texts = (prefix + texts[0],)
pad_to_max_length = False
else:
            raise ValueError(
                " `texts[0]`: {} has the wrong format. It should be either of type `str` or type `list`".format(
texts[0]
)
)
with self.device_placement():
inputs = self._parse_and_tokenize(*texts, pad_to_max_length=pad_to_max_length)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
input_length = inputs["input_ids"].shape[-1]
elif self.framework == "tf":
input_length = tf.shape(inputs["input_ids"])[-1].numpy()
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if input_length > 0.9 * max_length:
logger.warning(
"Your input_length: {} is bigger than 0.9 * max_length: {}. You might consider increasing your max_length manually, e.g. translator('...', max_length=400)".format(
input_length, max_length
)
)
translations = self.model.generate(
inputs["input_ids"], attention_mask=inputs["attention_mask"], **generate_kwargs,
)
results = []
for translation in translations:
record = {}
if return_tensors:
record["translation_token_ids"] = translation
if return_text:
record["translation_text"] = self.tokenizer.decode(
translation,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
results.append(record)
return results
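# Illustrative usage sketch only (not used anywhere in the library) for TranslationPipeline
# above. The task name selects the model's translation prefix via its config; the default
# t5-base checkpoint from SUPPORTED_TASKS below is assumed to be downloadable.
def _example_translation():
    translator = pipeline("translation_en_to_de")
    result = translator("How old are you?", max_length=40)
    return result[0]["translation_text"]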
# Register all the supported task here
SUPPORTED_TASKS = {
"feature-extraction": {
"impl": FeatureExtractionPipeline,
"tf": TFAutoModel if is_tf_available() else None,
"pt": AutoModel if is_torch_available() else None,
"default": {
"model": {"pt": "distilbert-base-cased", "tf": "distilbert-base-cased"},
"config": None,
"tokenizer": "distilbert-base-cased",
},
},
"sentiment-analysis": {
"impl": TextClassificationPipeline,
"tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
"pt": AutoModelForSequenceClassification if is_torch_available() else None,
"default": {
"model": {
"pt": "distilbert-base-uncased-finetuned-sst-2-english",
"tf": "distilbert-base-uncased-finetuned-sst-2-english",
},
"config": "distilbert-base-uncased-finetuned-sst-2-english",
"tokenizer": "distilbert-base-uncased",
},
},
"ner": {
"impl": NerPipeline,
"tf": TFAutoModelForTokenClassification if is_tf_available() else None,
"pt": AutoModelForTokenClassification if is_torch_available() else None,
"default": {
"model": {
"pt": "dbmdz/bert-large-cased-finetuned-conll03-english",
"tf": "dbmdz/bert-large-cased-finetuned-conll03-english",
},
"config": "dbmdz/bert-large-cased-finetuned-conll03-english",
"tokenizer": "bert-large-cased",
},
},
"question-answering": {
"impl": QuestionAnsweringPipeline,
"tf": TFAutoModelForQuestionAnswering if is_tf_available() else None,
"pt": AutoModelForQuestionAnswering if is_torch_available() else None,
"default": {
"model": {"pt": "distilbert-base-cased-distilled-squad", "tf": "distilbert-base-cased-distilled-squad"},
"config": None,
"tokenizer": ("distilbert-base-cased", {"use_fast": False}),
},
},
"fill-mask": {
"impl": FillMaskPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelWithLMHead if is_torch_available() else None,
"default": {
"model": {"pt": "distilroberta-base", "tf": "distilroberta-base"},
"config": None,
"tokenizer": ("distilroberta-base", {"use_fast": False}),
},
},
"summarization": {
"impl": SummarizationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelWithLMHead if is_torch_available() else None,
"default": {
"model": {"pt": "bart-large-cnn", "tf": None},
"config": None,
"tokenizer": ("bart-large-cnn", {"use_fast": False}),
},
},
"translation_en_to_fr": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelWithLMHead if is_torch_available() else None,
"default": {
"model": {"pt": "t5-base", "tf": "t5-base"},
"config": None,
"tokenizer": ("t5-base", {"use_fast": False}),
},
},
"translation_en_to_de": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelWithLMHead if is_torch_available() else None,
"default": {
"model": {"pt": "t5-base", "tf": "t5-base"},
"config": None,
"tokenizer": ("t5-base", {"use_fast": False}),
},
},
"translation_en_to_ro": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelWithLMHead if is_torch_available() else None,
"default": {
"model": {"pt": "t5-base", "tf": "t5-base"},
"config": None,
"tokenizer": ("t5-base", {"use_fast": False}),
},
},
}
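# Minimal sketch (illustrative only) of how a SUPPORTED_TASKS entry is resolved, mirroring
# the lookups performed in pipeline() below: the task maps to an implementation class, a
# per-framework auto-model class and default model/config/tokenizer identifiers. No
# weights are downloaded here.
def _example_task_resolution(task="sentiment-analysis", framework="pt"):
    targeted_task = SUPPORTED_TASKS[task]
    task_class, model_class = targeted_task["impl"], targeted_task[framework]
    defaults = targeted_task["default"]
    default_model = defaults["model"][framework]
    return task_class.__name__, getattr(model_class, "__name__", None), default_model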
def pipeline(
task: str,
model: Optional = None,
config: Optional[Union[str, PretrainedConfig]] = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
framework: Optional[str] = None,
**kwargs
) -> Pipeline:
"""
Utility factory method to build a pipeline.
    Pipelines are made of:
        - A Tokenizer instance in charge of mapping raw textual input to tokens
- A Model instance
- Some (optional) post processing for enhancing model's output
Args:
task (:obj:`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- "feature-extraction": will return a :class:`~transformers.FeatureExtractionPipeline`
- "sentiment-analysis": will return a :class:`~transformers.TextClassificationPipeline`
- "ner": will return a :class:`~transformers.NerPipeline`
- "question-answering": will return a :class:`~transformers.QuestionAnsweringPipeline`
- "fill-mask": will return a :class:`~transformers.FillMaskPipeline`
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`, defaults to :obj:`None`):
The model that will be used by the pipeline to make predictions. This can be :obj:`None`, a string
checkpoint identifier or an actual pre-trained model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
If :obj:`None`, the default of the pipeline will be loaded.
config (:obj:`str` or :obj:`~transformers.PretrainedConfig`, `optional`, defaults to :obj:`None`):
The configuration that will be used by the pipeline to instantiate the model. This can be :obj:`None`,
a string checkpoint identifier or an actual pre-trained model configuration inheriting from
:class:`~transformers.PretrainedConfig`.
If :obj:`None`, the default of the pipeline will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`, defaults to :obj:`None`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be :obj:`None`,
a string checkpoint identifier or an actual pre-trained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If :obj:`None`, the default of the pipeline will be loaded.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
Returns:
:class:`~transformers.Pipeline`: Class inheriting from :class:`~transformers.Pipeline`, according to
the task.
Examples::
from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
# Sentiment analysis pipeline
pipeline('sentiment-analysis')
# Question answering pipeline, specifying the checkpoint identifier
pipeline('question-answering', model='distilbert-base-cased-distilled-squad', tokenizer='bert-base-cased')
# Named entity recognition pipeline, passing in a specific model and tokenizer
model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
pipeline('ner', model=model, tokenizer=tokenizer)
        # Named entity recognition pipeline, passing a model and configuration with an HTTPS URL.
model_url = "https://s3.amazonaws.com/models.huggingface.co/bert/dbmdz/bert-large-cased-finetuned-conll03-english/pytorch_model.bin"
config_url = "https://s3.amazonaws.com/models.huggingface.co/bert/dbmdz/bert-large-cased-finetuned-conll03-english/config.json"
pipeline('ner', model=model_url, config=config_url, tokenizer='bert-base-cased')
"""
# Retrieve the task
if task not in SUPPORTED_TASKS:
raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys())))
framework = framework or get_framework(model)
targeted_task = SUPPORTED_TASKS[task]
task_class, model_class = targeted_task["impl"], targeted_task[framework]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
models, config, tokenizer = [targeted_task["default"][k] for k in ["model", "config", "tokenizer"]]
model = models[framework]
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model, str) and model in ALL_PRETRAINED_CONFIG_ARCHIVE_MAP:
tokenizer = model
elif isinstance(config, str) and config in ALL_PRETRAINED_CONFIG_ARCHIVE_MAP:
tokenizer = config
else:
            # Impossible to guess what the right tokenizer is here
            raise Exception(
                "Impossible to guess which tokenizer to use. "
                "Please provide a PretrainedTokenizer class or a path/url/shortcut name to a pretrained tokenizer."
)
modelcard = None
# Try to infer modelcard from model or config name (if provided as str)
if isinstance(model, str):
modelcard = model
elif isinstance(config, str):
modelcard = config
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1])
else:
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
# Instantiate config if needed
if isinstance(config, str):
config = AutoConfig.from_pretrained(config)
# Instantiate modelcard if needed
if isinstance(modelcard, str):
modelcard = ModelCard.from_pretrained(modelcard)
# Instantiate model if needed
if isinstance(model, str):
# Handle transparent TF/PT model conversion
model_kwargs = {}
if framework == "pt" and model.endswith(".h5"):
model_kwargs["from_tf"] = True
logger.warning(
"Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
"Trying to load the model with PyTorch."
)
elif framework == "tf" and model.endswith(".bin"):
model_kwargs["from_pt"] = True
logger.warning(
"Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
"Trying to load the model with Tensorflow."
)
model = model_class.from_pretrained(model, config=config, **model_kwargs)
return task_class(model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, task=task, **kwargs,)
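# Minimal sketch (illustrative only) of the tokenizer specification formats accepted by
# pipeline() above: a plain string names a pretrained tokenizer, while a (name, kwargs)
# tuple additionally forwards keyword arguments such as use_fast to
# AutoTokenizer.from_pretrained. The default spec below is taken from SUPPORTED_TASKS.
def _example_tokenizer_spec(spec=("distilbert-base-cased", {"use_fast": False})):
    if isinstance(spec, tuple):
        return AutoTokenizer.from_pretrained(spec[0], **spec[1])
    return AutoTokenizer.from_pretrained(spec)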
| 71,183 | 42.75169 | 211 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_gpt2_original_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT-2 checkpoint."""
import argparse
import logging
import torch
from transformers import CONFIG_NAME, WEIGHTS_NAME, GPT2Config, GPT2Model, load_tf_weights_in_gpt2
logging.basicConfig(level=logging.INFO)
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
# Construct model
if gpt2_config_file == "":
config = GPT2Config()
else:
config = GPT2Config.from_json_file(gpt2_config_file)
model = GPT2Model(config)
    # Load weights from the TensorFlow checkpoint
load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
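# Illustrative invocation sketch only (not part of the original script). The paths below
# are hypothetical placeholders; the equivalent command line would be:
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json
def _example_conversion():
    convert_gpt2_checkpoint_to_pytorch(
        gpt2_checkpoint_path="/path/to/tf_checkpoint",    # hypothetical path
        gpt2_config_file="/path/to/config.json",          # hypothetical path ("" uses the default config)
        pytorch_dump_folder_path="/path/to/output",       # hypothetical path
    )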
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help="An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture.",
)
args = parser.parse_args()
convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 2,507 | 35.882353 | 119 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_roberta.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 RoBERTa model. """
import logging
import tensorflow as tf
from .configuration_roberta import RobertaConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_bert import TFBertEmbeddings, TFBertMainLayer, gelu
from .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list
logger = logging.getLogger(__name__)
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = {
"roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-tf_model.h5",
"roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-tf_model.h5",
"roberta-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-tf_model.h5",
"distilroberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-tf_model.h5",
}
class TFRobertaEmbeddings(TFBertEmbeddings):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
self.padding_idx = 1
def create_position_ids_from_input_ids(self, x):
""" Replace non-padding symbols with their position numbers. Position numbers begin at
padding_idx+1. Padding symbols are ignored. This is modified from fairseq's
`utils.make_positions`.
        :param tf.Tensor x:
        :return tf.Tensor:
"""
mask = tf.cast(tf.math.not_equal(x, self.padding_idx), dtype=tf.int32)
        incremental_indices = tf.math.cumsum(mask, axis=1) * mask
        return incremental_indices + self.padding_idx
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
""" We are provided embeddings directly. We cannot infer which are padded so just generate
sequential position ids.
        :param tf.Tensor inputs_embeds:
        :return tf.Tensor:
"""
seq_length = shape_list(inputs_embeds)[1]
position_ids = tf.range(self.padding_idx + 1, seq_length + self.padding_idx + 1, dtype=tf.int32)[tf.newaxis, :]
return position_ids
def _embedding(self, inputs, training=False):
"""Applies embedding based on inputs tensor."""
input_ids, position_ids, token_type_ids, inputs_embeds = inputs
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = self.create_position_ids_from_input_ids(input_ids)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
return super()._embedding([input_ids, position_ids, token_type_ids, inputs_embeds], training=training)
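# Small standalone sketch (illustrative only) of the padding-aware position ids computed by
# TFRobertaEmbeddings.create_position_ids_from_input_ids above: non-padding tokens get
# positions padding_idx+1, padding_idx+2, ... while padding tokens keep padding_idx. The
# token ids below are made up for illustration.
def _example_position_ids(padding_idx=1):
    x = tf.constant([[5, 6, 7, 1, 1]])                        # 1 is the padding token id
    mask = tf.cast(tf.math.not_equal(x, padding_idx), dtype=tf.int32)
    incremental_indices = tf.math.cumsum(mask, axis=1) * mask
    return incremental_indices + padding_idx                   # -> [[2, 3, 4, 1, 1]]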
class TFRobertaMainLayer(TFBertMainLayer):
"""
Same as TFBertMainLayer but uses TFRobertaEmbeddings.
"""
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
self.embeddings = TFRobertaEmbeddings(config, name="embeddings")
def get_input_embeddings(self):
return self.embeddings
class TFRobertaPreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = RobertaConfig
pretrained_model_archive_map = TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
ROBERTA_START_DOCSTRING = r"""
This model is a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ sub-class.
Use it as a regular TF 2.0 Keras Model and
refer to the TF 2.0 documentation for all matter related to general usage and behavior.
.. note::
        TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
        This second option is useful when using the :obj:`tf.keras.Model.fit()` method, which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
        - a single Tensor with input_ids only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.RobertaTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`__
position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`__
head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, embedding_dim)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
(if set to :obj:`False`) for evaluation.
"""
@add_start_docstrings(
    "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
ROBERTA_START_DOCSTRING,
)
class TFRobertaModel(TFRobertaPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.roberta = TFRobertaMainLayer(config, name="roberta")
@add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`tf.Tensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input; you're often better off averaging or pooling
the sequence of hidden-states for the whole input sequence.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import RobertaTokenizer, TFRobertaModel
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = TFRobertaModel.from_pretrained('roberta-base')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
outputs = self.roberta(inputs, **kwargs)
return outputs
class TFRobertaLMHead(tf.keras.layers.Layer):
"""Roberta Head for masked language modeling."""
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.act = tf.keras.layers.Activation(gelu)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def call(self, features):
x = self.dense(features)
x = self.act(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x, mode="linear") + self.bias
return x
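# Minimal standalone sketch (illustrative only) of the weight tying used by TFRobertaLMHead
# above: the projection back to the vocabulary reuses the input embedding matrix (here a
# plain tf.Variable) plus a separate output bias, instead of a freshly initialized Dense layer.
def _example_weight_tying(vocab_size=10, hidden_size=4):
    embedding_matrix = tf.Variable(tf.random.normal([vocab_size, hidden_size]))
    bias = tf.Variable(tf.zeros([vocab_size]))
    hidden_states = tf.random.normal([2, hidden_size])         # two token positions
    logits = tf.matmul(hidden_states, embedding_matrix, transpose_b=True) + bias
    return logits.shape                                         # -> (2, vocab_size)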
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING)
class TFRobertaForMaskedLM(TFRobertaPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.roberta = TFRobertaMainLayer(config, name="roberta")
self.lm_head = TFRobertaLMHead(config, self.roberta.embeddings, name="lm_head")
def get_output_embeddings(self):
return self.lm_head.decoder
@add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
prediction_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import RobertaTokenizer, TFRobertaForMaskedLM
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = TFRobertaForMaskedLM.from_pretrained('roberta-base')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
prediction_scores = outputs[0]
"""
outputs = self.roberta(inputs, **kwargs)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
return outputs # prediction_scores, (hidden_states), (attentions)
class TFRobertaClassificationHead(tf.keras.layers.Layer):
"""Head for sentence-level classification tasks."""
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
activation="tanh",
name="dense",
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.out_proj = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
)
def call(self, features, training=False):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x, training=training)
x = self.dense(x)
x = self.dropout(x, training=training)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer
on top of the pooled output) e.g. for GLUE tasks. """,
ROBERTA_START_DOCSTRING,
)
class TFRobertaForSequenceClassification(TFRobertaPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.roberta = TFRobertaMainLayer(config, name="roberta")
self.classifier = TFRobertaClassificationHead(config, name="classifier")
@add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
logits (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import RobertaTokenizer, TFRobertaForSequenceClassification
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = TFRobertaForSequenceClassification.from_pretrained('roberta-base')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
labels = tf.constant([1])[None, :] # Batch size 1
outputs = model(input_ids)
logits = outputs[0]
"""
outputs = self.roberta(inputs, **kwargs)
sequence_output = outputs[0]
logits = self.classifier(sequence_output, training=kwargs.get("training", False))
outputs = (logits,) + outputs[2:]
return outputs # logits, (hidden_states), (attentions)
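    # Usage sketch (assumes only the standard tf.nn / tf.math API): the logits above are
    # unnormalized, so class probabilities and the predicted label for the docstring
    # example could be obtained with
    #   probs = tf.nn.softmax(logits, axis=-1)
    #   predicted_class = tf.math.argmax(probs, axis=-1)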
@add_start_docstrings(
"""RoBERTa Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
ROBERTA_START_DOCSTRING,
)
class TFRobertaForTokenClassification(TFRobertaPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.roberta = TFRobertaMainLayer(config, name="roberta")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
            :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import RobertaTokenizer, TFRobertaForTokenClassification
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = TFRobertaForTokenClassification.from_pretrained('roberta-base')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
scores = outputs[0]
"""
outputs = self.roberta(inputs, **kwargs)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=kwargs.get("training", False))
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
return outputs # scores, (hidden_states), (attentions)
| 21,910 | 48.238202 | 161 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_electra_original_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ELECTRA checkpoint."""
import argparse
import logging
import torch
from transformers import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra
logging.basicConfig(level=logging.INFO)
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, discriminator_or_generator):
# Initialise PyTorch model
config = ElectraConfig.from_json_file(config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
if discriminator_or_generator == "discriminator":
model = ElectraForPreTraining(config)
elif discriminator_or_generator == "generator":
model = ElectraForMaskedLM(config)
else:
raise ValueError("The discriminator_or_generator argument should be either 'discriminator' or 'generator'")
# Load weights from tf checkpoint
load_tf_weights_in_electra(
model, config, tf_checkpoint_path, discriminator_or_generator=discriminator_or_generator
)
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path)
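# Usage sketch (the checkpoint paths are placeholders for a locally downloaded ELECTRA
# checkpoint; only the flag names below are defined by this script):
#   python convert_electra_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./electra_small/model.ckpt \
#       --config_file ./electra_small/config.json \
#       --pytorch_dump_path ./electra_small/pytorch_model.bin \
#       --discriminator_or_generator discriminator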
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--discriminator_or_generator",
default=None,
type=str,
required=True,
help="Whether to export the generator or the discriminator. Should be a string, either 'discriminator' or "
"'generator'.",
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.discriminator_or_generator
)
| 2,853 | 34.675 | 117 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_electra.py | import logging
import os
import torch
import torch.nn as nn
from transformers import ElectraConfig, add_start_docstrings
from transformers.activations import get_activation
from .file_utils import add_start_docstrings_to_callable
from .modeling_bert import BertEmbeddings, BertEncoder, BertLayerNorm, BertPreTrainedModel
logger = logging.getLogger(__name__)
ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP = {
"google/electra-small-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-generator/pytorch_model.bin",
"google/electra-base-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-generator/pytorch_model.bin",
"google/electra-large-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-generator/pytorch_model.bin",
"google/electra-small-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-discriminator/pytorch_model.bin",
"google/electra-base-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-discriminator/pytorch_model.bin",
"google/electra-large-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-discriminator/pytorch_model.bin",
}
def load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_or_generator="discriminator"):
""" Load tf checkpoints in a pytorch model.
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
original_name: str = name
try:
if isinstance(model, ElectraForMaskedLM):
name = name.replace("electra/embeddings/", "generator/embeddings/")
if discriminator_or_generator == "generator":
name = name.replace("electra/", "discriminator/")
name = name.replace("generator/", "electra/")
name = name.replace("dense_1", "dense_prediction")
name = name.replace("generator_predictions/output_bias", "generator_lm_head/bias")
name = name.split("/")
# print(original_name, name)
            # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
            # which are not required for using the pretrained model
if any(n in ["global_step", "temperature"] for n in name):
logger.info("Skipping {}".format(original_name))
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name.endswith("_embeddings"):
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert pointer.shape == array.shape, original_name
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name), original_name)
pointer.data = torch.from_numpy(array)
except AttributeError as e:
print("Skipping {}".format(original_name), name, e)
continue
return model
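# Illustration of the renaming above (the TF variable names are typical of the official
# ELECTRA checkpoints and are shown only as examples): when loading a generator
# checkpoint into ElectraForMaskedLM,
#   "generator/encoder/layer_0/..."      -> "electra/encoder/layer_0/..."
#   "generator_predictions/output_bias"  -> "generator_lm_head/bias"
# after which the name is split on "/" and walked with getattr() down the PyTorch modules.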
class ElectraEmbeddings(BertEmbeddings):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__(config)
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.embedding_size, eps=config.layer_norm_eps)
class ElectraDiscriminatorPredictions(nn.Module):
"""Prediction module for the discriminator, made up of two dense layers."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dense_prediction = nn.Linear(config.hidden_size, 1)
self.config = config
def forward(self, discriminator_hidden_states, attention_mask):
hidden_states = self.dense(discriminator_hidden_states)
hidden_states = get_activation(self.config.hidden_act)(hidden_states)
logits = self.dense_prediction(hidden_states).squeeze()
return logits
class ElectraGeneratorPredictions(nn.Module):
"""Prediction module for the generator, made up of two dense layers."""
def __init__(self, config):
super().__init__()
self.LayerNorm = BertLayerNorm(config.embedding_size)
self.dense = nn.Linear(config.hidden_size, config.embedding_size)
def forward(self, generator_hidden_states):
hidden_states = self.dense(generator_hidden_states)
hidden_states = get_activation("gelu")(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class ElectraPreTrainedModel(BertPreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = ElectraConfig
pretrained_model_archive_map = ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_electra
base_model_prefix = "electra"
def get_extended_attention_mask(self, attention_mask, input_shape, device):
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
causal_mask = causal_mask.to(
attention_mask.dtype
) # causal and attention masks must have same type with pytorch version < 1.3
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
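    # Worked example (illustrative values): a padding mask [1, 1, 0] becomes the additive
    # mask [0.0, 0.0, -10000.0], broadcast to [batch, 1, 1, seq_len], so the masked
    # position contributes almost nothing to the attention softmax inside the encoder.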
def get_head_mask(self, head_mask):
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
num_hidden_layers = self.config.num_hidden_layers
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * num_hidden_layers
return head_mask
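    # Shape illustration for the expansion above: a head_mask of shape [num_heads] is
    # broadcast to [num_hidden_layers, 1, num_heads, 1, 1]; a [num_hidden_layers, num_heads]
    # mask keeps its per-layer values in the same broadcast shape; with head_mask=None,
    # every layer simply receives None.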
ELECTRA_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.ElectraConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
ELECTRA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.ElectraTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
"""
@add_start_docstrings(
"The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to "
"the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the "
"hidden size and embedding size are different."
""
"Both the generator and discriminator checkpoints may be loaded into this model.",
ELECTRA_START_DOCSTRING,
)
class ElectraModel(ElectraPreTrainedModel):
config_class = ElectraConfig
def __init__(self, config):
super().__init__(config)
self.embeddings = ElectraEmbeddings(config)
if config.embedding_size != config.hidden_size:
self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)
self.encoder = BertEncoder(config)
self.config = config
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.ElectraConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import ElectraModel, ElectraTokenizer
import torch
tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator')
model = ElectraModel.from_pretrained('google/electra-small-discriminator')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
head_mask = self.get_head_mask(head_mask)
hidden_states = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
if hasattr(self, "embeddings_project"):
hidden_states = self.embeddings_project(hidden_states)
hidden_states = self.encoder(hidden_states, attention_mask=extended_attention_mask, head_mask=head_mask)
return hidden_states
@add_start_docstrings(
"""
Electra model with a binary classification head on top as used during pre-training for identifying generated
tokens.
It is recommended to load the discriminator checkpoint into that model.""",
ELECTRA_START_DOCSTRING,
)
class ElectraForPreTraining(ElectraPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.electra = ElectraModel(config)
self.discriminator_predictions = ElectraDiscriminatorPredictions(config)
self.init_weights()
@add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
Labels for computing the ELECTRA loss. Input should be a sequence of tokens (see :obj:`input_ids` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates the token is an original token,
``1`` indicates the token was replaced.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.ElectraConfig`) and inputs:
loss (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss of the ELECTRA objective.
scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`)
Prediction scores of the head (scores for each token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import ElectraTokenizer, ElectraForPreTraining
import torch
tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator')
model = ElectraForPreTraining.from_pretrained('google/electra-small-discriminator')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
prediction_scores, seq_relationship_scores = outputs[:2]
"""
discriminator_hidden_states = self.electra(
input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds
)
discriminator_sequence_output = discriminator_hidden_states[0]
logits = self.discriminator_predictions(discriminator_sequence_output, attention_mask)
output = (logits,)
if labels is not None:
loss_fct = nn.BCEWithLogitsLoss()
if attention_mask is not None:
active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1
active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]
active_labels = labels[active_loss]
loss = loss_fct(active_logits, active_labels.float())
else:
loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float())
output = (loss,) + output
output += discriminator_hidden_states[1:]
return output # (loss), scores, (hidden_states), (attentions)
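    # Loss illustration (made-up values): with attention_mask [[1, 1, 0]] and labels
    # [[0, 1, 0]], active_loss selects only the first two positions, so the padded third
    # token never contributes to the binary cross-entropy computed above.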
@add_start_docstrings(
"""
Electra model with a language modeling head on top.
Even though both the discriminator and generator may be loaded into this model, the generator is
the only model of the two to have been trained for the masked language modeling task.""",
ELECTRA_START_DOCSTRING,
)
class ElectraForMaskedLM(ElectraPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.electra = ElectraModel(config)
self.generator_predictions = ElectraGeneratorPredictions(config)
self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
self.init_weights()
def get_output_embeddings(self):
return self.generator_lm_head
@add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
):
r"""
masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.ElectraConfig`) and inputs:
masked_lm_loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import ElectraTokenizer, ElectraForMaskedLM
import torch
tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-generator')
model = ElectraForMaskedLM.from_pretrained('google/electra-small-generator')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, masked_lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
"""
generator_hidden_states = self.electra(
input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds
)
generator_sequence_output = generator_hidden_states[0]
prediction_scores = self.generator_predictions(generator_sequence_output)
prediction_scores = self.generator_lm_head(prediction_scores)
output = (prediction_scores,)
# Masked language modeling softmax layer
if masked_lm_labels is not None:
loss_fct = nn.CrossEntropyLoss() # -100 index = padding token
loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
output = (loss,) + output
output += generator_hidden_states[1:]
return output # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
@add_start_docstrings(
"""
Electra model with a token classification head on top.
Both the discriminator and generator may be loaded into this model.""",
ELECTRA_START_DOCSTRING,
)
class ElectraForTokenClassification(ElectraPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.electra = ElectraModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the token classification loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.ElectraConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :
Classification loss.
scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import ElectraTokenizer, ElectraForTokenClassification
import torch
tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator')
model = ElectraForTokenClassification.from_pretrained('google/electra-small-discriminator')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, scores = outputs[:2]
"""
discriminator_hidden_states = self.electra(
input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds
)
discriminator_sequence_output = discriminator_hidden_states[0]
discriminator_sequence_output = self.dropout(discriminator_sequence_output)
logits = self.classifier(discriminator_sequence_output)
output = (logits,)
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.config.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
output = (loss,) + output
output += discriminator_hidden_states[1:]
return output # (loss), scores, (hidden_states), (attentions)
| 31,884 | 46.447917 | 154 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert RoBERTa checkpoint."""
import argparse
import logging
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers.modeling_bert import (
BertConfig,
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.modeling_roberta import RobertaForMaskedLM, RobertaForSequenceClassification
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
raise Exception("requires fairseq >= 0.9.0")
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_roberta_checkpoint_to_pytorch(
roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
"""
Copy/paste/tweak roberta's weights to our BERT structure.
"""
roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
roberta.eval() # disable dropout
roberta_sent_encoder = roberta.model.decoder.sentence_encoder
config = BertConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
hidden_size=roberta.args.encoder_embed_dim,
num_hidden_layers=roberta.args.encoder_layers,
num_attention_heads=roberta.args.encoder_attention_heads,
intermediate_size=roberta.args.encoder_ffn_embed_dim,
max_position_embeddings=514,
type_vocab_size=1,
layer_norm_eps=1e-5, # PyTorch default used in fairseq
)
if classification_head:
config.num_labels = roberta.args.num_classes
print("Our BERT config:", config)
model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)
model.eval()
# Now let's copy all the weights.
# Embeddings
model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight
) # just zero them out b/c RoBERTa doesn't use them.
model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight
model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
layer: BertLayer = model.roberta.encoder.layer[i]
roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
# self attention
self_attn: BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
)
self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
# self-attention output
self_output: BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight
self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias
# intermediate
intermediate: BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
intermediate.dense.weight = roberta_layer.fc1.weight
intermediate.dense.bias = roberta_layer.fc1.bias
# output
bert_output: BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
bert_output.dense.weight = roberta_layer.fc2.weight
bert_output.dense.bias = roberta_layer.fc2.bias
bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight
bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias
# end of layer
if classification_head:
model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
model.lm_head.dense.weight = roberta.model.decoder.lm_head.dense.weight
model.lm_head.dense.bias = roberta.model.decoder.lm_head.dense.bias
model.lm_head.layer_norm.weight = roberta.model.decoder.lm_head.layer_norm.weight
model.lm_head.layer_norm.bias = roberta.model.decoder.lm_head.layer_norm.bias
model.lm_head.decoder.weight = roberta.model.decoder.lm_head.weight
model.lm_head.bias = roberta.model.decoder.lm_head.bias
# Let's check that we get the same results.
input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1
our_output = model(input_ids)[0]
if classification_head:
their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
else:
their_output = roberta.model(input_ids)[0]
print(our_output.shape, their_output.shape)
max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
success = torch.allclose(our_output, their_output, atol=1e-3)
print("Do both models output the same tensors?", "🔥" if success else "💩")
if not success:
raise Exception("Something went wRoNg")
pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
print(f"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
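# Usage sketch (the checkpoint directory is a placeholder for a fairseq RoBERTa download;
# only the flag names below are defined by this script):
#   python convert_roberta_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path ./roberta.base \
#       --pytorch_dump_folder_path ./roberta-base-converted \
#       --classification_head  # only for checkpoints fine-tuned with an "mnli" head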
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
args = parser.parse_args()
convert_roberta_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 7,932 | 43.072222 | 117 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/tokenization_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
import collections
import logging
import os
import unicodedata
from typing import List, Optional
from tokenizers import BertWordPieceTokenizer
from .tokenization_utils import PreTrainedTokenizer, PreTrainedTokenizerFast
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
"bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
"bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
"bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
"bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
"bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
"bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
"bert-base-german-cased": "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
"bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt",
"bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt",
"bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt",
"bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt",
"bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt",
"bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-vocab.txt",
"bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-vocab.txt",
"bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/vocab.txt",
"bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/vocab.txt",
"bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"bert-base-finnish-cased-v1": 512,
"bert-base-finnish-uncased-v1": 512,
"bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"bert-base-finnish-cased-v1": {"do_lower_case": False},
"bert-base-finnish-uncased-v1": {"do_lower_case": True},
"bert-base-dutch-cased": {"do_lower_case": False},
}
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n")
vocab[token] = index
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(PreTrainedTokenizer):
r"""
Constructs a BERT tokenizer. Based on WordPiece.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users
should refer to the superclass for more information regarding methods.
Args:
vocab_file (:obj:`string`):
File containing the vocabulary.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to lowercase the input when tokenizing.
do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to do basic tokenization before WordPiece.
        never_split (:obj:`List[str]`, `optional`, defaults to :obj:`None`):
List of tokens which will never be split during tokenization. Only has an effect when
:obj:`do_basic_tokenize=True`
unk_token (:obj:`string`, `optional`, defaults to "[UNK]"):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (:obj:`string`, `optional`, defaults to "[SEP]"):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
for sequence classification or for a text and a question for question answering.
It is also used as the last token of a sequence built with special tokens.
pad_token (:obj:`string`, `optional`, defaults to "[PAD]"):
The token used for padding, for example when batching sequences of different lengths.
cls_token (:obj:`string`, `optional`, defaults to "[CLS]"):
The classifier token which is used when doing sequence classification (classification of the whole
sequence instead of per-token classification). It is the first token of the sequence when built with
special tokens.
mask_token (:obj:`string`, `optional`, defaults to "[MASK]"):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/transformers/issues/328
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
self,
vocab_file,
do_lower_case=True,
do_basic_tokenize=True,
never_split=None,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
tokenize_chinese_chars=True,
**kwargs
):
super().__init__(
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)
)
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars
)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string. """
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens.
A BERT sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added
token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
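    # Example with made-up vocabulary ids: if cls_token_id=101, sep_token_id=102 and
    # token_ids_0=[7592, 2088], this returns [101, 7592, 2088, 102]; adding
    # token_ids_1=[2129] yields [101, 7592, 2088, 102, 2129, 102].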
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0 (:obj:`List[int]`):
List of ids.
token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True if the token list is already formatted with special tokens for the model
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formated with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
A BERT sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
if token_ids_1 is None, only returns the first portion of the mask (0's).
Args:
token_ids_0 (:obj:`List[int]`):
List of ids.
token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
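    # Continuing the made-up example above: for token_ids_0=[7592, 2088] and
    # token_ids_1=[2129] this returns [0, 0, 0, 0, 1, 1], where the zeros cover
    # "[CLS] A A [SEP]" and the ones cover "B [SEP]".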
def save_vocabulary(self, vocab_path):
"""
        Save the tokenizer's WordPiece vocabulary (a plain-text vocab file) to a directory or file.
Args:
vocab_path (:obj:`str`):
The directory in which to save the vocabulary.
Returns:
:obj:`Tuple(str)`: Paths to the files saved.
"""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES["vocab_file"])
else:
vocab_file = vocab_path
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
"Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file)
)
index = token_index
writer.write(token + "\n")
index += 1
return (vocab_file,)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):
""" Constructs a BasicTokenizer.
Args:
**do_lower_case**: Whether to lower case the input.
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
                List of tokens not to split.
**tokenize_chinese_chars**: (`optional`) boolean (default True)
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
"""
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = never_split
self.tokenize_chinese_chars = tokenize_chinese_chars
def tokenize(self, text, never_split=None):
""" Basic Tokenization of a piece of text.
Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer.
Args:
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
                List of tokens not to split.
"""
never_split = self.never_split + (never_split if never_split is not None else [])
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia).
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token, never_split))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
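# Illustrative sketch of the greedy longest-match-first behaviour above (hypothetical toy
# vocabulary, not part of the original file):
#   vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
#   WordpieceTokenizer(vocab=vocab, unk_token="[UNK]").tokenize("unaffable")
#   -> ["un", "##aff", "##able"]   # longest prefix "un", then "##aff", then "##able"
#   WordpieceTokenizer(vocab=vocab, unk_token="[UNK]").tokenize("zzz")
#   -> ["[UNK]"]                   # no piece matches, so the whole token maps to unk_token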
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
class BertTokenizerFast(PreTrainedTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
self,
vocab_file,
do_lower_case=True,
do_basic_tokenize=True,
never_split=None,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
clean_text=True,
tokenize_chinese_chars=True,
add_special_tokens=True,
strip_accents=True,
wordpieces_prefix="##",
**kwargs
):
super().__init__(
BertWordPieceTokenizer(
vocab_file=vocab_file,
add_special_tokens=add_special_tokens,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
clean_text=clean_text,
handle_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
lowercase=do_lower_case,
wordpieces_prefix=wordpieces_prefix,
),
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs,
)
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
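    # Illustrative sketch (not part of the original file): assuming the usual BERT ids
    # cls_token_id == 101 and sep_token_id == 102, a hypothetical call would return
    #   build_inputs_with_special_tokens([7, 8], [9])
    #   -> [101, 7, 8, 102, 9, 102]   # [CLS] ids_0 [SEP] ids_1 [SEP]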
| 27,042 | 41.320814 | 183 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Transformer XL checkpoint and datasets."""
import argparse
import logging
import os
import pickle
import sys
import torch
import transformers.tokenization_transfo_xl as data_utils
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
TransfoXLConfig,
TransfoXLLMHeadModel,
load_tf_weights_in_transfo_xl,
)
from transformers.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
logging.basicConfig(level=logging.INFO)
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(transfo_xl_dataset_file, "rb") as fp:
corpus = pickle.load(fp, encoding="latin1")
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print("Save vocabulary to {}".format(pytorch_vocab_dump_path))
corpus_vocab_dict = corpus.vocab.__dict__
torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
corpus_dict_no_vocab = corpus.__dict__
corpus_dict_no_vocab.pop("vocab", None)
pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
print("Save dataset to {}".format(pytorch_dataset_dump_path))
torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
config_path = os.path.abspath(transfo_xl_config_file)
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting Transformer XL checkpoint from {} with config at {}".format(tf_path, config_path))
# Initialise PyTorch model
if transfo_xl_config_file == "":
config = TransfoXLConfig()
else:
config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = TransfoXLLMHeadModel(config)
model = load_tf_weights_in_transfo_xl(model, config, tf_path)
# Save pytorch-model
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help="An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture.",
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
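# Hypothetical command-line usage (placeholder paths, not part of the original file):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./tf_ckpt/model.ckpt \
#       --transfo_xl_config_file ./tf_ckpt/config.json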
| 4,913 | 38 | 121 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/configuration_ctrl.py | # coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Salesforce CTRL configuration """
import logging
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://storage.googleapis.com/sf-ctrl/pytorch/ctrl-config.json"}
class CTRLConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of an :class:`~transformers.CTRLModel`.
It is used to instantiate an CTRL model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the `ctrl <https://huggingface.co/ctrl>`__ architecture from SalesForce.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
for more information.
Args:
vocab_size (:obj:`int`, optional, defaults to 246534):
            Vocabulary size of the CTRL model. Defines the number of different tokens that
            can be represented by the :obj:`input_ids` passed to the forward method of :class:`~transformers.CTRLModel`.
n_positions (:obj:`int`, optional, defaults to 256):
The maximum sequence length that this model might ever be used with.
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
n_ctx (:obj:`int`, optional, defaults to 256):
Dimensionality of the causal mask (usually same as n_positions).
n_embd (:obj:`int`, optional, defaults to 1280):
Dimensionality of the embeddings and hidden states.
dff (:obj:`int`, optional, defaults to 8192):
Dimensionality of the inner dimension of the FFN.
n_layer (:obj:`int`, optional, defaults to 48):
Number of hidden layers in the Transformer encoder.
n_head (:obj:`int`, optional, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
resid_pdrop (:obj:`float`, optional, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (:obj:`int`, optional, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (:obj:`float`, optional, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (:obj:`float`, optional, defaults to 1e-6):
            The epsilon to use in the layer normalization layers.
initializer_range (:obj:`float`, optional, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example::
from transformers import CTRLModel, CTRLConfig
# Initializing a CTRL configuration
configuration = CTRLConfig()
# Initializing a model from the configuration
model = CTRLModel(configuration)
# Accessing the model configuration
configuration = model.config
Attributes:
pretrained_config_archive_map (Dict[str, str]):
A dictionary containing all the available pre-trained checkpoints.
"""
pretrained_config_archive_map = CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = "ctrl"
def __init__(
self,
vocab_size=246534,
n_positions=256,
n_ctx=256,
n_embd=1280,
dff=8192,
n_layer=48,
n_head=16,
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-6,
initializer_range=0.02,
summary_type="cls_index",
summary_use_proj=True,
summary_activation=None,
summary_proj_to_labels=True,
summary_first_dropout=0.1,
**kwargs
):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.dff = dff
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
@property
def max_position_embeddings(self):
return self.n_positions
@property
def hidden_size(self):
return self.n_embd
@property
def num_attention_heads(self):
return self.n_head
@property
def num_hidden_layers(self):
return self.n_layer
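    # Illustrative sketch (not part of the original file): the properties above expose the
    # CTRL-specific attribute names under the generic configuration names, e.g.
    #   config = CTRLConfig(n_embd=1280, n_layer=48)
    #   config.hidden_size         # -> 1280 (alias of n_embd)
    #   config.num_hidden_layers   # -> 48   (alias of n_layer)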
| 5,769 | 39.069444 | 120 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_transfo_xl_utilities.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" A TF 2.0 Adaptive Softmax for Transformer XL model.
"""
import tensorflow as tf
from .modeling_tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [vocab_size]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
self.keep_order = keep_order
self.out_layers = []
self.out_projs = []
def build(self, input_shape):
if self.n_clusters > 0:
self.cluster_weight = self.add_weight(
shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
)
self.cluster_bias = self.add_weight(
shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
)
if self.div_val == 1:
for i in range(len(self.cutoffs)):
if self.d_proj != self.d_embed:
weight = self.add_weight(
shape=(self.d_embed, self.d_proj),
initializer="zeros",
trainable=True,
name="out_projs_._{}".format(i),
)
self.out_projs.append(weight)
else:
self.out_projs.append(None)
weight = self.add_weight(
shape=(self.vocab_size, self.d_embed,),
initializer="zeros",
trainable=True,
name="out_layers_._{}_._weight".format(i),
)
bias = self.add_weight(
shape=(self.vocab_size,),
initializer="zeros",
trainable=True,
name="out_layers_._{}_._bias".format(i),
)
self.out_layers.append((weight, bias))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = self.d_embed // (self.div_val ** i)
weight = self.add_weight(
shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name="out_projs_._{}".format(i)
)
self.out_projs.append(weight)
weight = self.add_weight(
shape=(r_idx - l_idx, d_emb_i,),
initializer="zeros",
trainable=True,
name="out_layers_._{}_._weight".format(i),
)
bias = self.add_weight(
shape=(r_idx - l_idx,),
initializer="zeros",
trainable=True,
name="out_layers_._{}_._bias".format(i),
)
self.out_layers.append((weight, bias))
super().build(input_shape)
@staticmethod
def _logit(x, W, b, proj=None):
y = x
if proj is not None:
y = tf.einsum("ibd,ed->ibe", y, proj)
return tf.einsum("ibd,nd->ibn", y, W) + b
@staticmethod
def _gather_logprob(logprob, target):
lp_size = shape_list(logprob)
r = tf.range(lp_size[0])
idx = tf.stack([r, target], 1)
return tf.gather_nd(logprob, idx)
def call(self, inputs, return_mean=True, training=False):
hidden, target = inputs
head_logprob = 0
if self.n_clusters == 0:
output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
if target is not None:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
out = tf.nn.log_softmax(output, axis=-1)
else:
hidden_sizes = shape_list(hidden)
out = []
loss = tf.zeros(hidden_sizes[:2], dtype=tf.float32)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
mask = (target >= l_idx) & (target < r_idx)
mask_idx = tf.where(mask)
cur_target = tf.boolean_mask(target, mask) - l_idx
if self.div_val == 1:
cur_W = self.out_layers[0][0][l_idx:r_idx]
cur_b = self.out_layers[0][1][l_idx:r_idx]
else:
cur_W = self.out_layers[i][0]
cur_b = self.out_layers[i][1]
if i == 0:
cur_W = tf.concat([cur_W, self.cluster_weight], 0)
cur_b = tf.concat([cur_b, self.cluster_bias], 0)
head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
head_logprob = tf.nn.log_softmax(head_logit)
out.append(head_logprob[..., : self.cutoffs[0]])
if target is not None:
cur_head_logprob = tf.boolean_mask(head_logprob, mask)
cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
else:
tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
tail_logprob = tf.nn.log_softmax(tail_logit)
cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(logprob_i)
if target is not None:
cur_head_logprob = tf.boolean_mask(head_logprob, mask)
cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(mask_idx, -cur_logprob, tf.cast(shape_list(loss), dtype=tf.int64))
out = tf.concat(out, axis=-1)
if target is not None:
if return_mean:
loss = tf.reduce_mean(loss)
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(loss)
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
return out
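    # Illustrative sketch (hypothetical numbers, not part of the original file): with
    # vocab_size=10000 and cutoffs=[1000, 5000], the head covers the 1000 most frequent
    # token ids plus 2 cluster logits, while the two tail clusters cover ids 1000-4999 and
    # 5000-9999 with progressively smaller embeddings when div_val > 1, e.g.
    #   softmax = TFAdaptiveSoftmaxMask(vocab_size=10000, d_embed=512, d_proj=512,
    #                                   cutoffs=[1000, 5000], div_val=4)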
| 7,702 | 42.03352 | 118 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_bart_original_pytorch_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BART checkpoint."""
import argparse
import logging
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.modeling_bart import _make_linear_from_emb
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
raise Exception("requires fairseq >= 0.9.0")
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
SAMPLE_TEXT = " Hello world! cécé herlolip"
rename_keys = [
("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
IGNORE_KEYS = ["encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor"]
def rename_key(dct, old, new):
val = dct.pop(old)
dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
"""Checkpoint path should end in model.pt"""
sd = torch.load(checkpoint_path, map_location="cpu")
hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
hub_interface.model.load_state_dict(sd["model"])
return hub_interface
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
"""
    Copy/paste/tweak model's weights to our BART structure.
"""
if not os.path.exists(checkpoint_path):
bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
else:
bart = load_xsum_checkpoint(checkpoint_path)
bart.model.upgrade_state_dict(bart.model.state_dict())
if hf_checkpoint_name is None:
hf_checkpoint_name = checkpoint_path.replace(".", "-")
config = BartConfig.from_pretrained(hf_checkpoint_name)
tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
assert torch.eq(tokens, tokens2).all()
if checkpoint_path == "bart.large.mnli":
state_dict = bart.state_dict()
remove_ignore_keys_(state_dict)
state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
for src, dest in rename_keys:
rename_key(state_dict, src, dest)
model = BartForSequenceClassification(config).eval()
model.load_state_dict(state_dict)
fairseq_output = bart.predict("mnli", tokens, return_logits=True)
new_model_outputs = model(tokens)[0] # logits
else: # no classification heads to worry about
state_dict = bart.model.state_dict()
remove_ignore_keys_(state_dict)
state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
fairseq_output = bart.extract_features(tokens)
if hf_checkpoint_name == "bart-large":
model = BartModel(config).eval()
model.load_state_dict(state_dict)
new_model_outputs = model(tokens).model[0]
else:
model = BartForConditionalGeneration(config).eval() # an existing summarization ckpt
model.model.load_state_dict(state_dict)
if hasattr(model, "lm_head"):
model.lm_head = _make_linear_from_emb(model.model.shared)
new_model_outputs = model.model(tokens)[0]
# Check results
assert fairseq_output.shape == new_model_outputs.shape
assert (fairseq_output == new_model_outputs).all().item()
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
model.save_pretrained(pytorch_dump_folder_path)
def remove_ignore_keys_(state_dict):
for k in IGNORE_KEYS:
state_dict.pop(k, None)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
)
args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
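# Hypothetical command-line usage (placeholder paths and config name, not part of the
# original file):
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#       bart.large.cnn ./bart-large-cnn-pt --hf_config bart-large-cnn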
| 5,400 | 38.137681 | 119 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_t5.py | # coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch T5 model. """
import copy
import itertools
import logging
import math
import os
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from .configuration_t5 import T5Config
from .file_utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import PreTrainedModel, prune_linear_layer
logger = logging.getLogger(__name__)
####################################################
# This dict contains shortcut names and associated URLs
# for the pretrained weights provided with the models
####################################################
T5_PRETRAINED_MODEL_ARCHIVE_MAP = {
"t5-small": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-small-pytorch_model.bin",
"t5-base": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-base-pytorch_model.bin",
"t5-large": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-large-pytorch_model.bin",
"t5-3b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-3b-pytorch_model.bin",
"t5-11b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-11b-pytorch_model.bin",
}
####################################################
# This is a conversion method from TF 1.0 to PyTorch
# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28
####################################################
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model.
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
tf_weights = {}
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
tf_weights[name] = array
for txt_name in names:
name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info("Skipping {}".format("/".join(name)))
tf_weights.pop(txt_name, None)
continue
if "_slot_" in name[-1]:
logger.info("Skipping {}".format("/".join(name)))
tf_weights.pop(txt_name, None)
continue
pointer = model
array = tf_weights[txt_name]
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
# elif scope_names[0] == 'scale':
# pointer = getattr(pointer, 'weight')
# elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta':
# pointer = getattr(pointer, 'bias')
# elif scope_names[0] == 'squad':
# pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if scope_names[0] not in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
if scope_names[0] != "embedding":
logger.info("Transposing numpy weight of shape {} for {}".format(array.shape, name))
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array.astype(np.float32))
tf_weights.pop(txt_name, None)
logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
# logger.info("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
return model
####################################################
# PyTorch Models are constructed by sub-classing
# - torch.nn.Module for the layers and
# - PreTrainedModel for the models (it-self a sub-class of torch.nn.Module)
####################################################
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
""" Construct a layernorm module in the T5 style
            No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
variance = x.pow(2).mean(-1, keepdim=True)
x = x / torch.sqrt(variance + self.variance_epsilon)
return self.weight * x
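# Illustrative note (not part of the original file): T5LayerNorm above is an RMS-style
# normalization; for a hidden vector x it computes, element-wise,
#   weight * x / sqrt(mean(x ** 2) + variance_epsilon)
# i.e. there is no mean subtraction and no bias term, unlike a standard LayerNorm.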
class T5DenseReluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
h = self.wi(hidden_states)
h = F.relu(h)
h = self.dropout(h)
h = self.wo(h)
return h
class T5LayerFF(nn.Module):
def __init__(self, config):
super().__init__()
self.DenseReluDense = T5DenseReluDense(config)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
norm_x = self.layer_norm(hidden_states)
y = self.DenseReluDense(norm_x)
layer_output = hidden_states + self.dropout(y)
return layer_output
class T5Attention(nn.Module):
NEW_ID = itertools.count()
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.layer_id = next(T5Attention.NEW_ID)
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.output_attentions = config.output_attentions
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.d_model = config.d_model
self.d_kv = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.d_kv
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.n_heads, self.d_kv)
heads = set(heads) - self.pruned_heads
for head in heads:
head -= sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.d_kv * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are
invalid.
We use smaller buckets for small absolute relative_position and larger buckets
for larger absolute relative_positions. All relative positions >=max_distance
map to the same bucket. All relative positions <=-max_distance map to the
same bucket. This should allow for more graceful generalization to longer
sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += (n < 0).to(torch.long) * num_buckets # mtf.to_int32(mtf.less(n, 0)) * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
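    # Illustrative sketch (not part of the original file), using the defaults
    # (bidirectional=True, num_buckets=32, max_distance=128): the 32 buckets split into two
    # halves of 16 for negative and positive relative positions, the first 8 of each half
    # are exact offsets and the rest grow logarithmically, saturating at max_distance, e.g.
    #   T5Attention._relative_position_bucket(torch.tensor([-2, 2, 500]))
    #   # -> tensor([ 2, 18, 31])   (exact bucket, exact bucket in the other half, saturated)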
def compute_bias(self, qlen, klen):
""" Compute binned relative position bias """
context_position = torch.arange(qlen, dtype=torch.long)[:, None]
memory_position = torch.arange(klen, dtype=torch.long)[None, :]
relative_position = memory_position - context_position # shape (qlen, klen)
rp_bucket = self._relative_position_bucket(
relative_position, # shape (qlen, klen)
bidirectional=not self.is_decoder,
num_buckets=self.relative_attention_num_buckets,
)
rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)
values = self.relative_attention_bias(rp_bucket) # shape (qlen, klen, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, qlen, klen)
return values
def forward(self, input, mask=None, kv=None, position_bias=None, cache=None, head_mask=None):
"""
Self-attention (if kv is None) or attention over source sentence (provided by kv).
"""
# Input is (bs, qlen, dim)
# Mask is (bs, klen) (non-causal) or (bs, klen, klen)
bs, qlen, dim = input.size()
if kv is None:
klen = qlen if cache is None else cache["slen"] + qlen
else:
klen = kv.size(1)
def shape(x):
""" projection """
return x.view(bs, -1, self.n_heads, self.d_kv).transpose(1, 2)
def unshape(x):
""" compute context """
return x.transpose(1, 2).contiguous().view(bs, -1, self.inner_dim)
q = shape(self.q(input)) # (bs, n_heads, qlen, dim_per_head)
if kv is None:
k = shape(self.k(input)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v(input)) # (bs, n_heads, qlen, dim_per_head)
elif cache is None or self.layer_id not in cache:
k = v = kv
k = shape(self.k(k)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v(v)) # (bs, n_heads, qlen, dim_per_head)
if cache is not None:
if self.layer_id in cache:
if kv is None:
k_, v_ = cache[self.layer_id]
k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)
v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)
else:
k, v = cache[self.layer_id]
cache[self.layer_id] = (k, v)
# q = q / math.sqrt(dim_per_head) # No scaling in T5
scores = torch.einsum("bnqd,bnkd->bnqk", q, k) # (bs, n_heads, qlen, klen)
if position_bias is None:
if not self.has_relative_attention_bias:
raise ValueError("No position_bias provided and no weights to compute position_bias")
position_bias = self.compute_bias(qlen, klen)
if mask is not None:
position_bias = position_bias + mask # (bs, n_heads, qlen, klen)
scores += position_bias
weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)
weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
context = unshape(context) # (bs, qlen, dim)
context = self.o(context)
outputs = (context,)
if self.output_attentions:
outputs = outputs + (weights,)
if self.has_relative_attention_bias:
outputs = outputs + (position_bias,)
return outputs
class T5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states, attention_mask=None, position_bias=None, head_mask=None):
norm_x = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
norm_x, mask=attention_mask, position_bias=position_bias, head_mask=head_mask
)
y = attention_output[0]
layer_output = hidden_states + self.dropout(y)
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
class T5LayerCrossAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.EncDecAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states, kv, attention_mask=None, position_bias=None, head_mask=None):
norm_x = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
norm_x, mask=attention_mask, kv=kv, position_bias=position_bias, head_mask=head_mask
)
y = attention_output[0]
layer_output = hidden_states + self.dropout(y)
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
class T5Block(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))
if self.is_decoder:
self.layer.append(T5LayerCrossAttention(config, has_relative_attention_bias=has_relative_attention_bias))
self.layer.append(T5LayerFF(config))
else:
self.layer.append(T5LayerFF(config))
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
head_mask=None,
):
self_attention_outputs = self.layer[0](
hidden_states, attention_mask=attention_mask, position_bias=position_bias, head_mask=head_mask
)
hidden_states = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # Keep self-attention outputs and relative position weights
if not self.is_decoder:
hidden_states = self.layer[1](hidden_states)
else:
cross_attention_outputs = self.layer[1](
hidden_states,
kv=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
head_mask=head_mask,
)
hidden_states = cross_attention_outputs[0]
outputs = (
outputs + cross_attention_outputs[1:]
) # Keep cross-attention outputs and relative position weights
hidden_states = self.layer[2](hidden_states)
outputs = (hidden_states,) + outputs # add attentions if we output them
return outputs # hidden-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
class T5PreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = T5Config
pretrained_model_archive_map = T5_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_t5
base_model_prefix = "transformer"
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"decoder_input_ids": input_ids,
"input_ids": input_ids,
"decoder_attention_mask": input_mask,
}
return dummy_inputs
def _init_weights(self, module):
""" Initialize the weights """
factor = self.config.initializer_factor # Used for testing weights initialization
if isinstance(module, T5LayerNorm):
module.weight.data.fill_(factor * 1.0)
elif isinstance(module, (T5Model, T5ForConditionalGeneration)):
# Mesh TensorFlow embeddings initialization
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
elif isinstance(module, T5DenseReluDense):
# Mesh TensorFlow FF initialization
# See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
# and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi, "bias") and module.wi.bias is not None:
module.wi.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5Attention):
# Mesh TensorFlow attention initialization to avoid scaling before softmax
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
d_model = self.config.d_model
d_kv = self.config.d_kv
n_heads = self.config.num_heads
module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * d_kv) ** -0.5))
module.k.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))
module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))
module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * d_kv) ** -0.5))
if module.has_relative_attention_bias:
module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert (
decoder_start_token_id is not None
), "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information"
# shift inputs to the right
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in lm_labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
assert torch.all(shifted_input_ids >= 0).item(), "Verify that `lm_labels` has only positive values and -100"
return shifted_input_ids
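    # Illustrative sketch (not part of the original file): assuming the T5 convention
    # decoder_start_token_id == pad_token_id == 0, a padded label sequence such as
    #   lm_labels      = [ 37, 423, 1, -100, -100 ]
    # is shifted into decoder inputs
    #   decoder_inputs = [  0,  37, 423, 1,    0  ]
    # i.e. prepend the start token, drop the last position, and map -100 back to the pad id.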
class T5Stack(T5PreTrainedModel):
def __init__(self, config, embed_tokens=None):
super().__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
self.block = nn.ModuleList(
[T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
)
self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.init_weights()
def get_input_embeddings(self):
return self.embed_tokens
def get_output_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
            assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
if attention_mask is None:
attention_mask = torch.ones(batch_size, seq_length).to(inputs_embeds.device)
if self.is_decoder and encoder_attention_mask is None:
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(batch_size, encoder_seq_length).to(inputs_embeds.device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
seq_ids = torch.arange(seq_length, device=inputs_embeds.device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
causal_mask = causal_mask.to(attention_mask)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -1e9 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
# extended_attention_mask = (extended_attention_mask == extended_attention_mask.transpose(-1, -2))
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -1e9
if self.is_decoder:
            # If a 2D or 3D attention mask is provided for the cross-attention
            # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask == encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_layers
all_hidden_states = ()
all_attentions = ()
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, layer_module in enumerate(self.block):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
head_mask=head_mask[i],
)
# layer_outputs is a tuple with:
# hidden-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
hidden_states = layer_outputs[0]
if i == 0:
# We share the position biases between the layers - the first layer store them
# layer_outputs = hidden-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
position_bias = layer_outputs[2 if self.output_attentions else 1]
if self.is_decoder:
encoder_decoder_position_bias = layer_outputs[4 if self.output_attentions else 2]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],) # We keep only self-attention weights for now
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
T5_START_DOCSTRING = r""" The T5 model was proposed in
`Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer`_
by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.
It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer`:
https://arxiv.org/abs/1910.10683
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
T5_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
            T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.
Indices can be obtained using :class:`transformers.T5Tokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
To know more on how to prepare :obj:`input_ids` for pre-training take a look at
`T5 Training <./t5.html#training>`_ .
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`, defaults to :obj:`None`):
Tuple consists of (`last_hidden_state`, `optional`: `hidden_states`, `optional`: `attentions`)
`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`) is a sequence of hidden-states at the output of the last layer of the encoder.
Used in the cross-attention of the decoder.
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`, defaults to :obj:`None`):
Provide for sequence to sequence training. T5 uses the pad_token_id as the starting token for decoder_input_ids generation.
To know more on how to prepare :obj:`decoder_input_ids` for pre-training take a look at
`T5 Training <./t5.html#training>`_ .
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`, defaults to :obj:`None`):
Default behavior: generate a tensor that ignores pad tokens in decoder_input_ids. Causal mask will also be used by default.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
head_mask: (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings(
    "The bare T5 Model transformer outputting raw hidden-states without any specific head on top.",
    T5_START_DOCSTRING,
)
class T5Model(T5PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
self.decoder = T5Stack(decoder_config, self.shared)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_callable(T5_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
head_mask=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.T5Config`) and inputs.
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import T5Tokenizer, T5Model
tokenizer = T5Tokenizer.from_pretrained('t5-small')
model = T5Model.from_pretrained('t5-small')
input_ids = tokenizer.encode("Hello, my dog is cute", return_tensors="pt") # Batch size 1
outputs = model(input_ids=input_ids, decoder_input_ids=input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask
)
hidden_states = encoder_outputs[0]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=head_mask,
)
return decoder_outputs + encoder_outputs
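        # Illustrative note (added for exposition, not from the original source): because `forward`
        # accepts pre-computed `encoder_outputs`, the encoder can be run once and reused across
        # several decoder passes, e.g.:
        #     encoder_outputs = model.encoder(input_ids=input_ids, attention_mask=attention_mask)
        #     step_out = model(encoder_outputs=encoder_outputs, attention_mask=attention_mask,
        #                      decoder_input_ids=decoder_ids_so_far)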
@add_start_docstrings("""T5 Model with a `language modeling` head on top. """, T5_START_DOCSTRING)
class T5ForConditionalGeneration(T5PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
self.decoder = T5Stack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_output_embeddings(self):
return self.lm_head
def get_encoder(self):
return self.encoder
@add_start_docstrings_to_callable(T5_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
lm_labels=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
head_mask=None,
):
r"""
        lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence-to-sequence language modeling loss.
            Indices should be in :obj:`[-100, 0, ..., config.vocab_size - 1]`.
            All labels set to ``-100`` are ignored (masked); the loss is only
            computed for labels in ``[0, ..., config.vocab_size - 1]``.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.T5Config`) and inputs.
            loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`lm_labels` is provided):
                Language modeling loss (cross entropy).
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention.
Examples::
from transformers import T5Tokenizer, T5ForConditionalGeneration
tokenizer = T5Tokenizer.from_pretrained('t5-small')
model = T5ForConditionalGeneration.from_pretrained('t5-small')
input_ids = tokenizer.encode("Hello, my dog is cute", return_tensors="pt") # Batch size 1
outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
tokenizer = T5Tokenizer.from_pretrained('t5-small')
model = T5ForConditionalGeneration.from_pretrained('t5-small')
input_ids = tokenizer.encode("summarize: Hello, my dog is cute", return_tensors="pt") # Batch size 1
outputs = model.generate(input_ids)
"""
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask
)
hidden_states = encoder_outputs[0]
if lm_labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(lm_labels)
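            # Illustrative note: `_shift_right` (defined on T5PreTrainedModel) prepends the decoder
            # start token (the pad token for T5) and drops the last position, e.g. lm_labels
            # [A, B, C] -> decoder_input_ids [<pad>, A, B]; label positions set to -100 are
            # replaced by the pad token id before being fed to the decoder.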
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=head_mask,
)
sequence_output = decoder_outputs[0]
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim ** -0.5)
lm_logits = self.lm_head(sequence_output)
decoder_outputs = (lm_logits,) + decoder_outputs[1:] # Add hidden states and attention if they are here
if lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1))
decoder_outputs = (
loss,
) + decoder_outputs # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
return decoder_outputs + encoder_outputs
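        # Illustrative note: the returned tuple is (loss,) -- only when `lm_labels` is given --
        # followed by `lm_logits`, any extra decoder outputs (hidden states / attentions), and
        # finally the encoder outputs, which is why the docstring example reads the first two
        # elements as `loss, prediction_scores`.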
def prepare_inputs_for_generation(self, input_ids, past, attention_mask, **kwargs):
assert past is not None, "past has to be defined for encoder_outputs"
# first step
if type(past) is tuple:
encoder_outputs = past
else:
encoder_outputs = (past,)
return {
"decoder_input_ids": input_ids,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
}
def _reorder_cache(self, past, beam_idx):
# past does not have to be re-ordered for T5.
return past
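        # Illustrative note: beam search normally gathers cached key/value states along the beam
        # dimension with `beam_idx`; here `past` only holds the encoder outputs, which are
        # identical for every beam hypothesis, so returning them unchanged is sufficient.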
| 47,057 | 46.294472 | 207 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_electra.py | import logging
import tensorflow as tf
from transformers import ElectraConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_bert import ACT2FN, TFBertEncoder, TFBertPreTrainedModel
from .modeling_tf_utils import get_initializer, shape_list
logger = logging.getLogger(__name__)
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP = {
"google/electra-small-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-generator/tf_model.h5",
"google/electra-base-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-generator/tf_model.h5",
"google/electra-large-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-generator/tf_model.h5",
"google/electra-small-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-discriminator/tf_model.h5",
"google/electra-base-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-discriminator/tf_model.h5",
"google/electra-large-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-discriminator/tf_model.h5",
}
class TFElectraEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_size = config.embedding_size
self.initializer_range = config.initializer_range
self.position_embeddings = tf.keras.layers.Embedding(
config.max_position_embeddings,
config.embedding_size,
embeddings_initializer=get_initializer(self.initializer_range),
name="position_embeddings",
)
self.token_type_embeddings = tf.keras.layers.Embedding(
config.type_vocab_size,
config.embedding_size,
embeddings_initializer=get_initializer(self.initializer_range),
name="token_type_embeddings",
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def build(self, input_shape):
"""Build shared word embedding layer """
with tf.name_scope("word_embeddings"):
# Create and initialize weights. The random normal initializer was chosen
# arbitrarily, and works well.
self.word_embeddings = self.add_weight(
"weight",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
def call(self, inputs, mode="embedding", training=False):
"""Get token embeddings of inputs.
Args:
inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids)
mode: string, a valid value is one of "embedding" and "linear".
Returns:
outputs: (1) If mode == "embedding", output embedding tensor, float32 with
shape [batch_size, length, embedding_size]; (2) mode == "linear", output
linear tensor, float32 with shape [batch_size, length, vocab_size].
Raises:
ValueError: if mode is not valid.
Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
if mode == "embedding":
return self._embedding(inputs, training=training)
elif mode == "linear":
return self._linear(inputs)
else:
raise ValueError("mode {} is not valid.".format(mode))
def _embedding(self, inputs, training=False):
"""Applies embedding based on inputs tensor."""
input_ids, position_ids, token_type_ids, inputs_embeds = inputs
if input_ids is not None:
input_shape = shape_list(input_ids)
else:
input_shape = shape_list(inputs_embeds)[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :]
if token_type_ids is None:
token_type_ids = tf.fill(input_shape, 0)
if inputs_embeds is None:
inputs_embeds = tf.gather(self.word_embeddings, input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings, training=training)
return embeddings
def _linear(self, inputs):
"""Computes logits by running inputs through a linear layer.
Args:
inputs: A float32 tensor with shape [batch_size, length, hidden_size]
Returns:
float32 tensor with shape [batch_size, length, vocab_size].
"""
batch_size = shape_list(inputs)[0]
length = shape_list(inputs)[1]
x = tf.reshape(inputs, [-1, self.embedding_size])
logits = tf.matmul(x, self.word_embeddings, transpose_b=True)
return tf.reshape(logits, [batch_size, length, self.vocab_size])
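# --- Illustrative sketch (added for exposition; not part of the original file) --------------
# "embedding" and "linear" modes share one [vocab_size, embedding_size] matrix: lookups use it
# directly, while vocabulary logits use its transpose (weight tying, as exploited by the MLM
# head below). A standalone rendering of the "linear" path; tensor shapes are assumptions made
# for illustration only:
def _example_tied_output_projection(hidden_states, word_embeddings):
    """[batch, length, emb] x [vocab, emb]^T -> [batch, length, vocab] logits."""
    batch_size, length, emb_size = shape_list(hidden_states)
    x = tf.reshape(hidden_states, [-1, emb_size])
    logits = tf.matmul(x, word_embeddings, transpose_b=True)
    return tf.reshape(logits, [batch_size, length, shape_list(word_embeddings)[0]])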
class TFElectraDiscriminatorPredictions(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(config.hidden_size, name="dense")
self.dense_prediction = tf.keras.layers.Dense(1, name="dense_prediction")
self.config = config
def call(self, discriminator_hidden_states, training=False):
hidden_states = self.dense(discriminator_hidden_states)
hidden_states = ACT2FN[self.config.hidden_act](hidden_states)
logits = tf.squeeze(self.dense_prediction(hidden_states))
return logits
class TFElectraGeneratorPredictions(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dense = tf.keras.layers.Dense(config.embedding_size, name="dense")
def call(self, generator_hidden_states, training=False):
hidden_states = self.dense(generator_hidden_states)
hidden_states = ACT2FN["gelu"](hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class TFElectraPreTrainedModel(TFBertPreTrainedModel):
config_class = ElectraConfig
pretrained_model_archive_map = TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "electra"
def get_extended_attention_mask(self, attention_mask, input_shape):
if attention_mask is None:
attention_mask = tf.fill(input_shape, 1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, tf.float32)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
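        # Illustrative note: an `attention_mask` row of [1, 1, 0] becomes an additive mask of
        # [0.0, 0.0, -10000.0]; added to the raw attention scores before the softmax, it drives
        # the padded position's attention probability to (effectively) zero.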
def get_head_mask(self, head_mask):
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.config.num_hidden_layers
return head_mask
class TFElectraMainLayer(TFElectraPreTrainedModel):
config_class = ElectraConfig
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
self.embeddings = TFElectraEmbeddings(config, name="embeddings")
if config.embedding_size != config.hidden_size:
self.embeddings_project = tf.keras.layers.Dense(config.hidden_size, name="embeddings_project")
self.encoder = TFBertEncoder(config, name="encoder")
self.config = config
def get_input_embeddings(self):
return self.embeddings
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
raise NotImplementedError
def call(
self,
inputs,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
training=False,
):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
position_ids = inputs[3] if len(inputs) > 3 else position_ids
head_mask = inputs[4] if len(inputs) > 4 else head_mask
inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds
assert len(inputs) <= 6, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
attention_mask = inputs.get("attention_mask", attention_mask)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
assert len(inputs) <= 6, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if attention_mask is None:
attention_mask = tf.fill(input_shape, 1)
if token_type_ids is None:
token_type_ids = tf.fill(input_shape, 0)
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
head_mask = self.get_head_mask(head_mask)
hidden_states = self.embeddings([input_ids, position_ids, token_type_ids, inputs_embeds], training=training)
if hasattr(self, "embeddings_project"):
hidden_states = self.embeddings_project(hidden_states, training=training)
hidden_states = self.encoder([hidden_states, extended_attention_mask, head_mask], training=training)
return hidden_states
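        # Illustrative note: ELECTRA-small uses an embedding size smaller than the hidden size,
        # which is why `embeddings_project` (when present) maps the embedding output up to
        # `hidden_size` before the shared BERT-style encoder is applied.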
ELECTRA_START_DOCSTRING = r"""
This model is a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ sub-class.
Use it as a regular TF 2.0 Keras Model and
refer to the TF 2.0 documentation for all matter related to general usage and behavior.
.. note::
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
- a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.ElectraConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
ELECTRA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.ElectraTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, embedding_dim)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
(if set to :obj:`False`) for evaluation.
"""
@add_start_docstrings(
    "The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to "
    "the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the "
    "hidden size and embedding size are different. "
    ""
    "Both the generator and discriminator checkpoints may be loaded into this model.",
ELECTRA_START_DOCSTRING,
)
class TFElectraModel(TFElectraPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.electra = TFElectraMainLayer(config, name="electra")
def get_input_embeddings(self):
return self.electra.embeddings
@add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.ElectraConfig`) and inputs:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import ElectraTokenizer, TFElectraModel
tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator')
model = TFElectraModel.from_pretrained('google/electra-small-discriminator')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
outputs = self.electra(inputs, **kwargs)
return outputs
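        # Illustrative note: as described in ELECTRA_START_DOCSTRING, `inputs` may be a single
        # tensor, a list/tuple in docstring order, or a dict keyed by input name, e.g.:
        #     model(input_ids)
        #     model([input_ids, attention_mask])
        #     model({"input_ids": input_ids, "attention_mask": attention_mask})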
@add_start_docstrings(
"""
Electra model with a binary classification head on top as used during pre-training for identifying generated
tokens.
Even though both the discriminator and generator may be loaded into this model, the discriminator is
the only model of the two to have the correct classification head to be used for this model.""",
ELECTRA_START_DOCSTRING,
)
class TFElectraForPreTraining(TFElectraPreTrainedModel):
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
self.electra = TFElectraMainLayer(config, name="electra")
self.discriminator_predictions = TFElectraDiscriminatorPredictions(config, name="discriminator_predictions")
def get_input_embeddings(self):
return self.electra.embeddings
@add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
training=False,
):
r"""
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.ElectraConfig`) and inputs:
scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Prediction scores of the head (scores for each token before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import ElectraTokenizer, TFElectraForPreTraining
tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator')
model = TFElectraForPreTraining.from_pretrained('google/electra-small-discriminator')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
scores = outputs[0]
"""
discriminator_hidden_states = self.electra(
input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, training=training
)
discriminator_sequence_output = discriminator_hidden_states[0]
logits = self.discriminator_predictions(discriminator_sequence_output)
output = (logits,)
output += discriminator_hidden_states[1:]
return output # (loss), scores, (hidden_states), (attentions)
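        # Illustrative note: the returned `logits` are per-token scores for the replaced-token
        # detection objective; a hard prediction can be obtained with e.g.
        #     predictions = tf.cast(tf.nn.sigmoid(logits) >= 0.5, tf.int32)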
class TFElectraMaskedLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def call(self, hidden_states, training=False):
hidden_states = self.input_embeddings(hidden_states, mode="linear")
hidden_states = hidden_states + self.bias
return hidden_states
@add_start_docstrings(
"""
Electra model with a language modeling head on top.
Even though both the discriminator and generator may be loaded into this model, the generator is
the only model of the two to have been trained for the masked language modeling task.""",
ELECTRA_START_DOCSTRING,
)
class TFElectraForMaskedLM(TFElectraPreTrainedModel):
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
self.vocab_size = config.vocab_size
self.electra = TFElectraMainLayer(config, name="electra")
self.generator_predictions = TFElectraGeneratorPredictions(config, name="generator_predictions")
if isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
self.generator_lm_head = TFElectraMaskedLMHead(config, self.electra.embeddings, name="generator_lm_head")
def get_input_embeddings(self):
return self.electra.embeddings
def get_output_embeddings(self):
return self.generator_lm_head
@add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
training=False,
):
r"""
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.ElectraConfig`) and inputs:
prediction_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import ElectraTokenizer, TFElectraForMaskedLM
tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-generator')
model = TFElectraForMaskedLM.from_pretrained('google/electra-small-generator')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
prediction_scores = outputs[0]
"""
generator_hidden_states = self.electra(
input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, training=training
)
generator_sequence_output = generator_hidden_states[0]
prediction_scores = self.generator_predictions(generator_sequence_output, training=training)
prediction_scores = self.generator_lm_head(prediction_scores, training=training)
output = (prediction_scores,)
output += generator_hidden_states[1:]
return output # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
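        # Illustrative note: greedy token predictions can be read off the returned scores with
        # e.g. `tf.argmax(prediction_scores, axis=-1)`, typically only at the positions that were
        # masked in the input.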
@add_start_docstrings(
"""
Electra model with a token classification head on top.
Both the discriminator and generator may be loaded into this model.""",
ELECTRA_START_DOCSTRING,
)
class TFElectraForTokenClassification(TFElectraPreTrainedModel):
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
self.electra = TFElectraMainLayer(config, name="electra")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(config.num_labels, name="classifier")
@add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
training=False,
):
r"""
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.ElectraConfig`) and inputs:
scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import ElectraTokenizer, TFElectraForTokenClassification
tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator')
model = TFElectraForTokenClassification.from_pretrained('google/electra-small-discriminator')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
scores = outputs[0]
"""
discriminator_hidden_states = self.electra(
input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, training=training
)
discriminator_sequence_output = discriminator_hidden_states[0]
discriminator_sequence_output = self.dropout(discriminator_sequence_output)
logits = self.classifier(discriminator_sequence_output)
output = (logits,)
output += discriminator_hidden_states[1:]
return output # (loss), scores, (hidden_states), (attentions)
| 28,786 | 45.732143 | 161 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_xlnet.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 XLNet model.
"""
import logging
import numpy as np
import tensorflow as tf
from .configuration_xlnet import XLNetConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import (
TFPreTrainedModel,
TFSequenceSummary,
TFSharedEmbeddings,
get_initializer,
keras_serializable,
shape_list,
)
logger = logging.getLogger(__name__)
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP = {
"xlnet-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-tf_model.h5",
"xlnet-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-tf_model.h5",
}
def gelu(x):
""" Implementation of the gelu activation function.
XLNet is using OpenAI GPT's gelu
Also see https://arxiv.org/abs/1606.08415
"""
cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
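# Note: the expression above is the tanh approximation of GELU used by OpenAI GPT and the
# original XLNet code; the exact definition is gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))).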
def swish(x):
return x * tf.sigmoid(x)
ACT2FN = {
"gelu": tf.keras.layers.Activation(gelu),
"relu": tf.keras.activations.relu,
"swish": tf.keras.layers.Activation(swish),
}
class TFXLNetRelativeAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.output_attentions = config.output_attentions
if config.d_model % config.n_head != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.d_model, config.n_head)
)
self.n_head = config.n_head
self.d_head = config.d_head
self.d_model = config.d_model
self.scale = 1 / (config.d_head ** 0.5)
self.initializer_range = config.initializer_range
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.dropout = tf.keras.layers.Dropout(config.dropout)
def build(self, input_shape):
initializer = get_initializer(self.initializer_range)
self.q = self.add_weight(
shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="q"
)
self.k = self.add_weight(
shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="k"
)
self.v = self.add_weight(
shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="v"
)
self.o = self.add_weight(
shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="o"
)
self.r = self.add_weight(
shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="r"
)
self.r_r_bias = self.add_weight(
shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
)
self.r_s_bias = self.add_weight(
shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_s_bias"
)
self.r_w_bias = self.add_weight(
shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
)
self.seg_embed = self.add_weight(
shape=(2, self.n_head, self.d_head), initializer=initializer, trainable=True, name="seg_embed"
)
super().build(input_shape)
def prune_heads(self, heads):
raise NotImplementedError
def rel_shift(self, x, klen=-1):
"""perform relative shift to form the relative attention score."""
x_size = shape_list(x)
x = tf.reshape(x, (x_size[1], x_size[0], x_size[2], x_size[3]))
x = x[1:, ...]
x = tf.reshape(x, (x_size[0], x_size[1] - 1, x_size[2], x_size[3]))
x = x[:, 0:klen, :, :]
# x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
return x
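        # Illustrative note: this is the Transformer-XL "relative shift" trick. The scores in `x`
        # are computed against a fixed grid of relative position embeddings; the
        # reshape / drop-first-row / reshape sequence realigns them so that each query position
        # reads the scores for its own relative distances, and the result is then truncated to
        # the first `klen` key positions.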
def rel_attn_core(self, inputs, training=False):
"""Core relative positional attention operations."""
q_head, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask, head_mask = inputs
# content based attention score
ac = tf.einsum("ibnd,jbnd->ijbn", q_head + self.r_w_bias, k_head_h)
# position based attention score
bd = tf.einsum("ibnd,jbnd->ijbn", q_head + self.r_r_bias, k_head_r)
bd = self.rel_shift(bd, klen=shape_list(ac)[1])
# segment based attention score
if seg_mat is None:
ef = 0
else:
ef = tf.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
ef = tf.einsum("ijbs,ibns->ijbn", seg_mat, ef)
# merge attention scores and perform masking
attn_score = (ac + bd + ef) * self.scale
if attn_mask is not None:
# attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
if attn_mask.dtype == tf.float16:
attn_score = attn_score - 65500 * attn_mask
else:
attn_score = attn_score - 1e30 * attn_mask
# attention probability
attn_prob = tf.nn.softmax(attn_score, axis=1)
attn_prob = self.dropout(attn_prob, training=training)
# Mask heads if we want to
if head_mask is not None:
attn_prob = attn_prob * head_mask
# attention output
attn_vec = tf.einsum("ijbn,jbnd->ibnd", attn_prob, v_head_h)
if self.output_attentions:
return attn_vec, attn_prob
return attn_vec
def post_attention(self, inputs, residual=True, training=False):
"""Post-attention processing."""
# post-attention projection (back to `d_model`)
h, attn_vec = inputs
attn_out = tf.einsum("ibnd,hnd->ibh", attn_vec, self.o)
attn_out = self.dropout(attn_out, training=training)
if residual:
attn_out = attn_out + h
output = self.layer_norm(attn_out)
return output
def call(self, inputs, training=False):
(h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems, target_mapping, head_mask) = inputs
if g is not None:
# Two-stream attention with relative positional encoding.
# content based attention score
if mems is not None and len(shape_list(mems)) > 1:
cat = tf.concat([mems, h], axis=0)
else:
cat = h
# content-based key head
k_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.k)
# content-based value head
v_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.v)
# position-based key head
k_head_r = tf.einsum("ibh,hnd->ibnd", r, self.r)
# h-stream
# content-stream query head
q_head_h = tf.einsum("ibh,hnd->ibnd", h, self.q)
# core attention ops
attn_vec_h = self.rel_attn_core(
[q_head_h, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_h, head_mask], training=training
)
if self.output_attentions:
attn_vec_h, attn_prob_h = attn_vec_h
# post processing
output_h = self.post_attention([h, attn_vec_h], training=training)
# g-stream
# query-stream query head
q_head_g = tf.einsum("ibh,hnd->ibnd", g, self.q)
# core attention ops
if target_mapping is not None:
q_head_g = tf.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
attn_vec_g = self.rel_attn_core(
[q_head_g, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_g, head_mask], training=training
)
if self.output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
attn_vec_g = tf.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
else:
attn_vec_g = self.rel_attn_core(
[q_head_g, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_g, head_mask], training=training
)
if self.output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
# post processing
output_g = self.post_attention([g, attn_vec_g], training=training)
if self.output_attentions:
attn_prob = attn_prob_h, attn_prob_g
else:
# Multi-head attention with relative positional encoding
if mems is not None and len(shape_list(mems)) > 1:
cat = tf.concat([mems, h], axis=0)
else:
cat = h
# content heads
q_head_h = tf.einsum("ibh,hnd->ibnd", h, self.q)
k_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.k)
v_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.v)
# positional heads
k_head_r = tf.einsum("ibh,hnd->ibnd", r, self.r)
# core attention ops
attn_vec = self.rel_attn_core(
[q_head_h, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_h, head_mask], training=training
)
if self.output_attentions:
attn_vec, attn_prob = attn_vec
# post processing
output_h = self.post_attention([h, attn_vec], training=training)
output_g = None
outputs = (output_h, output_g)
if self.output_attentions:
outputs = outputs + (attn_prob,)
return outputs
class TFXLNetFeedForward(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.layer_1 = tf.keras.layers.Dense(
config.d_inner, kernel_initializer=get_initializer(config.initializer_range), name="layer_1"
)
self.layer_2 = tf.keras.layers.Dense(
config.d_model, kernel_initializer=get_initializer(config.initializer_range), name="layer_2"
)
self.dropout = tf.keras.layers.Dropout(config.dropout)
if isinstance(config.ff_activation, str):
self.activation_function = ACT2FN[config.ff_activation]
else:
self.activation_function = config.ff_activation
def call(self, inp, training=False):
output = inp
output = self.layer_1(output)
output = self.activation_function(output)
output = self.dropout(output, training=training)
output = self.layer_2(output)
output = self.dropout(output, training=training)
output = self.layer_norm(output + inp)
return output
class TFXLNetLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.rel_attn = TFXLNetRelativeAttention(config, name="rel_attn")
self.ff = TFXLNetFeedForward(config, name="ff")
self.dropout = tf.keras.layers.Dropout(config.dropout)
def call(self, inputs, training=False):
outputs = self.rel_attn(inputs, training=training)
output_h, output_g = outputs[:2]
if output_g is not None:
output_g = self.ff(output_g, training=training)
output_h = self.ff(output_h, training=training)
outputs = (output_h, output_g) + outputs[2:] # Add again attentions if there are there
return outputs
class TFXLNetLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def call(self, hidden_states):
hidden_states = self.input_embeddings(hidden_states, mode="linear")
hidden_states = hidden_states + self.bias
return hidden_states
@keras_serializable
class TFXLNetMainLayer(tf.keras.layers.Layer):
config_class = XLNetConfig
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.output_past = config.output_past
self.mem_len = config.mem_len
self.reuse_len = config.reuse_len
self.d_model = config.d_model
self.same_length = config.same_length
self.attn_type = config.attn_type
self.bi_data = config.bi_data
self.clamp_len = config.clamp_len
self.n_layer = config.n_layer
self.use_bfloat16 = config.use_bfloat16
self.initializer_range = config.initializer_range
self.word_embedding = TFSharedEmbeddings(
config.vocab_size, config.d_model, initializer_range=config.initializer_range, name="word_embedding"
)
self.layer = [TFXLNetLayer(config, name="layer_._{}".format(i)) for i in range(config.n_layer)]
self.dropout = tf.keras.layers.Dropout(config.dropout)
def get_input_embeddings(self):
return self.word_embedding
def build(self, input_shape):
initializer = get_initializer(self.initializer_range)
self.mask_emb = self.add_weight(
shape=(1, 1, self.d_model), initializer=initializer, trainable=True, name="mask_emb"
)
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
raise NotImplementedError
def create_mask(self, qlen, mlen, dtype=tf.float32):
"""
Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.
Args:
            qlen: length of the current segment (number of query tokens).
            mlen: length of the cached memory (number of additional positions that can be attended to).
::
same_length=False: same_length=True:
<mlen > < qlen > <mlen > < qlen >
^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1]
[0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1]
qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1]
[0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1]
v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0]
"""
attn_mask = tf.ones([qlen, qlen], dtype=dtype)
        mask_u = tf.linalg.band_part(attn_mask, 0, -1)
        mask_dia = tf.linalg.band_part(attn_mask, 0, 0)
        attn_mask_pad = tf.zeros([qlen, mlen], dtype=dtype)
        ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)
        if self.same_length:
            mask_l = tf.linalg.band_part(attn_mask, -1, 0)
ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1)
return ret
def cache_mem(self, curr_out, prev_mem):
"""cache hidden states into memory."""
if self.reuse_len is not None and self.reuse_len > 0:
curr_out = curr_out[: self.reuse_len]
if prev_mem is None:
new_mem = curr_out[-self.mem_len :]
else:
new_mem = tf.concat([prev_mem, curr_out], 0)[-self.mem_len :]
return tf.stop_gradient(new_mem)
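        # Illustrative note: e.g. with `mem_len = 3`, an old memory [m1, m2, m3] and a new segment
        # [h1, h2] are concatenated to [m1, m2, m3, h1, h2] and truncated to the last 3 entries,
        # giving [m3, h1, h2]; `tf.stop_gradient` prevents backprop into the cached segments.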
@staticmethod
def positional_embedding(pos_seq, inv_freq, bsz=None):
sinusoid_inp = tf.einsum("i,d->id", pos_seq, inv_freq)
pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], axis=-1)
pos_emb = pos_emb[:, None, :]
if bsz is not None:
pos_emb = tf.tile(pos_emb, [1, bsz, 1])
return pos_emb
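        # Note: `sinusoid_inp[i, d] = pos_seq[i] * inv_freq[d]`, so every relative position is
        # mapped to [sin(...), cos(...)] features of total width d_model, the usual
        # Transformer-XL sinusoidal encoding; `bsz` only controls tiling over the batch axis.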
def relative_positional_encoding(self, qlen, klen, bsz=None, dtype=None):
"""create relative positional encoding."""
freq_seq = tf.range(0, self.d_model, 2.0)
if dtype is not None and dtype != tf.float32:
freq_seq = tf.cast(freq_seq, dtype=dtype)
inv_freq = 1 / (10000 ** (freq_seq / self.d_model))
if self.attn_type == "bi":
# beg, end = klen - 1, -qlen
beg, end = klen, -qlen
elif self.attn_type == "uni":
# beg, end = klen - 1, -1
beg, end = klen, -1
else:
raise ValueError("Unknown `attn_type` {}.".format(self.attn_type))
if self.bi_data:
fwd_pos_seq = tf.range(beg, end, -1.0)
bwd_pos_seq = tf.range(-beg, -end, 1.0)
if dtype is not None and dtype != tf.float32:
fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype)
bwd_pos_seq = tf.cast(bwd_pos_seq, dtype=dtype)
if self.clamp_len > 0:
fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len, self.clamp_len)
bwd_pos_seq = tf.clip_by_value(bwd_pos_seq, -self.clamp_len, self.clamp_len)
if bsz is not None:
# With bi_data, the batch size should be divisible by 2.
assert bsz % 2 == 0
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
else:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
pos_emb = tf.concat([fwd_pos_emb, bwd_pos_emb], axis=1)
else:
fwd_pos_seq = tf.range(beg, end, -1.0)
if dtype is not None and dtype != tf.float32:
fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype)
if self.clamp_len > 0:
fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len, self.clamp_len)
pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
return pos_emb
def call(
self,
inputs,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
training=False,
):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
mems = inputs[2] if len(inputs) > 2 else mems
perm_mask = inputs[3] if len(inputs) > 3 else perm_mask
target_mapping = inputs[4] if len(inputs) > 4 else target_mapping
token_type_ids = inputs[5] if len(inputs) > 5 else token_type_ids
input_mask = inputs[6] if len(inputs) > 6 else input_mask
head_mask = inputs[7] if len(inputs) > 7 else head_mask
inputs_embeds = inputs[8] if len(inputs) > 8 else inputs_embeds
assert len(inputs) <= 9, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
attention_mask = inputs.get("attention_mask", attention_mask)
mems = inputs.get("mems", mems)
perm_mask = inputs.get("perm_mask", perm_mask)
target_mapping = inputs.get("target_mapping", target_mapping)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
input_mask = inputs.get("input_mask", input_mask)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
assert len(inputs) <= 9, "Too many inputs."
else:
input_ids = inputs
# the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
# but we want a unified interface in the library with the batch size on the first dimension
# so we move here the first dimension (batch) to the end
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_ids = tf.transpose(input_ids, perm=(1, 0))
qlen, bsz = shape_list(input_ids)[:2]
elif inputs_embeds is not None:
inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2))
qlen, bsz = shape_list(inputs_embeds)[:2]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
token_type_ids = tf.transpose(token_type_ids, perm=(1, 0)) if token_type_ids is not None else None
input_mask = tf.transpose(input_mask, perm=(1, 0)) if input_mask is not None else None
attention_mask = tf.transpose(attention_mask, perm=(1, 0)) if attention_mask is not None else None
perm_mask = tf.transpose(perm_mask, perm=(1, 2, 0)) if perm_mask is not None else None
target_mapping = tf.transpose(target_mapping, perm=(1, 2, 0)) if target_mapping is not None else None
mlen = shape_list(mems[0])[0] if mems is not None and mems[0] is not None else 0
klen = mlen + qlen
dtype_float = tf.bfloat16 if self.use_bfloat16 else tf.float32
# Attention mask
# causal attention mask
if self.attn_type == "uni":
attn_mask = self.create_mask(qlen, mlen)
attn_mask = attn_mask[:, :, None, None]
elif self.attn_type == "bi":
attn_mask = None
else:
raise ValueError("Unsupported attention type: {}".format(self.attn_type))
# data mask: input mask & perm mask
assert input_mask is None or attention_mask is None, (
"You can only use one of input_mask (uses 1 for padding) "
"or attention_mask (uses 0 for padding, added for compatbility with BERT). Please choose one."
)
if input_mask is None and attention_mask is not None:
input_mask = 1.0 - tf.cast(attention_mask, dtype=dtype_float)
if input_mask is not None and perm_mask is not None:
data_mask = input_mask[None] + perm_mask
elif input_mask is not None and perm_mask is None:
data_mask = input_mask[None]
elif input_mask is None and perm_mask is not None:
data_mask = perm_mask
else:
data_mask = None
if data_mask is not None:
# all mems can be attended to
mems_mask = tf.zeros([shape_list(data_mask)[0], mlen, bsz], dtype=dtype_float)
data_mask = tf.concat([mems_mask, data_mask], axis=1)
if attn_mask is None:
attn_mask = data_mask[:, :, :, None]
else:
attn_mask += data_mask[:, :, :, None]
if attn_mask is not None:
attn_mask = tf.cast(attn_mask > 0, dtype=dtype_float)
if attn_mask is not None:
non_tgt_mask = -tf.eye(qlen, dtype=dtype_float)
non_tgt_mask = tf.concat([tf.zeros([qlen, mlen], dtype=dtype_float), non_tgt_mask], axis=-1)
non_tgt_mask = tf.cast((attn_mask + non_tgt_mask[:, :, None, None]) > 0, dtype=dtype_float)
else:
non_tgt_mask = None
# Word embeddings and prepare h & g hidden states
if inputs_embeds is not None:
word_emb_k = inputs_embeds
else:
word_emb_k = self.word_embedding(input_ids)
output_h = self.dropout(word_emb_k, training=training)
if target_mapping is not None:
word_emb_q = tf.tile(self.mask_emb, [shape_list(target_mapping)[0], bsz, 1])
# else: # We removed the inp_q input which was same as target mapping
# inp_q_ext = inp_q[:, :, None]
# word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
output_g = self.dropout(word_emb_q, training=training)
else:
output_g = None
# Segment embedding
if token_type_ids is not None:
# Convert `token_type_ids` to one-hot `seg_mat`
mem_pad = tf.zeros([mlen, bsz], dtype=tf.int32)
cat_ids = tf.concat([mem_pad, token_type_ids], 0)
# `1` indicates not in the same segment [qlen x klen x bsz]
seg_mat = tf.cast(tf.logical_not(tf.equal(token_type_ids[:, None], cat_ids[None, :])), tf.int32)
seg_mat = tf.one_hot(seg_mat, 2, dtype=dtype_float)
else:
seg_mat = None
# Positional encoding
pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz, dtype=dtype_float)
pos_emb = self.dropout(pos_emb, training=training)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
# and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
if head_mask is not None:
            if len(shape_list(head_mask)) == 1:
                head_mask = head_mask[tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis, :]
                head_mask = tf.tile(head_mask, [self.n_layer, 1, 1, 1, 1])
            elif len(shape_list(head_mask)) == 2:
                head_mask = head_mask[:, tf.newaxis, tf.newaxis, tf.newaxis, :]
            head_mask = tf.cast(head_mask, dtype=dtype_float)  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.n_layer
new_mems = ()
if mems is None:
mems = [None] * len(self.layer)
attentions = []
hidden_states = []
for i, layer_module in enumerate(self.layer):
# cache new mems
if self.mem_len is not None and self.mem_len > 0 and self.output_past:
new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
if self.output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
outputs = layer_module(
[output_h, output_g, non_tgt_mask, attn_mask, pos_emb, seg_mat, mems[i], target_mapping, head_mask[i]],
training=training,
)
output_h, output_g = outputs[:2]
if self.output_attentions:
attentions.append(outputs[2])
# Add last hidden state
if self.output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
output = self.dropout(output_g if output_g is not None else output_h, training=training)
# Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
outputs = (tf.transpose(output, perm=(1, 0, 2)),)
if self.mem_len is not None and self.mem_len > 0 and self.output_past:
outputs = outputs + (new_mems,)
if self.output_hidden_states:
if output_g is not None:
hidden_states = tuple(tf.transpose(h, perm=(1, 0, 2)) for hs in hidden_states for h in hs)
else:
hidden_states = tuple(tf.transpose(hs, perm=(1, 0, 2)) for hs in hidden_states)
outputs = outputs + (hidden_states,)
if self.output_attentions:
attentions = tuple(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions)
outputs = outputs + (attentions,)
return outputs # outputs, (new_mems), (hidden_states), (attentions)
class TFXLNetPreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = XLNetConfig
pretrained_model_archive_map = TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "transformer"
XLNET_START_DOCSTRING = r"""
.. note::
TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using the :obj:`tf.keras.Model.fit()` method, which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
- a single Tensor with input_ids only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
XLNET_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.XLNetTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
given to this model should not be passed as input ids as they have already been computed.
perm_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
If ``perm_mask[k, i, j] = 0``, i attends to j in batch k;
if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
If None, each token attends to all the others (full bidirectional attention).
Only used during pretraining (to define factorization order) or for sequential decoding (generation).
target_mapping (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, num_predict, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the output tokens to use.
If ``target_mapping[k, i, j] = 1``, the i-th predict in batch k is on the j-th token.
Only used during pretraining for partial prediction or for sequential decoding (generation).
token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
input_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
Kept for compatibility with the original code base.
You can only use one of `input_mask` and `attention_mask`
Mask values selected in ``[0, 1]``:
``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED.
head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare XLNet Model transformer outputing raw hidden-states without any specific head on top.",
XLNET_START_DOCSTRING,
)
class TFXLNetModel(TFXLNetPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFXLNetMainLayer(config, name="transformer")
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
last_hidden_state (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import XLNetTokenizer, TFXLNetModel
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = TFXLNetModel.from_pretrained('xlnet-large-cased')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
outputs = self.transformer(inputs, **kwargs)
return outputs
@add_start_docstrings(
"""XLNet Model with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
XLNET_START_DOCSTRING,
)
class TFXLNetLMHeadModel(TFXLNetPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFXLNetMainLayer(config, name="transformer")
self.lm_loss = TFXLNetLMHead(config, self.transformer.word_embedding, name="lm_loss")
def get_output_embeddings(self):
return self.lm_loss.input_embeddings
def prepare_inputs_for_generation(self, inputs, past, **model_kwargs):
# Add dummy token at the end (no attention on this one)
effective_batch_size = inputs.shape[0]
dummy_token = tf.zeros((effective_batch_size, 1), dtype=tf.int32)
inputs = tf.concat([inputs, dummy_token], axis=1)
# Build permutation mask so that previous tokens don't see last token
sequence_length = inputs.shape[1]
perm_mask = tf.zeros((effective_batch_size, sequence_length, sequence_length - 1), dtype=tf.float32)
perm_mask_seq_end = tf.ones((effective_batch_size, sequence_length, 1), dtype=tf.float32)
perm_mask = tf.concat([perm_mask, perm_mask_seq_end], axis=-1)
# We'll only predict the last token
target_mapping = tf.zeros((effective_batch_size, 1, sequence_length - 1), dtype=tf.float32)
target_mapping_seq_end = tf.ones((effective_batch_size, 1, 1), dtype=tf.float32)
target_mapping = tf.concat([target_mapping, target_mapping_seq_end], axis=-1)
inputs = {"inputs": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping}
# if past is defined in model kwargs then use it for faster decoding
if past:
inputs["mems"] = past
return inputs
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
prediction_scores (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
import numpy as np
from transformers import XLNetTokenizer, TFXLNetLMHeadModel
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = TFXLNetLMHeadModel.from_pretrained('xlnet-large-cased')
# We show how to setup inputs to predict a next token using a bi-directional context.
input_ids = tf.constant(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=True))[None, :] # We will predict the masked token
perm_mask = np.zeros((1, input_ids.shape[1], input_ids.shape[1]))
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = np.zeros((1, 1, input_ids.shape[1])) # Shape [1, 1, seq_length] => let's predict one token
target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
outputs = model(input_ids, perm_mask=tf.constant(perm_mask, dtype=tf.float32), target_mapping=tf.constant(target_mapping, dtype=tf.float32))
next_token_logits = outputs[0] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
"""
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_state = transformer_outputs[0]
logits = self.lm_loss(hidden_state)
outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are in it
return outputs # return logits, (mems), (hidden states), (attentions)
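# Illustrative sketch (not part of the library): re-using the `mems` returned by a previous
# forward pass to speed up sequential decoding, assuming `output_past` is enabled and the
# model is then called with only the new token ids.
#
# outputs = model(input_ids)                    # first pass over the prompt
# logits, mems = outputs[0], outputs[1]         # cached hidden states come second
# outputs = model(next_token_ids, mems=mems)    # later passes re-use the cached states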
@add_start_docstrings(
"""XLNet Model with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
XLNET_START_DOCSTRING,
)
class TFXLNetForSequenceClassification(TFXLNetPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.transformer = TFXLNetMainLayer(config, name="transformer")
self.sequence_summary = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="sequence_summary"
)
self.logits_proj = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj"
)
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
logits (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import XLNetTokenizer, TFXLNetForSequenceClassification
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = TFXLNetForSequenceClassification.from_pretrained('xlnet-large-cased')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
logits = outputs[0]
"""
transformer_outputs = self.transformer(inputs, **kwargs)
output = transformer_outputs[0]
output = self.sequence_summary(output)
logits = self.logits_proj(output)
outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are in it
return outputs # return logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
XLNET_START_DOCSTRING,
)
class TFXLNetForTokenClassification(TFXLNetPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.transformer = TFXLNetMainLayer(config, name="transformer")
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
logits (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, config.num_labels)`):
Classification scores (before SoftMax).
mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import XLNetTokenizer, TFXLNetForTokenClassification
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = TFXLNetForTokenClassification.from_pretrained('xlnet-large-cased')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
scores = outputs[0]
"""
transformer_outputs = self.transformer(inputs, **kwargs)
output = transformer_outputs[0]
logits = self.classifier(output)
outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are in it
return outputs # return logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLNET_START_DOCSTRING,
)
class TFXLNetForQuestionAnsweringSimple(TFXLNetPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFXLNetMainLayer(config, name="transformer")
self.qa_outputs = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Returns:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_scores (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length,)`):
Span-start scores (before SoftMax).
end_scores (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length,)`):
Span-end scores (before SoftMax).
mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import XLNetTokenizer, TFXLNetForQuestionAnsweringSimple
tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
model = TFXLNetForQuestionAnsweringSimple.from_pretrained('xlnet-base-cased')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
start_scores, end_scores = outputs[:2]
"""
transformer_outputs = self.transformer(inputs, **kwargs)
sequence_output = transformer_outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
outputs = (start_logits, end_logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are in it
return outputs # start_logits, end_logits, (mems), (hidden_states), (attentions)
# @add_start_docstrings("""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
# the hidden-states output to compute `span start logits` and `span end logits`). """,
# XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
# class TFXLNetForQuestionAnswering(TFXLNetPreTrainedModel):
# r"""
# Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
# **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
# ``tf.Tensor`` of shape ``(batch_size, config.start_n_top)``
# Log probabilities for the top config.start_n_top start token possibilities (beam-search).
# **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
# ``tf.Tensor`` of shape ``(batch_size, config.start_n_top)``
# Indices for the top config.start_n_top start token possibilities (beam-search).
# **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
# ``tf.Tensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
# Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
# **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
# ``tf.Tensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
# Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
# **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
# ``tf.Tensor`` of shape ``(batch_size,)``
# Log probabilities for the ``is_impossible`` label of the answers.
# **mems**:
# list of ``tf.Tensor`` (one for each layer):
# that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
# if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
# See details in the docstring of the `mems` input above.
# **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
# list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
# of shape ``(batch_size, sequence_length, hidden_size)``:
# Hidden-states of the model at the output of each layer plus the initial embedding outputs.
# **attentions**: (`optional`, returned when ``config.output_attentions=True``)
# list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
# Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
# Examples::
# # For example purposes. Not runnable.
# tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
# model = XLMForQuestionAnswering.from_pretrained('xlnet-large-cased')
# input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
# start_positions = tf.constant([1])
# end_positions = tf.constant([3])
# outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
# loss, start_scores, end_scores = outputs[:2]
# """
# def __init__(self, config, *inputs, **kwargs):
# super().__init__(config, *inputs, **kwargs)
# self.start_n_top = config.start_n_top
# self.end_n_top = config.end_n_top
# self.transformer = TFXLNetMainLayer(config, name='transformer')
# self.start_logits = TFPoolerStartLogits(config, name='start_logits')
# self.end_logits = TFPoolerEndLogits(config, name='end_logits')
# self.answer_class = TFPoolerAnswerClass(config, name='answer_class')
# def call(self, inputs, training=False):
# transformer_outputs = self.transformer(inputs, training=training)
# hidden_states = transformer_outputs[0]
# start_logits = self.start_logits(hidden_states, p_mask=p_mask)
# outputs = transformer_outputs[1:] # Keep mems, hidden states, attentions if there are in it
# if start_positions is not None and end_positions is not None:
# # If we are on multi-GPU, let's remove the dimension added by batch splitting
# for x in (start_positions, end_positions, cls_index, is_impossible):
# if x is not None and x.dim() > 1:
# x.squeeze_(-1)
# # during training, compute the end logits based on the ground truth of the start position
# end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
# loss_fct = CrossEntropyLoss()
# start_loss = loss_fct(start_logits, start_positions)
# end_loss = loss_fct(end_logits, end_positions)
# total_loss = (start_loss + end_loss) / 2
# if cls_index is not None and is_impossible is not None:
# # Predict answerability from the representation of CLS and START
# cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
# loss_fct_cls = nn.BCEWithLogitsLoss()
# cls_loss = loss_fct_cls(cls_logits, is_impossible)
# # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
# total_loss += cls_loss * 0.5
# outputs = (total_loss,) + outputs
# else:
# # during inference, compute the end logits based on beam search
# bsz, slen, hsz = hidden_states.size()
# start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
# start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top)
# start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
# start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
# start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
# hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz)
# p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
# end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
# end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
# end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top)
# end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
# end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
# start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) # get the representation of START as weighted sum of hidden states
# cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) # Shape (batch size,): one single `cls_logits` for each sample
# outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
# # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
# # or (if labels are provided) (total_loss,)
# return outputs
| 60,495 | 48.024311 | 171 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_openai_original_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
import argparse
import logging
import torch
from transformers import CONFIG_NAME, WEIGHTS_NAME, OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
logging.basicConfig(level=logging.INFO)
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
# Construct model
if openai_config_file == "":
config = OpenAIGPTConfig()
else:
config = OpenAIGPTConfig.from_json_file(openai_config_file)
model = OpenAIGPTModel(config)
# Load weights from numpy
load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help="An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture.",
)
args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
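    # Example invocation (paths are placeholders, not real checkpoints):
    #   python convert_openai_original_tf_checkpoint_to_pytorch.py \
    #       --openai_checkpoint_folder_path ./openai_tf_checkpoint \
    #       --pytorch_dump_folder_path ./openai_pytorch_dump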
| 2,641 | 34.702703 | 118 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_xlm_roberta.py | # coding=utf-8
# Copyright 2019 Facebook AI Research and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch XLM-RoBERTa model. """
import logging
from .configuration_xlm_roberta import XLMRobertaConfig
from .file_utils import add_start_docstrings
from .modeling_roberta import (
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
logger = logging.getLogger(__name__)
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = {
"xlm-roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-base-pytorch_model.bin",
"xlm-roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-pytorch_model.bin",
"xlm-roberta-large-finetuned-conll02-dutch": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll02-dutch-pytorch_model.bin",
"xlm-roberta-large-finetuned-conll02-spanish": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll02-spanish-pytorch_model.bin",
"xlm-roberta-large-finetuned-conll03-english": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll03-english-pytorch_model.bin",
"xlm-roberta-large-finetuned-conll03-german": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll03-german-pytorch_model.bin",
}
XLM_ROBERTA_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.XLMRobertaConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
@add_start_docstrings(
"The bare XLM-RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
XLM_ROBERTA_START_DOCSTRING,
)
class XLMRobertaModel(RobertaModel):
"""
This class overrides :class:`~transformers.RobertaModel`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = XLMRobertaConfig
pretrained_model_archive_map = XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
"""XLM-RoBERTa Model with a `language modeling` head on top. """, XLM_ROBERTA_START_DOCSTRING,
)
class XLMRobertaForMaskedLM(RobertaForMaskedLM):
"""
This class overrides :class:`~transformers.RobertaForMaskedLM`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = XLMRobertaConfig
pretrained_model_archive_map = XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
"""XLM-RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer
on top of the pooled output) e.g. for GLUE tasks. """,
XLM_ROBERTA_START_DOCSTRING,
)
class XLMRobertaForSequenceClassification(RobertaForSequenceClassification):
"""
This class overrides :class:`~transformers.RobertaForSequenceClassification`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = XLMRobertaConfig
pretrained_model_archive_map = XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
"""XLM-RoBERTa Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
XLM_ROBERTA_START_DOCSTRING,
)
class XLMRobertaForMultipleChoice(RobertaForMultipleChoice):
"""
This class overrides :class:`~transformers.RobertaForMultipleChoice`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = XLMRobertaConfig
pretrained_model_archive_map = XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
"""XLM-RoBERTa Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
XLM_ROBERTA_START_DOCSTRING,
)
class XLMRobertaForTokenClassification(RobertaForTokenClassification):
"""
This class overrides :class:`~transformers.RobertaForTokenClassification`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = XLMRobertaConfig
pretrained_model_archive_map = XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
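# Illustrative usage sketch (not part of the library): all classes above keep the RoBERTa
# forward signature and only swap the config class and pretrained archive map, e.g.:
#
# import torch
# from transformers import XLMRobertaTokenizer, XLMRobertaModel
# tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
# model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
# input_ids = torch.tensor([tokenizer.encode("Hello world", add_special_tokens=True)])
# last_hidden_state = model(input_ids)[0]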
| 5,431 | 41.771654 | 167 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_camembert.py | # coding=utf-8
# Copyright 2019 Inria, Facebook AI Research and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch CamemBERT model. """
import logging
from .configuration_camembert import CamembertConfig
from .file_utils import add_start_docstrings
from .modeling_roberta import (
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
logger = logging.getLogger(__name__)
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"camembert-base": "https://s3.amazonaws.com/models.huggingface.co/bert/camembert-base-pytorch_model.bin",
"umberto-commoncrawl-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/Musixmatch/umberto-commoncrawl-cased-v1/pytorch_model.bin",
"umberto-wikipedia-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/Musixmatch/umberto-wikipedia-uncased-v1/pytorch_model.bin",
}
CAMEMBERT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.CamembertConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
@add_start_docstrings(
"The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.",
CAMEMBERT_START_DOCSTRING,
)
class CamembertModel(RobertaModel):
"""
This class overrides :class:`~transformers.RobertaModel`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = CamembertConfig
pretrained_model_archive_map = CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
"""CamemBERT Model with a `language modeling` head on top. """, CAMEMBERT_START_DOCSTRING,
)
class CamembertForMaskedLM(RobertaForMaskedLM):
"""
This class overrides :class:`~transformers.RobertaForMaskedLM`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = CamembertConfig
pretrained_model_archive_map = CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
"""CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer
on top of the pooled output) e.g. for GLUE tasks. """,
CAMEMBERT_START_DOCSTRING,
)
class CamembertForSequenceClassification(RobertaForSequenceClassification):
"""
This class overrides :class:`~transformers.RobertaForSequenceClassification`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = CamembertConfig
pretrained_model_archive_map = CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
"""CamemBERT Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
CAMEMBERT_START_DOCSTRING,
)
class CamembertForMultipleChoice(RobertaForMultipleChoice):
"""
This class overrides :class:`~transformers.RobertaForMultipleChoice`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = CamembertConfig
pretrained_model_archive_map = CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
"""CamemBERT Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
CAMEMBERT_START_DOCSTRING,
)
class CamembertForTokenClassification(RobertaForTokenClassification):
"""
This class overrides :class:`~transformers.RobertaForTokenClassification`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = CamembertConfig
pretrained_model_archive_map = CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
"""CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD
(linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
CAMEMBERT_START_DOCSTRING,
)
class CamembertForQuestionAnswering(RobertaForQuestionAnswering):
"""
This class overrides :class:`~transformers.RobertaForQuestionAnswering`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = CamembertConfig
pretrained_model_archive_map = CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
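# Illustrative usage sketch (not part of the library): the CamemBERT heads mirror their
# RoBERTa counterparts and are used the same way, e.g.:
#
# from transformers import CamembertTokenizer, CamembertForMaskedLM
# tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
# model = CamembertForMaskedLM.from_pretrained("camembert-base")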
| 5,589 | 39.215827 | 148 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_xlm_original_pytorch_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
import argparse
import json
import logging
import numpy
import torch
from transformers import CONFIG_NAME, WEIGHTS_NAME
from transformers.tokenization_xlm import VOCAB_FILES_NAMES
logging.basicConfig(level=logging.INFO)
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
# Load checkpoint
chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
state_dict = chkpt["model"]
# We have the base model one level deeper than the original XLM repository
two_levels_state_dict = {}
for k, v in state_dict.items():
if "pred_layer" in k:
two_levels_state_dict[k] = v
else:
two_levels_state_dict["transformer." + k] = v
config = chkpt["params"]
config = dict((n, v) for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray)))
vocab = chkpt["dico_word2id"]
vocab = dict((s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""), i) for s, i in vocab.items())
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(two_levels_state_dict, pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(json.dumps(config, indent=2) + "\n")
print("Save vocab file to {}".format(pytorch_config_dump_path))
with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
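    # Example invocation (paths are placeholders, not real checkpoints):
    #   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
    #       --xlm_checkpoint_path ./xlm_checkpoint.pth \
    #       --pytorch_dump_folder_path ./xlm_pytorch_dump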
| 2,970 | 36.1375 | 117 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py | import argparse
import os
import torch
from transformers.file_utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
d = torch.load(checkpoint_path)
d[NEW_KEY] = d.pop(OLD_KEY)
os.makedirs(pytorch_dump_folder_path, exist_ok=True)
torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
args = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path, pytorch_dump_folder_path,
)
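        # Example invocation (assuming small_ft.pkl / medium_ft.pkl / large_ft.pkl sit in the
        # current directory):
        #   python convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py --dialogpt_path .
        # Each checkpoint is rewritten to ./DialoGPT-<size>/pytorch_model.bin with
        # `lm_head.decoder.weight` renamed to `lm_head.weight`.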
| 923 | 27.875 | 85 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_xlm.py | # coding=utf-8
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 XLM model.
"""
import itertools
import logging
import math
import numpy as np
import tensorflow as tf
from .configuration_xlm import XLMConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, get_initializer, shape_list
logger = logging.getLogger(__name__)
TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP = {
"xlm-mlm-en-2048": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-tf_model.h5",
"xlm-mlm-ende-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-tf_model.h5",
"xlm-mlm-enfr-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-tf_model.h5",
"xlm-mlm-enro-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-tf_model.h5",
"xlm-mlm-tlm-xnli15-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-tf_model.h5",
"xlm-mlm-xnli15-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-tf_model.h5",
"xlm-clm-enfr-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-tf_model.h5",
"xlm-clm-ende-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-tf_model.h5",
"xlm-mlm-17-1280": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-tf_model.h5",
"xlm-mlm-100-1280": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-tf_model.h5",
}
def create_sinusoidal_embeddings(n_pos, dim, out):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
out[:, 0::2] = tf.constant(np.sin(position_enc[:, 0::2]))
out[:, 1::2] = tf.constant(np.cos(position_enc[:, 1::2]))
def gelu(x):
""" Gaussian Error Linear Unit.
Original Implementation of the gelu activation function in Google Bert repo when initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
return x * cdf
def get_masks(slen, lengths, causal, padding_mask=None, dtype=tf.float32):
"""
Generate hidden states mask, and optionally an attention mask.
"""
bs = shape_list(lengths)[0]
if padding_mask is not None:
mask = padding_mask
else:
# assert lengths.max().item() <= slen
alen = tf.range(slen)
mask = tf.math.less(alen, lengths[:, tf.newaxis])
# attention mask is the same as mask, or triangular inferior attention (causal)
if causal:
attn_mask = tf.less_equal(
tf.tile(alen[tf.newaxis, tf.newaxis, :], (bs, slen, 1)), alen[tf.newaxis, :, tf.newaxis]
)
else:
attn_mask = mask
# sanity check
# assert shape_list(mask) == [bs, slen]
tf.debugging.assert_equal(shape_list(mask), [bs, slen])
assert causal is False or shape_list(attn_mask) == [bs, slen, slen]
mask = tf.cast(mask, dtype=dtype)
attn_mask = tf.cast(attn_mask, dtype=dtype)
return mask, attn_mask
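# Illustrative sketch (not part of the library): expected shapes for a toy batch with
# lengths = tf.constant([3, 5]) and slen = 5.
#
# mask, attn_mask = get_masks(5, tf.constant([3, 5]), causal=False)
# # mask.shape      == (2, 5)     -> 1.0 for real tokens, 0.0 for padding
# # attn_mask.shape == (2, 5)     -> identical to `mask` when causal is False
# mask, attn_mask = get_masks(5, tf.constant([3, 5]), causal=True)
# # attn_mask.shape == (2, 5, 5)  -> lower-triangular (causal) pattern per sequence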
class TFMultiHeadAttention(tf.keras.layers.Layer):
NEW_ID = itertools.count()
def __init__(self, n_heads, dim, config, **kwargs):
super().__init__(**kwargs)
self.layer_id = next(TFMultiHeadAttention.NEW_ID)
self.output_attentions = config.output_attentions
self.dim = dim
self.n_heads = n_heads
assert self.dim % self.n_heads == 0
self.q_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="q_lin")
self.k_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="k_lin")
self.v_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="v_lin")
self.out_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="out_lin")
self.dropout = tf.keras.layers.Dropout(config.attention_dropout)
self.pruned_heads = set()
def prune_heads(self, heads):
raise NotImplementedError
def call(self, inputs, training=False):
"""
Self-attention (if kv is None) or attention over source sentence (provided by kv).
"""
input, mask, kv, cache, head_mask = inputs
# Input is (bs, qlen, dim)
# Mask is (bs, klen) (non-causal) or (bs, klen, klen)
bs, qlen, dim = shape_list(input)
if kv is None:
klen = qlen if cache is None else cache["slen"] + qlen
else:
klen = shape_list(kv)[1]
# assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
n_heads = self.n_heads
dim_per_head = self.dim // n_heads
mask_reshape = (bs, 1, qlen, klen) if len(shape_list(mask)) == 3 else (bs, 1, 1, klen)
def shape(x):
""" projection """
return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))
def unshape(x):
""" compute context """
return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))
q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)
if kv is None:
k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)
elif cache is None or self.layer_id not in cache:
k = v = kv
k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)
if cache is not None:
if self.layer_id in cache:
if kv is None:
k_, v_ = cache[self.layer_id]
k = tf.concat([k_, k], axis=2) # (bs, n_heads, klen, dim_per_head)
v = tf.concat([v_, v], axis=2) # (bs, n_heads, klen, dim_per_head)
else:
k, v = cache[self.layer_id]
cache[self.layer_id] = (k, v)
q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)
scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, qlen, klen)
mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen)
# scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen)
scores = scores - 1e30 * (1.0 - mask)
weights = tf.nn.softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)
weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
context = unshape(context) # (bs, qlen, dim)
outputs = (self.out_lin(context),)
if self.output_attentions:
outputs = outputs + (weights,)
return outputs
class TFTransformerFFN(tf.keras.layers.Layer):
def __init__(self, in_dim, dim_hidden, out_dim, config, **kwargs):
super().__init__(**kwargs)
self.lin1 = tf.keras.layers.Dense(dim_hidden, kernel_initializer=get_initializer(config.init_std), name="lin1")
self.lin2 = tf.keras.layers.Dense(out_dim, kernel_initializer=get_initializer(config.init_std), name="lin2")
self.act = tf.keras.layers.Activation(gelu) if config.gelu_activation else tf.keras.activations.relu
self.dropout = tf.keras.layers.Dropout(config.dropout)
def call(self, input, training=False):
x = self.lin1(input)
x = self.act(x)
x = self.lin2(x)
x = self.dropout(x, training=training)
return x
class TFXLMMainLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
# encoder / decoder, output layer
self.is_encoder = config.is_encoder
self.is_decoder = not config.is_encoder
if self.is_decoder:
raise NotImplementedError("Currently XLM can only be used as an encoder")
# self.with_output = with_output
self.causal = config.causal
# dictionary / languages
self.n_langs = config.n_langs
self.use_lang_emb = config.use_lang_emb
self.n_words = config.n_words
self.eos_index = config.eos_index
self.pad_index = config.pad_index
# self.dico = dico
# self.id2lang = config.id2lang
# self.lang2id = config.lang2id
# assert len(self.dico) == self.n_words
# assert len(self.id2lang) == len(self.lang2id) == self.n_langs
# model parameters
self.dim = config.emb_dim # 512 by default
self.hidden_dim = self.dim * 4 # 2048 by default
self.n_heads = config.n_heads # 8 by default
self.n_layers = config.n_layers
assert self.dim % self.n_heads == 0, "transformer dim must be a multiple of n_heads"
# embeddings
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.attention_dropout = tf.keras.layers.Dropout(config.attention_dropout)
self.position_embeddings = tf.keras.layers.Embedding(
config.max_position_embeddings,
self.dim,
embeddings_initializer=get_initializer(config.embed_init_std),
name="position_embeddings",
)
if config.sinusoidal_embeddings:
raise NotImplementedError
# create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight)
if config.n_langs > 1 and config.use_lang_emb:
self.lang_embeddings = tf.keras.layers.Embedding(
self.n_langs,
self.dim,
embeddings_initializer=get_initializer(config.embed_init_std),
name="lang_embeddings",
)
self.embeddings = TFSharedEmbeddings(
self.n_words, self.dim, initializer_range=config.embed_init_std, name="embeddings"
) # padding_idx=self.pad_index)
self.layer_norm_emb = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm_emb")
# transformer layers
self.attentions = []
self.layer_norm1 = []
self.ffns = []
self.layer_norm2 = []
# if self.is_decoder:
# self.layer_norm15 = []
# self.encoder_attn = []
for i in range(self.n_layers):
self.attentions.append(
TFMultiHeadAttention(self.n_heads, self.dim, config=config, name="attentions_._{}".format(i))
)
self.layer_norm1.append(
tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1_._{}".format(i))
)
# if self.is_decoder:
# self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
# self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
self.ffns.append(
TFTransformerFFN(self.dim, self.hidden_dim, self.dim, config=config, name="ffns_._{}".format(i))
)
self.layer_norm2.append(
tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2_._{}".format(i))
)
if hasattr(config, "pruned_heads"):
pruned_heads = config.pruned_heads.copy().items()
config.pruned_heads = {}
for layer, heads in pruned_heads:
if self.attentions[int(layer)].n_heads == config.n_heads:
self.prune_heads({int(layer): list(map(int, heads))})
def get_input_embeddings(self):
return self.embeddings
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
raise NotImplementedError
def call(
self,
inputs,
attention_mask=None,
langs=None,
token_type_ids=None,
position_ids=None,
lengths=None,
cache=None,
head_mask=None,
inputs_embeds=None,
training=False,
): # removed: src_enc=None, src_len=None
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
langs = inputs[2] if len(inputs) > 2 else langs
token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids
position_ids = inputs[4] if len(inputs) > 4 else position_ids
lengths = inputs[5] if len(inputs) > 5 else lengths
cache = inputs[6] if len(inputs) > 6 else cache
head_mask = inputs[7] if len(inputs) > 7 else head_mask
inputs_embeds = inputs[8] if len(inputs) > 8 else inputs_embeds
assert len(inputs) <= 9, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
attention_mask = inputs.get("attention_mask", attention_mask)
langs = inputs.get("langs", langs)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
lengths = inputs.get("lengths", lengths)
cache = inputs.get("cache", cache)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
assert len(inputs) <= 9, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
bs, slen = shape_list(input_ids)
elif inputs_embeds is not None:
bs, slen = shape_list(inputs_embeds)[:2]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if lengths is None:
if input_ids is not None:
lengths = tf.reduce_sum(tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=tf.int32), axis=1)
else:
lengths = tf.convert_to_tensor([slen] * bs, tf.int32)
# mask = input_ids != self.pad_index
# check inputs
# assert shape_list(lengths)[0] == bs
tf.debugging.assert_equal(shape_list(lengths)[0], bs)
# assert lengths.max().item() <= slen
# input_ids = input_ids.transpose(0, 1) # batch size as dimension 0
# assert (src_enc is None) == (src_len is None)
# if src_enc is not None:
# assert self.is_decoder
# assert src_enc.size(0) == bs
# generate masks
mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
# if self.is_decoder and src_enc is not None:
# src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
# position_ids
if position_ids is None:
position_ids = tf.expand_dims(tf.range(slen), axis=0)
else:
# assert shape_list(position_ids) == [bs, slen] # (slen, bs)
tf.debugging.assert_equal(shape_list(position_ids), [bs, slen])
# position_ids = position_ids.transpose(0, 1)
# langs
if langs is not None:
# assert shape_list(langs) == [bs, slen] # (slen, bs)
tf.debugging.assert_equal(shape_list(langs), [bs, slen])
# langs = langs.transpose(0, 1)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen]
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.n_layers
# do not recompute cached elements
if cache is not None and input_ids is not None:
_slen = slen - cache["slen"]
input_ids = input_ids[:, -_slen:]
position_ids = position_ids[:, -_slen:]
if langs is not None:
langs = langs[:, -_slen:]
mask = mask[:, -_slen:]
attn_mask = attn_mask[:, -_slen:]
# embeddings
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids)
tensor = inputs_embeds + self.position_embeddings(position_ids)
if langs is not None and self.use_lang_emb and self.n_langs > 1:
tensor = tensor + self.lang_embeddings(langs)
if token_type_ids is not None:
tensor = tensor + self.embeddings(token_type_ids)
tensor = self.layer_norm_emb(tensor)
tensor = self.dropout(tensor, training=training)
tensor = tensor * mask[..., tf.newaxis]
# transformer layers
hidden_states = ()
attentions = ()
for i in range(self.n_layers):
if self.output_hidden_states:
hidden_states = hidden_states + (tensor,)
# self attention
attn_outputs = self.attentions[i]([tensor, attn_mask, None, cache, head_mask[i]], training=training)
attn = attn_outputs[0]
if self.output_attentions:
attentions = attentions + (attn_outputs[1],)
attn = self.dropout(attn, training=training)
tensor = tensor + attn
tensor = self.layer_norm1[i](tensor)
# encoder attention (for decoder only)
# if self.is_decoder and src_enc is not None:
# attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
# attn = F.dropout(attn, p=self.dropout, training=self.training)
# tensor = tensor + attn
# tensor = self.layer_norm15[i](tensor)
# FFN
tensor = tensor + self.ffns[i](tensor)
tensor = self.layer_norm2[i](tensor)
tensor = tensor * mask[..., tf.newaxis]
# Add last hidden state
if self.output_hidden_states:
hidden_states = hidden_states + (tensor,)
# update cache length
if cache is not None:
cache["slen"] += tensor.size(1)
# move back sequence length to dimension 0
# tensor = tensor.transpose(0, 1)
outputs = (tensor,)
if self.output_hidden_states:
outputs = outputs + (hidden_states,)
if self.output_attentions:
outputs = outputs + (attentions,)
return outputs # outputs, (hidden_states), (attentions)
class TFXLMPreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = XLMConfig
pretrained_model_archive_map = TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "transformer"
@property
def dummy_inputs(self):
# Sometimes XLM has language embeddings so don't forget to build them as well if needed
inputs_list = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
attns_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
if self.config.use_lang_emb and self.config.n_langs > 1:
langs_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
else:
langs_list = None
return {"input_ids": inputs_list, "attention_mask": attns_list, "langs": langs_list}
XLM_START_DOCSTRING = r"""
.. note::
        TF 2.0 models accept two formats as inputs:
            - having all inputs as keyword arguments (like PyTorch models), or
            - having all inputs as a list, tuple or dict in the first positional argument.
        This second option is useful when using the :obj:`tf.keras.Model.fit()` method, which currently requires having
        all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
        If you choose this second option, there are three possibilities you can use to gather all the input Tensors
        in the first positional argument:
        - a single Tensor with input_ids only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.XLMConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
XLM_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
langs (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
A parallel sequence of tokens to be used to indicate the language of each token in the input.
Indices are languages ids which can be obtained from the language names by using two conversion mappings
provided in the configuration of the model (only provided for multilingual models).
More precisely, the `language name -> language id` mapping is in `model.config.lang2id` (dict str -> int) and
the `language id -> language name` mapping is `model.config.id2lang` (dict int -> str).
See usage examples detailed in the `multilingual documentation <https://huggingface.co/transformers/multilingual.html>`__.
token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
lengths (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Length of each sentence that can be used to avoid performing attention on padding token indices.
            You can also use `attention_mask` for the same result (see above), kept here for compatibility.
Indices selected in ``[0, ..., input_ids.size(-1)]``:
cache (:obj:`Dict[str, tf.Tensor]`, `optional`, defaults to :obj:`None`):
dictionary with ``tf.Tensor`` that contains pre-computed
hidden-states (key and values in the attention blocks) as computed by the model
(see `cache` output below). Can be used to speed up sequential decoding.
The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.
head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
        inputs_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare XLM Model transformer outputing raw hidden-states without any specific head on top.",
XLM_START_DOCSTRING,
)
class TFXLMModel(TFXLMPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFXLMMainLayer(config, name="transformer")
@add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:
last_hidden_state (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import XLMTokenizer, TFXLMModel
tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
model = TFXLMModel.from_pretrained('xlm-mlm-en-2048')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
outputs = self.transformer(inputs, **kwargs)
return outputs
class TFXLMPredLayer(tf.keras.layers.Layer):
"""
Prediction layer (cross_entropy or adaptive_softmax).
"""
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.asm = config.asm
self.n_words = config.n_words
self.pad_index = config.pad_index
if config.asm is False:
self.input_embeddings = input_embeddings
else:
raise NotImplementedError
# self.proj = nn.AdaptiveLogSoftmaxWithLoss(
# in_features=dim,
# n_classes=config.n_words,
# cutoffs=config.asm_cutoffs,
# div_value=config.asm_div_value,
# head_bias=True, # default is False
# )
def build(self, input_shape):
# The output weights are the same as the input embeddings, but there is an output-only bias for each token.
self.bias = self.add_weight(shape=(self.n_words,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def call(self, hidden_states):
hidden_states = self.input_embeddings(hidden_states, mode="linear")
hidden_states = hidden_states + self.bias
return hidden_states
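    # Shape sketch for the tied projection above (assuming TFSharedEmbeddings' "linear" mode
    # multiplies by the transposed token-embedding matrix, as it does elsewhere in this library):
    #   hidden_states: (batch, seq_len, dim) --embeddings(mode="linear")--> (batch, seq_len, n_words),
    #   then a learned per-token output bias of shape (n_words,) is added.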
@add_start_docstrings(
"""The XLM Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
XLM_START_DOCSTRING,
)
class TFXLMWithLMHeadModel(TFXLMPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFXLMMainLayer(config, name="transformer")
self.pred_layer = TFXLMPredLayer(config, self.transformer.embeddings, name="pred_layer_._proj")
def get_output_embeddings(self):
return self.pred_layer.input_embeddings
def prepare_inputs_for_generation(self, inputs, **kwargs):
mask_token_id = self.config.mask_token_id
lang_id = self.config.lang_id
effective_batch_size = inputs.shape[0]
mask_token = tf.ones((effective_batch_size, 1), dtype=tf.int32) * mask_token_id
inputs = tf.concat([inputs, mask_token], axis=1)
if lang_id is not None:
langs = tf.ones_like(inputs) * lang_id
else:
langs = None
return {"inputs": inputs, "langs": langs}
@add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:
prediction_scores (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import XLMTokenizer, TFXLMWithLMHeadModel
tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
model = TFXLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
transformer_outputs = self.transformer(inputs, **kwargs)
output = transformer_outputs[0]
outputs = self.pred_layer(output)
outputs = (outputs,) + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here
return outputs
@add_start_docstrings(
"""XLM Model with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
XLM_START_DOCSTRING,
)
class TFXLMForSequenceClassification(TFXLMPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.transformer = TFXLMMainLayer(config, name="transformer")
self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary")
@add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Returns:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:
logits (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import XLMTokenizer, TFXLMForSequenceClassification
tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
model = TFXLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
labels = tf.constant([1])[None, :] # Batch size 1
outputs = model(input_ids)
logits = outputs[0]
"""
transformer_outputs = self.transformer(inputs, **kwargs)
output = transformer_outputs[0]
logits = self.sequence_summary(output)
outputs = (logits,) + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here
return outputs
@add_start_docstrings(
"""XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLM_START_DOCSTRING,
)
class TFXLMForQuestionAnsweringSimple(TFXLMPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFXLMMainLayer(config, name="transformer")
self.qa_outputs = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.init_std), name="qa_outputs"
)
@add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Returns:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:
start_scores (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length,)`):
Span-start scores (before SoftMax).
end_scores (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length,)`):
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` or :obj:`Numpy array` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import XLMTokenizer, TFXLMForQuestionAnsweringSimple
tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
model = TFXLMForQuestionAnsweringSimple.from_pretrained('xlm-mlm-en-2048')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
start_scores, end_scores = outputs[:2]
"""
transformer_outputs = self.transformer(inputs, **kwargs)
sequence_output = transformer_outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
outputs = (start_logits, end_logits,) + transformer_outputs[
1:
        ]  # Keep mems, hidden states, attentions if they are present
return outputs # start_logits, end_logits, (hidden_states), (attentions)
| 39,576 | 46.798309 | 159 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_bart.py | # coding=utf-8
# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BART model, ported from the fairseq repo."""
import logging
import random
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from .activations import ACT2FN
from .configuration_bart import BartConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import PreTrainedModel, create_position_ids_from_input_ids
logger = logging.getLogger(__name__)
BART_PRETRAINED_MODEL_ARCHIVE_MAP = {
"bart-large": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large/pytorch_model.bin",
"bart-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-mnli/pytorch_model.bin",
"bart-large-cnn": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-cnn/pytorch_model.bin",
"bart-large-xsum": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-xsum/pytorch_model.bin",
}
BART_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matters related to general usage and behavior.
Parameters:
config (:class:`~transformers.BartConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
BART_GENERATION_EXAMPLE = r"""
Examples::
from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig
# see ``examples/summarization/bart/evaluate_cnn.py`` for a longer example
model = BartForConditionalGeneration.from_pretrained('bart-large-cnn')
tokenizer = BartTokenizer.from_pretrained('bart-large-cnn')
ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
inputs = tokenizer.batch_encode_plus([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
# Generate Summary
summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)
print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
"""
BART_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Use BartTokenizer.encode to produce them.
Padding will be ignored by default should you provide it.
Indices can be obtained using :class:`transformers.BartTokenizer.encode(text)`.
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices in input_ids.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`, defaults to :obj:`None`):
Tuple consists of (`last_hidden_state`, `optional`: `hidden_states`, `optional`: `attentions`)
`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`) is a sequence of hidden-states at the output of the last layer of the encoder.
Used in the cross-attention of the decoder.
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`, defaults to :obj:`None`):
Provide for translation and summarization training. By default, the model will create this tensor by shifting the input_ids right, following the paper.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`, defaults to :obj:`None`):
Default behavior: generate a tensor that ignores pad tokens in decoder_input_ids. Causal mask will also be used by default.
If you want to change padding behavior, you should read :func:`~transformers.modeling_bart._prepare_decoder_inputs` and modify.
See diagram 1 in the paper for more info on the default strategy
"""
def invert_mask(attention_mask):
assert attention_mask.dim() == 2
return attention_mask.eq(0)
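# Example (sketch): invert_mask turns a Hugging Face style attention_mask (1 = keep, 0 = padding)
# into the "True means padding" convention expected by the fairseq-style layers below:
#   [[1, 1, 1, 0]]  ->  [[False, False, False, True]]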
def _prepare_bart_decoder_inputs(
config, input_ids, decoder_input_ids=None, decoder_padding_mask=None, causal_mask_dtype=torch.float32
):
"""Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if
none are provided. This mimics the default behavior in fairseq. To override it pass in masks.
Note: this is not called during generation
"""
pad_token_id = config.pad_token_id
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(input_ids, pad_token_id)
bsz, tgt_len = decoder_input_ids.size()
if decoder_padding_mask is None:
decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id)
else:
decoder_padding_mask = invert_mask(decoder_padding_mask)
causal_mask = torch.triu(fill_with_neg_inf(torch.zeros(tgt_len, tgt_len)), 1).to(
dtype=causal_mask_dtype, device=decoder_input_ids.device
)
return decoder_input_ids, decoder_padding_mask, causal_mask
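# For illustration, the causal mask built above for tgt_len=4 is
# torch.triu(fill_with_neg_inf(torch.zeros(4, 4)), 1), i.e. 0 where attention is allowed
# and -inf strictly above the diagonal (future positions):
#   [[0., -inf, -inf, -inf],
#    [0.,   0., -inf, -inf],
#    [0.,   0.,   0., -inf],
#    [0.,   0.,   0.,   0.]]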
class PretrainedBartModel(PreTrainedModel):
config_class = BartConfig
base_model_prefix = "model"
pretrained_model_archive_map = BART_PRETRAINED_MODEL_ARCHIVE_MAP
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
}
return dummy_inputs
def _make_linear_from_emb(emb):
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
lin_layer.weight.data = emb.weight.data
return lin_layer
# Helper Functions, mostly for making masks
def _check_shapes(shape_1, shape2):
if shape_1 != shape2:
raise AssertionError("shape mismatch: {} != {}".format(shape_1, shape2))
def shift_tokens_right(input_ids, pad_token_id):
"""Shift input ids one token to the right, and wrap the last non pad token (usually <eos>)."""
prev_output_tokens = input_ids.clone()
index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
prev_output_tokens[:, 1:] = input_ids[:, :-1]
return prev_output_tokens
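# Worked example for shift_tokens_right, assuming pad_token_id=1 and an <eos> id of 2:
#   input_ids          = [[0, 31, 57, 2, 1]]
#   index_of_eos       = [[3]]                    # position of the last non-pad token
#   prev_output_tokens = [[2, 0, 31, 57, 2]]      # <eos> wrapped to the front, the rest shifted right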
def make_padding_mask(input_ids, padding_idx=1):
"""True for pad tokens"""
padding_mask = input_ids.eq(padding_idx)
if not padding_mask.any():
padding_mask = None
return padding_mask
# Helper Modules
class EncoderLayer(nn.Module):
def __init__(self, config: BartConfig):
super().__init__()
self.embed_dim = config.d_model
self.output_attentions = config.output_attentions
self.self_attn = SelfAttention(
self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout,
)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
def forward(self, x, encoder_padding_mask):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``;
                positions marked ``1`` are excluded (masked out) from attention,
                while positions marked ``0`` are attended to.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
x, attn_weights = self.self_attn(
query=x, key=x, key_padding_mask=encoder_padding_mask, need_weights=self.output_attentions
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.final_layer_norm(x)
return x, attn_weights
class BartEncoder(nn.Module):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer
is a :class:`EncoderLayer`.
Args:
config: BartConfig
"""
def __init__(self, config: BartConfig, embed_tokens):
super().__init__()
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = config.max_position_embeddings
self.embed_tokens = embed_tokens
self.embed_positions = LearnedPositionalEmbedding(config.max_position_embeddings, embed_dim, self.padding_idx,)
self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = LayerNorm(embed_dim)
def forward(
self, input_ids, attention_mask=None,
):
"""
Args:
input_ids (LongTensor): tokens in the source language of shape
`(batch, src_len)`
attention_mask (torch.LongTensor): indicating which indices are padding tokens.
Returns:
Tuple comprised of:
- **x** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *self.output_hidden_states:* is True.
- **all_attentions** (List[Tensor]): Attention weights for each layer.
During training might not be of length n_layers because of layer dropout.
"""
# check attention mask and invert
if attention_mask is not None:
attention_mask = invert_mask(attention_mask)
inputs_embeds = self.embed_tokens(input_ids)
embed_pos = self.embed_positions(input_ids)
x = inputs_embeds + embed_pos
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states, all_attentions = [], []
for encoder_layer in self.layers:
if self.output_hidden_states:
encoder_states.append(x)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
attn = None
else:
x, attn = encoder_layer(x, attention_mask)
if self.output_attentions:
all_attentions.append(attn)
if self.output_hidden_states:
encoder_states.append(x)
# T x B x C -> B x T x C
encoder_states = [hidden_state.transpose(0, 1) for hidden_state in encoder_states]
x = x.transpose(0, 1)
return x, encoder_states, all_attentions
class DecoderLayer(nn.Module):
def __init__(self, config: BartConfig):
super().__init__()
self.embed_dim = config.d_model
self.output_attentions = config.output_attentions
self.self_attn = SelfAttention(
embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.encoder_attn = SelfAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
encoder_decoder_attention=True,
)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
def forward(
self,
x,
encoder_hidden_states,
encoder_attn_mask=None,
layer_state=None,
causal_mask=None,
decoder_padding_mask=None,
):
residual = x
if layer_state is None:
layer_state = {}
# next line mutates layer state
x, self_attn_weights = self.self_attn(
query=x,
key=x,
layer_state=layer_state,
key_padding_mask=decoder_padding_mask,
attn_mask=causal_mask,
need_weights=self.output_attentions,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
assert self.encoder_attn.cache_key != self.self_attn.cache_key
x, _ = self.encoder_attn(
query=x,
key=encoder_hidden_states,
key_padding_mask=encoder_attn_mask,
layer_state=layer_state, # mutates layer state
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.encoder_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.final_layer_norm(x)
return (
x,
self_attn_weights,
layer_state,
) # just self_attn weights for now, following t5, layer_state = cache for decoding
class BartDecoder(nn.Module):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer
is a :class:`DecoderLayer`.
Args:
config: BartConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: BartConfig, embed_tokens: nn.Embedding):
super().__init__()
self.output_past = config.output_past
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = config.max_position_embeddings
self.embed_tokens = embed_tokens
self.embed_positions = LearnedPositionalEmbedding(
config.max_position_embeddings, config.d_model, self.padding_idx,
)
self.layers = nn.ModuleList(
[DecoderLayer(config) for _ in range(config.decoder_layers)]
) # type: List[DecoderLayer]
self.layernorm_embedding = LayerNorm(config.d_model)
def forward(
self,
input_ids,
encoder_hidden_states,
encoder_padding_mask,
decoder_padding_mask,
decoder_causal_mask,
decoder_cached_states=None,
generation_mode=False,
**unused
):
"""
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
input_ids (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_hidden_states: output from the encoder, used for
encoder-side attention
encoder_padding_mask: for ignoring pad tokens
decoder_cached_states (dict or None): dictionary used for storing state during generation
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- hidden states
- attentions
"""
# check attention mask and invert
if encoder_padding_mask is not None:
encoder_padding_mask = invert_mask(encoder_padding_mask)
# embed positions
positions = self.embed_positions(input_ids, generation_mode=generation_mode)
if generation_mode:
input_ids = input_ids[:, -1:]
positions = positions[:, -1:] # happens after we embed them
assert input_ids.ne(self.padding_idx).any()
x = self.embed_tokens(input_ids)
x += positions
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
        # Convert to the layout used inside BART: (BS, seq_len, model_dim) -> (seq_len, BS, model_dim)
x = x.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
# decoder layers
all_hidden_states = ()
all_self_attns = ()
next_decoder_cache = []
for i, decoder_layer in enumerate(self.layers):
decoder_layer # type: DecoderLayer
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
layer_state = decoder_cached_states[i] if decoder_cached_states is not None else None
x, layer_self_attn, layer_past = decoder_layer(
x,
encoder_hidden_states,
encoder_attn_mask=encoder_padding_mask,
decoder_padding_mask=decoder_padding_mask,
layer_state=layer_state,
causal_mask=decoder_causal_mask,
)
if self.output_past:
next_decoder_cache.append(layer_past.copy())
if self.output_hidden_states:
all_hidden_states += (x,)
if self.output_attentions:
all_self_attns += (layer_self_attn,)
        # Convert back to the standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
all_hidden_states = [hidden_state.transpose(0, 1) for hidden_state in all_hidden_states]
x = x.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
if self.output_past:
next_cache = ((encoder_hidden_states, encoder_padding_mask), next_decoder_cache)
else:
next_cache = None
return x, next_cache, all_hidden_states, list(all_self_attns)
def _reorder_buffer(attn_cache, new_order):
for k, input_buffer_k in attn_cache.items():
if input_buffer_k is not None:
attn_cache[k] = input_buffer_k.index_select(0, new_order)
return attn_cache
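# Sketch of how _reorder_buffer is used during beam search (beam_idx values are hypothetical):
#   new_order = torch.tensor([2, 2, 0])   # hypotheses kept at this step come from beams 2, 2 and 0
#   every cached "prev_key" / "prev_value" of shape (bsz, num_heads, seq_len, head_dim) is then
#   re-indexed along dim 0 so the attention cache stays aligned with the surviving hypotheses.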
class SelfAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
encoder_decoder_attention=False, # otherwise self_attention
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.encoder_decoder_attention = encoder_decoder_attention
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self"
def _shape(self, tensor, dim_0, bsz):
return tensor.contiguous().view(dim_0, bsz * self.num_heads, self.head_dim).transpose(0, 1)
def forward(
self,
query,
key: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
layer_state: Optional[Dict[str, Optional[Tensor]]] = None,
attn_mask: Optional[Tensor] = None,
need_weights=False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time(SeqLen) x Batch x Channel"""
static_kv = self.encoder_decoder_attention # type: bool
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
# get here for encoder decoder cause of static_kv
if layer_state is not None: # reuse k,v and encoder_padding_mask
saved_state = layer_state.get(self.cache_key, {})
if "prev_key" in saved_state:
# previous time steps are cached - no need to recompute key and value if they are static
if static_kv:
key = None
else:
saved_state = None
layer_state = {}
q = self.q_proj(query) * self.scaling
if static_kv:
if key is None:
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
k = self.k_proj(query)
v = self.v_proj(query)
q = self._shape(q, tgt_len, bsz)
if k is not None:
k = self._shape(k, -1, bsz)
if v is not None:
v = self._shape(v, -1, bsz)
if saved_state is not None:
k, v, key_padding_mask = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz)
# Update cache
layer_state[self.cache_key] = {
"prev_key": k.view(bsz, self.num_heads, -1, self.head_dim),
"prev_value": v.view(bsz, self.num_heads, -1, self.head_dim),
"prev_key_padding_mask": key_padding_mask if not static_kv else None,
}
assert k is not None
src_len = k.size(1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len)
if attn_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
# This is part of a workaround to get around fork/join parallelism not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
assert key_padding_mask is None or key_padding_mask.size()[:2] == (bsz, src_len,)
if key_padding_mask is not None: # don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2)
attn_weights = attn_weights.masked_fill(reshaped, float("-inf"))
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training,)
assert v is not None
attn_output = torch.bmm(attn_probs, v)
assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = self.out_proj(attn_output)
if need_weights:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
else:
attn_weights = None
return attn_output, attn_weights
def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz):
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
assert k is not None and v is not None
prev_key_padding_mask = saved_state.get("prev_key_padding_mask", None) # type: Optional[Tensor]
key_padding_mask = self._cat_prev_key_padding_mask(
key_padding_mask, prev_key_padding_mask, bsz, k.size(1), static_kv
)
return k, v, key_padding_mask
@staticmethod
def _cat_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None:
if static_kv:
new_key_padding_mask = prev_key_padding_mask
else:
new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1)
elif key_padding_mask is not None:
filler = torch.zeros(
batch_size,
src_len - key_padding_mask.size(1),
dtype=key_padding_mask.dtype,
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat([filler, key_padding_mask], dim=1)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
class BartClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
# This can trivially be shared with RobertaClassificationHead
def __init__(
self, input_dim, inner_dim, num_classes, pooler_dropout,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, x):
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(
self, num_embeddings: int, embedding_dim: int, padding_idx: int,
):
# if padding_idx is specified then offset the embedding ids by
# this index and adjust num_embeddings appropriately
assert padding_idx is not None
        # position ids produced by create_position_ids_from_input_ids run from padding_idx + 1
        # up to padding_idx + seq_len, so the table needs padding_idx + 1 extra rows
        num_embeddings += padding_idx + 1
super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx)
def forward(self, input, generation_mode=False):
"""Input is expected to be of size [bsz x seqlen]."""
if generation_mode: # the position is our current step in the decoded sequence
pos = int(self.padding_idx + input.size(1))
positions = input.data.new(1, 1).fill_(pos)
else:
positions = create_position_ids_from_input_ids(input, self.padding_idx)
return super().forward(positions)
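    # Worked example (assuming create_position_ids_from_input_ids follows the RoBERTa convention
    # of numbering non-pad positions from padding_idx + 1 and giving pad positions padding_idx):
    #   padding_idx = 1, input = [[5, 6, 7, 1]]  ->  positions = [[2, 3, 4, 1]]
    # which is why the embedding table above is enlarged by padding_idx + 1 rows.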
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True):
if torch.cuda.is_available():
try:
from apex.normalization import FusedLayerNorm
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
except ImportError:
pass
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a input_ids with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
def _filter_out_falsey_values(tup) -> Tuple:
"""Remove entries that are None or [] from an iterable."""
return tuple(x for x in tup if isinstance(x, torch.Tensor) or x)
# Public API
def _get_shape(t):
return getattr(t, "shape", None)
@add_start_docstrings(
"The bare BART Model outputting raw hidden-states without any specific head on top.", BART_START_DOCSTRING,
)
class BartModel(PretrainedBartModel):
def __init__(self, config: BartConfig):
super().__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = BartEncoder(config, self.shared)
self.decoder = BartDecoder(config, self.shared)
self.init_weights()
@add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)
def forward(
self,
input_ids,
attention_mask=None,
decoder_input_ids=None,
encoder_outputs=None, # type: Tuple
decoder_attention_mask=None,
decoder_cached_states=None,
generation_mode=False,
):
# make masks if user doesn't supply
if not generation_mode:
decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_bart_decoder_inputs(
self.config,
input_ids,
decoder_input_ids=decoder_input_ids,
decoder_padding_mask=decoder_attention_mask,
causal_mask_dtype=self.shared.weight.dtype,
)
else:
decoder_padding_mask, causal_mask = None, None
assert decoder_input_ids is not None
if encoder_outputs is None:
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
assert isinstance(encoder_outputs, tuple)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
decoder_input_ids,
encoder_outputs[0],
attention_mask,
decoder_padding_mask,
decoder_causal_mask=causal_mask,
decoder_cached_states=decoder_cached_states,
generation_mode=generation_mode,
)
# Attention and hidden_states will be [] or None if they aren't needed
decoder_outputs = _filter_out_falsey_values(decoder_outputs) # type: tuple
assert isinstance(decoder_outputs[0], torch.Tensor)
encoder_outputs = _filter_out_falsey_values(encoder_outputs) # type: tuple
return decoder_outputs + encoder_outputs
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_output_embeddings(self):
return _make_linear_from_emb(self.shared) # make it on the fly
@add_start_docstrings(
"The BART Model with a language modeling head. Can be used for summarization.",
BART_START_DOCSTRING + BART_GENERATION_EXAMPLE,
)
class BartForConditionalGeneration(PretrainedBartModel):
base_model_prefix = "model"
def __init__(self, config: BartConfig):
super().__init__(config)
base_model = BartModel(config)
self.model = base_model
@add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)
def forward(
self,
input_ids,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
decoder_cached_states=None,
lm_labels=None,
generation_mode=False,
**unused
):
r"""
        lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the masked language modeling loss.
            Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring).
            Tokens with indices set to ``-100`` are ignored (masked); the loss is only computed for tokens
            with labels in ``[0, ..., config.vocab_size]``.
Returns:
            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BartConfig`) and inputs:
masked_lm_loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
# Mask filling only works for bart-large
from transformers import BartTokenizer, BartForConditionalGeneration
tokenizer = BartTokenizer.from_pretrained('bart-large')
TXT = "My friends are <mask> but they eat too many carbs."
model = BartForConditionalGeneration.from_pretrained('bart-large')
input_ids = tokenizer.batch_encode_plus([TXT], return_tensors='pt')['input_ids']
logits = model(input_ids)[0]
masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
probs = logits[0, masked_index].softmax(dim=0)
values, predictions = probs.topk(5)
tokenizer.decode(predictions).split()
# ['good', 'great', 'all', 'really', 'very']
"""
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
decoder_cached_states=decoder_cached_states,
generation_mode=generation_mode,
)
lm_logits = F.linear(outputs[0], self.model.shared.weight)
outputs = (lm_logits,) + outputs[1:] # Add hidden states and attention if they are here
if lm_labels is not None:
loss_fct = nn.CrossEntropyLoss()
# TODO(SS): do we need to ignore pad tokens in lm_labels?
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs
def prepare_inputs_for_generation(self, decoder_input_ids, past, attention_mask, **kwargs):
assert past is not None, "past has to be defined for encoder_outputs"
# first step, decoder_cached_states are empty
if not past[1]:
encoder_outputs, decoder_cached_states = past, None
else:
encoder_outputs, decoder_cached_states = past
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"decoder_cached_states": decoder_cached_states,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"generation_mode": True,
}
def prepare_scores_for_generation(self, scores, cur_len, max_length):
if cur_len == 1:
self._force_token_ids_generation(scores, self.config.bos_token_id)
if cur_len == max_length - 1 and self.config.eos_token_id is not None:
self._force_token_ids_generation(scores, self.config.eos_token_id)
return scores
@staticmethod
def _reorder_cache(past, beam_idx):
((enc_out, enc_mask), decoder_cached_states) = past
reordered_past = []
for layer_past in decoder_cached_states:
# get the correct batch idx from decoder layer's batch dim for cross and self-attn
layer_past_new = {
attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()
}
reordered_past.append(layer_past_new)
new_enc_out = enc_out if enc_out is None else enc_out.index_select(0, beam_idx)
new_enc_mask = enc_mask if enc_mask is None else enc_mask.index_select(0, beam_idx)
past = ((new_enc_out, new_enc_mask), reordered_past)
return past
def get_encoder(self):
return self.model.encoder
def get_output_embeddings(self):
return _make_linear_from_emb(self.model.shared) # make it on the fly
@add_start_docstrings(
"""Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """,
BART_START_DOCSTRING,
)
class BartForSequenceClassification(PretrainedBartModel):
def __init__(self, config: BartConfig, **kwargs):
super().__init__(config, **kwargs)
self.model = BartModel(config)
self.classification_head = BartClassificationHead(
config.d_model, config.d_model, config.num_labels, config.classif_dropout,
)
self.model._init_weights(self.classification_head.dense)
self.model._init_weights(self.classification_head.out_proj)
@add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)
def forward(
self,
input_ids,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BartConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification loss (cross entropy)
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the
            self-attention heads.
Examples::
from transformers import BartTokenizer, BartForSequenceClassification
import torch
tokenizer = BartTokenizer.from_pretrained('bart-large')
model = BartForSequenceClassification.from_pretrained('bart-large')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute",
add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
)
x = outputs[0] # last hidden state
eos_mask = input_ids.eq(self.config.eos_token_id)
if len(torch.unique(eos_mask.sum(1))) > 1:
raise ValueError("All examples must have the same number of <eos> tokens.")
sentence_representation = x[eos_mask, :].view(x.size(0), -1, x.size(-1))[:, -1, :]
logits = self.classification_head(sentence_representation)
# Prepend logits
outputs = (logits,) + outputs[1:] # Add hidden states and attention if they are here
if labels is not None: # prepend loss to output,
loss = F.cross_entropy(logits.view(-1, self.config.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs
| 44,150 | 41.657971 | 207 | py |