repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
CPFN | CPFN-master/PointNet2/pointnet2_ops/modules/pointset_feature_propagation.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .geometry_utils import three_nn, three_weighted_sum
class PointsetFeaturePropagation(nn.Module):
    """
    Propagate features from an abstracted point set back to the original point set,
    analogous to upsampling followed by 1x1 convolutions on an image grid.
    """
    def __init__(self, dim_feats, mlp):
        # dim_feats: channel count of the (possibly concatenated) input features.
        # mlp: sequence of output channel sizes, one per Conv1d+BatchNorm1d layer.
        super(PointsetFeaturePropagation, self).__init__()
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        in_channel = dim_feats
        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv1d(in_channel, out_channel, 1))
            self.mlp_bns.append(nn.BatchNorm1d(out_channel))
            in_channel = out_channel
    def forward(self, pos1, pos2, feats1, feats2, fast=True):
        """
        Run PointSetFeaturePropagation.
        Args:
            pos1: input point set position data, [B, C, N]
            pos2: abstracted point set position data, [B, C, S]
            feats1: input point set feature data, [B, D, N]
            feats2: abstracted point set feature data, [B, D, S]
            fast: use the custom CUDA kernels (requires C == 3)
        Returns:
            new_feats: upsampled point set feature data, [B, D', N]
        """
        B, _, N = pos1.shape
        if pos2 is None:
            # No abstracted positions: broadcast the single feature column to all N points.
            interpolated_feats = feats2.repeat(1, 1, N)
        else:
            S = pos2.shape[2]
            # get 3 nearest neighbors for interpolation
            nn_dists, nn_indices = three_nn(point_pos=pos2, query_pos=pos1, fast=fast)
            # get interpolation weights: inverse distance, normalized over the 3 neighbors
            nn_dists_recip = 1.0 / (nn_dists + 1e-8)
            norm = torch.sum(nn_dists_recip, dim=2, keepdim=True)
            nn_weights = nn_dists_recip / norm
            # interpolate features of 3 nearest neighbors
            interpolated_feats = three_weighted_sum(point_feats=feats2, indices=nn_indices, weights=nn_weights, fast=fast)
        if feats1 is not None:
            # Skip-link: concatenate original per-point features with the upsampled ones.
            new_feats = torch.cat([feats1, interpolated_feats], dim=1)
        else:
            new_feats = interpolated_feats
        # Shared per-point MLP (1x1 convs + BN + ReLU).
        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            new_feats = F.relu(bn(conv(new_feats)))
        return new_feats | 2,234 | 41.980769 | 122 | py |
CPFN | CPFN-master/PointNet2/pointnet2_ops/modules/pointset_abstraction.py | from collections.abc import Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from .geometry_utils import farthest_point_sample, select_point_subset, ball_query
class PointsetAbstraction(nn.Module):
    """
    Abstract a point set (possibly with features) into a smaller point set,
    analogous to a strided convolution on an image grid.
    """
    def __init__(self, num_points, dim_pos, dim_feats, radius_list, num_samples_list, mlp_list, group_all=False):
        # num_points: number of centroids S kept by farthest-point sampling.
        # dim_pos / dim_feats: channel counts of positions and features.
        # radius_list / num_samples_list / mlp_list: one entry per grouping scale
        # (multi-scale grouping); scalars are promoted to one-element lists.
        # group_all: abstract the entire cloud into a single group (no sampling).
        super(PointsetAbstraction, self).__init__()
        self.num_points = num_points
        self.group_all = group_all
        self.radius_list = radius_list if isinstance(radius_list, Sequence) else [radius_list]
        self.num_samples_list = num_samples_list if isinstance(num_samples_list, Sequence) else [num_samples_list]
        self.mlp_list = mlp_list if isinstance(mlp_list[0], Sequence) else [mlp_list]
        if len(self.radius_list) != len(self.num_samples_list) or len(self.radius_list) != len(self.mlp_list):
            raise ValueError('Radius, number of samples and mlps lists must have the same number of entries.')
        self.conv_blocks = nn.ModuleList()
        self.bn_blocks = nn.ModuleList()
        for i in range(len(self.mlp_list)):
            convs = nn.ModuleList()
            bns = nn.ModuleList()
            in_channel = dim_pos + dim_feats
            for out_channel in self.mlp_list[i]:
                convs.append(nn.Conv2d(in_channel, out_channel, 1))
                bns.append(nn.BatchNorm2d(out_channel))
                in_channel = out_channel
            self.conv_blocks.append(convs)
            self.bn_blocks.append(bns)
    def forward(self, pos, feats, fast=True):
        """
        Args:
            pos: input point set position data, [B, C, N]
            feats: input point set feature data, [B, D, N]
            fast: use the custom CUDA kernels (requires C == 3)
        Returns:
            new_pos: abstracted point set position data, [B, C, S]
            new_feats: abstracted point set feature data, [B, D', S]
        """
        B, C, N = pos.shape
        S = self.num_points
        if self.group_all:
            subsampling_indices = None
            new_pos = None
        else:
            # Farthest-point sample S centroids, then gather their positions.
            subsampling_indices = farthest_point_sample(pos, S, fast=fast)
            new_pos = select_point_subset(pos, subsampling_indices)
        new_feats_list = []
        for i, r in enumerate(self.radius_list):
            if self.group_all:
                grouped_pos = pos.view(B, C, 1, N)
                if feats is not None:
                    grouped_feats = torch.cat([grouped_pos, feats.view(B, -1, 1, N)], dim=1)
                else:
                    grouped_feats = grouped_pos
            else:
                K = self.num_samples_list[i]
                group_idx = ball_query(r, K, pos, new_pos, fast=fast)
                grouped_pos = select_point_subset(pos, group_idx)
                # Translate each local neighborhood to its centroid's frame (in place).
                grouped_pos -= new_pos.view(B, C, S, 1)
                if feats is not None:
                    grouped_feats = select_point_subset(feats, group_idx)
                    grouped_feats = torch.cat([grouped_feats, grouped_pos], dim=1)
                else:
                    grouped_feats = grouped_pos
            # grouped_feats = grouped_feats.permute(0, 3, 2, 1) # [B, D, K, S]
            for j in range(len(self.conv_blocks[i])):
                conv = self.conv_blocks[i][j]
                bn = self.bn_blocks[i][j]
                grouped_feats = F.relu(bn(conv(grouped_feats.contiguous()))) # grouped_feats: [B, D, S, K]
            # Max-pool over the K neighbors of each centroid.
            new_feats = torch.max(grouped_feats, dim=3)[0] # new_feats: [B, D', S]
            new_feats_list.append(new_feats)
        # Concatenate multi-scale features along the channel axis.
        new_feats = torch.cat(new_feats_list, dim=1)
        return new_pos, new_feats | 3,755 | 47.779221 | 114 | py |
CPFN | CPFN-master/PointNet2/pointnet2_ops/modules/geometry_utils.py | import torch
from .. import cuda_ops
def pairwise_squared_distance(src, dst):
    """
    Squared Euclidean distance between every pair of points in src and dst.
    Uses the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, which avoids
    materialising the full [B, N, M, C] difference tensor.
    Args:
        src: source points, [B, C, N]
        dst: target points, [B, C, M]
    Output:
        dist: per-point square distance, [B, N, M]
    """
    B, _, N = src.shape
    M = dst.shape[2]
    # Inner products between all source/target pairs: [B, N, M].
    cross = torch.matmul(src.transpose(1, 2), dst)
    # Squared norms, broadcast over the opposite axis.
    src_sq = torch.sum(src ** 2, dim=1).view(B, N, 1)
    dst_sq = torch.sum(dst ** 2, dim=1).view(B, 1, M)
    return src_sq + dst_sq - 2 * cross
def select_point_subset(points, idx):
    """
    Select a different subset of points in each batch (same count, but
    different indices per batch). If the indices have extra dimensions per
    batch, the returned point tensor mirrors the index shape.
    Args:
        points: input points data, [B, C, N]
        idx: sample index data, [B]+[*] (* may be any number of dimensions)
    Returns:
        new_points: indexed points data, [B, C]+[*]
    """
    num_batches = points.shape[0]
    # Batch index tensor shaped/broadcast like idx: view to [B, 1, ...] then tile.
    view_dims = [num_batches] + [1] * (idx.dim() - 1)
    tile_dims = [1] + list(idx.shape[1:])
    batch_idx = torch.arange(num_batches, dtype=idx.dtype, device=idx.device)
    batch_idx = batch_idx.view(view_dims).repeat(tile_dims)
    # Advanced indexing yields [B, *idx.shape[1:], C]; move channels back to axis 1.
    gathered = points[batch_idx, :, idx]
    return gathered.permute(0, -1, *range(1, points.dim() + idx.dim() - 3))
class _FastFarthestPointSample(torch.autograd.Function):
    @staticmethod
    def forward(ctx, xyz, npoint):
        # type: (Any, torch.Tensor, int) -> torch.Tensor
        r"""
        Uses iterative farthest point sampling to select a set of npoint features that have the largest
        minimum distance
        Parameters
        ----------
        xyz : torch.Tensor
            [B, N, 3] tensor where N > npoint
        npoint : int32
            number of features in the sampled set
        Returns
        -------
        torch.Tensor
            [B, num_point] tensor containing the set
        Based on: https://github.com/erikwijmans/Pointnet2_PyTorch
        """
        return cuda_ops.farthest_point_sampling(xyz, npoint)
    @staticmethod
    def backward(xyz, a=None):
        # NOTE(review): the first positional argument receives the autograd ctx
        # despite being named 'xyz'. Sampling returns indices, so it is
        # non-differentiable and no gradients are propagated.
        return None, None
_fast_farthest_point_sample = _FastFarthestPointSample.apply
def farthest_point_sample(point_pos, num_point, fast=True):
    """
    Args:
        point_pos: pointcloud data, [B, C, N]
        num_point: number of samples
        fast: use faster version with custom CUDA kernel (only works with C==3)
    Returns:
        farthest_indices: sampled pointcloud index, [B, num_point]
    """
    if fast:
        if point_pos.shape[1] != 3:
            raise ValueError('Points must have exactly three position dimensions when using the fast method.')
        # Kernel expects [B, N, 3]; cast result back to long for indexing.
        return _fast_farthest_point_sample(point_pos.permute(0, 2, 1).contiguous(), num_point).to(dtype=torch.long)
    else:
        device = point_pos.device
        B, C, N = point_pos.shape
        farthest_indices = torch.zeros(B, num_point, dtype=torch.long).to(device)
        # distance[b, n]: squared distance from point n to its nearest selected centroid so far.
        distance = torch.ones(B, N).to(device) * 1e10
        # NOTE: the first centroid is chosen at random (unseeded here), so the
        # slow path is not deterministic across calls.
        farthest_index = torch.randint(0, N, (B,), dtype=torch.long).to(device)
        batch_indices = torch.arange(B, dtype=torch.long).to(device)
        for i in range(num_point):
            farthest_indices[:, i] = farthest_index
            far_pos = point_pos[batch_indices, :, farthest_index].view(B, C, 1)
            dist = torch.sum((point_pos - far_pos) ** 2, dim=1)
            # Keep the minimum distance to any selected centroid.
            mask = dist < distance
            distance[mask] = dist[mask]
            # Next centroid: the point farthest from all current centroids.
            farthest_index = torch.max(distance, -1)[1]
        return farthest_indices
class _FastBallQuery(torch.autograd.Function):
    @staticmethod
    def forward(ctx, radius, num_samples, point_pos, query_pos):
        # type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""
        Parameters
        ----------
        radius : float
            radius of the balls
        num_samples : int
            maximum number of features in the balls
        point_pos : torch.Tensor
            [B, N, 3] xyz coordinates of the features
        query_pos : torch.Tensor
            [B, S, 3] centers of the ball query
        Returns
        -------
        torch.Tensor
            [B, S, num_samples] tensor with the indicies of the features that form the query balls
        """
        return cuda_ops.ball_query(query_pos, point_pos, radius, num_samples)
    @staticmethod
    def backward(ctx, a=None):
        # Ball query returns indices; it is non-differentiable.
        return None, None, None, None
_fast_ball_query = _FastBallQuery.apply
def ball_query(radius, num_samples, point_pos, query_pos, fast=True):
    """
    For each query point, return the smaller set of: all points within radius,
    or the num_samples nearest neighbors. Balls with fewer than num_samples
    members are padded by repeating the first in-ball index.
    Args:
        radius: local region radius
        num_samples: max sample number in local region
        point_pos: all points, [B, C, N]
        query_pos: query points, [B, C, S]
        fast: use faster version with custom CUDA kernel (only works with C==3)
    Returns:
        group_indices: grouped point indices, [B, S, num_samples]
    """
    if fast:
        if point_pos.shape[1] != 3:
            raise ValueError('Points must have exactly three position dimensions when using the fast method.')
        points_bn3 = point_pos.permute(0, 2, 1).contiguous()
        queries_bs3 = query_pos.permute(0, 2, 1).contiguous()
        return _fast_ball_query(radius, num_samples, points_bn3, queries_bs3).to(dtype=torch.long)
    device = point_pos.device
    B, _, N = point_pos.shape
    S = query_pos.shape[2]
    # Every point starts as a candidate for every query location.
    candidates = torch.arange(N, dtype=torch.long).to(device)
    candidates = candidates.view(1, 1, N).repeat([B, S, 1])
    sq_dists = pairwise_squared_distance(query_pos, point_pos)
    # Out-of-ball points get sentinel index N so they sort to the end.
    candidates[sq_dists > radius ** 2] = N
    candidates = candidates.sort(dim=-1)[0][:, :, :num_samples]
    # Replace sentinel entries with each ball's first valid index.
    first_in_ball = candidates[:, :, 0].view(B, S, 1).repeat([1, 1, num_samples])
    sentinel_mask = candidates == N
    candidates[sentinel_mask] = first_in_ball[sentinel_mask]
    return candidates
class _FastThreeNN(torch.autograd.Function):
    @staticmethod
    def forward(ctx, unknown, known):
        # type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Find the three nearest neighbors of unknown in known
        Parameters
        ----------
        unknown : torch.Tensor
            [B, S, 3] tensor of known features
        known : torch.Tensor
            [B, N, 3] tensor of unknown features
        Returns
        -------
        dist : torch.Tensor
            [B, S, 3] l2 distance to the three nearest neighbors
        idx : torch.Tensor
            [B, S, 3] index of 3 nearest neighbors
        """
        # The kernel returns squared distances; the sqrt converts them to
        # actual (non-squared) L2 distances.
        dist2, idx = cuda_ops.three_nn(unknown, known)
        return torch.sqrt(dist2), idx
    @staticmethod
    def backward(ctx, a=None, b=None):
        # Nearest-neighbor search is non-differentiable.
        return None, None
_fast_three_nn = _FastThreeNN.apply
def three_nn(point_pos, query_pos, fast=True):
    """
    Return the three nearest neighbors for each of the query points.
    Args:
        point_pos: all points, [B, C, N]
        query_pos: query points, [B, C, S]
        fast: use faster version with custom CUDA kernel (only works with C==3)
    Returns:
        dists: squared euclidean distances, [B, S, 3]
        indices: indices of the nearest neighbors, [B, S, 3]
    """
    # NOTE(review): the two paths disagree — the fast path returns sqrt'ed
    # (actual L2) distances via _fast_three_nn, while the slow path below
    # returns squared distances as documented. Confirm which is intended;
    # downstream inverse-distance weights differ between the two paths.
    if fast:
        if point_pos.shape[1] != 3:
            raise ValueError('Points must have exactly three position dimensions when using the fast method.')
        dists, indices = _fast_three_nn(
            query_pos.permute(0, 2, 1).contiguous(),
            point_pos.permute(0, 2, 1).contiguous())
        indices = indices.to(dtype=torch.long)
        return dists, indices
    else:
        # Sort all pairwise squared distances and keep the closest three.
        dists = pairwise_squared_distance(query_pos, point_pos)
        dists, indices = dists.sort(dim=-1)
        dists, indices = dists[:, :, :3], indices[:, :, :3]
        return dists, indices
class _FastThreeWeightedSum(torch.autograd.Function):
    @staticmethod
    def forward(ctx, features, idx, weight):
        # type(Any, torch.Tensor, torch.Tensor, torch.Tensor) -> Torch.Tensor
        r"""
        Performs weight linear interpolation on 3 features
        Parameters
        ----------
        features : torch.Tensor
            [B, C, N] Features descriptors to be interpolated from
        idx : torch.Tensor
            [B, S, 3] three nearest neighbors of the target features in features
        weight : torch.Tensor
            [B, S, 3] weights
        Returns
        -------
        torch.Tensor
            [B, C, S] tensor of the interpolated features
        """
        _, _, N = features.size()
        # S = idx.size(1)
        # Stash what backward needs: indices, weights, and source length N.
        ctx.three_weighted_sum_for_backward = (idx, weight, N)
        return cuda_ops.three_weighted_sum(features, idx.int(), weight)
    @staticmethod
    def backward(ctx, grad_out):
        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        grad_out : torch.Tensor
            (B, C, S) tensor with gradients of outputs
        Returns
        -------
        grad_features : torch.Tensor
            (B, C, N) tensor with gradients of features
        None
        None
        """
        idx, weight, N = ctx.three_weighted_sum_for_backward
        grad_features = cuda_ops.three_weighted_sum_grad(
            grad_out.contiguous(), idx.int(), weight, N
        )
        return grad_features, None, None
_fast_three_weighted_sum = _FastThreeWeightedSum.apply
def three_weighted_sum(point_feats, indices, weights, fast=True):
    """
    Weighted sum over three neighbor features per query point (linear
    interpolation of the three nearest neighbors).
    Args:
        point_feats: all points, [B, C, N]
        indices: indices of the points to be summed, [B, S, 3]
        weights: weights of the points to be summed, [B, S, 3]
        fast: use faster version with custom CUDA kernel
    Returns:
        weighted sum of each triple [B, C, S]
    """
    if fast:
        return _fast_three_weighted_sum(point_feats, indices, weights)
    num_batches, num_queries, num_neighbors = indices.shape
    # Gather neighbor features to [B, C, S, 3], scale, and reduce over the triple.
    neighbor_feats = select_point_subset(point_feats, indices)
    scaled = neighbor_feats * weights.view(num_batches, 1, num_queries, num_neighbors)
    return scaled.sum(dim=-1)
| 10,626 | 36.419014 | 135 | py |
pdf2image | pdf2image-master/docs/conf.py | #
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "pdf2image"
copyright = "2022, Edouard Belval"
author = "Edouard Belval"
# The short X.Y version
version = "1.16.1"
# The full version, including alpha/beta/rc tags
release = "latest"
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.mathjax",
    "sphinx.ext.viewcode",
    "sphinx.ext.autodoc",
    "sphinx.ext.coverage",
    "recommonmark",
    "sphinx_rtd_theme",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "pdf2image"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "pdf2image.tex",
        "pdf2image Documentation",
        "Edouard Belval",
        "manual",
    ),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pdf2image", "pdf2image Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "pdf2image",
        "pdf2image Documentation",
        author,
        "pdf2image",
        "One line description of project.",
        "Miscellaneous",
    ),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# Markdown sources are parsed with recommonmark's CommonMark parser.
from recommonmark.parser import CommonMarkParser
source_parsers = {
    ".md": CommonMarkParser,
}
# NOTE(review): source_suffix is reassigned here with the same value already
# set in the General configuration section above.
source_suffix = [".rst", ".md"]
| 5,595 | 27.697436 | 79 | py |
learning-to-quantize | learning-to-quantize-master/args.py | import argparse
import yaml
import os
import torch
import utils
def add_args():
    """Build the argparse parser and parse the command line.

    Most options default to ``argparse.SUPPRESS`` so that flags absent from
    the command line are missing from the resulting namespace and therefore
    do not override values coming from the YAML option files when merged in
    ``get_opt``.

    Returns:
        argparse.Namespace with the parsed (non-suppressed) options.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch NUQSGD')
    # options overwritting yaml options
    parser.add_argument('--path_opt', default='default.yaml',
                        type=str, help='path to a yaml options file')
    parser.add_argument('--data', default=argparse.SUPPRESS,
                        type=str, help='path to data')
    parser.add_argument('--logger_name', default='runs/runX')
    parser.add_argument('--dataset', default='mnist', help='mnist|cifar10')
    # options that can be changed from default
    parser.add_argument('--batch_size', type=int, default=argparse.SUPPRESS,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test_batch_size',
                        type=int, default=argparse.SUPPRESS, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=argparse.SUPPRESS,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=argparse.SUPPRESS,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=argparse.SUPPRESS,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no_cuda', action='store_true',
                        default=argparse.SUPPRESS,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=argparse.SUPPRESS,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log_interval', type=int, default=argparse.SUPPRESS,
                        metavar='N',
                        help='how many batches to wait before logging training'
                        ' status')
    parser.add_argument('--tblog_interval',
                        type=int, default=argparse.SUPPRESS)
    parser.add_argument('--optim', default=argparse.SUPPRESS, help='sgd|dmom')
    parser.add_argument('--arch', '-a', metavar='ARCH',
                        default=argparse.SUPPRESS,
                        help='model architecture: (default: resnet32)')
    parser.add_argument('-j', '--workers', default=argparse.SUPPRESS,
                        type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--weight_decay', '--wd', default=argparse.SUPPRESS,
                        type=float,
                        metavar='W', help='weight decay (default: 5e-4)')
    parser.add_argument('--train_accuracy', action='store_true',
                        default=argparse.SUPPRESS)
    parser.add_argument('--log_profiler', action='store_true')
    parser.add_argument('--lr_decay_epoch',
                        default=argparse.SUPPRESS)
    parser.add_argument('--log_keys', default='')
    parser.add_argument('--exp_lr',
                        default=argparse.SUPPRESS, action='store_true')
    parser.add_argument('--nodropout',
                        default=argparse.SUPPRESS, action='store_true')
    parser.add_argument('--data_aug',
                        default=argparse.SUPPRESS, action='store_true')
    parser.add_argument('--noresume', action='store_true',
                        help='do not resume from checkpoint')
    parser.add_argument('--pretrained',
                        default=argparse.SUPPRESS, action='store_true')
    parser.add_argument('--num_class',
                        default=argparse.SUPPRESS, type=int)
    parser.add_argument('--lr_decay_rate',
                        default=argparse.SUPPRESS, type=float)
    parser.add_argument('--nesterov',
                        default=argparse.SUPPRESS, action='store_true')
    parser.add_argument('--run_dir', default='runs/runX')
    parser.add_argument('--ckpt_name', default='checkpoint.pth.tar')
    parser.add_argument('--g_estim', default=argparse.SUPPRESS, type=str)
    parser.add_argument('--epoch_iters',
                        default=argparse.SUPPRESS, type=int)
    parser.add_argument('--gvar_log_iter',
                        default=argparse.SUPPRESS, type=int)
    parser.add_argument('--gvar_estim_iter',
                        default=argparse.SUPPRESS, type=int)
    parser.add_argument('--gvar_start',
                        default=argparse.SUPPRESS, type=int)
    parser.add_argument('--g_optim',
                        default=argparse.SUPPRESS, action='store_true')
    parser.add_argument('--g_optim_start',
                        default=argparse.SUPPRESS, type=int)
    parser.add_argument('--g_osnap_iter',
                        default='100,1000,10000', type=str)
    parser.add_argument('--g_bsnap_iter',
                        default=argparse.SUPPRESS, type=int)
    parser.add_argument('--g_epoch',
                        default=argparse.SUPPRESS, action='store_true')
    parser.add_argument('--niters',
                        default=argparse.SUPPRESS, type=int)
    parser.add_argument('--no_batch_norm',
                        default=argparse.SUPPRESS, type=bool)
    # NUQ
    parser.add_argument('--nuq_method', default='q', help='q|nuq|qinf')
    parser.add_argument('--nuq_bits', default=4, type=int)
    parser.add_argument('--nuq_bucket_size', default=1024, type=int)
    parser.add_argument('--nuq_ngpu', default=1, type=int)
    parser.add_argument('--nuq_mul', default=0.5, type=float)
    parser.add_argument('--nuq_amq_lr',
                        default=0.7, type=float)
    parser.add_argument('--nuq_amq_epochs',
                        default=50, type=int)
    parser.add_argument('--untrain_steps', default=0, type=int)
    parser.add_argument('--untrain_lr', default=0.001, type=float)
    parser.add_argument('--untrain_std', default=0.001, type=float)
    parser.add_argument('--nuq_sym', default=False, action='store_true')
    parser.add_argument('--nuq_inv', default=False, action='store_true')
    parser.add_argument('--nuq_parallel', default='no', help='no|gpu1|ngpu')
    parser.add_argument('--dist_num', default=20, type=int)
    parser.add_argument('--chkpt_iter', default=20, type=int)
    parser.add_argument('--nuq_number_of_samples',
                        default=argparse.SUPPRESS,
                        type=int,
                        help='NUQ Number of Samples')
    parser.add_argument('--nuq_ig_sm_bkts',
                        action='store_true',
                        help='NUQ Ignore Small Buckets')
    parser.add_argument('--nuq_truncated_interval',
                        default=argparse.SUPPRESS,
                        type=float,
                        help='NUQ Truncated Interval')
    parser.add_argument('--nuq_cd_epochs', default=argparse.SUPPRESS,
                        help='NUQ Adaptive CD Epochs', type=int)
    parser.add_argument('--nuq_layer', action='store_true',
                        help='NUQ Enable Network Wide Quantization')
    args = parser.parse_args()
    return args
def opt_to_nuq_kwargs(opt):
    """Map command-line options onto the keyword arguments of the NUQ
    quantization estimator.

    Args:
        opt: options object exposing the ``nuq_*`` attributes and logger_name.
    Returns:
        dict of estimator keyword arguments.
    """
    kwargs = dict(
        ngpu=opt.nuq_ngpu,
        bits=opt.nuq_bits,
        bucket_size=opt.nuq_bucket_size,
        method=opt.nuq_method,
        multiplier=opt.nuq_mul,
        cd_epochs=opt.nuq_cd_epochs,
        number_of_samples=opt.nuq_number_of_samples,
        path=opt.logger_name,
        symmetric=opt.nuq_sym,
        interval=opt.nuq_truncated_interval,
        amq_epochs=opt.nuq_amq_epochs,
        # NOTE(review): no --nuq_learning_rate flag is defined in add_args;
        # through utils.DictWrapper this attribute reads as None — confirm.
        learning_rate=opt.nuq_learning_rate,
        amq_lr=opt.nuq_amq_lr,
        ig_sm_bkts=opt.nuq_ig_sm_bkts,
        inv=opt.nuq_inv,
    )
    return kwargs
def yaml_opt(yaml_path):
    """Load a YAML options file and return its contents as parsed by PyYAML
    (a dict for mapping-style files; None for an empty file)."""
    with open(yaml_path, 'r') as handle:
        return yaml.load(handle, Loader=yaml.FullLoader)
def get_opt():
    """Build the merged options object.

    Precedence, lowest to highest: options/default.yaml, the dataset-specific
    YAML file named by --path_opt, then explicit command-line flags (absent
    flags are SUPPRESSed by add_args and so never override the YAML values).
    Returns:
        utils.DictWrapper over the merged dict (missing keys read as None).
    """
    args = add_args()
    opt = yaml_opt('options/default.yaml')
    opt_s = yaml_opt(os.path.join('options/{}/{}'.format(args.dataset,
                                                         args.path_opt)))
    opt.update(opt_s)
    opt.update(vars(args).items())
    opt = utils.DictWrapper(opt)
    opt.cuda = not opt.no_cuda and torch.cuda.is_available()
    # -1 means: use the training batch size for gradient-estimation batches.
    if opt.g_batch_size == -1:
        opt.g_batch_size = opt.batch_size
    return opt
| 8,495 | 47 | 81 | py |
learning-to-quantize | learning-to-quantize-master/utils.py | import shutil
import torch
import numpy as np
class DictWrapper(object):
    """Attribute-style view over a plain dict.

    Attribute access returns the dict entry when present and None otherwise,
    so option lookups never raise for missing keys.
    """
    def __init__(self, d):
        self.d = d
    def __getattr__(self, key):
        # Only consulted for names not found through normal lookup,
        # so 'd' itself resolves via the instance dict.
        try:
            return self.d[key]
        except KeyError:
            return None
class SaveCheckpoint(object):
    """Stateful checkpoint saver that tracks the best top-1 precision seen."""
    def __init__(self):
        # remember best prec@1 and save checkpoint
        self.best_prec1 = 0
    def __call__(self, model, prec1, opt, optimizer,
                 filename='checkpoint.pth.tar', gvar=None):
        # Save the current training state under opt.logger_name; additionally
        # copy it to model_best.pth.tar whenever prec1 improves on the best.
        is_best = prec1 > self.best_prec1
        self.best_prec1 = max(prec1, self.best_prec1)
        state = {
            'epoch': optimizer.epoch,
            'niters': optimizer.niters,
            'opt': opt.d,
            'model': model.state_dict(),
            'best_prec1': self.best_prec1,
        }
        if gvar is not None:
            # Optionally persist the gradient-variance estimator state too.
            state.update({'gvar': gvar.state_dict()})
        torch.save(state, opt.logger_name+'/'+filename)
        if is_best:
            shutil.copyfile(opt.logger_name+'/'+filename,
                            opt.logger_name+'/model_best.pth.tar')
def base_lr(optimizer, opt):
    """Return the base learning rate from the options (optimizer is unused;
    kept for signature compatibility with the schedule helpers)."""
    return opt.lr
def adjust_lr(optimizer, opt):
    """Dispatch to the single- or multi-milestone LR schedule.

    The schedule position is measured in iterations when opt.niters > 0,
    otherwise in epochs (niters // epoch_iters).
    """
    if opt.niters > 0:
        niters = optimizer.niters
    else:
        niters = optimizer.niters//opt.epoch_iters
    # A comma-separated milestone string selects the multi-milestone schedule.
    if isinstance(opt.lr_decay_epoch, str):
        adjust_learning_rate_multi(optimizer, niters, opt)
    else:
        adjust_learning_rate(optimizer, niters, opt)
def adjust_learning_rate(optimizer, epoch, opt):
    """ Sets the learning rate to the initial LR decayed by 10 """
    if opt.exp_lr:
        """ test
        A=np.arange(200);
        np.round(np.power(.1, np.power(2., A/80.)-1), 6)[[0,80,120,160]]
        test """
        # Exponential schedule: effective decay count grows as 2^(epoch/T) - 1.
        last_epoch = 2. ** (float(epoch) / int(opt.lr_decay_epoch)) - 1
    else:
        # Step schedule: one decade of decay every lr_decay_epoch epochs.
        last_epoch = epoch // int(opt.lr_decay_epoch)
    lr = base_lr(optimizer, opt) * (0.1 ** last_epoch)
    print(lr)  # debug trace of the current learning rate
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def adjust_learning_rate_multi(optimizer, epoch, opt):
    """Sets the learning rate to the initial LR decayed by 10"""
    # Milestones arrive as a comma-separated string, e.g. "100,150".
    lr_decay_epoch = np.array(list(map(int, opt.lr_decay_epoch.split(','))))
    if len(lr_decay_epoch) == 1:
        return adjust_learning_rate(optimizer, epoch, opt)
    el = (epoch // lr_decay_epoch)
    ei = np.where(el > 0)[0]
    if len(ei) == 0:
        ei = [0]
    print(el)
    print(ei)
    # lr = opt.lr * (opt.lr_decay_rate ** (ei[-1] + el[ei[-1]]))
    # Exponent equals the number of milestones passed so far: ei[-1] is the
    # index of the last passed milestone, (el[...] > 0) adds one when any
    # milestone has actually been passed.
    lr = base_lr(optimizer, opt) * (
        opt.lr_decay_rate ** (ei[-1]+(el[ei[-1]] > 0)))
    print(lr)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: model scores/logits, [batch, num_classes]
        target: ground-truth class indices, [batch]
        topk: tuple of k values to report precision for
    Returns:
        list of 1-element tensors, precision@k in percent, one per k
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape instead of view: `correct` derives from a transposed tensor
        # and may be non-contiguous, in which case view(-1) raises on recent
        # PyTorch versions.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
| 3,161 | 28.551402 | 76 | py |
learning-to-quantize | learning-to-quantize-master/data.py | import torch
from torchvision import datasets, transforms
import torch.utils.data as data
import numpy as np
import os
def get_loaders(opt):
    """Return (train, test, train-eval) data loaders for opt.dataset.

    Dispatches on the dataset name: any name starting with 'imagenet' uses
    the imagenet loaders, and any name containing 'class' uses the synthetic
    logreg loaders.

    Raises:
        ValueError: if the dataset name matches none of the known cases
            (previously this fell through and silently returned None).
    """
    if opt.dataset == 'mnist':
        return get_mnist_loaders(opt)
    elif opt.dataset == 'cifar10':
        return get_cifar10_loaders(opt)
    elif opt.dataset == 'cifar100':
        return get_cifar100_loaders(opt)
    elif opt.dataset == 'svhn':
        return get_svhn_loaders(opt)
    elif opt.dataset.startswith('imagenet'):
        return get_imagenet_loaders(opt)
    elif opt.dataset == 'logreg':
        return get_logreg_loaders(opt)
    elif 'class' in opt.dataset:
        return get_logreg_loaders(opt)
    raise ValueError('Unknown dataset: {}'.format(opt.dataset))
def dataset_to_loaders(train_dataset, test_dataset, opt):
    """Wrap datasets in IndexedDataset and build the three standard loaders.

    Returns:
        train_loader: shuffled training loader (drops the last partial batch)
        test_loader: sequential loader over the test set
        train_test_loader: sequential loader over the training set, for
            evaluating metrics on training data
    """
    # Workers/pinning only help when transferring batches to a CUDA device.
    kwargs = {'num_workers': opt.workers,
              'pin_memory': True} if opt.cuda else {}
    idxdataset = IndexedDataset(train_dataset, opt, train=True)
    train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        idxdataset,
        batch_size=opt.batch_size,
        sampler=train_sampler,
        shuffle=(train_sampler is None),
        drop_last=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        IndexedDataset(test_dataset, opt),
        batch_size=opt.test_batch_size, shuffle=False,
        **kwargs)
    train_test_loader = torch.utils.data.DataLoader(
        IndexedDataset(train_dataset, opt, train=True),
        batch_size=opt.test_batch_size, shuffle=False,
        **kwargs)
    return train_loader, test_loader, train_test_loader
def get_minvar_loader(train_loader, opt):
    """Build a second loader over the same training data with batch size
    opt.g_batch_size (used for gradient-estimation passes)."""
    kwargs = {'num_workers': opt.workers,
              'pin_memory': True} if opt.cuda else {}
    # Reuse the already-wrapped IndexedDataset from the training loader.
    idxdataset = train_loader.dataset
    train_loader = torch.utils.data.DataLoader(
        idxdataset,
        batch_size=opt.g_batch_size,
        shuffle=True,
        drop_last=False, **kwargs)
    return train_loader
class IndexedDataset(data.Dataset):
    """Dataset wrapper that returns the sample index alongside each item."""

    def __init__(self, dataset, opt, train=False):
        # Seed kept from the original implementation for reproducibility of
        # any numpy-based sampling that follows dataset construction.
        np.random.seed(2222)
        self.ds = dataset
        self.opt = opt

    def __getitem__(self, index):
        img, target = self.ds[index]
        return img, target, index

    def __len__(self):
        return len(self.ds)
def get_mnist_loaders(opt, **kwargs):
    """MNIST loaders; normalization can be disabled via opt.no_transform."""
    transform = transforms.ToTensor()
    if not opt.no_transform:
        transform = transforms.Compose([
            transforms.ToTensor(),
            # Channel mean/std of the MNIST training set.
            transforms.Normalize((0.1307,), (0.3081,))
        ])
    train_dataset = datasets.MNIST(
        opt.data, train=True, download=True, transform=transform)
    test_dataset = datasets.MNIST(opt.data, train=False, transform=transform)
    return dataset_to_loaders(train_dataset, test_dataset, opt, **kwargs)
def get_cifar10_100_transform(opt):
    """Return (normalize, train-transform list) shared by CIFAR-10/100."""
    # CIFAR-10 training-set channel statistics.
    normalize = transforms.Normalize(mean=(0.4914, 0.4822, 0.4465),
                                     std=(0.2023, 0.1994, 0.2010))
    if opt.data_aug:
        # Heavier augmentation: random affine + color jitter.
        transform = [
            transforms.RandomAffine(10, (.1, .1), (0.7, 1.2), 10),
            transforms.ColorJitter(.2, .2, .2),
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32),
            transforms.ToTensor(),
            normalize,
        ]
    else:
        # Standard CIFAR augmentation: padded random crop + horizontal flip.
        transform = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    return normalize, transform
def get_cifar10_loaders(opt):
    """Return (train, test, train-eval) loaders for CIFAR-10."""
    normalize, train_ops = get_cifar10_100_transform(opt)
    train_set = datasets.CIFAR10(root=opt.data, train=True, download=True,
                                 transform=transforms.Compose(train_ops))
    eval_transform = transforms.Compose([transforms.ToTensor(), normalize])
    test_set = datasets.CIFAR10(root=opt.data, train=False, download=True,
                                transform=eval_transform)
    return dataset_to_loaders(train_set, test_set, opt)
def get_cifar100_loaders(opt):
    """Return (train, test, train-eval) loaders for CIFAR-100."""
    normalize, train_ops = get_cifar10_100_transform(opt)
    train_set = datasets.CIFAR100(root=opt.data, train=True, download=True,
                                  transform=transforms.Compose(train_ops))
    eval_transform = transforms.Compose([transforms.ToTensor(), normalize])
    test_set = datasets.CIFAR100(root=opt.data, train=False, download=True,
                                 transform=eval_transform)
    return dataset_to_loaders(train_set, test_set, opt)
def get_svhn_loaders(opt, **kwargs):
    """Return (train, test, train-eval) loaders for SVHN.

    The training set is the concatenation of the 'train' and 'extra'
    splits; everything is normalized to [-1, 1] per channel.
    """
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    if opt.data_aug:
        # Augmented pipeline: affine warp + color jitter + random crop.
        train_ops = [
            transforms.RandomAffine(10, (.1, .1), (0.7, 1.), 10),
            transforms.ColorJitter(.2, .2, .2),
            transforms.RandomCrop(32),
            transforms.ToTensor(),
            normalize,
        ]
    else:
        train_ops = [transforms.ToTensor(), normalize]
    train_transform = transforms.Compose(train_ops)
    splits = [datasets.SVHN(opt.data, split=name, download=True,
                            transform=train_transform)
              for name in ('train', 'extra')]
    train_set = torch.utils.data.ConcatDataset(splits)
    test_set = datasets.SVHN(
        opt.data, split='test', download=True,
        transform=transforms.Compose([transforms.ToTensor(), normalize]))
    return dataset_to_loaders(train_set, test_set, opt)
def get_imagenet_loaders(opt):
    """Return (train, test, train-eval) loaders for an ImageNet folder tree
    with `train/` and `val/` subdirectories under `opt.data`."""
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_set = datasets.ImageFolder(
        os.path.join(opt.data, 'train'),
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    test_set = datasets.ImageFolder(
        os.path.join(opt.data, 'val'),
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    return dataset_to_loaders(train_set, test_set, opt)
class InfiniteLoader(object):
    """Iterator that cycles through a data loader forever.

    Each exhausted pass re-creates the underlying iterator; when wrapping
    a plain list, the elements are revisited in a fresh random order.
    """

    def __init__(self, data_loader):
        self.data_loader = data_loader

    def __iter__(self):
        # Start exhausted so the first __next__ triggers a refill.
        self.data_iter = iter([])
        return self

    def __next__(self):
        try:
            return next(self.data_iter)
        except StopIteration:
            self.data_iter = self._fresh_iter()
            return next(self.data_iter)

    def _fresh_iter(self):
        # Lists are reshuffled every epoch; loaders are simply restarted.
        src = self.data_loader
        if isinstance(src, list):
            return (src[i] for i in torch.randperm(len(src)))
        return iter(src)

    def next(self):
        # Python 2 compatibility alias.
        return self.__next__()

    def __len__(self):
        return len(self.data_loader)
def random_orthogonal_matrix(gain, shape):
    """Sample a random (semi-)orthogonal matrix scaled by `gain`.

    Draws a Gaussian matrix, takes the orthonormal factor of its SVD, and
    reshapes it to `shape` (Saxe et al. style orthogonal initialization).

    Args:
        gain: scalar multiplier applied to the orthogonal factor.
        shape: target shape with at least 2 dimensions; trailing
            dimensions are flattened for the SVD.

    Returns:
        np.ndarray of dtype float64 with the requested shape.

    Raises:
        RuntimeError: if `shape` has fewer than 2 dimensions.
    """
    if len(shape) < 2:
        raise RuntimeError("Only shapes of length 2 or more are "
                           "supported.")
    flat_shape = (shape[0], np.prod(shape[1:]))
    a = np.random.normal(0.0, 1.0, flat_shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    # pick the SVD factor whose shape matches the flattened target
    q = u if u.shape == flat_shape else v
    q = q.reshape(shape)
    # Bug fix: `np.float` was removed in NumPy 1.24; it was an alias of
    # the builtin float, i.e. float64.
    return np.asarray(gain * q, dtype=np.float64)
class LinearDataset(data.Dataset):
    """Synthetic Gaussian-mixture classification dataset.

    Class i gets `num // num_class` columns drawn as
    D[:, :, i] @ N(0, I) + C[:, i], stored column-wise in X with integer
    labels in Y.
    """

    def __init__(self, C, D, num, dim, num_class, train=True):
        per_class = num // num_class
        X = np.zeros((C.shape[0], num))
        Y = np.zeros((num,))
        for cls in range(num_class):
            lo, hi = cls * per_class, (cls + 1) * per_class
            noise = np.random.normal(0.0, 1.0, (dim, per_class))
            X[:, lo:hi] = np.dot(D[:, :, cls], noise) + C[:, cls:cls + 1]
            Y[lo:hi] = cls
        self.X = X
        self.Y = Y
        self.classes = range(num_class)

    def __getitem__(self, index):
        features = torch.Tensor(self.X[:, index]).float()
        label = int(self.Y[index])
        return features, label

    def __len__(self):
        return self.X.shape[1]
def get_logreg_loaders(opt, **kwargs):
    """Build synthetic logistic-regression loaders and dump the raw data.

    Class centers and per-class mixing matrices are derived from fixed
    random orthogonal matrices; the generated arrays are saved under
    `opt.logger_name` for later inspection.
    """
    # Fixed seed: every run sees the same synthetic problem.
    np.random.seed(2222)
    centers = opt.c_const * random_orthogonal_matrix(
        1.0, (opt.dim, opt.num_class))
    mixing = opt.d_const * random_orthogonal_matrix(
        1.0, (opt.dim, opt.dim, opt.num_class))
    train_set = LinearDataset(centers, mixing, opt.num_train_data, opt.dim,
                              opt.num_class, train=True)
    test_set = LinearDataset(centers, mixing, opt.num_test_data, opt.dim,
                             opt.num_class, train=False)
    torch.save((train_set.X, train_set.Y, test_set.X, test_set.Y, centers),
               opt.logger_name + '/data.pth.tar')
    return dataset_to_loaders(train_set, test_set, opt)
| 9,431 | 31.979021 | 78 | py |
learning-to-quantize | learning-to-quantize-master/log_utils.py | from collections import OrderedDict, defaultdict
import numpy as np
from tensorboardX import SummaryWriter
import time
import torch
import os
class TBXWrapper(object):
    """Thin wrapper over tensorboardX's SummaryWriter that also mirrors
    every scalar into an in-memory log which can be saved to disk."""

    def configure(self, logger_name, flush_secs=5, opt=None):
        self.writer = SummaryWriter(logger_name, flush_secs=flush_secs)
        self.logger_name = logger_name
        self.logobj = defaultdict(lambda: list())
        self.opt = opt

    def log_value(self, name, val, step):
        self.writer.add_scalar(name, val, step)
        # Keep (wall_time, step, value) triples for save_log().
        self.logobj[name].append((time.time(), step, float(val)))

    def log_histogram(self, name, val, step):
        self.writer.add_histogram(name, val, step)

    def add_scalar(self, name, val, step):
        # SummaryWriter-compatible alias.
        self.log_value(name, val, step)

    def save_log(self, filename='log.pth.tar'):
        try:
            os.makedirs(self.opt.logger_name)
        except os.error:
            pass
        torch.save(dict(self.logobj), self.opt.logger_name + '/' + filename)

    def close(self):
        self.writer.close()
class AverageMeter(object):
    """Tracks the latest value and a count-weighted running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=0):
        self.val = val
        self.sum += val * n
        self.count += n
        # The small constant keeps the division defined when count == 0.
        self.avg = self.sum / (1e-4 + self.count)

    def __str__(self):
        return ('%d' % self.val if self.count == 0
                else '%.4f (%.4f)' % (self.val, self.avg))

    def tb_log(self, tb_logger, name, step=None):
        tb_logger.log_value(name, self.val, step=step)
class TimeMeter(object):
    """Keeps the last `k` timing samples plus an exponential moving
    average with smoothing factor 1/k."""

    def __init__(self, k=1000):
        self.k = k
        self.reset()

    def reset(self):
        self.vals = [0] * self.k
        self.i = 0
        self.mu = 0

    def update(self, val):
        alpha = 1. / self.k
        self.vals[self.i] = val
        self.i = (self.i + 1) % self.k
        self.mu = (1 - alpha) * self.mu + alpha * val

    def __str__(self):
        return '%.4f +- %.2f' % (self.mu, np.std(self.vals))

    def tb_log(self, tb_logger, name, step=None):
        # NOTE(review): this logs vals[0], not the most recent sample —
        # confirm whether that is intended.
        tb_logger.log_value(name, self.vals[0], step=step)
class StatisticMeter(object):
    """Maintains running averages of summary statistics (mean, std, min,
    max, median) over array-valued updates, ignoring NaN/Inf entries."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.mu = AverageMeter()
        self.std = AverageMeter()
        self.min = AverageMeter()
        self.max = AverageMeter()
        self.med = AverageMeter()

    def update(self, val, n=0):
        # Drop NaN/Inf entries before computing any statistics.
        finite = np.ma.masked_invalid(val).compressed()
        n = min(n, len(finite))
        if n == 0:
            return
        self.mu.update(np.mean(finite), n=n)
        self.std.update(np.std(finite), n=n)
        self.min.update(np.min(finite), n=n)
        self.max.update(np.max(finite), n=n)
        self.med.update(np.median(finite), n=n)

    def __str__(self):
        return 'mu:{}|med:{}'.format(self.mu, self.med)

    def tb_log(self, tb_logger, name, step=None):
        for suffix, meter in (('_mu', self.mu), ('_med', self.med),
                              ('_std', self.std), ('_min', self.min),
                              ('_max', self.max)):
            meter.tb_log(tb_logger, name + suffix, step=step)
class LogCollector(object):
    """Ordered collection of meters that can switch between train and val;
    formats and tensorboard-logs the keys selected by `opt.log_keys`."""

    def __init__(self, opt):
        self.meters = OrderedDict()
        self.log_keys = opt.log_keys.split(',')

    def reset(self):
        self.meters = OrderedDict()

    def update(self, k, v, n=0, log_scale=False, bins=100):
        # numpy values get full statistics; plain scalars a running mean.
        if k not in self.meters:
            is_array = type(v).__module__ == np.__name__
            self.meters[k] = StatisticMeter() if is_array else AverageMeter()
        self.meters[k].update(v, n)

    def __str__(self):
        parts = []
        for i, (k, v) in enumerate(self.meters.items()):
            if k in self.log_keys or 'all' in self.log_keys:
                # Separator logic mirrors the overall meter index, so a
                # skipped first meter still yields a leading space.
                if i > 0:
                    parts.append(' ')
                parts.append(k + ': ' + str(v))
        return ''.join(parts)

    def tb_log(self, tb_logger, prefix='', step=None):
        for k, v in self.meters.items():
            v.tb_log(tb_logger, prefix + k, step=step)
class Profiler(object):
    """Accumulates named timing sections; end() folds the totals of one
    measurement window into per-name TimeMeters."""

    def __init__(self, k=10):
        self.k = k
        self.meters = OrderedDict()
        self.start()

    def tic(self):
        # Mark the start of the next timed section.
        self.t = time.time()

    def toc(self, name):
        # Record elapsed time since the last tic under `name`, then re-tic.
        now = time.time()
        self.times.setdefault(name, []).append(now - self.t)
        self.tic()

    def start(self):
        self.times = OrderedDict()
        self.tic()

    def end(self):
        # Fold accumulated section times into per-name TimeMeters.
        for name, spans in self.times.items():
            if name not in self.meters:
                self.meters[name] = TimeMeter(self.k)
            self.meters[name].update(sum(spans))
        self.start()

    def __str__(self):
        return ' '.join('{}: {}'.format(k, v)
                        for k, v in self.meters.items())
| 5,411 | 27.041451 | 75 | py |
learning-to-quantize | learning-to-quantize-master/log_plotter.py | from scipy import interpolate
import numpy as np
import os
import re
import torch
import pylab as plt
import matplotlib.ticker as mtick
from tensorboard.backend.event_processing import event_accumulator
def get_run_names(logdir, patterns):
    """Return the sorted directories under `logdir` whose path matches any
    of the regex `patterns` (matched with re.match, i.e. anchored at the
    start of the path)."""
    matches = []
    for pattern in patterns:
        for root, _subdirs, _files in os.walk(logdir, followlinks=True):
            if re.match(pattern, root):
                matches.append(root)
    matches.sort()
    return matches
def get_run_names_events(logdir, patterns):
    """Map each directory under `logdir` matching any regex in `patterns`
    to the sorted list of tensorboard event files it contains."""
    runs = {}
    for pattern in patterns:
        for root, _subdirs, files in os.walk(logdir, followlinks=True):
            if not re.match(pattern, root):
                continue
            events = [f for f in files if re.match('.*events\\.out.*', f)]
            runs[root] = sorted(events)
    return runs
def get_data_pth(logdir, run_names, tag_names, batch_size=None):
    """Load saved scalar logs ('log.pth.tar') for each run directory.

    For every run, returns a dict mapping each tag present in the log to
    a 2xT array holding [steps, values]. `logdir` and `batch_size` are
    unused but kept for signature parity with get_data_pth_events.
    """
    data = []
    for run_name in run_names:
        logdata = torch.load(run_name + '/log.pth.tar')
        entry = {}
        for tag in tag_names:
            if tag in logdata:
                records = logdata[tag]
                # Records are (wall_time, step, value); keep step & value.
                entry[tag] = np.array([[r[j] for r in records]
                                       for j in range(1, 3)])
        data.append(entry)
    return data
def get_data_pth_events(logdir, run_names, tag_names, batch_size=None):
    """Read scalar curves for `tag_names` from tensorboard event files.

    Args:
        logdir: unused; kept for signature parity with get_data_pth.
        run_names: dict mapping run directory -> sorted list of event
            files (as produced by get_run_names_events).
        tag_names: scalar tag names to extract.
        batch_size: unused; kept for signature parity.

    Returns:
        A list with one dict per run, mapping each found tag to its
        (steps, values) series. Later event files only contribute points
        whose step exceeds the last step already collected.
        NOTE(review): the first event file stores a 2-row np.array while
        merged results store a tuple of two arrays — downstream code must
        accept both shapes.
    """
    data = []
    for run_name, events in run_names.items():
        d = {}
        for event in events:
            # Only scalars are needed (size 0 = load all of them); the
            # heavyweight payloads are capped to keep loading fast.
            ea = event_accumulator.EventAccumulator(run_name+'/'+event,
                                                    size_guidance={ # see below regarding this argument
                                                        event_accumulator.COMPRESSED_HISTOGRAMS: 500,
                                                        event_accumulator.IMAGES: 4,
                                                        event_accumulator.AUDIO: 4,
                                                        event_accumulator.SCALARS: 0,
                                                        event_accumulator.HISTOGRAMS: 1,
                                                    })
            ea.Reload()
            for tag_name in tag_names:
                if tag_name not in ea.Tags()['scalars']:
                    continue
                scalar = ea.Scalars(tag_name)
                if tag_name not in d:
                    d[tag_name] = np.array(
                        [[dp.step for dp in scalar], [dp.value for dp in scalar]])
                else:
                    # Append only the points newer than what we already have.
                    new_array = np.array([dp.step for dp in scalar])
                    indexes = new_array > d[tag_name][0][-1]
                    res1 = np.concatenate(
                        (d[tag_name][0], np.array([dp.step for dp in scalar])[indexes]))
                    res2 = np.concatenate(
                        (d[tag_name][1], np.array([dp.value for dp in scalar])[indexes]))
                    d[tag_name] = (res1, res2)
        data += [d]
    return data
def plot_smooth(x, y, npts=100, order=3, *args, **kwargs):
    """Plot a spline-smoothed version of the curve (x, y).

    Args:
        x, y: 1-D sample arrays (x must be strictly increasing for splrep).
        npts: number of points in the resampled smooth curve.
        order: spline degree forwarded to splrep.
        *args, **kwargs: forwarded to plt.plot.
    """
    x_smooth = np.linspace(x.min(), x.max(), npts)
    # Bug fix: `order` used to be ignored (splrep defaulted to k=3), so
    # plot_smooth_o1's "linear" smoothing silently produced cubic splines.
    tck = interpolate.splrep(x, y, s=0, k=order)
    y_smooth = interpolate.splev(x_smooth, tck, der=0)
    plt.plot(x_smooth, y_smooth, *args, **kwargs)
def plot_smooth_o1(x, y, *args, **kwargs):
    """Smoothed plot with 100 sample points and order=1 (linear) spline."""
    plot_smooth(x, y, 100, 1, *args, **kwargs)
def get_legend(lg_tags, run_name, lg_replace=[]):
    """Build a legend string by extracting each tag's match from run_name.

    Tags without ',' or '$' are extended to lazily capture up to the next
    comma (or end of string). `lg_replace` is a list of (old, new)
    substitutions applied to the assembled string.
    """
    pieces = ''
    for tag in lg_tags:
        if ',' in tag or '$' in tag:
            suffix = ''
        else:
            suffix = '.*?($|,)'
        found = re.search(tag + suffix, run_name)
        if found:
            pieces += found.group(0)
    legend = pieces.replace('_,', ',').strip(',')
    for old, new in lg_replace:
        legend = legend.replace(old, new)
    return legend
def plot_tag(data, plot_f, run_names, tag_name, lg_tags, ylim=None, color0=0,
             ncolor=None, lg_replace=[], no_title=False):
    """Plot one scalar tag for one or more runs on the current axes.

    Args:
        data: list of per-run dicts mapping tag -> (steps, values), or a
            single dict (then `run_names` is a single name too).
        plot_f: plotting callable used for the accuracy/loss tags.
        run_names: run identifiers used to build legend entries.
        tag_name: which tag to draw; also selects labels/title/scale.
        lg_tags: regex fragments passed to get_legend.
        ylim: optional y-axis limits.
        color0: offset into the color cycle.
        ncolor: optionally restrict the number of colors used.
        lg_replace: (old, new) string substitutions for legend text.
        no_title: suppress the plot title when True.
    """
    # Axis labels / titles keyed by tag name.
    xlabel = {}
    ylabel = {'Tacc': 'Training Accuracy (%)', 'Terror': 'Training Error (%)',
              'train/accuracy': 'Training Accuracy (%)',
              'Vacc': 'Test Accuracy (%)', 'Verror': 'Test Error (%)',
              'valid/accuracy': 'Test Accuracy (%)',
              'loss': 'Loss',
              'epoch': 'Epoch',
              'Tloss': 'Loss', 'Vloss': 'Loss', 'lr': 'Learning rate',
              'grad_bias': 'Gradient Diff norm',
              'est_var': 'Mean variance',
              'est_snr': 'Mean SNR',
              'nb_error': 'NB Error',
              'est_nvar': 'Mean Normalized Variance'}
    titles = {'Tacc': 'Training Accuracy', 'Terror': 'Training Error',
              'train/accuracy': 'Training Accuracy',
              'Vacc': 'Test Accuracy', 'Verror': 'Test Error',
              'loss': 'Loss',
              'epoch': 'Epoch',
              'Tloss': 'Loss on full training set', 'lr': 'Learning rate',
              'Vloss': 'Loss on validation set',
              'grad_bias': 'Optimization Step Bias',
              'nb_error': 'Norm-based Variance Error',
              'est_var': 'Optimization Step Variance (w/o learning rate)',
              'est_snr': 'Optimization Step SNR',
              'est_nvar': 'Optimization Step Normalized Variance (w/o lr)',
              }
    # Tags drawn on a logarithmic y scale.
    yscale_log = ['Tloss', 'Vloss', 'est_var']  # , 'est_var'
    yscale_base = []
    # yscale_sci = ['est_bias', 'est_var']
    # Tags that use the caller-provided plot function (others use plt.plot).
    plot_fs = {'Tacc': plot_f, 'Vacc': plot_f,
               'Terror': plot_f, 'Verror': plot_f,
               'Tloss': plot_f, 'Vloss': plot_f,
               }
    for k in list(ylabel.keys()):
        if k not in xlabel:
            xlabel[k] = 'Training Iteration'
        if k not in plot_fs:
            plot_fs[k] = plot_f
        if k not in plot_fs:
            plot_fs[k] = plt.plot
    # Allow calling with a single run's data instead of a list.
    if not isinstance(data, list):
        data = [data]
        run_names = [run_names]
    color = ['blue', 'orangered', 'limegreen', 'darkkhaki', 'cyan', 'grey']
    color = color[:ncolor]
    style = ['-', '--', ':', '-.']
    # plt.rcParams.update({'font.size': 12})
    plt.grid(linewidth=1)
    legends = []
    for i in range(len(data)):
        if tag_name not in data[i]:
            continue
        legends += [get_legend(lg_tags, run_names[i], lg_replace)]
        # Cycle colors first, then line styles.
        plot_fs[tag_name](
            data[i][tag_name][0], data[i][tag_name][1],
            linestyle=style[(color0 + i) // len(color)],
            color=color[(color0 + i) % len(color)], linewidth=2)
    if not no_title:
        plt.title(titles[tag_name])
    if tag_name in yscale_log:
        ax = plt.gca()
        if tag_name in yscale_base:
            ax.set_yscale('log', basey=np.e)
            ax.yaxis.set_major_formatter(mtick.FuncFormatter(ticks))
        else:
            ax.set_yscale('log')
    else:
        ax = plt.gca()
        ax.ticklabel_format(axis='y', style='sci', scilimits=(-3, 3))
    if ylim is not None:
        plt.ylim(ylim)
    # plt.xlim([0, 25000])
    plt.legend(legends, bbox_to_anchor=(1.1, 1.05))
    plt.xlabel(xlabel[tag_name])
    plt.ylabel(ylabel[tag_name])
def ticks(y, pos):
    """Matplotlib tick formatter rendering `y` as a power of e."""
    exponent = np.log(y)
    return r'$e^{{{:.0f}}}$'.format(exponent)
def plot_runs_and_tags(get_data_f, plot_f, logdir, patterns, tag_names,
                       fig_name, lg_tags, ylim, batch_size=None, sep_h=True,
                       ncolor=None, save_single=False, lg_replace=[],
                       no_title=False):
    """Find runs under `logdir`, load their scalars, and plot each tag.

    Args:
        get_data_f: loader, e.g. get_data_pth_events.
        plot_f: plotting callable forwarded to plot_tag.
        logdir, patterns: where/which runs to collect.
        tag_names: scalar tags to plot (one subplot each, two per row).
        fig_name: output path for the combined figure (always written).
        lg_tags, lg_replace: legend extraction options (see get_legend).
        ylim: per-tag y-limits (list aligned with tag_names) or one limit.
        batch_size: forwarded to the data loader.
        sep_h: unused here; kept for interface stability.
        ncolor: restrict number of colors in the cycle.
        save_single: additionally save each tag as its own PDF in a
            directory derived from `fig_name`.
        no_title: suppress subplot titles.

    Returns:
        (data, run_names) as produced by the loader.
    """
    run_names = get_run_names_events(logdir, patterns)
    data = get_data_f(logdir, run_names, tag_names, batch_size)
    if len(data) == 0:
        return data, run_names
    # Grid layout: two columns, as many rows as needed.
    num = len(tag_names)
    height = (num + 1) // 2
    width = 2 if num > 1 else 1
    if not save_single:
        fig = plt.figure(figsize=(7 * width, 4 * height))
        fig.subplots(height, width)
    else:
        plt.figure(figsize=(7, 4))
    plt.tight_layout(pad=1., w_pad=3., h_pad=3.0)
    fi = 1
    if save_single:
        # Per-tag PDFs go into a directory named after the figure file.
        fig_dir = fig_name[:fig_name.rfind('.')]
        try:
            os.makedirs(fig_dir)
        except os.error:
            pass
    for i in range(len(tag_names)):
        yl = ylim[i]
        if not isinstance(yl, list) and yl is not None:
            yl = ylim
        if not save_single:
            plt.subplot(height, width, fi)
        plot_tag(data, plot_f, list(run_names), tag_names[i], lg_tags, yl,
                 ncolor=ncolor, lg_replace=lg_replace, no_title=no_title)
        if save_single:
            plt.savefig('%s/%s.pdf' % (fig_dir, tag_names[i]),
                        dpi=100, bbox_inches='tight')
            plt.figure(figsize=(7, 4))
        fi += 1
    plt.savefig(fig_name, dpi=100, bbox_inches='tight')
    return data, run_names
| 8,772 | 36.016878 | 104 | py |
learning-to-quantize | learning-to-quantize-master/models/cifar10_wresnet2.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Pre-activation wide-resnet basic block: BN-ReLU-Conv3x3 twice, with
    a 1x1 projection shortcut when the channel count or stride changes.
    (Duplicate of the block in cifar10_wresnet.py.)"""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # and/or idiom: a 1x1 conv shortcut only when shapes differ.
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
    def forward(self, x):
        # The pre-activation output feeds both branches when projecting.
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """Sequential stack of `nb_layers` blocks; only the first block may
    change the channel count or apply the stage stride."""
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(int(nb_layers)):
            # and/or idiom: first layer transitions, the rest keep shape.
            layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide ResNet variant (cifar10_wresnet2.py copy).

    NOTE(review): forward() returns the *input* `x` unchanged — the
    computed features and the `fc` classifier are discarded (see the
    commented-out returns). This looks like a debugging leftover; confirm
    intent before using this class for training or evaluation.
    """
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        # depth must be 6n+4 so each of the 3 stages gets n blocks.
        assert((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He initialization for convs; BN scales to 1, biases to 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        # return self.fc(out)
        # return F.log_softmax(out, dim=-1)
        return x
| 3,794 | 42.62069 | 116 | py |
learning-to-quantize | learning-to-quantize-master/models/logreg.py | import torch.nn as nn
import torch.nn.functional as F
class Linear(nn.Module):
    """Single linear layer with log-softmax output (multinomial logistic
    regression)."""

    def __init__(self, dim, num_class):
        super(Linear, self).__init__()
        self.linear = nn.Linear(dim, num_class)

    def forward(self, x):
        logits = self.linear(x)
        return F.log_softmax(logits, dim=-1)
class TwoLinear(nn.Module):
    """Two-layer MLP: ReLU hidden layer of width `dim`, log-softmax
    output over `num_class` classes."""

    def __init__(self, dim, num_class):
        super(TwoLinear, self).__init__()
        self.linear1 = nn.Linear(dim, dim)
        self.linear2 = nn.Linear(dim, num_class)

    def forward(self, x):
        hidden = F.relu(self.linear1(x))
        return F.log_softmax(self.linear2(hidden), dim=-1)
| 637 | 24.52 | 48 | py |
learning-to-quantize | learning-to-quantize-master/models/linreg.py | import torch.nn as nn
# import torch.nn.functional as F
class Linear(nn.Module):
    """Single linear layer (linear-regression head); raw outputs, no
    activation."""

    def __init__(self, dim, num_class):
        super(Linear, self).__init__()
        self.linear = nn.Linear(dim, num_class)

    def forward(self, x):
        return self.linear(x)
class TwoLinear(nn.Module):
    """Two stacked linear layers with no nonlinearity between them (the
    ReLU is deliberately disabled in this file, so the composite map is
    still affine)."""

    def __init__(self, dim, num_class):
        super(TwoLinear, self).__init__()
        self.linear1 = nn.Linear(dim, dim)
        self.linear2 = nn.Linear(dim, num_class)

    def forward(self, x):
        return self.linear2(self.linear1(x))
| 623 | 23 | 48 | py |
learning-to-quantize | learning-to-quantize-master/models/loss.py | import torch.nn.functional as F
def nll_loss(model, data, reduction='mean', weights=1):
    """Negative log-likelihood loss for one (input, target) batch.

    Moves the batch to GPU, zeroes the model's gradients, runs the
    forward pass, and returns the (optionally weighted) NLL.
    """
    inputs, target = data[0].cuda(), data[1].cuda()
    model.zero_grad()
    output = model(inputs)
    return F.nll_loss(output, target, reduction=reduction) * weights
| 270 | 26.1 | 66 | py |
learning-to-quantize | learning-to-quantize-master/models/cifar10_wresnet.py | # https://github.com/xternalz/WideResNet-pytorch/blob/master/wideresnet.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Pre-activation wide-resnet basic block (BN-ReLU-Conv3x3 twice) with
    an optional 1x1 projection shortcut when the shape changes."""

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = in_planes == out_planes
        if self.equalInOut:
            self.convShortcut = None
        else:
            # Project the identity path when channels/stride differ.
            self.convShortcut = nn.Conv2d(in_planes, out_planes,
                                          kernel_size=1, stride=stride,
                                          padding=0, bias=False)

    def forward(self, x):
        if self.equalInOut:
            out = self.relu1(self.bn1(x))
        else:
            # Shared pre-activation feeds both the residual and the
            # projection branch.
            x = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        shortcut = x if self.equalInOut else self.convShortcut(x)
        return torch.add(shortcut, out)
class NetworkBlock(nn.Module):
    """Sequential stack of `nb_layers` blocks; only the first block may
    change the channel count or apply the stage stride."""

    def __init__(self, nb_layers, in_planes, out_planes, block, stride,
                 dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes,
                                      nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
                    dropRate):
        blocks = []
        for idx in range(int(nb_layers)):
            first = idx == 0
            blocks.append(block(in_planes if first else out_planes,
                                out_planes,
                                stride if first else 1,
                                dropRate))
        return nn.Sequential(*blocks)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide ResNet for CIFAR: conv stem, 3 widened residual stages,
    BN-ReLU, and global average pooling.

    NOTE(review): forward() applies log_softmax directly to the pooled
    features and never calls `self.fc` (see the commented-out return), so
    the output dimension is nChannels[3], not num_classes — confirm intent.
    """
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        # depth must be 6n+4 so each of the 3 stages gets n blocks.
        assert((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He initialization for convs; BN scales to 1, biases to 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        # return self.fc(out)
        return F.log_softmax(out, dim=-1)
| 3,850 | 43.264368 | 116 | py |
learning-to-quantize | learning-to-quantize-master/models/cifar10.py | # https://github.com/akamaster/pytorch_resnet_cifar10
'''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
__all__ = ['ResNet', 'resnet8', 'resnet20', 'resnet32',
'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
if hasattr(init, 'kaiming_normal_'):
init.kaiming_normal_(m.weight)
else:
init.kaiming_normal(m.weight)
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable as an nn.Module layer."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        # Simply delegate to the wrapped callable.
        return self.lambd(x)
class BasicBlock(nn.Module):
    """CIFAR ResNet basic block: two 3x3 convs with BN and a residual
    shortcut. Option 'A' uses a parameter-free zero-padded subsample
    (as in the CIFAR ResNet paper); option 'B' a 1x1 projection."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                # Subsample spatially and zero-pad the channel dimension.
                pad = planes // 4
                self.shortcut = LambdaLayer(
                    lambda x: F.pad(x[:, :, ::2, ::2],
                                    (0, 0, 0, 0, pad, pad),
                                    "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                    nn.Conv2d(in_planes, self.expansion * planes,
                              kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + self.shortcut(x)
        return F.relu(out)
class ResNet(nn.Module):
    """CIFAR ResNet: three stages of residual blocks over 16/32/64
    channels, global average pooling, and a log-softmax classifier."""

    def __init__(self, block, num_blocks, num_class=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64, num_class)
        self.apply(_weights_init)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        # Global average pooling over the remaining spatial extent.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        return F.log_softmax(self.linear(out), dim=-1)
def _cifar_resnet(blocks_per_stage, num_class):
    # All CIFAR ResNet variants use three equal-depth BasicBlock stages.
    n = blocks_per_stage
    return ResNet(BasicBlock, [n, n, n], num_class=num_class)


def resnet8(num_class=10):
    return _cifar_resnet(1, num_class)


def resnet20(num_class=10):
    return _cifar_resnet(3, num_class)


def resnet32(num_class=10):
    return _cifar_resnet(5, num_class)


def resnet44(num_class=10):
    return _cifar_resnet(7, num_class)


def resnet56(num_class=10):
    return _cifar_resnet(9, num_class)


def resnet110(num_class=10):
    return _cifar_resnet(18, num_class)


def resnet1202(num_class=10):
    return _cifar_resnet(200, num_class)
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(
lambda p: p.requires_grad and len(p.data.size()) > 1,
net.parameters()))))
class Convnet(nn.Module):
    """Two-conv, two-fc CIFAR network with optional dropout (cf. the
    Adam paper's setup; originally trained ~100 epochs with an lr drop
    at epoch 50)."""

    def __init__(self, dropout=True, num_class=10):
        super(Convnet, self).__init__()
        self.dropout = dropout
        self.conv1 = nn.Conv2d(3, 64, kernel_size=5)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=5)
        self.fc1 = nn.Linear(128 * 5 * 5, 1000)
        self.fc2 = nn.Linear(1000, num_class)

    def forward(self, x):
        if self.dropout:
            # Lighter dropout rate on the raw input.
            x = F.dropout2d(x, training=self.training, p=0.2)
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        # 32 -> 28 -> 14 -> 10 -> 5 spatial after two conv/pool stages.
        x = x.view(-1, 128 * 5 * 5)
        if self.dropout:
            x = F.dropout(x, training=self.training)
        x = F.relu(self.fc1(x))
        if self.dropout:
            x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=-1)
class MLP(nn.Module):
    """Three-layer fully-connected CIFAR network (1024 hidden units) with
    optional dropout, mirroring the MNIST MLP."""

    def __init__(self, dropout=True, num_class=10):
        super(MLP, self).__init__()
        self.dropout = dropout
        self.fc1 = nn.Linear(3 * 32 * 32, 1024)
        self.fc2 = nn.Linear(1024, 1024)
        # fc3 was removed; fc4 kept its name for checkpoint compatibility.
        self.fc4 = nn.Linear(1024, num_class)

    def forward(self, x):
        x = x.view(-1, 3 * 32 * 32)
        if self.dropout:
            x = F.dropout(x, training=self.training, p=0.2)
        x = F.relu(self.fc1(x))
        if self.dropout:
            x = F.dropout(x, training=self.training)
        x = F.relu(self.fc2(x))
        if self.dropout:
            x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc4(x), dim=-1)
if __name__ == "__main__":
    # Print parameter statistics for every ResNet variant defined above.
    for net_name in __all__:
        if net_name.startswith('resnet'):
            print(net_name)
            test(globals()[net_name]())
            print()
| 7,687 | 31.033333 | 78 | py |
learning-to-quantize | learning-to-quantize-master/models/__init__.py | import torch
import torch.nn
import models.mnist
import models.cifar10
import models.logreg
import models.imagenet
import models.cifar10_wresnet
import models.loss
def init_model(opt):
    """Construct the model selected by `opt.dataset` / `opt.arch`.

    Attaches an NLL criterion to the model and moves it to GPU when
    `opt.cuda` is set. CIFAR/SVHN models (other than cnn/mlp) are wrapped
    in DataParallel.

    NOTE(review): an unrecognized (dataset, arch) combination inside the
    mnist branch leaves `model` unbound and raises NameError below.
    """
    if opt.dataset == 'mnist':
        # `nodropout` is a disable flag, hence the negation.
        if opt.arch == 'cnn':
            model = models.mnist.Convnet(not opt.nodropout)
        elif opt.arch == 'bigcnn':
            model = models.mnist.BigConvnet(not opt.nodropout)
        elif opt.arch == 'mlp':
            model = models.mnist.MLP(not opt.nodropout)
        elif opt.arch == 'smlp':
            model = models.mnist.SmallMLP(not opt.nodropout)
        elif opt.arch == 'ssmlp':
            model = models.mnist.SuperSmallMLP(not opt.nodropout)
    elif (opt.dataset == 'cifar10' or opt.dataset == 'svhn'
            or opt.dataset == 'cifar100'):
        if opt.arch == 'cnn':
            model = models.cifar10.Convnet(num_class=opt.num_class)
        elif opt.arch == 'mlp':
            model = models.cifar10.MLP(num_class=opt.num_class)
        elif opt.arch.startswith('wrn'):
            # e.g. 'wrn28-10' -> depth 28, widen factor 10.
            depth, widen_factor = map(int, opt.arch[3:].split('-'))
            model = models.cifar10_wresnet.WideResNet(
                depth, opt.num_class, widen_factor, 0.3)
        else:
            # Fall back to a resnetXX factory looked up by name.
            model = models.cifar10.__dict__[opt.arch](
                num_class=opt.num_class)
        model = torch.nn.DataParallel(model)
    elif opt.dataset == 'imagenet':
        model = models.imagenet.Model(opt.arch, opt.pretrained)
    elif opt.dataset.startswith('imagenet'):
        # imagenet subsets with a custom number of classes.
        model = models.imagenet.Model(opt.arch, opt.pretrained, opt.num_class)
    elif opt.dataset == 'logreg':
        model = models.logreg.Linear(opt.dim, opt.num_class)
    elif opt.dataset == '10class':
        model = models.logreg.Linear(opt.dim, opt.num_class)
    elif opt.dataset == '5class':
        model = models.logreg.Linear(opt.dim, opt.num_class)
    model.criterion = models.loss.nll_loss
    if opt.cuda:
        model.cuda()
    return model
| 1,969 | 36.169811 | 78 | py |
learning-to-quantize | learning-to-quantize-master/models/imagenet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models
class Model(nn.Module):
    """Wraps a torchvision classifier and adds a log-softmax output.

    AlexNet/VGG architectures get DataParallel applied to `features`
    only; all other architectures are wrapped whole. If `nclass` is
    given and differs from the network's output size, the final fc layer
    is replaced (implemented for ResNets only).

    NOTE(review): for alexnet/vgg with `nclass` set, `model.module` does
    not exist (only `features` was wrapped), so the fc check would raise
    AttributeError — confirm those combinations are never used together.
    """
    def __init__(self, arch, pretrained=False, nclass=None):
        super(Model, self).__init__()
        model = torchvision.models.__dict__[arch](pretrained)
        if arch.startswith('alexnet') or arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
        else:
            model = torch.nn.DataParallel(model)
        if nclass is not None and nclass != model.module.fc.out_features:
            if arch.startswith('resnet'):
                # Swap in a fresh classification head of the right size.
                model.module.fc = nn.Linear(model.module.fc.in_features,
                                            nclass)
            else:
                raise Exception('Not implemented.')
        self.model = model
    def forward(self, x):
        out = self.model(x)
        return F.log_softmax(out, dim=-1)
| 910 | 34.038462 | 73 | py |
learning-to-quantize | learning-to-quantize-master/models/clone_model.py | import torch
import torch.nn as nn
import copy
from torch.nn.parallel.parallel_apply import parallel_apply
class CloneModel(nn.Module):
    """Replicate a module so each sample of a batch runs through its own
    copy (replica 0 is the original; 1..batch_size are deep copies that
    are re-synced to replica 0 on every forward).

    NOTE(review): ``gather`` is still unimplemented and ``scatter`` drops
    kwargs, so ``forward`` is not functional end-to-end yet.
    """

    def __init__(self, module, batch_size):
        super(CloneModel, self).__init__()
        self.replicas = [module]
        self.batch_size = batch_size
        for _ in range(batch_size):
            # BUG FIX: ``self.replicas += copy.deepcopy(module)`` tried to
            # iterate an nn.Module (TypeError); append the copy instead.
            self.replicas.append(copy.deepcopy(module))

    def forward(self, *inputs, **kwargs):
        inputs, kwargs = self.scatter(inputs, kwargs)
        # keep every replica's weights identical to replica 0
        for i in range(1, self.batch_size):
            self.replicas[i].load_state_dict(self.replicas[0].state_dict())
        outputs = parallel_apply(self.replicas, inputs, kwargs)
        return self.gather(outputs)

    def scatter(self, inputs, kwargs):
        # split the batch into single-sample chunks; kwargs are discarded
        x = inputs[0]
        xs = torch.split(x, 1)
        kwargs = None
        return [xs], kwargs

    def gather(self, outputs):
        # TODO: combine per-replica outputs; currently returns None
        pass
| 887 | 28.6 | 75 | py |
learning-to-quantize | learning-to-quantize-master/models/mnist.py | import torch.nn as nn
import torch.nn.functional as F
class MNISTNet(nn.Module):
    """Small LeNet-style MNIST classifier (30 epochs, no lr update)."""

    def __init__(self, dropout=True):
        super(MNISTNet, self).__init__()
        self.dropout = dropout
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        h = F.max_pool2d(self.conv1(x), 2).relu()
        h = self.conv2(h)
        if self.dropout:
            h = self.conv2_drop(h)
        h = F.max_pool2d(h, 2).relu()
        h = h.view(-1, 320)
        h = self.fc1(h).relu()
        # identity when dropout is disabled or in eval mode
        h = F.dropout(h, training=self.training and self.dropout)
        return F.log_softmax(self.fc2(h), dim=-1)
class Convnet(nn.Module):
    """2 conv + 2 fc MNIST net with dropout, ~.5% error (close to the
    maxout-paper architecture); 30 epochs, no lr update."""

    def __init__(self, dropout=True):
        super(Convnet, self).__init__()
        self.dropout = dropout
        self.conv1 = nn.Conv2d(1, 64, kernel_size=5)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(128 * 4 * 4, 1000)
        self.fc2 = nn.Linear(1000, 10)

    def forward(self, x):
        h = F.max_pool2d(self.conv1(x), 2).relu()
        h = self.conv2(h)
        if self.dropout:
            h = self.conv2_drop(h)
        h = F.max_pool2d(h, 2).relu()
        h = h.view(-1, 128 * 4 * 4)
        h = self.fc1(h).relu()
        # identity when dropout is disabled or in eval mode
        h = F.dropout(h, training=self.training and self.dropout)
        return F.log_softmax(self.fc2(h), dim=-1)
class BigConvnet(nn.Module):
    """Like ``Convnet`` but with 1000 channels / hidden dims throughout."""

    def __init__(self, dropout=True):
        super(BigConvnet, self).__init__()
        self.dropout = dropout
        self.conv1 = nn.Conv2d(1, 1000, kernel_size=5)
        self.conv2 = nn.Conv2d(1000, 1000, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(1000 * 4 * 4, 1000)
        self.fc2 = nn.Linear(1000, 10)

    def forward(self, x):
        h = F.max_pool2d(self.conv1(x), 2).relu()
        h = self.conv2(h)
        if self.dropout:
            h = self.conv2_drop(h)
        h = F.max_pool2d(h, 2).relu()
        h = h.view(-1, 1000 * 4 * 4)
        h = self.fc1(h).relu()
        # identity when dropout is disabled or in eval mode
        h = F.dropout(h, training=self.training and self.dropout)
        return F.log_softmax(self.fc2(h), dim=-1)
class MLP(nn.Module):
    """MLP from the dropout paper (table 2, row 4; ~1.25% error).

    http://www.cs.toronto.edu/~nitish/dropout/mnist.pbtxt
    50 epochs, lr update at 30.
    """

    def __init__(self, dropout=True):
        super(MLP, self).__init__()
        self.dropout = dropout
        self.fc1 = nn.Linear(28 * 28, 1024)
        self.fc2 = nn.Linear(1024, 1024)
        self.fc4 = nn.Linear(1024, 10)

    def forward(self, x):
        # dropout layers are identities when disabled or in eval mode
        active = self.training and self.dropout
        h = x.view(-1, 28 * 28)
        h = F.dropout(h, p=0.2, training=active)
        h = F.relu(self.fc1(h))
        h = F.dropout(h, training=active)
        h = F.relu(self.fc2(h))
        h = F.dropout(h, training=active)
        return F.log_softmax(self.fc4(h), dim=-1)
class SmallMLP(nn.Module):
    """Like ``MLP`` but with 50-unit hidden layers."""

    def __init__(self, dropout=True):
        super(SmallMLP, self).__init__()
        self.dropout = dropout
        self.fc1 = nn.Linear(28 * 28, 50)
        self.fc2 = nn.Linear(50, 50)
        self.fc4 = nn.Linear(50, 10)

    def forward(self, x):
        # dropout layers are identities when disabled or in eval mode
        active = self.training and self.dropout
        h = x.view(-1, 28 * 28)
        h = F.dropout(h, p=0.2, training=active)
        h = F.relu(self.fc1(h))
        h = F.dropout(h, training=active)
        h = F.relu(self.fc2(h))
        h = F.dropout(h, training=active)
        return F.log_softmax(self.fc4(h), dim=-1)
class SuperSmallMLP(nn.Module):
    """Like ``MLP`` but with 20-unit hidden layers."""

    def __init__(self, dropout=True):
        super(SuperSmallMLP, self).__init__()
        self.dropout = dropout
        self.fc1 = nn.Linear(28 * 28, 20)
        self.fc2 = nn.Linear(20, 20)
        self.fc4 = nn.Linear(20, 10)

    def forward(self, x):
        # dropout layers are identities when disabled or in eval mode
        active = self.training and self.dropout
        h = x.view(-1, 28 * 28)
        h = F.dropout(h, p=0.2, training=active)
        h = F.relu(self.fc1(h))
        h = F.dropout(h, training=active)
        h = F.relu(self.fc2(h))
        h = F.dropout(h, training=active)
        return F.log_softmax(self.fc4(h), dim=-1)
| 5,494 | 30.58046 | 61 | py |
learning-to-quantize | learning-to-quantize-master/estim/optim.py | import logging
import torch
import utils
from data import get_minvar_loader
from log_utils import LogCollector
from estim.gvar import MinVarianceGradient
class OptimizerFactory(object):
    """Builds and owns the torch optimizer plus the variance-tracking
    gradient wrapper (``MinVarianceGradient``); recreates the optimizer
    whenever the gradient estimator in use switches."""
    def __init__(self, model, train_loader, tb_logger, opt):
        self.model = model
        self.opt = opt
        self.niters = 0
        self.optimizer = None
        self.epoch = 0
        self.logger = LogCollector(opt)
        # param_groups are preserved across resets so tuned hyper-params
        # (e.g. an adjusted lr) survive optimizer re-creation
        self.param_groups = None
        self.gest_used = False
        minvar_loader = get_minvar_loader(train_loader, opt)
        self.gvar = MinVarianceGradient(
            model, minvar_loader, opt, tb_logger)
        self.reset()
    def reset(self):
        # (Re)create the optimizer from scratch for the configured type.
        model = self.model
        opt = self.opt
        if opt.optim == 'sgd':
            optimizer = torch.optim.SGD(model.parameters(),
                                        lr=opt.lr, momentum=opt.momentum,
                                        weight_decay=opt.weight_decay,
                                        nesterov=opt.nesterov)
        elif opt.optim == 'adam':
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=opt.lr,
                                         weight_decay=opt.weight_decay)
        self.optimizer = optimizer
        # first reset captures the groups; later resets restore them
        if self.param_groups is not None:
            self.optimizer.param_groups = self.param_groups
        else:
            self.param_groups = self.optimizer.param_groups
    def step(self, profiler):
        # One optimization step: optionally refresh quantization statistics
        # ("snaps"), compute the gradient, and apply the optimizer.
        gvar = self.gvar
        opt = self.opt
        model = self.model
        self.optimizer.zero_grad()
        # Frequent snaps: g_osnap_iter is "init1,init2,...,every"; snap at
        # the listed initial iterations and then at a fixed period.
        inits = list(map(int, opt.g_osnap_iter.split(',')[0:2]))
        every = int(opt.g_osnap_iter.split(',')[-1])
        if (((self.niters - opt.gvar_start) % every == 0 or self.niters in inits)
                and self.niters >= opt.gvar_start):
            print(self.niters)
            if opt.g_estim == 'nuq' and opt.nuq_method != 'none':
                # refresh the quantizer's gradient statistics
                stats = gvar.gest.snap_online_mean(model)
                if opt.nuq_parallel == 'ngpu':
                    for qdq in gvar.gest.qdq:
                        qdq.set_mean_variance(stats)
                else:
                    gvar.gest.qdq.set_mean_variance(stats)
                # adaptive methods also recompute their quantization levels
                if opt.nuq_method == 'amq' or opt.nuq_method == 'alq' or opt.nuq_method == 'alq_nb' or opt.nuq_method == 'amq_nb':
                    if opt.nuq_parallel == 'ngpu':
                        for qdq in gvar.gest.qdq:
                            qdq.update_levels()
                    else:
                        gvar.gest.qdq.update_levels()
        pg_used = gvar.gest_used
        loss = gvar.grad(self.niters)
        # estimator switched (sgd <-> gest): rebuild the optimizer state
        if gvar.gest_used != pg_used:
            logging.info('Optimizer reset.')
            self.gest_used = gvar.gest_used
            utils.adjust_lr(self, opt)
            self.reset()
        self.optimizer.step()
        profiler.toc('optim')
        profiler.end()
        return loss
| 2,964 | 33.476744 | 126 | py |
learning-to-quantize | learning-to-quantize-master/estim/sgd.py | import torch
import torch.nn
import torch.multiprocessing
from .gestim import GradientEstimator
class SGDEstimator(GradientEstimator):
    """Plain single-minibatch (unquantized) gradient estimator."""

    def __init__(self, *args, **kwargs):
        super(SGDEstimator, self).__init__(*args, **kwargs)
        self.init_data_iter()

    def grad(self, model, in_place=False):
        """Gradient of the loss on the next minibatch.

        With ``in_place=True`` the gradient is accumulated into ``p.grad``
        via ``backward()`` and the loss is returned; otherwise the gradient
        tuple from ``torch.autograd.grad`` is returned.
        """
        batch = next(self.data_iter)
        loss = model.criterion(model, batch)
        if not in_place:
            return torch.autograd.grad(loss, model.parameters())
        loss.backward()
        return loss
| 543 | 22.652174 | 59 | py |
learning-to-quantize | learning-to-quantize-master/estim/gvar.py | import torch
import torch.nn
import torch.multiprocessing
import numpy as np
from estim.sgd import SGDEstimator
from estim.nuq import NUQEstimator
#from estim.nuq import NUQEstimatorSingleGPUParallel
from estim.nuq import NUQEstimatorMultiGPUParallel
class MinVarianceGradient(object):
    """Owns the unbiased SGD estimator and the configured gradient
    estimator (possibly quantized) and logs statistics comparing them."""

    def __init__(self, model, data_loader, opt, tb_logger):
        self.model = model
        # an SGD estimator is always kept as the unbiased reference
        sgd = SGDEstimator(data_loader, opt, tb_logger)
        if opt.g_estim == 'sgd':
            gest = SGDEstimator(data_loader, opt, tb_logger)
        elif opt.g_estim == 'nuq':
            if opt.nuq_parallel == 'no':
                gest = NUQEstimator(data_loader, opt, tb_logger)
            # elif opt.nuq_parallel == 'gpu1':
            #     gest = NUQEstimatorSingleGPUParallel(
            #         data_loader, opt, tb_logger)
            else:
                gest = NUQEstimatorMultiGPUParallel(
                    data_loader, opt, tb_logger)
        self.sgd = sgd
        self.gest = gest
        self.opt = opt
        self.tb_logger = tb_logger
        self.gest_used = False
        self.Esgd = 0
        self.last_log_iter = 0

    def is_log_iter(self, niters):
        """True when variance logging is due at iteration ``niters``."""
        opt = self.opt
        if (niters-self.last_log_iter >= opt.gvar_log_iter
                and niters >= opt.gvar_start):
            self.last_log_iter = niters
            return True
        return False

    def create_histogram(self, norms, buckets):
        """Group ``norms`` (dict keyed by a scalar) into the half-open
        ``buckets`` ranges and return per-bucket variance (0 if empty)."""
        keys = norms.keys()
        bucket_norms = {}

        def find_bucket(x):
            # index i with buckets[i] <= x < buckets[i+1]; values past the
            # last edge land in the final bucket
            for i in range(len(buckets) - 1):
                if x >= buckets[i] and x < buckets[i + 1]:
                    return i
            return len(buckets) - 1

        for key in keys:
            bucket = find_bucket(key)
            if bucket not in bucket_norms.keys():
                bucket_norms[bucket] = []
            bucket_norms[bucket].append(norms[key])
        variance = []
        for i in range(len(buckets)):
            if i not in bucket_norms.keys():
                bucket_norms[i] = []
                variance.append(0)
            else:
                variance.append(torch.var(torch.stack(bucket_norms[i])))
        return variance

    def log_var(self, model, niters):
        """Log estimator-vs-SGD bias/variance statistics to tensorboard and
        return a one-line summary string."""
        tb_logger = self.tb_logger
        gviter = self.opt.gvar_estim_iter
        Ege, var_e, snr_e, nv_e = self.gest.get_Ege_var(model, gviter)
        Esgd, var_s, snr_s, nv_s = self.sgd.get_Ege_var(model, gviter)
        if self.opt.g_estim == 'sgd':
            parameters = torch.cat([layer.view(-1)
                                    for layer in self.sgd.grad(model)])
            tb_logger.log_histogram('sgd_dist', parameters, step=niters)
            norms = self.sgd.get_norm_distribution(
                model, gviter, self.opt.nuq_bucket_size)
            tb_logger.log_histogram(
                'norm_dist', list(norms.keys()), step=niters)
            variance = self.create_histogram(norms, [0, 0.01, 0.05, 0.1, 0.2])
            for index, var in enumerate(variance):
                tb_logger.log_value('var/' + str(index), var, step=niters)
        variances, means, total_mean, total_variance, total_variance_normalized, total_mean_normalized, total_mean_unconcatenated, total_variance_unconcatenated = self.sgd.get_gradient_distribution(
            model, gviter, self.opt.nuq_bucket_size)
        # mean absolute deviation between estimator and SGD expectations
        bias = torch.mean(torch.cat(
            [(ee-gg).abs().flatten() for ee, gg in zip(Ege, Esgd)]))
        if self.opt.g_estim == 'nuq':
            if self.opt.nuq_method != 'none':
                tb_logger.log_value('bits', float(
                    self.gest.qdq.bits), step=niters)
                tb_logger.log_value('levels', float(
                    len(self.gest.qdq.levels)), step=niters)
                for index, level in enumerate(self.gest.qdq.levels):
                    tb_logger.log_value(
                        'levels/' + str(index), float(level), step=niters)
                tb_logger.log_value('includes_zero', float(
                    1 if 0 in self.gest.qdq.levels else 0), step=niters)
                number_of_positive_levels = 0
                number_of_negative_levels = 0
                for level in self.gest.qdq.levels:
                    if level > 0:
                        number_of_positive_levels += 1
                    elif level < 0:
                        number_of_negative_levels += 1
                tb_logger.log_value('positive_levels', float(
                    number_of_positive_levels), step=niters)
                tb_logger.log_value('negative_levels', float(
                    number_of_negative_levels), step=niters)
                if self.gest.qdq.error is not None:
                    tb_logger.log_value(
                        'nb_error', self.gest.qdq.error, step=niters)
                if self.gest.qdq.grad_dist_nl is not None:
                    tb_logger.log_value(
                        'stats/mean', self.gest.qdq.grad_dist_nl.mean, step=niters)
                    tb_logger.log_value(
                        'stats/sigma', self.gest.qdq.grad_dist_nl.sigma, step=niters)
            if self.opt.nuq_method == 'amq' or self.opt.nuq_method == 'amq_nb':
                tb_logger.log_value('multiplier', float(
                    self.gest.qdq.multiplier), step=niters)
        print('est_var is', var_e)
        tb_logger.log_value('grad_bias', float(bias), step=niters)
        tb_logger.log_value('est_var', float(var_e), step=niters)
        tb_logger.log_value('sgd_var', float(var_s), step=niters)
        tb_logger.log_value('est_snr', float(snr_e), step=niters)
        tb_logger.log_value('sgd_snr', float(snr_s), step=niters)
        tb_logger.log_value('est_nvar', float(nv_e), step=niters)
        tb_logger.log_value('sgd_nvar', float(nv_s), step=niters)
        tb_logger.log_value('tot_var_norm', float(
            total_variance_normalized), step=niters)
        tb_logger.log_value('tot_var', float(total_variance), step=niters)
        tb_logger.log_value('tot_mean_norm', float(
            total_mean_normalized), step=niters)
        tb_logger.log_value('tot_mean', float(total_mean), step=niters)
        tb_logger.log_value('tot_var_norm_layer', float(
            total_variance_unconcatenated), step=niters)
        # BUG FIX: the original logged ``float()`` (always 0.0) here,
        # dropping the per-layer normalized mean entirely.
        tb_logger.log_value('tot_mean_norm_layer', float(
            total_mean_unconcatenated), step=niters)
        sgd_x, est_x = ('', '[X]') if self.gest_used else ('[X]', '')
        # BUG FIX: the original formatted the literal ``43`` where the
        # computed gradient bias belongs.
        return ('G Bias: %.8f\t'
                '%sSGD Var: %.8f\t %sEst Var: %.8f\t'
                'SGD N-Var: %.8f\t Est N-Var: %.8f\t'
                % (float(bias), sgd_x, var_s, est_x, var_e, nv_s, nv_e))

    def grad(self, niters):
        """Training gradient for this iteration: plain SGD before
        ``g_optim_start`` (or when g_optim is off), the estimator after."""
        model = self.model
        model.train()
        use_sgd = self.use_sgd(niters)
        if use_sgd:
            self.gest_used = False
            return self.sgd.grad(model, in_place=True)
        self.gest_used = True
        return self.gest.grad(model, in_place=True)

    def use_sgd(self, niters):
        return not self.opt.g_optim or niters < self.opt.g_optim_start

    def state_dict(self):
        return self.gest.state_dict()

    def load_state_dict(self, state):
        self.gest.load_state_dict(state)
| 7,202 | 41.875 | 198 | py |
learning-to-quantize | learning-to-quantize-master/estim/nuq.py | import torch
import torch.nn
import torch.multiprocessing
import numpy as np
import copy
import math
from args import opt_to_nuq_kwargs
from .gestim import GradientEstimator
from nuq.quantize import QuantizeMultiBucket
class NUQEstimator(GradientEstimator):
    """Gradient estimator that quantizes minibatch gradients with NUQ and
    averages ``nuq_ngpu`` independently quantized minibatches."""

    def __init__(self, *args, **kwargs):
        super(NUQEstimator, self).__init__(*args, **kwargs)
        self.init_data_iter()
        self.qdq = QuantizeMultiBucket(**opt_to_nuq_kwargs(self.opt))
        self.ngpu = self.opt.nuq_ngpu
        self.acc_grad = None

    def state_dict(self):
        return {'qdq': self.qdq.state_dict()}

    def load_state_dict(self, state):
        print(state)
        self.qdq.load_state_dict(state['qdq'])

    def get_norm_distribution(self, model, gviter, bucket_size=1024):
        """Collect raw bucket contents of ``gviter`` flattened minibatch
        gradients, keyed by bucket index."""
        norms = {}
        for _ in range(gviter):
            flat = self._flatten(self.grad_estim(model))
            n_buckets = int(np.ceil(len(flat) / bucket_size))
            for b in range(n_buckets):
                lo = b * bucket_size
                hi = min(lo + bucket_size, len(flat))
                norms.setdefault(b, []).append(flat[lo:hi].clone())
        return norms

    def grad(self, model_new, in_place=False):
        """Average of ``ngpu`` quantized minibatch gradients.

        With ``in_place=True`` the result is written into ``p.grad`` and
        the last minibatch loss is returned; otherwise the accumulated
        gradient list is returned.
        """
        model = model_new
        ig_sm_bkts = self.opt.nuq_ig_sm_bkts
        # (re)initialize the accumulator buffers
        if self.acc_grad is None:
            with torch.no_grad():
                self.acc_grad = [torch.zeros_like(p)
                                 for p in model.parameters()]
        else:
            for a in self.acc_grad:
                a.zero_()
        for _ in range(self.ngpu):
            model.zero_grad()
            loss = model.criterion(model, next(self.data_iter))
            grad = torch.autograd.grad(loss, model.parameters())
            per_layer = not self.opt.nuq_layer
            with torch.no_grad():
                if per_layer:
                    # quantize each layer's gradient independently
                    for g, a in zip(grad, self.acc_grad):
                        a += self.qdq.quantize(g, ig_sm_bkts) / self.ngpu
                else:
                    # quantize the whole flattened gradient at once
                    flat_q = self.qdq.quantize(self._flatten(grad),
                                               ig_sm_bkts)
                    for g, a in zip(self.unflatten(flat_q, grad),
                                    self.acc_grad):
                        a += g / self.ngpu
        if not in_place:
            return self.acc_grad
        for p, a in zip(model.parameters(), self.acc_grad):
            if p.grad is None:
                p.grad = a.clone()
            else:
                p.grad.copy_(a)
        return loss
class NUQEstimatorMultiGPUParallel(GradientEstimator):
    """NUQ estimator that evaluates ``nuq_ngpu`` minibatches on separate
    GPUs, quantizes each gradient with a per-device quantizer, and
    aggregates the averaged result on GPU 0."""

    def __init__(self, *args, **kwargs):
        super(NUQEstimatorMultiGPUParallel, self).__init__(*args, **kwargs)
        self.init_data_iter()
        nuq_kwargs = opt_to_nuq_kwargs(self.opt)
        self.ngpu = self.opt.nuq_ngpu
        self.acc_grad = None
        self.models = None
        self.qdq = []
        # one quantizer per device so its state lives on the right GPU
        for i in range(self.ngpu):
            with torch.cuda.device(i):
                self.qdq += [QuantizeMultiBucket(**nuq_kwargs)]

    def grad(self, model_new, in_place=False):
        """Compute the averaged quantized gradient.

        On the first call the model is replicated to every GPU; on later
        calls replica weights are re-synced from GPU 0 first.  With
        ``in_place=True`` the aggregated gradient stays in ``p.grad`` of
        the GPU-0 model and the last replica's loss is returned.
        """
        if self.models is None:
            self.models = [model_new]
            for i in range(1, self.ngpu):
                with torch.cuda.device(i):
                    self.models += [copy.deepcopy(model_new)]
                    self.models[-1] = self.models[-1].cuda()
        else:
            # sync replica weights with GPU 0
            for i in range(1, self.ngpu):
                for p0, pi in zip(self.models[0].parameters(),
                                  self.models[i].parameters()):
                    with torch.no_grad():
                        pi.copy_(p0)
        models = self.models
        # forward-backward prop, one minibatch per replica
        loss = []
        for i in range(self.ngpu):
            models[i].zero_grad()  # criterion does it
            data = next(self.data_iter)
            with torch.cuda.device(i):
                loss += [models[i].criterion(models[i], data)]
                loss[i].backward()
        loss = loss[-1]
        layers = len(list(models[0].parameters()))
        # quantize all grads (in place, pre-divided by ngpu for averaging)
        for i in range(self.ngpu):
            with torch.no_grad():
                with torch.cuda.device(i):
                    torch.cuda.synchronize()
                    if self.opt.nuq_layer == 1:
                        # BUG FIX: the original flattened and quantized the
                        # model *parameters* here and copied the result into
                        # ``p.grad``; quantize the gradients instead (cf.
                        # NUQEstimator.grad).
                        grads = [p.grad for p in models[i].parameters()]
                        flattened_array = self._flatten(grads)
                        gradient_quantized = self.qdq[i].quantize(
                            flattened_array, layers) / self.ngpu
                        unflattened_array = self.unflatten(
                            gradient_quantized, grads)
                        for p, q in zip(models[i].parameters(),
                                        unflattened_array):
                            p.grad.copy_(q)
                    else:
                        for p in models[i].parameters():
                            p.grad.copy_(self.qdq[i].quantize(
                                p.grad, layers) / self.ngpu)
        # aggregate grads into gpu0
        for i in range(1, self.ngpu):
            for p0, pi in zip(models[0].parameters(), models[i].parameters()):
                p0.grad.add_(pi.grad.to('cuda:0'))
        if in_place:
            return loss
        acc_grad = []
        with torch.no_grad():
            for p in models[0].parameters():
                acc_grad += [p.grad.clone()]
        return acc_grad
| 5,978 | 35.018072 | 78 | py |
learning-to-quantize | learning-to-quantize-master/estim/gestim.py | import torch
import torch.nn
import torch.multiprocessing
import numpy as np
import math
import random
import copy
import logging
from data import InfiniteLoader
class GradientEstimator(object):
def __init__(self, data_loader, opt, tb_logger=None, *args, **kwargs):
self.opt = opt
self.model = None
self.data_loader = data_loader
self.tb_logger = tb_logger
self.niters = 0
self.random_indices = None
def update_niters(self, niters):
self.niters = niters
    def init_data_iter(self):
        # Wrap the loader in InfiniteLoader so next() never raises
        # StopIteration: one stream for training, a separate one for
        # estimation (grad_estim temporarily swaps the two).
        self.data_iter = iter(InfiniteLoader(self.data_loader))
        self.estim_iter = iter(InfiniteLoader(self.data_loader))
    def snap_batch(self, model):
        # No-op hook; subclasses may snapshot full-batch statistics here.
        pass
    def update_sampler(self):
        # No-op hook; subclasses may refresh an importance sampler here.
        pass
def _calc_stats_buckets(self, buckets):
stats = {
'sigma': [],
'mean': []
}
i = 0
for bucket in buckets:
current_bk = torch.stack(buckets[bucket])
stats['mean'].append(torch.mean(current_bk).cpu().item())
stats['sigma'].append(torch.sqrt(torch.mean(
torch.var(current_bk, dim=0, unbiased=False))).cpu().item())
i += 1
return stats
    def _get_raw_grad(self, model):
        # Compute one minibatch gradient using the estimation stream,
        # temporarily swapping it in as data_iter so the training stream's
        # position is left untouched.
        dt = self.data_iter
        self.data_iter = self.estim_iter
        model.zero_grad()
        data = next(self.data_iter)
        loss = model.criterion(model, data)
        grad = torch.autograd.grad(loss, model.parameters())
        self.data_iter = dt
        return grad
def _get_grad_samples(self, model, num_of_samples):
grads = []
for i in range(num_of_samples):
grad = self._get_raw_grad(model)
copy_array = []
for layer in grad:
copy_array.append(layer.clone())
grads.append(copy_array)
return grads
def _get_stats_lb(self, grads):
# get stats layer based
bs = self.opt.nuq_bucket_size
nuq_layer = self.opt.nuq_layer
sep_bias_grad = self.opt.sep_bias_grad
# total number of weights
nw = sum([w.numel() for w in grads[0]])
# total sum of gradients
tsum = torch.zeros(nw).cuda()
buckets = None
total_norm = None
for i, grad in enumerate(grads):
fl_norm_lb = self._flatt_and_normalize_lb(grad, bs, nocat=True)
if buckets is None:
buckets = [[] for j in range(len(fl_norm_lb))]
total_norm = [0.0 for j in range(len(fl_norm_lb))]
fl_norm = self._flatten_lb(grad, nocat=True)
tsum += self._flatten_lb(fl_norm_lb, nocat=False)
for j in range(len(fl_norm_lb)):
buckets[j].append(fl_norm_lb[j])
total_norm[j] += fl_norm[j].norm()
stats = self._calc_stats_buckets(buckets)
stats['norm'] = torch.tensor(total_norm)
return stats
    def _get_stats_lb_sep(self, grads):
        """Per-bucket statistics with biases and weights kept separate.

        Buckets are indexed globally (flattened over layers); each entry
        collects the bucket-normalized values across all sampled
        gradients, and norms are averaged over samples.
        """
        bs = self.opt.nuq_bucket_size
        nuq_layer = self.opt.nuq_layer
        sep_bias_grad = self.opt.sep_bias_grad
        buckets_bias = {}
        total_norm_bias = {}
        buckets_weights = {}
        total_norm_weights = {}
        samples = len(grads)
        fl_norm_bias, fl_norm_weights = self._flatten_sep(grads[0])
        fl_norm_lb_bias, fl_norm_lb_weights = \
            self._flatt_and_normalize_lb_sep(grads[0], bs, nocat=True)
        # initialize one entry per (layer, bucket) pair from the first sample
        j = 0
        for layer in fl_norm_lb_bias:
            for bias in layer:
                buckets_bias[j] = []
                total_norm_bias[j] = 0.0
                j += 1
        j = 0
        for layer in fl_norm_lb_weights:
            for weights in layer:
                buckets_weights[j] = []
                total_norm_weights[j] = 0.0
                j += 1
        for i, grad in enumerate(grads):
            fl_norm_lb_bias, fl_norm_lb_weights = \
                self._flatt_and_normalize_lb_sep(grad, bs, nocat=True)
            fl_norm_bias, fl_norm_weights = self._flatten_lb_sep(grad, bs)
            # collect normalized bucket contents
            j = 0
            for layer in fl_norm_lb_bias:
                for bias in layer:
                    buckets_bias[j].append(bias)
                    j += 1
            j = 0
            for layer in fl_norm_lb_weights:
                for weight in layer:
                    buckets_weights[j].append(weight)
                    j += 1
            # accumulate per-bucket norms, averaged over samples
            j = 0
            for layer in fl_norm_bias:
                for bias in layer:
                    total_norm_bias[j] += bias.norm() / samples
                    j += 1
            j = 0
            for layer in fl_norm_weights:
                for weight in layer:
                    total_norm_weights[j] += weight.norm() / samples
                    j += 1
        stats_bias = self._calc_stats_buckets(buckets_bias)
        stats_bias['norm'] = torch.tensor(list(total_norm_bias.values()))
        stats_bias['norm'] = stats_bias['norm'].cpu().tolist()
        stats_weights = self._calc_stats_buckets(buckets_weights)
        stats_weights['norm'] = torch.tensor(list(total_norm_weights.values()))
        stats_weights['norm'] = stats_weights['norm'].cpu().tolist()
        stats = {
            'bias': stats_bias,
            'weights': stats_weights
        }
        return stats
    def _bucketize(self, grad, bs, stats_nb):
        """Split flat ``grad`` into ``bs``-sized buckets, scale each by
        its norm, and append per-bucket norm/sigma/mean to ``stats_nb``.

        Returns ``(sum, summed squared deviations, element count)`` over
        the processed buckets for norm-less aggregate statistics.
        """
        ig_sm_bkts = self.opt.nuq_ig_sm_bkts
        variance = 0
        num_params = 0
        tot_sum = 0
        num_buckets = int(np.ceil(len(grad) / bs))
        for bucket in range(num_buckets):
            start = bucket * bs
            end = min((bucket + 1) * bs, len(grad))
            current_bk = grad[start:end]
            norm = current_bk.norm()
            current_bk = current_bk / norm
            b_len = len(current_bk)
            # optionally skip the trailing, smaller-than-bs bucket
            if b_len != bs and ig_sm_bkts:
                continue
            num_params += b_len
            var = torch.var(current_bk)
            # update norm-less variance: unbiased var * (n-1) = sum sq dev
            variance += var * (b_len - 1)
            tot_sum += torch.sum(current_bk)
            stats_nb['norms'].append(norm)
            stats_nb['sigmas'].append(torch.sqrt(var))
            stats_nb['means'].append(torch.mean(current_bk))
        return tot_sum, variance, num_params
    def _get_stats_sep(self, grads):
        # Placeholder: stats for weights and bias separately (unimplemented).
        pass
def _get_stats_nl_lb(self, grads):
# get stats normless
bs = self.opt.nuq_bucket_size
nuq_layer = self.opt.nuq_layer
samples = len(grads)
tsum = 0.0
tot_var = 0.0
num_params = len(self._flatt_and_normalize_lb(grads[0], bs))
for grad in grads:
params = self._flatt_and_normalize_lb(grad, bs)
tsum += self._flatten([torch.cat(layer)
for layer in params])
mean = tsum / samples
for grad in grads:
params = self._flatt_and_normalize_lb_sep(grad, bs)
tot_var += torch.sum((mean - self._flatten(
[torch.cat(layer) for layer in params])) ** 2)
tot_mean = tsum / num_params
tot_var /= (num_params * samples)
return {
'mean': tot_mean,
'var': tot_var
}
    def _get_stats_nl_lb_sep(self, grads):
        """Norm-less mean/sigma over samples, bias and weights separated.

        Each gradient is bucket-normalized per layer; mean/variance are
        computed element-wise across samples, then reduced to scalars.
        """
        bs = self.opt.nuq_bucket_size
        nuq_layer = self.opt.nuq_layer
        sep_bias_grad = self.opt.sep_bias_grad
        samples = len(grads)
        tsum_bias = 0.0
        tot_var_bias = 0.0
        tot_var_weights = 0.0
        tsum_weights = 0.0
        # element counts from the first sample (all samples share shapes)
        bias, weights = self._flatt_and_normalize_lb_sep(grads[0], bs)
        num_bias = len(torch.cat(bias))
        num_weights = len(torch.cat(weights))
        # first pass: element-wise sum over samples
        for grad in grads:
            bias, weights = self._flatt_and_normalize_lb_sep(grad, bs)
            tsum_bias += torch.cat(bias)
            tsum_weights += torch.cat(weights)
        mean_bias = tsum_bias / samples
        mean_weights = tsum_weights / samples
        # second pass: squared deviations from the element-wise mean
        for grad in grads:
            bias, weights = self._flatt_and_normalize_lb_sep(grad, bs)
            tot_var_bias += torch.sum((mean_bias - torch.cat(bias)) ** 2)
            tot_var_weights += torch.sum((mean_weights -
                                          torch.cat(weights)) ** 2)
        tot_mean_bias = torch.sum(mean_bias) / num_bias
        tot_mean_weights = torch.sum(mean_weights) / num_weights
        tot_var_weights /= (num_weights * samples)
        tot_var_bias /= (num_bias * samples)
        stats = {
            'bias': {
                'sigma': torch.sqrt(tot_var_bias).cpu().item(),
                'mean': tot_mean_bias.cpu().item()
            },
            'weights': {
                'sigma': torch.sqrt(tot_var_weights).cpu().item(),
                'mean': tot_mean_weights.cpu().item()
            }
        }
        return stats
    def _get_stats(self, grads):
        # Placeholder: combined stats variant (unimplemented).
        pass
    def snap_online(self, model):
        """Sample gradients and return bucket ('nb') and norm-less ('nl')
        statistics, both with bias/weights separated."""
        num_of_samples = self.opt.nuq_number_of_samples
        grads = self._get_grad_samples(model, num_of_samples)
        # NOTE(review): lb/sep are computed but currently unused — only the
        # separated variants are produced below (see TODO).
        lb = not self.opt.nuq_layer
        sep = True if self.opt.sep_bias_grad == 1 else False
        # TODO implement variations of lb and sep
        stats = {
            'nb': self._get_stats_lb_sep(grads),
            'nl': self._get_stats_nl_lb_sep(grads)
        }
        return stats
def snap_online_mean(self, model):
stats_nb = {
'means': [],
'sigmas': [],
'norms': []
}
total_variance = 0.0
tot_sum = 0.0
num_of_samples = self.opt.nuq_number_of_samples
total_params = 0
bs = self.opt.nuq_bucket_size
lb = not self.opt.nuq_layer
ig_sm_bkts = self.opt.ig_sm_bkts
params = list(model.parameters())
for i in range(num_of_samples):
grad = self._get_raw_grad(model)
if lb:
flattened = self._flatten_lb(grad)
for i, layer in enumerate(flattened):
b_sum, b_var, b_params = self._bucketize(
layer, bs, stats_nb)
tot_sum += b_sum
total_variance += b_var
total_params += b_params
else:
flattened = self._flatten(grad)
b_sum, b_var, b_params = self._bucketize(
flattened, bs, stats_nb)
tot_sum += b_sum
total_variance += b_var
total_params += b_params
nw = sum([w.numel() for w in model.parameters()])
stats_nb['means'] = torch.stack(stats_nb['means']).cpu().tolist()
stats_nb['sigmas'] = torch.stack(stats_nb['sigmas']).cpu().tolist()
stats_nb['norms'] = torch.stack(stats_nb['norms']).cpu().tolist()
if len(stats_nb['means']) > self.opt.dist_num:
indexes = np.argsort(-np.asarray(stats_nb['norms']))[
:self.opt.dist_num]
stats_nb['means'] = np.array(stats_nb['means'])[indexes].tolist()
stats_nb['sigmas'] = np.array(stats_nb['sigmas'])[
indexes].tolist()
stats_nb['norms'] = np.array(stats_nb['norms'])[indexes].tolist()
stats = {
'nb': stats_nb,
'nl': {
'mean': (tot_sum / total_params).cpu().item(),
'sigma':
torch.sqrt(total_variance / total_params).cpu().item(),
}
}
return stats
    def grad(self, model_new, in_place=False, data=None):
        # Abstract: subclasses return the (estimated) gradient of the loss
        # of model_new; with in_place=True they write into p.grad and
        # return the loss instead.
        raise NotImplementedError('grad not implemented')
def _normalize(self, layer, bucket_size, nocat=False):
normalized = []
num_bucket = int(np.ceil(len(layer) / bucket_size))
for bucket_i in range(num_bucket):
start = bucket_i * bucket_size
end = min((bucket_i + 1) * bucket_size, len(layer))
x_bucket = layer[start:end].clone()
norm = x_bucket.norm()
normalized.append(x_bucket / (norm + 1e-7))
if not nocat:
return torch.cat(normalized)
else:
return normalized
    def grad_estim(self, model):
        """Compute a gradient on the estimation stream without advancing
        the training stream."""
        # ensuring continuity of data seen in training
        # TODO: make sure sub-classes never use any other data_iter, e.g. raw
        dt = self.data_iter
        self.data_iter = self.estim_iter
        ret = self.grad(model)
        self.data_iter = dt
        return ret
    def get_Ege_var(self, model, gviter):
        """Estimate gradient mean/variance from ``gviter`` fresh samples.

        Returns ``(Ege, var_e, snr_e, nv_e)``: the per-parameter mean
        gradient, the average per-weight variance, a log-SNR proxy, and a
        normalized-variance proxy.
        """
        # estimate grad mean and variance
        Ege = [torch.zeros_like(g) for g in model.parameters()]
        for i in range(gviter):
            ge = self.grad_estim(model)
            for e, g in zip(Ege, ge):
                e += g
        for e in Ege:
            e /= gviter
        nw = sum([w.numel() for w in model.parameters()])
        var_e = 0
        Es = [torch.zeros_like(g) for g in model.parameters()]
        En = [torch.zeros_like(g) for g in model.parameters()]
        # second pass with fresh samples: deviations around the mean
        for i in range(gviter):
            ge = self.grad_estim(model)
            v = sum([(gg-ee).pow(2).sum() for ee, gg in zip(Ege, ge)])
            for s, e, g, n in zip(Es, Ege, ge, En):
                s += g.pow(2)
                n += (e-g).pow(2)
            var_e += v/nw
        var_e /= gviter
        # Division by gviter cancels out in ss/nn
        snr_e = sum(
            [((ss+1e-10).log()-(nn+1e-10).log()).sum()
             for ss, nn in zip(Es, En)])/nw
        nv_e = sum([(nn/(ss+1e-7)).sum() for ss, nn in zip(Es, En)])/nw
        return Ege, var_e, snr_e, nv_e
def _flatten_lb_sep(self, gradient, bs=None):
# flatten layer based and handle weights and bias separately
flatt_params = [], []
for layer in gradient:
if len(layer.size()) == 1:
if bs is None:
flatt_params[0].append(
torch.flatten(layer))
else:
buckets = []
flatt = torch.flatten(layer)
num_bucket = int(np.ceil(len(flatt) / bs))
for bucket_i in range(num_bucket):
start = bucket_i * bs
end = min((bucket_i + 1) * bs, len(flatt))
x_bucket = flatt[start:end].clone()
buckets.append(x_bucket)
flatt_params[0].append(
buckets)
else:
if bs is None:
flatt_params[1].append(
torch.flatten(layer))
else:
buckets = []
flatt = torch.flatten(layer)
num_bucket = int(np.ceil(len(flatt) / bs))
for bucket_i in range(num_bucket):
start = bucket_i * bs
end = min((bucket_i + 1) * bs, len(flatt))
x_bucket = flatt[start:end].clone()
buckets.append(x_bucket)
flatt_params[1].append(
buckets)
return flatt_params
def _flatten_lb(self, gradient):
# flatten layer based
flatt_params = []
for layer_parameters in gradient:
flatt_params.append(torch.flatten(layer_parameters))
return flatt_params
def _flatten_sep(self, gradient, bs=None):
# flatten weights and bias separately
flatt_params = [], []
for layer_parameters in gradient:
if len(layer_parameters.size()) == 1:
flatt_params[0].append(
torch.flatten(layer_parameters))
else:
flatt_params[1].append(torch.flatten(layer_parameters))
return torch.cat(flatt_params[0]), torch.cat(flatt_params[1])
def _flatten(self, gradient):
flatt_params = []
for layer_parameters in gradient:
flatt_params.append(torch.flatten(layer_parameters))
return torch.cat(flatt_params)
def unflatten(self, gradient, parameters, tensor=False):
shaped_gradient = []
begin = 0
for layer in parameters:
size = layer.view(-1).shape[0]
shaped_gradient.append(
gradient[begin:begin+size].view(layer.shape))
begin += size
if tensor:
return torch.stack(shaped_gradient)
else:
return shaped_gradient
def _flatt_and_normalize_lb_sep(self, gradient, bucket_size=1024,
nocat=False):
# flatten and normalize weight and bias separately
bs = bucket_size
# totally flat and layer-based layers
flatt_params_lb = self._flatten_lb_sep(gradient)
normalized_buckets_lb = [], []
for bias in flatt_params_lb[0]:
normalized_buckets_lb[0].append(
self._normalize(bias, bucket_size, nocat))
for weight in flatt_params_lb[1]:
normalized_buckets_lb[1].append(
self._normalize(weight, bucket_size, nocat))
return normalized_buckets_lb
def _flatt_and_normalize_lb(self, gradient, bucket_size=1024, nocat=False):
flatt_params_lb = self._flatten_lb(gradient)
normalized_buckets_lb = []
for layer in flatt_params_lb:
normalized_buckets_lb.append(
self._normalize(layer, bucket_size, nocat))
return normalized_buckets_lb
def _flatt_and_normalize(self, gradient, bucket_size=1024, nocat=False):
flatt_params = self._flatten(gradient)
return self._normalize(flatt_params, bucket_size, nocat)
def _flatt_and_normalize_sep(self, gradient,
bucket_size=1024, nocat=False):
flatt_params = self._flatten_sep(gradient)
return [self._normalize(flatt_params[0], bucket_size, nocat),
self._normalize(flatt_params[1], bucket_size, nocat)]
def get_gradient_distribution(self, model, gviter, bucket_size):
    """Estimate mean and variance of minibatch gradients.

    gviter: Number of minibatches to apply on the model
    model: Model to be evaluated
    bucket_size: ignored — overwritten from ``self.opt.nuq_bucket_size``
        below.

    Returns a tuple:
    (variances, means, total_mean, total_variance,
     total_variance_normalized, total_mean_normalized,
     total_mean_unconcatenated, total_variance_unconcatenated)
    where ``variances``/``means`` are currently always empty lists (the
    per-weight sampling code is commented out).
    """
    bucket_size = self.opt.nuq_bucket_size
    # These two calls are only used to obtain correctly-shaped tensors;
    # they are zeroed out immediately below before accumulation.
    mean_estimates_normalized = self._flatt_and_normalize(
        model.parameters(), bucket_size)
    mean_estimates_unconcatenated = self._flatt_and_normalize_lb(
        model.parameters(), bucket_size)
    # estimate grad mean and variance
    mean_estimates = [torch.zeros_like(g) for g in model.parameters()]
    mean_estimates_unconcatenated = [torch.zeros_like(
        g) for g in mean_estimates_unconcatenated]
    mean_estimates_normalized = torch.zeros_like(mean_estimates_normalized)
    # First pass: accumulate gradients over gviter minibatches.
    for i in range(gviter):
        minibatch_gradient = self.grad_estim(model)
        minibatch_gradient_normalized = self._flatt_and_normalize(
            minibatch_gradient, bucket_size)
        minibatch_gradient_unconcatenated = self._flatt_and_normalize_lb(
            minibatch_gradient, bucket_size)
        for e, g in zip(mean_estimates, minibatch_gradient):
            e += g
        for e, g in zip(mean_estimates_unconcatenated, minibatch_gradient_unconcatenated):
            e += g
        mean_estimates_normalized += minibatch_gradient_normalized
    # Calculate the mean
    for e in mean_estimates:
        e /= gviter
    for e in mean_estimates_unconcatenated:
        e /= gviter
    mean_estimates_normalized /= gviter
    # Number of Weights
    number_of_weights = sum([layer.numel()
                             for layer in model.parameters()])
    variance_estimates = [torch.zeros_like(g) for g in model.parameters()]
    variance_estimates_unconcatenated = [
        torch.zeros_like(g) for g in mean_estimates_unconcatenated]
    variance_estimates_normalized = torch.zeros_like(
        mean_estimates_normalized)
    # Second pass: fresh minibatches, accumulate squared deviations from
    # the means computed above.
    for i in range(gviter):
        minibatch_gradient = self.grad_estim(model)
        minibatch_gradient_normalized = self._flatt_and_normalize(
            minibatch_gradient, bucket_size)
        minibatch_gradient_unconcatenated = self._flatt_and_normalize_lb(
            minibatch_gradient, bucket_size)
        v = [(gg - ee).pow(2)
             for ee, gg in zip(mean_estimates, minibatch_gradient)]
        v_normalized = (mean_estimates_normalized -
                        minibatch_gradient_normalized).pow(2)
        v_normalized_unconcatenated = [(gg - ee).pow(2) for ee, gg in zip(
            mean_estimates_unconcatenated, minibatch_gradient_unconcatenated)]
        for e, g in zip(variance_estimates, v):
            e += g
        for e, g in zip(variance_estimates_unconcatenated, v_normalized_unconcatenated):
            e += g
        variance_estimates_normalized += v_normalized
    variance_estimates_normalized = variance_estimates_normalized / gviter
    for e in variance_estimates_unconcatenated:
        e /= gviter
    variances = []
    means = []
    # random_indices = self.get_random_index(model, 4)
    # for index in random_indices:
    #     variance_estimate_layer = variance_estimates[index[0]]
    #     mean_estimate_layer = mean_estimates[index[0]]
    #     for weight in index[1:]:
    #         variance_estimate_layer = variance_estimate_layer[weight]
    #         variance_estimate_layer.squeeze_()
    #         mean_estimate_layer = mean_estimate_layer[weight]
    #         mean_estimate_layer.squeeze_()
    #     variance = variance_estimate_layer / (gviter)
    #     variances.append(variance)
    #     means.append(mean_estimate_layer)
    # Scalar summaries, averaged over the total number of weights.
    total_mean = torch.tensor(0, dtype=float)
    for mean_estimate in mean_estimates:
        total_mean += torch.sum(mean_estimate)
    total_variance = torch.tensor(0, dtype=float)
    for variance_estimate in variance_estimates:
        total_variance += torch.sum(variance_estimate)
    total_variance = total_variance / number_of_weights
    total_mean = total_mean / number_of_weights
    total_variance_normalized = torch.tensor(0, dtype=float)
    total_variance_normalized = torch.sum(
        variance_estimates_normalized) / number_of_weights
    total_mean_normalized = torch.tensor(0, dtype=float)
    total_mean_normalized = torch.sum(
        mean_estimates_normalized) / number_of_weights
    # Per-layer summaries averaged over layers (note: divided by
    # len(mean_estimates), the number of parameter tensors).
    total_mean_unconcatenated = sum([torch.sum(
        mean) / mean.numel() for mean in mean_estimates_unconcatenated]) / len(mean_estimates)
    total_variance_unconcatenated = sum([torch.sum(variance) / variance.numel(
    ) for variance in variance_estimates_unconcatenated]) / len(mean_estimates)
    return variances, means, total_mean, total_variance, total_variance_normalized, total_mean_normalized, total_mean_unconcatenated, total_variance_unconcatenated
def get_norm_distribution(self, model, gviter, bucket_size=1024):
    """Collect per-bucket gradient vectors keyed by their L2 norm.

    Over ``gviter`` minibatches, the flattened gradient is split into
    buckets of ``bucket_size`` elements; each bucket is stored in a dict
    under its norm.
    """
    norms = {}
    for i in range(gviter):
        minibatch_gradient = self.grad_estim(model)
        flattened_parameters = self._flatten(
            minibatch_gradient)
        num_bucket = int(np.ceil(len(flattened_parameters) / bucket_size))
        for bucket_i in range(num_bucket):
            start = bucket_i * bucket_size
            end = min((bucket_i + 1) * bucket_size,
                      len(flattened_parameters))
            # NOTE(review): this skips the final bucket whenever it ends at
            # the last element — even when it is a full bucket. Presumably
            # intended to ignore the (possibly partial) tail; confirm.
            if (end == len(flattened_parameters)):
                continue
            x_bucket = flattened_parameters[start:end].clone()
            norm = x_bucket.norm()
            # norms are used as dict keys; a collision would overwrite an
            # earlier bucket, hence the warning below.
            if norm.cpu() in norms.keys():
                print('An error occured')
            norms[norm.cpu()] = x_bucket
    return norms
def state_dict(self):
    """Return serializable state; this object keeps none."""
    return {}
def load_state_dict(self, state):
    """No-op: there is no state to restore (see ``state_dict``)."""
    pass
def snap_model(self, model):
    """Snapshot ``model``'s parameters into ``self.model``.

    The first call deep-copies the whole model; later calls only copy
    parameter data into the existing snapshot.
    """
    logging.info('Snap Model')
    if self.model is None:
        self.model = copy.deepcopy(model)
        return
    # update sum
    for src, dst in zip(model.parameters(), self.model.parameters()):
        dst.data.copy_(src.data)
| 24,608 | 34.105563 | 167 | py |
learning-to-quantize | learning-to-quantize-master/nuq/quantize.py | import numpy as np
import torch
from cuquant import QDQ
import math
from estim.dist import TruncNorm, CondNormalTrunc, CondNormalTruncHist
import time
from scipy.stats import truncnorm, norm
import scipy.integrate as integrate
EPS = 1e-7
def get_quantile_levels(bits, grad_dist):
    """quantile levels

    Place 2**bits levels at equally spaced quantiles of ``grad_dist``,
    pinning the outermost levels to the distribution's support.
    """
    num_levels = 2 << bits - 1
    probs = np.linspace(0, 1, num=num_levels)
    levels = [grad_dist.ppf(p) for p in probs]
    levels[0] = grad_dist.begin
    levels[-1] = grad_dist.end
    return levels
def get_ternary_levels():
    """Quantization levels for ternary gradients: {-1, 0, +1}."""
    return np.asarray([-1, 0, 1])
def get_uniform_levels(bits):
    """uniform (QSGD)

    2 << (bits - 1) == 2**bits evenly spaced levels in [-1, 1].
    NOTE(review): an identical definition appears immediately below;
    the later one wins at import time.
    """
    num_levels = 2 << bits - 1
    levels_uni = np.linspace(-1, 1, num=num_levels)
    return levels_uni
def get_uniform_levels(bits):
    """uniform (QSGD)

    NOTE(review): duplicate of the definition directly above; this copy
    shadows it. One of the two can be removed.
    """
    num_levels = 2 << bits - 1
    levels_uni = np.linspace(-1, 1, num=num_levels)
    return levels_uni
def get_exp_levels(bits, multiplier=0.5):
    """exponential (NUQSGD)

    multiplier: is used to modify levels_exp based on the number of bits

    Builds a symmetric geometric grid: negative levels -multiplier**j in
    increasing j, then the mirrored positive levels.
    """
    half = (2 << bits - 1) >> 1
    negatives = [-multiplier ** j for j in range(half)]
    positives = [multiplier ** j for j in reversed(range(half))]
    return np.asarray(negatives + positives)
def finite_diff_gradient_descent(f, begin, end, x0=None, niters=10, lr=1):
    """Minimize ``f`` via gradient descent with central finite differences.

    The difference step is 1/1000 of the interval width; the start point
    defaults to the interval midpoint.
    """
    step = (end - begin) / 1000
    x = (begin + end) / 2 if x0 is None else x0
    for _ in range(niters):
        slope = (f(x + step) - f(x - step)) / (2 * step)
        x = x - lr * slope
    return x
def bisection(begin, end, f):
    """Recursively bisect [begin, end] until ``f`` is within 1e-7 of zero.

    Warns (by printing) when the bracket does not straddle a sign change,
    in which case convergence is not guaranteed.
    """
    mid = (begin + end) / 2
    if np.abs(f(mid) - 0) < 1e-7:
        return mid
    if (f(begin) < 0 and f(end) < 0) or (f(begin) > 0 and f(end) > 0):
        print('Bisection failed')
    # keep the half of the bracket whose endpoints differ in sign
    mid_neg_end_pos = f(mid) < 0 and f(end) > 0
    mid_pos_end_neg = f(mid) > 0 and f(end) < 0
    if mid_neg_end_pos or mid_pos_end_neg:
        return bisection(mid, end, f)
    return bisection(begin, mid, f)
def amq_norm_based(initial_point, grad_dist, bits, lr=0.1, epochs=50):
    """Optimize the AMQ multiplier for a norm-based mixture distribution.

    Gradient-descends on the exponential-levels multiplier ``mul``,
    summing the analytic variance gradient over the mixture components of
    ``grad_dist`` (norms/means/sigmas/coeff).  Returns the final
    multiplier and the per-epoch trajectory.
    """
    mul = initial_point
    s = 2 ** (bits - 1) - 1  # number of positive levels minus one
    all_mul = []
    iter = 0
    for epoch in range(epochs):
        sum = 0.0
        for norm, mean, sigma, coeff in zip(
                grad_dist.norms,
                grad_dist.means,
                grad_dist.sigmas,
                grad_dist.coeff):
            dist_comp = TruncNorm(
                mean, sigma, grad_dist.begin, grad_dist.end, grad_dist.nbins)
            # from eq G.3 in Appendix
            def arg1_1(j):
                return mean * (j * mul ** (j - 1) + (j + 1) * mul ** j) \
                    - (2 * j + 1) * mul ** (2 * j)
            arg1 = np.sum(np.asarray(
                [arg1_1(j)*(dist_comp.cdf(mul**j) - dist_comp.cdf(mul**(j+1)))
                 for j in range(0, s)]))
            def arg2_1(j):
                return j * mul ** (j - 1) + (j + 1) * mul ** j
            arg2 = np.sum(np.asarray(
                [arg2_1(j) * (dist_comp.pdf(mul ** (j + 1))
                              - dist_comp.pdf(mul ** (j)))
                 for j in range(0, s)]))
            sum += coeff * (arg1 + sigma ** 2 * arg2)
        gradient = 2 * s * (mul ** (2 * s - 1)) * \
            (grad_dist.cdf(mul ** s) - grad_dist.cdf(0)) + sum
        mul = mul - lr * gradient
        iter += 1
        all_mul.append(mul)
    return mul, all_mul
def amq_norm_less(initial_point, grad_dist, bits, lr=0.1, epochs=200):
    """Optimize the AMQ multiplier for a single (norm-less) distribution.

    Same analytic gradient descent as ``amq_norm_based`` but with a
    single truncated-normal ``grad_dist`` instead of a mixture.  Returns
    the final multiplier and the per-epoch trajectory.
    """
    mul = initial_point
    s = 2 ** (bits - 1) - 1  # number of positive levels minus one
    mean = grad_dist.mean
    sigma = grad_dist.sigma
    all_mul = []
    iter = 0
    for epoch in range(epochs):
        sum = 0.0
        def arg1_1(j):
            return mean * (j * mul ** (j - 1) + (j + 1) * mul ** j) \
                - (2 * j + 1) * mul ** (2 * j)
        arg1 = np.sum(np.asarray([arg1_1(j) * (
            grad_dist.cdf(mul ** j) -
            grad_dist.cdf(mul ** (j+1))) for j in range(0, s)]))
        def arg2_1(j):
            return j * mul ** (j - 1) + (j + 1) * mul ** j
        arg2 = np.sum(np.asarray([
            arg2_1(j) * (grad_dist.pdf(mul ** (j + 1)) -
                         grad_dist.pdf(mul ** (j))) for j in range(0, s)]))
        gradient = 2 * s * (mul ** (2 * s - 1)) * \
            (grad_dist.cdf(mul ** s) - grad_dist.cdf(0)) \
            + arg1 + sigma ** 2 * arg2
        mul = mul - lr * gradient
        iter += 1
        all_mul.append(mul)
    return mul, all_mul
def alq(initial_levels, grad_dist, epochs, inv=False, sym=True):
    """Adaptive Level Quantization: coordinate descent on level positions.

    Each epoch moves every interior level to (approximately) minimize the
    quantization variance between its two neighbors, either by gradient
    descent on the local objective or, with ``inv``, by the
    distribution's closed-form ``estimate_variance_adj_inv``.  With
    ``sym`` only the positive half is optimized and mirrored at the end.

    Returns (final_levels, per-epoch level history, per-epoch losses).
    """
    losses = []
    # Assuming last level is 1, setting first dummy level to 0
    if sym:
        positive_levels = initial_levels[len(initial_levels) // 2:]
        new_levels = [0] + list(positive_levels).copy()
    else:
        new_levels = list(initial_levels).copy()
    all_levels = [new_levels.copy()]
    for epoch in range(epochs):
        def objective(x, left_level, right_level):
            # from equation below corollary 1
            left_var = grad_dist.est_var_adjacent_levels(left_level, x)
            right_var = grad_dist.est_var_adjacent_levels(x, right_level)
            return left_var+right_var
        # sweep interior levels; endpoints stay fixed
        for index in range(1, len(new_levels)-1):
            left_level = new_levels[index - 1]
            right_level = new_levels[index + 1]
            if inv:
                new_levels[index] = grad_dist.estimate_variance_adj_inv(
                    left_level, right_level)
            else:
                new_levels[index] = finite_diff_gradient_descent(
                    lambda x: objective(x, left_level, right_level),
                    left_level, right_level, x0=new_levels[index])
            assert new_levels[index] < right_level and \
                new_levels[index] > left_level, \
                "New level is not in the interval"
        if sym:
            # evaluate the full mirrored grid (dummy 0 level dropped once)
            negative_levels = [-level for level in new_levels]
            negative_levels.reverse()
            losses.append(grad_dist.estimate_variance(
                negative_levels[:-1] + new_levels[1:]))
            all_levels.append(new_levels.copy())
        else:
            losses.append(grad_dist.estimate_variance(new_levels))
            all_levels.append(new_levels.copy())
    if sym:
        # dropping dummy level at 0
        new_levels = new_levels[1:]
        negative_levels = [-level for level in new_levels]
        negative_levels.reverse()
        new_levels = negative_levels + new_levels
    return new_levels, all_levels, losses
def get_exp_levels(bits, multiplier):
    """ exponential (NUQSGD)

    multiplier: is used to modify levels_exp based on the number of bits

    NOTE(review): third definition of this name in the module (earlier
    one has a default multiplier and returns an ndarray); this version
    returns a plain list and is itself shadowed by the identical
    definition below.
    """
    num_levels = 2 << bits - 1
    # if bits == 2:
    #     multiplier = 0.1
    # elif bits == 4:
    #     multiplier = 0.5
    # elif bits == 6:
    #     multiplier = 0.9
    # elif bits == 8:
    #     multiplier = 0.95
    levels = sum([[-multiplier**j for j in range(num_levels >> 1)],
                  [multiplier**j for j in reversed(range(num_levels >> 1))]],
                 [])
    return levels
def get_exp_levels(bits, multiplier):
    """ exponential (NUQSGD)

    multiplier: is used to modify levels_exp based on the number of bits

    NOTE(review): duplicate of the definition directly above; this copy
    is the one actually in effect at import time. Returns a plain list.
    """
    num_levels = 2 << bits - 1
    # if bits == 2:
    #     multiplier = 0.1
    # elif bits == 4:
    #     multiplier = 0.5
    # elif bits == 6:
    #     multiplier = 0.9
    # elif bits == 8:
    #     multiplier = 0.95
    levels = sum([[-multiplier**j for j in range(num_levels >> 1)],
                  [multiplier**j for j in reversed(range(num_levels >> 1))]],
                 [])
    return levels
class QuantizeMultiBucket(object):
    """Bucketed gradient quantizer supporting several level schemes.

    Holds the current level grid (on GPU) and a ``QDQ`` CUDA
    quantize/dequantize kernel, and periodically re-optimizes the levels
    from gradient statistics (``set_mean_variance`` / ``update_levels``).
    """

    def __init__(self, method, bits, bucket_size, multiplier, **kwargs):
        """
        QSGD: qdqL2 + levels_uni
        NUQSGD: qdqL2 + levels_exp
        QSGD-inf: qdqLinf + levels_uni
        """
        self.method = method
        self.multiplier = multiplier
        if kwargs['interval'] != None:
            self.interval = kwargs['interval']
            # a, b appear unused below — presumably leftover truncnorm
            # bounds; TODO confirm and remove.
            a, b = (-self.interval - 0) / 0.1, (self.interval - 0) / 0.1
        if method == 'q':
            self.levels = get_uniform_levels(bits)
            self.norm_type = 'fro'
        elif method == 'nuq':
            self.levels = get_exp_levels(bits, multiplier)
            self.norm_type = 'fro'
        elif method == 'qinf':
            self.levels = get_uniform_levels(bits)
            self.norm_type = float('inf')
        elif method == 'nuq2':
            # NOTE(review): get_quantile_levels in this module takes
            # (bits, grad_dist); this 5-argument call would raise
            # TypeError if the 'nuq2' path is ever taken — confirm.
            self.levels = get_quantile_levels(
                bits, 0, 0.1, -self.interval, self.interval)
            self.norm_type = 'fro'
        elif method == 'nuq2inf':
            # NOTE(review): same signature mismatch as 'nuq2' above.
            self.levels = get_quantile_levels(
                bits, 0, 0.1, -self.interval, self.interval)
            self.norm_type = float('inf')
        elif method == 'amq':
            self.levels = get_exp_levels(bits, multiplier)
            self.norm_type = 'fro'
        elif method == 'amq_nb':
            self.levels = get_exp_levels(bits, multiplier)
            self.norm_type = 'fro'
        elif method == 'alq':
            self.levels = get_exp_levels(bits, multiplier)
            self.norm_type = 'fro'
        elif method == 'alq_nb':
            self.levels = get_exp_levels(bits, multiplier)
            self.norm_type = 'fro'
        elif method == 'trn':
            self.levels = get_ternary_levels()
            self.norm_type = float('inf')
        elif method == 'none':
            # no quantization: skip all remaining setup
            return
        self.number_of_iterations = 0
        self.gradient_samples = []
        self.gradient_samples_overtime = []
        self.previous_best = None
        self.bucket_size = bucket_size
        self.bits = bits
        self.epochs = kwargs['cd_epochs']
        self.path = kwargs['path']
        self.amq_lr = kwargs['amq_lr']
        self.amq_epochs = kwargs['amq_epochs']
        self.symmetric = kwargs['symmetric']
        self.inv = kwargs['inv']
        self.levels = torch.as_tensor(self.levels, dtype=torch.float32).cuda()
        self.qdq = QDQ(self.levels)
        self.mean_weights = 0
        self.variance_weights = 0.1
        self.error = None

    def set_mean_variance(self, stats):
        """Rebuild the gradient distributions from fresh statistics.

        ``stats['nl']`` carries mean/sigma of the norm-less model;
        ``stats['nb']`` carries per-bucket means/sigmas/norms.
        """
        self.mean = mean = stats['nl']['mean']
        self.variance = variance = stats['nl']['sigma'] ** 2
        self.norms = norms = stats['nb']
        self.number_of_iterations += 1
        interval = self.interval
        sigma = torch.sqrt(torch.tensor(self.variance)).cpu().item()
        self.grad_dist_nb = CondNormalTruncHist(
            norms['means'], norms['sigmas'], norms['norms'], -interval,
            interval, nbins=100000, bin_type='linear')
        self.grad_dist_nl = TruncNorm(
            mean, sigma, -interval, interval, nbins=100000, bin_type='linear')
        # variance of the current levels under the norm-based distribution
        self.error = self.grad_dist_nb.estimate_variance(self.levels.cpu())
        if self.method == 'amq':
            # dump the raw statistics for offline inspection
            np.savetxt(self.path + '/norms_mean' +
                       str(self.number_of_iterations), np.asarray(self.norms['means']))
            np.savetxt(self.path + '/norms_sigma' +
                       str(self.number_of_iterations), np.asarray(self.norms['sigmas']))
            np.savetxt(self.path + '/norms_norm' +
                       str(self.number_of_iterations), np.asarray(self.norms['norms']))

    def update_levels(self):
        """Re-optimize the level grid according to ``self.method``.

        ALQ variants run coordinate descent from three initializations
        (quantile, uniform, exponential) and keep the lowest-loss result;
        AMQ variants optimize the exponential multiplier from several
        starting points.
        """
        interval = self.interval
        mean = self.mean
        bits = self.bits
        variance = self.variance
        grad_dist_nl = self.grad_dist_nl
        grad_dist_nb = self.grad_dist_nb
        sigma = torch.sqrt(torch.tensor(self.variance)).cpu().item()
        half_point = int(len(self.levels) / 2)
        quantile_levels = get_quantile_levels(bits, grad_dist_nb)
        uniform_levels = get_uniform_levels(
            self.bits)
        exp_levels = get_exp_levels(
            self.bits, 0.5)
        bits = self.bits
        if self.method == 'alq':
            inv = self.inv
            sym = self.symmetric
            epochs = self.epochs
            initial_levels = self.levels
            levels_qua, _, losses_qua = alq(
                quantile_levels, grad_dist_nl, epochs, inv, sym)
            levels_uniform, _, losses_uni = alq(
                uniform_levels, grad_dist_nl, epochs, inv, sym)
            levels_exp, _, losses_exp = alq(
                exp_levels, grad_dist_nl, epochs, inv, sym)
            # pick the initialization with the lowest final loss
            candidate_levels = np.asarray(
                [levels_qua, levels_uniform, levels_exp])
            candidate_losses = np.asarray(
                [losses_qua[-1], losses_uni[-1], losses_exp[-1]])
            self.levels = candidate_levels[np.argsort(candidate_losses)][0]
        elif self.method == 'alq_nb':
            epochs = self.epochs
            inv = self.inv
            sym = self.symmetric
            quantile_levels = get_quantile_levels(bits, grad_dist_nb)
            levels_qua, _, losses_qua = alq(
                quantile_levels, grad_dist_nb, epochs, inv, sym)
            levels_uniform, _, losses_uni = alq(
                uniform_levels, grad_dist_nb, epochs, inv, sym)
            levels_exp, _, losses_exp = alq(
                exp_levels, grad_dist_nb, epochs, inv, sym)
            candidate_levels = np.asarray(
                [levels_qua, levels_uniform, levels_exp])
            candidate_losses = np.asarray(
                [losses_qua[-1], losses_uni[-1], losses_exp[-1]])
            self.levels = candidate_levels[np.argsort(candidate_losses)][0]
        elif self.method == 'amq':
            initial_points = []
            if self.previous_best is None:
                initial_points = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.8, 0.9]
            else:
                initial_points = [0.1, 0.2, 0.3, 0.4,
                                  self.previous_best, 0.5, 0.8, 0.9]
            optimal_points = []
            for point in initial_points:
                optimal_p, _ = amq_norm_less(point, grad_dist_nl, bits, self.amq_lr, self.amq_epochs)
                optimal_points.append(optimal_p)
            # evaluate each candidate multiplier on the positive half-grid
            optimal_points_costs = [
                grad_dist_nl.estimate_variance(get_exp_levels(bits, p)[
                    half_point:]) for p in optimal_points]
            index = np.argmin(optimal_points_costs)
            self.multiplier = optimal_points[index]
            self.previous_best = self.multiplier
            self.levels = get_exp_levels(bits, self.multiplier)
        elif self.method == 'amq_nb':
            initial_points = []
            if self.previous_best is None:
                initial_points = [0.1, 0.2, 0.3, 0.4, 0.5, 0.8, 0.9]
            else:
                initial_points = [0.1, 0.2, 0.3, 0.4,
                                  self.previous_best, 0.5, 0.8, 0.9]
            optimal_points = []
            for point in initial_points:
                optimal_p, _ = amq_norm_based(point, grad_dist_nb, bits, self.amq_lr, self.amq_epochs)
                optimal_points.append(optimal_p)
            optimal_points_costs = [
                grad_dist_nb.estimate_variance(get_exp_levels(bits, p)[
                    half_point:]) for p in optimal_points]
            index = np.argmin(optimal_points_costs)
            self.multiplier = optimal_points[index]
            self.previous_best = self.multiplier
            self.levels = get_exp_levels(self.bits, self.multiplier)
        self.levels = torch.as_tensor(self.levels, dtype=torch.float32).cuda()
        self.qdq = QDQ(self.levels)

    def quantize(self, x, ig_sm_bkts):
        """Quantize-dequantize tensor ``x`` bucket by bucket on the GPU.

        ``ig_sm_bkts`` leaves the last (padded/partial) bucket
        unquantized.
        """
        if self.method == 'none':
            return x
        assert isinstance(x, torch.cuda.FloatTensor)
        bucket_size = self.bucket_size
        # zero-pad the flattened tensor up to a multiple of bucket_size
        num_tail = math.ceil(x.numel()/bucket_size)*bucket_size-x.numel()
        xv = torch.cat((x.view(-1),
                        torch.zeros(num_tail, dtype=x.dtype, device=x.device)))
        xv = xv.view(-1, bucket_size)
        # per-bucket norm, broadcast back to per-element
        norm = xv.norm(p=self.norm_type, dim=1, keepdim=True).expand(
            xv.shape[0], xv.shape[1]).contiguous().view(-1).contiguous()
        if ig_sm_bkts:
            if xv.shape[0] > 1:
                q = torch.zeros_like(xv)
                r = torch.randint_like(xv, 1000001).long()
                # quantize all buckets except the last, keep the tail raw
                self.qdq.qdqGPU(xv[:-1], norm[:-1], q[:-1], r[:-1])
                # NOTE(review): when num_tail == 0, [:-num_tail] is an
                # empty slice and the last bucket is dropped entirely —
                # confirm inputs are never an exact multiple of
                # bucket_size on this path.
                return torch.cat([q[:-1].view(-1), xv[-1][:-num_tail].view(-1)]).view(x.shape)
            else:
                return xv[-1][:-num_tail].view(x.shape)
        else:
            q = torch.zeros_like(x)
            r = torch.randint_like(x, 1000001).long()
            self.qdq.qdqGPU(x, norm, q, r)
            return q

    def state_dict(self):
        """Serialize levels, distribution parameters and last error."""
        if self.method == 'none':
            return {}
        return {
            'levels': self.levels,
            'means': self.grad_dist_nb.means,
            'sigmas': self.grad_dist_nb.sigmas,
            'norms': self.grad_dist_nb.norms,
            'sigma': self.grad_dist_nl.sigma,
            'mean': self.grad_dist_nl.mean,
            'error': self.error
        }

    def load_state_dict(self, state):
        """Restore levels and rebuild distributions and the QDQ kernel."""
        if self.method == 'none':
            return
        self.levels = state['levels']
        # distributions are rebuilt over the fixed interval [-1, 1]
        self.grad_dist_nb = CondNormalTruncHist(
            state['means'], state['sigmas'], state['norms'], -1,
            1, nbins=100000, bin_type='linear')
        self.grad_dist_nl = TruncNorm(
            state['mean'], state['sigma'], -1,
            1, nbins=100000, bin_type='linear')
        self.qdq = QDQ(self.levels)
        self.error = state['error']
| 17,549 | 35.036961 | 102 | py |
learning-to-quantize | learning-to-quantize-master/nuq/cuda/test.py | import torch
import cuquant as qdq
import numpy as np
def test_qdq_gpu():
    """Smoke-test quantize/dequantize on random GPU data (no-op on CPU)."""
    if not torch.cuda.is_available():
        return
    x = torch.randn(1000).cuda().uniform_(-1, 1)
    q = qdq.qdq_gpu(x)
    unique_levels = np.unique(q.cpu().numpy())
    print('x', x)
    print('q', q)
    print('unique q', unique_levels)
    print('# unique q', len(unique_levels))
# Allow running this file directly as a quick manual check.
if __name__ == '__main__':
    test_qdq_gpu()
| 380 | 18.05 | 48 | py |
learning-to-quantize | learning-to-quantize-master/nuq/cuda/qdq.py | import torch
import math
from cuquant import QDQ
def get_uniform_levels(bits):
    """Return 2**bits evenly spaced quantization levels in [-1, 1]."""
    count = 2 << bits - 1
    return torch.linspace(-1, 1, steps=count)
def qdq_gpu(a):
    """Quantize-dequantize GPU tensor ``a`` with 16-element L2 buckets.

    Pads to a multiple of the bucket size, computes per-bucket norms,
    and runs the CUDA QDQ kernel over a 4-bit uniform level grid.
    Returns a tensor with the original shape.
    """
    assert isinstance(a, torch.cuda.FloatTensor)
    bucket_size = 16
    asize = a.size()
    # zero-pad the flattened input to a whole number of buckets
    num_tail = math.ceil(a.numel()/bucket_size)*bucket_size-a.numel()
    av = torch.cat((a.view(-1), torch.zeros_like(a)[:num_tail]))
    c = torch.zeros_like(a)
    av = av.view(-1, bucket_size)
    # per-bucket L2 norm, broadcast back to per-element layout
    norm = av.norm(dim=1, keepdim=True).expand(
        av.shape[0], av.shape[1]).contiguous().view(-1).contiguous()
    print('norm', norm)
    # random values used by the kernel for stochastic rounding
    r = torch.randint_like(a, 1000001).long()
    levels = get_uniform_levels(4).cuda()
    print('levels', levels)
    print('#levels', len(levels))
    qdq = QDQ(levels)
    qdq.qdqGPU(a, norm, c, r)
    return c.view(asize)
| 867 | 26.125 | 69 | py |
learning-to-quantize | learning-to-quantize-master/nuq/cuda/setup.py | import os
from setuptools import setup
from torch.utils.cpp_extension import CUDAExtension, BuildExtension
# Build the CUDA static library first; the extension below links against it.
os.system('make -j%d' % os.cpu_count())
# Python interface
setup(
    name='CuQuantize',
    version='0.1.0',
    install_requires=['torch'],
    packages=['cuquant'],
    package_dir={'cuquant': './'},
    ext_modules=[
        CUDAExtension(
            name='cuquant_back',
            include_dirs=['./'],
            sources=[
                'pybind/bind.cpp',
            ],
            # link against the library produced by the make step above
            libraries=['cuquant'],
            library_dirs=['objs'],
            # extra_compile_args=['-g']
        )
    ],
    cmdclass={'build_ext': BuildExtension},
    description='Quantize-Dequantize cuda kernel',
    zip_safe=False,
)
| 735 | 23.533333 | 67 | py |
learning-to-quantize | learning-to-quantize-master/nuq/cuda/__init__.py | import torch
from cuquant_back import QDQ
from .qdq import qdq_gpu
| 69 | 10.666667 | 28 | py |
learning-to-quantize | learning-to-quantize-master/grid/cluster.py | from __future__ import print_function
def ssh(sargs):
    """Job layout for ssh-based execution (``sargs`` is unused).

    rm jobs/*.sh jobs/log/* -f && python grid_run.py --grid G --run_name X
    pattern=""; for i in 1 2; do ./kill.sh $i $pattern; done
    ./start.sh
    """
    machines = ['machine0_gpu0', 'machine0_gpu1',
                'machine1_gpu0', 'machine1_gpu1',
                ]
    # validate start.sh
    slots_per_machine = [2]*4  # Number of parallel jobs on each machine
    jobs = ['%s_job%d' % (machine, slot)
            for machine, slots in zip(machines, slots_per_machine)
            for slot in range(slots)]
    parallel = False  # each script runs in sequence
    return jobs, parallel
def slurm(sargs, prefix):
    """Write a Slurm array sbatch file and return the job id list.

    rm jobs/*.sh jobs/log/* -f && python grid_run.py --grid G --run_name X \
        --cluster_args <njobs>,<ntasks>,<partitions>
    pattern=""; for i in 1 2; do ./kill.sh $i $pattern; done
    sbatch jobs/slurm.sbatch
    """
    # parse "<njobs>,<ntasks>,<partition>"
    njobs, ntasks, partition = sargs.split(',', 2)
    njobs = int(njobs)
    ntasks = int(ntasks)
    # njobs = 5 # Number of array jobs
    # ntasks = 4 # Number of running jobs
    # NOTE(review): the partition parsed from sargs is overridden here —
    # confirm this hard-coding is intended.
    partition = 'gpu'
    jobs = [str(i) for i in range(njobs)]
    sbatch_f = """#!/bin/bash
#SBATCH --job-name=array
#SBATCH --output=jobs/log/array_%A_%a.log
#SBATCH --array=0-{njobs}
#SBATCH --time=300:00:00
#SBATCH --gres=gpu:1 # Number of GPUs (per node)
#SBATCH -c 3
#SBATCH --mem=18G
#SBATCH --mail-type=ALL,ARRAY_TASKS
#SBATCH --mail-user=iman.tabrizian+slurm@gmail.com
#SBATCH -p {partition}
#SBATCH --ntasks=1
date; hostname; pwd
python -c "import torch; print(torch.__version__)"
(while true; do nvidia-smi; top -b -n 1 | head -20; sleep 10; done) &
# the environment variable SLURM_ARRAY_TASK_ID contains
# the index corresponding to the current job step
source $HOME/Code/nuqsgd/nuqsgd.sh
bash jobs/{prefix}_$SLURM_ARRAY_TASK_ID.sh
""".format(njobs=njobs-1, ntasks=ntasks, partition=partition, prefix=prefix)
    with open('jobs/' + prefix + '_slurm.sbatch', 'w') as f:
        print(sbatch_f, file=f)
    parallel = True  # each script runs in parallel
    return jobs, parallel
| 2,057 | 31.666667 | 76 | py |
learning-to-quantize | learning-to-quantize-master/main/gvar.py | from __future__ import print_function
import numpy as np
import logging
import os
import sys
import torch
import torch.nn
import torch.backends.cudnn as cudnn
import torch.optim
import torch.nn.functional as F
import torch.multiprocessing
import utils
import models
from data import get_loaders
from args import get_opt
from log_utils import TBXWrapper
from log_utils import Profiler
from estim.optim import OptimizerFactory
from tensorboardX import SummaryWriter
tb_logger = TBXWrapper()
def test(tb_logger, model, test_loader,
         opt, niters, set_name='Test', prefix='V'):
    """Evaluate ``model`` on ``test_loader``; log and return accuracy (%).

    Metrics are logged under keys '<prefix>loss', '<prefix>acc', etc.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target, idx in test_loader:
            if opt.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            # per-sample NLL, summed so the mean is over the full dataset
            loss = F.nll_loss(output, target, reduction='none')
            test_loss += loss.sum().item()
            # get the index of the max log-probability
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()
    wrong = len(test_loader.dataset) - correct
    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    error = 100. * wrong / len(test_loader.dataset)
    logging.info(
        '\n{0} set: Average loss: {1:.4f}'
        ', Accuracy: {2}/{3} ({4:.2f}%)'
        ', Error: {5}/{3} ({6:.2f}%)\n'.format(
            set_name, test_loss, correct, len(test_loader.dataset),
            accuracy, wrong, error))
    tb_logger.log_value('%sloss' % prefix, test_loss, step=niters)
    tb_logger.log_value('%scorrect' % prefix, correct, step=niters)
    tb_logger.log_value('%swrong' % prefix, wrong, step=niters)
    tb_logger.log_value('%sacc' % prefix, accuracy, step=niters)
    tb_logger.log_value('%serror' % prefix, error, step=niters)
    return accuracy
def train(tb_logger, epoch, train_loader, model, optimizer, opt, test_loader,
          save_checkpoint, train_test_loader):
    """Train for one epoch, with periodic logging, eval and checkpointing.

    Resumes mid-epoch from ``optimizer.niters``; evaluation and
    checkpointing trigger on ``opt.chkpt_iter`` and epoch boundaries.
    """
    batch_time = Profiler()
    model.train()
    profiler = Profiler()
    # resume mid-epoch if a checkpoint left us partway through
    init_iters = optimizer.niters % opt.epoch_iters
    optimizer.logger.reset()
    for batch_idx in range(init_iters, opt.epoch_iters):
        profiler.start()
        # sgd step
        loss = optimizer.step(profiler)
        batch_time.toc('Time')
        batch_time.end()
        optimizer.niters += 1
        niters = optimizer.niters
        # if True:
        if batch_idx % opt.log_interval == 0:
            gvar_log = ''
            prof_log = ''
            if optimizer.gvar.is_log_iter(niters):
                gvar_log = '\t' + optimizer.gvar.log_var(model, niters)
            if opt.log_profiler:
                prof_log = '\t' + str(profiler)
            logging.info(
                'Epoch: [{0}][{1}/{2}]({niters})\t'
                'Loss: {loss:.6f}\t'
                '{batch_time}\t'
                '{opt_log}{gvar_log}{prof_log}'.format(
                    epoch, batch_idx, len(train_loader),
                    loss=loss.item(),
                    batch_time=str(batch_time),
                    opt_log=str(optimizer.logger),
                    gvar_log=gvar_log,
                    prof_log=prof_log,
                    niters=niters))
        if batch_idx % opt.tblog_interval == 0:
            tb_logger.log_value('epoch', epoch, step=niters)
            lr = optimizer.param_groups[0]['lr']
            tb_logger.log_value('lr', lr, step=niters)
            tb_logger.log_value('niters', niters, step=niters)
            tb_logger.log_value('batch_idx', batch_idx, step=niters)
            tb_logger.log_value('loss', loss, step=niters)
            optimizer.logger.tb_log(tb_logger, step=niters)
        # epoch boundary: optionally measure accuracy on the training set
        if optimizer.niters % opt.epoch_iters == 0:
            if opt.train_accuracy:
                test(tb_logger,
                     model, train_test_loader, opt, optimizer.niters,
                     'Train', 'T')
        # periodic (and end-of-epoch) eval + checkpoint
        if optimizer.niters % opt.chkpt_iter == 0 or optimizer.niters % opt.epoch_iters == 0:
            prec1 = test(tb_logger,
                         model, test_loader, opt, optimizer.niters)
            save_checkpoint(model, float(prec1), opt, optimizer,
                            gvar=optimizer.gvar)
            tb_logger.save_log()
def untrain(model, gvar, opt):
    """Perturb the model by noisy gradient *ascent* for a few steps.

    Uses opt.untrain_steps / untrain_lr / untrain_std; each step adds
    lr-scaled gradients plus Gaussian noise to every parameter.
    """
    steps = opt.untrain_steps
    lr = opt.untrain_lr
    std = opt.untrain_std
    for batch_idx in range(steps):
        loss = gvar.grad(-1)
        with torch.no_grad():
            for p in model.parameters():
                p += p.grad*lr  # ascent
                p += torch.zeros_like(p.grad).normal_(0, std)  # noise
        if batch_idx % opt.log_interval == 0:
            logging.info(
                'Untrain: [{0}/{1}]\t'
                'Loss: {loss:.6f}'.format(
                    batch_idx, steps, loss=loss.item()))
def main():
    """Entry point: parse options, build everything, run the train loop."""
    opt = get_opt()
    tb_logger.configure(opt.logger_name, flush_secs=5, opt=opt)
    logfname = os.path.join(opt.logger_name, 'log.txt')
    logging.basicConfig(
        filename=logfname,
        format='%(asctime)s %(message)s', level=logging.INFO)
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(opt.d))
    # seed every RNG we rely on
    torch.manual_seed(opt.seed)
    if opt.cuda:
        # TODO: remove deterministic
        torch.backends.cudnn.deterministic = True
        torch.cuda.manual_seed(opt.seed)
        np.random.seed(opt.seed)
        # helps with wide-resnet by reducing memory and time 2x
        cudnn.benchmark = True
    train_loader, test_loader, train_test_loader = get_loaders(opt)
    if opt.epoch_iters == 0:
        # derive iterations per epoch from the dataset size
        opt.epoch_iters = int(
            np.ceil(1. * len(train_loader.dataset) / opt.batch_size))
    opt.maxiter = opt.epoch_iters * opt.epochs
    if opt.g_epoch:
        # convert epoch-based gvar scheduling into iteration counts
        opt.gvar_start *= opt.epoch_iters
        opt.g_optim_start = (opt.g_optim_start * opt.epoch_iters) + 1
    model = models.init_model(opt)
    optimizer = OptimizerFactory(model, train_loader, tb_logger, opt)
    epoch = 0
    save_checkpoint = utils.SaveCheckpoint()
    # optionally resume from a checkpoint
    if not opt.noresume:
        model_path = os.path.join(opt.logger_name, opt.ckpt_name)
        if os.path.isfile(model_path):
            print("=> loading checkpoint '{}'".format(model_path))
            checkpoint = torch.load(model_path)
            best_prec1 = checkpoint['best_prec1']
            optimizer.gvar.load_state_dict(checkpoint['gvar'])
            optimizer.niters = checkpoint['niters']
            epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['model'])
            save_checkpoint.best_prec1 = best_prec1
            print("=> loaded checkpoint '{}' (epoch {}, best_prec {})"
                  .format(model_path, epoch, best_prec1))
        else:
            print("=> no checkpoint found at '{}'".format(model_path))
    if opt.niters > 0:
        max_iters = opt.niters
    else:
        max_iters = opt.epochs * opt.epoch_iters
    if opt.untrain_steps > 0:
        untrain(model, optimizer.gvar, opt)
    while optimizer.niters < max_iters:
        optimizer.epoch = epoch
        utils.adjust_lr(optimizer, opt)
        ecode = train(
            tb_logger,
            epoch, train_loader, model, optimizer, opt, test_loader,
            save_checkpoint, train_test_loader)
        if ecode == -1:
            break
        epoch += 1
    tb_logger.save_log()
# Script entry point.
if __name__ == '__main__':
    main()
| 7,579 | 34.754717 | 94 | py |
PyBDSF | PyBDSF-master/doc/source/conf.py | # -*- coding: utf-8 -*-
#
# PyBDSF documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 19 13:27:03 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# -- General configuration -----------------------------------------------

# The encoding of source files.
source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'PyBDSF'
copyright = u'2022, David Rafferty and Niruj Mohan'

# The short X.Y version and the full version (including alpha/beta/rc tags);
# |version| and |release| substitutions use these throughout the built docs.
version = '1.11'
release = '1.11.0a1'

# Patterns, relative to the source directory, of files and directories to
# ignore when looking for source files.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output ---------------------------------------------

# The theme to use for HTML and HTML Help pages.
html_theme = 'sphinx_rtd_theme'

# Image file (relative to this directory) placed at the top of the sidebar.
html_logo = 'front_pic.png'

# Output file base name for the HTML help builder.
htmlhelp_basename = 'PyBDSFdoc'

# All other HTML options (title, favicon, static paths, sidebars, indices,
# source links, footer notices, OpenSearch, file suffix, ...) are left at
# their Sphinx defaults.

# -- Options for LaTeX output --------------------------------------------

# Paper size, font size and extra preamble are left at their defaults.
latex_elements = {
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'PyBDSF.tex', u'PyBDSF Documentation',
   u'David Rafferty and Niruj Mohan', 'manual'),
]

# -- Options for manual page output --------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pybdsf', u'PyBDSF Documentation',
     [u'David Rafferty and Niruj Mohan'], 1)
]

# -- Options for Texinfo output ------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'PyBDSF', u'PyBDSF Documentation',
   u'David Rafferty and Niruj Mohan', 'PyBDSF', 'One line description of project.',
   'Miscellaneous'),
]

# -- Options for Epub output ---------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'PyBDSF'
epub_author = u'David Rafferty and Niruj Mohan'
epub_publisher = u'David Rafferty and Niruj Mohan'
epub_copyright = u'2022, David Rafferty and Niruj Mohan'
| 9,149 | 30.6609 | 83 | py |
# Packaging metadata for the "csa" (Confident Sinkhorn Allocation) package.
# Fix: dataset-extraction junk had been fused onto the first and last lines
# of this script, making it invalid Python; the metadata itself is unchanged.
from setuptools import setup, find_packages

setup(
    name='csa',
    version='1.0',
    packages=find_packages(),
    include_package_data=True,
    description='Confident Sinkhorn Allocation',
    install_requires=[
        "colorama>=0.4.5",
        "cycler>=0.11.0",
        "fonttools>=4.33.3",
        "joblib>=1.1.0",
        "kiwisolver>=1.4.3",
        "matplotlib>=3.1.2",
        "numpy>=1.21.0",
        "packaging>=21.3",
        "pandas>=1.2.3",
        "Pillow>=9.2.0",
        "pyparsing>=3.0.9",
        "python-dateutil>=2.8.2",
        "pytz>=2022.1",
        "scikit-learn>=1.0",
        "scipy>=1.7.1",
        "six>=1.16.0",
        "threadpoolctl>=3.1.0",
        "tqdm>=4.64.0",
        "xgboost>=1.6.1",
    ],
)
confident-sinkhorn-allocation | confident-sinkhorn-allocation-master/algorithm/flexmatch.py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 15 14:19:22 2021
@author: Vu Nguyen
"""
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
from scipy import stats
from .pseudo_labeling import Pseudo_Labeling
# FlexMatch Strategy for Pseudo-Labeling =======================================================================
# Zhang, Bowen, Yidong Wang, Wenxin Hou, Hao Wu, Jindong Wang, Manabu Okumura, and Takahiro Shinozaki.
# "Flexmatch: Boosting semi-supervised learning with curriculum pseudo labeling." NeurIPS 2021
class FlexMatch(Pseudo_Labeling):
    """
    FlexMatch strategy for pseudo-labeling: the fixed confidence threshold is
    scaled down per class according to how many points of that class already
    clear the base threshold (curriculum pseudo-labeling).

    Reference: Zhang, Bowen, et al. "FlexMatch: Boosting Semi-Supervised
    Learning with Curriculum Pseudo Labeling." NeurIPS 2021.
    """

    def __init__(self, unlabelled_data, x_test, y_test, num_iters=5, upper_threshold=0.9, verbose=False, IsMultiLabel=False):
        """
        Args:
            unlabelled_data: [N x d] where N is the number of unlabeled points, d the feature dimension
            x_test         : [N_test x d]
            y_test         : [N_test x 1] for multiclass or [N_test x K] for multilabel classification
            num_iters      : number of pseudo-labeling iterations, recommended = 5 as in the paper
            upper_threshold: base threshold for pseudo-labeling; each class uses a scaled version of it
            verbose        : verbose
            IsMultiLabel   : False => multiclass, True => multilabel classification
        """
        super().__init__(unlabelled_data, x_test, y_test, num_iters=num_iters,
                         upper_threshold=upper_threshold, verbose=verbose, IsMultiLabel=IsMultiLabel)
        self.algorithm_name = "FlexMatch"

    def predict(self, X):
        """Predict labels for X with the current fitted model.

        Fix: the base class does not define predict() (it is commented out
        there), so the original ``super().predict(X)`` raised AttributeError
        and returned nothing. Delegate to the underlying model instead.
        """
        return self.model.predict(X)

    def predict_proba(self, X):
        """Predict class probabilities for X with the current fitted model.

        Fix: same defect as predict() — the base class has no predict_proba
        and the result was never returned.
        """
        return self.model.predict_proba(X)

    def evaluate_performance(self):
        # Delegates to the base class, which appends the current test accuracy
        # (multiclass) or sample-wise precision (multilabel) to self.test_acc.
        super().evaluate_performance()

    def get_max_pseudo_point(self, class_freq, current_iter):
        # Per-class budget of pseudo-labels for this iteration (base-class
        # linear ramp: larger budget early, smaller later).
        return super().get_max_pseudo_point(class_freq, current_iter)

    def label_assignment_and_post_processing_FlexMatch(self, pseudo_labels_prob, X, y, current_iter=0, upper_threshold=None):
        """
        FlexMatch-specific label assignment and data augmentation.

        Args:
            pseudo_labels_prob: predictive prob [N x K] where N is #unlabeled, K is #class
            X: existing pseudo-labeled + labeled data [N' x d]
            y: existing pseudo-labeled + labeled data [N' x 1] (multiclass) or [N' x K] (multilabel)
            current_iter: current pseudo-labeling iteration (controls the per-class budget)
            upper_threshold: accepted for signature compatibility with the base
                class but unused — FlexMatch derives its per-class adaptive
                threshold from self.upper_threshold.
        Output:
            Augmented X, augmented y
        """
        if self.IsMultiLabel == False:
            # multiclass: a point may only join its argmax class, so keep
            # only the probability at the argmax class per row
            max_prob_matrix = self.get_prob_at_max_class(pseudo_labels_prob)
        else:
            # multilabel: a point may join several classes, keep the full matrix
            max_prob_matrix = pseudo_labels_prob

        # FlexMatch curriculum: per class, count the unlabeled points whose
        # score already clears the base threshold; classes with fewer such
        # points get a proportionally lower threshold below.
        countVector = [0] * self.nClass
        for cc in range(self.nClass):
            temp = np.where(max_prob_matrix[:, cc] > self.upper_threshold)[0]
            countVector[cc] = len(temp)
        # NOTE(review): if no point clears the threshold for any class,
        # np.max(countVector) == 0 and this division yields NaN thresholds;
        # every comparison below is then False, so nothing gets assigned
        # (the round is effectively skipped).
        countVector_normalized = np.asarray(countVector) / np.max(countVector)

        # assign labels if the prob > per-class adaptive threshold ==========
        assigned_pseudo_labels = np.zeros((max_prob_matrix.shape[0], self.nClass)).astype(int)
        MaxPseudoPoint = [0] * self.nClass
        for cc in range(self.nClass):  # loop over each class
            # adaptive threshold: scaled down for classes that are learned
            # more slowly (fewer confident points)
            flex_class_upper_thresh = countVector_normalized[cc] * self.upper_threshold

            # maximum number of points that can be assigned to this class
            MaxPseudoPoint[cc] = self.get_max_pseudo_point(self.label_frequency[cc], current_iter)

            idx_sorted = np.argsort(max_prob_matrix[:, cc])[::-1]  # decreasing
            temp_idx = np.where(max_prob_matrix[idx_sorted, cc] > flex_class_upper_thresh)[0]
            labels_satisfied_threshold = idx_sorted[temp_idx]

            # only select up to MaxPseudoPoint[cc] points
            labels_satisfied_threshold = labels_satisfied_threshold[:MaxPseudoPoint[cc]]
            assigned_pseudo_labels[labels_satisfied_threshold, cc] = 1

        if self.verbose:
            print("MaxPseudoPoint", MaxPseudoPoint)

        # post-processing and augmenting the data into X and Y ==============
        return self.post_processing_and_augmentation(assigned_pseudo_labels, X, y)

    def fit(self, X, y):
        """
        Main pseudo-labeling loop.

        Args:
            X: train features [N x d]
            y: train targets [N x 1]
        Output:
            records the per-iteration test accuracy in self.test_acc
        """
        print("=====", self.algorithm_name)

        self.nClass = self.get_number_of_labels(y)
        self.label_frequency = self.estimate_label_frequency(y)

        for current_iter in (tqdm(range(self.num_iters)) if self.verbose else range(self.num_iters)):
            # fit the model to the current labeled + pseudo-labeled data
            self.model.fit(X, y)
            self.evaluate_performance()

            # predictive probabilities on the unlabeled data
            pseudo_labels_prob = self.get_predictive_prob_for_unlabelled_data(self.model)

            # Fix: pass the real iteration index. The original hard-coded
            # current_iter=0, which disabled the linear-ramp schedule in
            # get_max_pseudo_point and allocated the maximum budget each round.
            X, y = self.label_assignment_and_post_processing_FlexMatch(
                pseudo_labels_prob, X, y, current_iter=current_iter)

            if self.verbose:
                print("#augmented:", self.num_augmented_per_class, " len of training data ", len(y))

            if np.sum(self.num_augmented_per_class) == 0:  # no data point was augmented
                return

        # evaluate at the last iteration for reporting purposes
        self.model.fit(X, y)
        self.evaluate_performance()
| 8,767 | 42.405941 | 151 | py |
confident-sinkhorn-allocation | confident-sinkhorn-allocation-master/algorithm/pseudo_labeling.py |
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from sklearn.multioutput import MultiOutputClassifier
import copy
import sklearn
class Pseudo_Labeling(object):
    """
    Vanilla pseudo-labeling and the shared base class for the other
    strategies in this package (FlexMatch, UPS, CSA/SLA subclass it).

    The class keeps an XGBoost model, repeatedly fits it on the labeled set,
    assigns pseudo-labels to confident unlabeled points, and augments the
    training set with them.
    """

    def __init__(self, unlabelled_data, x_test, y_test, num_iters=5, upper_threshold=0.8, \
                 fraction_allocation=1, lower_threshold=None, num_XGB_models=0, \
                 verbose=False, IsMultiLabel=False):
        """
        unlabelled_data : [N x d] where N is the number of unlabeled data, d is the feature dimension
        x_test          : [N_test x d]
        y_test          : [N_test x 1] for multiclass or [N_test x K] for multilabel classification
        num_iters       : number of pseudo-iterations, recommended = 5 as in the paper
        upper_threshold : upper threshold used for pseudo-labeling, e.g., assign a label if prob > 0.8
        fraction_allocation: fraction of label allocation; 1 => labels may be assigned to 100% of unlabeled data
        lower_threshold : lower threshold, used only by UPS
        num_XGB_models  : number of XGB models used by UPS and CSA, recommended = 10
        verbose         : verbose
        IsMultiLabel    : False => multiclass, True => multilabel classification
        """
        self.algorithm_name = "Pseudo_Labeling"
        self.x_test = x_test
        self.y_test = y_test
        self.IsMultiLabel = IsMultiLabel

        # house-keeping / reporting containers (filled by CSA subclasses)
        self.len_unlabels = []
        self.len_accepted_ttest = []
        self.len_selected = []
        self.num_augmented_per_class = []

        # default hyper-parameters for the single main XGBoost model
        param = {}
        param['booster'] = 'gbtree'
        param['objective'] = 'binary:logistic'
        param['verbosity'] = 0
        param['silent'] = 1
        param['seed'] = 0

        # create the XGBoost instance with default hyper-parameters
        xgb = self.get_XGB_model(param)
        self.model = copy.copy(xgb)

        self.unlabelled_data = unlabelled_data  # temporary pool; shrinks each iteration
        self.verbose = verbose
        self.upper_threshold = upper_threshold
        self.num_iters = num_iters

        if lower_threshold is not None:
            self.lower_threshold = lower_threshold  # used by the UPS algorithm only

        # list of all unlabeled indices (pseudo-data may be re-picked later)
        self.unlabelled_indices = list(range(unlabelled_data.shape[0]))
        self.selected_unlabelled_index = []

        if self.verbose:
            print("no of unlabelled data:", unlabelled_data.shape[0], "\t no of test data:", x_test.shape[0])

        # shuffle the indices
        np.random.shuffle(self.unlabelled_indices)

        self.test_acc = []
        self.FractionAllocatedLabel = fraction_allocation
        self.num_XGB_models = num_XGB_models  # the parameter M in the paper

        if num_XGB_models > 1:  # an ensemble is only needed for CSA and UPS
            # draw M random hyper-parameter configurations for uncertainty estimation
            params = {'max_depth': np.arange(3, 20).astype(int),
                      'learning_rate': [0.01, 0.1, 0.2, 0.3],
                      'subsample': np.arange(0.5, 1.0, 0.05),
                      'colsample_bytree': np.arange(0.4, 1.0, 0.05),
                      'colsample_bylevel': np.arange(0.4, 1.0, 0.05),
                      'n_estimators': [100, 200, 300, 500, 600, 700, 1000]}

            self.XGBmodels_list = [0] * self.num_XGB_models
            param_list = [0] * self.num_XGB_models
            for tt in range(self.num_XGB_models):
                param_list[tt] = {}
                for key in params.keys():
                    mychoice = np.random.choice(params[key])
                    param_list[tt][key] = mychoice

                param_list[tt]['verbosity'] = 0
                param_list[tt]['silent'] = 1
                param_list[tt]['seed'] = tt
                self.XGBmodels_list[tt] = self.get_XGB_model(param_list[tt])

    def get_XGB_model(self, param):
        """
        Create an XGB model appropriate for the multiclass or multilabel setting.

        Args:
            param: predefined hyper-parameter dict for the XGB model
        Output:
            a single XGBClassifier for multiclass, or
            a MultiOutputClassifier wrapping XGBClassifier for multilabel
        """
        if self.IsMultiLabel == False:
            return XGBClassifier(**param, use_label_encoder=False)
        else:
            return MultiOutputClassifier(XGBClassifier(**param, use_label_encoder=False))

    def get_predictive_prob_for_unlabelled_data(self, model):
        """
        Compute the predictive probability within [0,1] for the unlabeled data.

        Args:
            model: a single (fitted) XGB model
        Output:
            predictive probability matrix [N x K]
        """
        pseudo_labels_prob = model.predict_proba(self.unlabelled_data)

        if self.IsMultiLabel == True:
            # MultiOutputClassifier returns a list of K [N x 2] arrays;
            # keep the positive-class column for each label => [N x K]
            pseudo_labels_prob = np.asarray(pseudo_labels_prob).T
            pseudo_labels_prob = pseudo_labels_prob[1, :, :]

        return pseudo_labels_prob

    def estimate_label_frequency(self, y):
        """
        Estimate the label frequency empirically from the initial labeled data.

        Args:
            y: label vector (multiclass) or matrix (multilabel)
        Output:
            normalized vector of label frequency [1 x K]
        """
        if self.IsMultiLabel == False:
            if len(self.num_augmented_per_class) > 0:
                # skip the pseudo-labeled rows prepended in earlier iterations
                unique, label_frequency = np.unique(y[np.sum(self.num_augmented_per_class):], return_counts=True)
            else:
                unique, label_frequency = np.unique(y, return_counts=True)
        else:
            label_frequency = np.sum(y, axis=0)

        if self.verbose:
            print("==label_frequency without adjustment", np.round(label_frequency, 3))

        # Smooth the label frequency if max/min class ratio is significant (>5).
        # This is an implementation trick to prevent biased estimation given
        # limited training data.
        ratio = np.max(label_frequency) / np.min(label_frequency)
        if ratio > 5:
            label_frequency = label_frequency / np.sum(label_frequency) + np.ones(self.nClass) * 1.0 / self.nClass

        return label_frequency / np.sum(label_frequency)

    def evaluate_performance(self):
        """
        Evaluate the classification performance on the test set and append the
        result to self.test_acc: accuracy for multiclass, sample-wise precision
        for multilabel.
        """
        y_test_pred = self.model.predict(self.x_test)

        if self.IsMultiLabel == False:
            test_acc = np.round(accuracy_score(y_test_pred, self.y_test) * 100, 2)  # round to xx.yy %
            if self.verbose:
                print('+++Test Acc: {:.2f}%'.format(test_acc))
            self.test_acc += [test_acc]
        else:  # multilabel classification
            prec = sklearn.metrics.precision_score(self.y_test, y_test_pred, average='samples') * 100
            prec = np.round(prec, 2)  # round to xx.yy %
            self.test_acc += [prec]  # precision score
            if self.verbose:
                print('+++Test Acc: {:.2f}%'.format(prec))

    def get_prob_at_max_class(self, pseudo_labels_prob):
        """
        Given the 2d probability matrix [N x K], keep only each row's maximum
        entry (all other entries are zeroed).

        Args:
            pseudo_labels_prob: 2d probability matrix [N x K]
        Returns:
            max_prob_matrix: [N x K], nonzero only at each row's argmax class
        """
        max_prob_matrix = np.zeros((pseudo_labels_prob.shape))
        for ii in range(pseudo_labels_prob.shape[0]):  # loop over each data point
            idxMax = np.argmax(pseudo_labels_prob[ii, :])  # highest-score class
            max_prob_matrix[ii, idxMax] = pseudo_labels_prob[ii, idxMax]
        return max_prob_matrix

    def post_processing_and_augmentation(self, assigned_pseudo_labels, X, y):
        """
        Augment the assigned pseudo-labels into X and y and shrink the
        unlabeled pool accordingly.

        Args:
            assigned_pseudo_labels: [N x K] 0/1 matrix (1 = assigned)
            X: existing pseudo-labeled + labeled data [N' x d]
            y: existing labels [N' x 1] (multiclass) or [N' x K] (multilabel)
        Output:
            Augmented X, augmented y
        """
        sum_by_cols = np.sum(assigned_pseudo_labels, axis=1)
        labels_satisfied_threshold = np.where(sum_by_cols > 0)[0]

        self.num_augmented_per_class.append(np.sum(assigned_pseudo_labels, axis=0).astype(int))

        if len(labels_satisfied_threshold) == 0:  # no point was selected
            return X, y

        self.selected_unlabelled_index += labels_satisfied_threshold.tolist()

        # augment the assigned labels into X and y =========================
        X = np.vstack((self.unlabelled_data[labels_satisfied_threshold, :], X))

        if self.IsMultiLabel == False:  # y is an [N x 1] matrix
            y = np.vstack((np.argmax(assigned_pseudo_labels[labels_satisfied_threshold, :], axis=1).reshape(-1, 1),
                           np.array(y).reshape(-1, 1)))
        else:  # y is an [N x K] matrix
            y = np.vstack((assigned_pseudo_labels[labels_satisfied_threshold, :], np.array(y)))

        if "CSA" in self.algorithm_name:  # book keeping for CSA reporting
            self.len_unlabels.append(len(self.unlabelled_data))
            self.len_accepted_ttest.append(assigned_pseudo_labels.shape[0])
            self.len_selected.append(np.sum(self.num_augmented_per_class))

        # remove the selected data from the unlabeled pool
        self.unlabelled_data = np.delete(self.unlabelled_data, np.unique(labels_satisfied_threshold), 0)

        return X, y

    def label_assignment_and_post_processing(self, pseudo_labels_prob, X, y, current_iter=0, upper_threshold=None):
        """
        Given the threshold, perform label assignment and post-processing.

        Args:
            pseudo_labels_prob: predictive prob [N x K] where N is #unlabeled, K is #class
            X: existing pseudo-labeled + labeled data [N' x d]
            y: existing labels [N' x 1] (multiclass) or [N' x K] (multilabel)
            current_iter: current pseudo-labeling iteration (controls per-class budget)
            upper_threshold: optional threshold override; defaults to self.upper_threshold
        Output:
            Augmented X, augmented y
        """
        if self.IsMultiLabel == False:
            # multiclass: keep only each row's argmax probability — a point
            # may only be assigned to a single class
            max_prob_matrix = self.get_prob_at_max_class(pseudo_labels_prob)
        else:
            # multilabel: a point may be assigned to multiple classes
            max_prob_matrix = pseudo_labels_prob

        if upper_threshold is None:
            upper_threshold = self.upper_threshold

        if 'CSA' in self.algorithm_name:  # CSA relies on Sinkhorn allocation, not a threshold
            upper_threshold = 0

        assigned_pseudo_labels = np.zeros((max_prob_matrix.shape[0], self.nClass)).astype(int)

        MaxPseudoPoint = [0] * self.nClass
        for cc in range(self.nClass):  # loop over each class
            MaxPseudoPoint[cc] = self.get_max_pseudo_point(self.label_frequency[cc], current_iter)

            idx_sorted = np.argsort(max_prob_matrix[:, cc])[::-1]  # decreasing
            temp_idx = np.where(max_prob_matrix[idx_sorted, cc] > upper_threshold)[0]
            labels_satisfied_threshold = idx_sorted[temp_idx]

            # only select up to MaxPseudoPoint[cc] points
            labels_satisfied_threshold = labels_satisfied_threshold[:MaxPseudoPoint[cc]]
            assigned_pseudo_labels[labels_satisfied_threshold, cc] = 1

        if self.verbose:
            print("MaxPseudoPoint", MaxPseudoPoint)

        return self.post_processing_and_augmentation(assigned_pseudo_labels, X, y)

    def get_number_of_labels(self, y):
        """
        Given the label data y, return the number of classes/labels.

        Args:
            y: label vector (multiclass) or matrix (multilabel)
        Output:
            number of classes or number of labels
        """
        if self.IsMultiLabel == False:
            return len(np.unique(y))
        else:
            return y.shape[1]

    def get_max_pseudo_point(self, fraction_of_class, current_iter):
        """
        Budget of pseudo-labels per class for this iteration: more points are
        selected at the beginning and fewer at later iterations (linear ramp).

        Args:
            fraction_of_class: frequency of this class
            current_iter: current iteration 0,1,2,...,T
        Output:
            number_of_max_pseudo_points: scalar int
        """
        LinearRamp = [(self.num_iters - ii) / self.num_iters for ii in range(self.num_iters)]
        SumLinearRamp = np.sum(LinearRamp)

        fraction_iter = (self.num_iters - current_iter) / (self.num_iters * SumLinearRamp)
        MaxPseudoPoint = fraction_iter * fraction_of_class * self.FractionAllocatedLabel * len(self.unlabelled_data)

        # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; use the
        # builtin int instead (same value).
        return int(np.ceil(MaxPseudoPoint))

    def fit(self, X, y):
        """
        Main pseudo-labeling loop.

        Args:
            X: train features [N x d]
            y: train targets [N x 1]
        Output:
            records the per-iteration test accuracy in self.test_acc
        """
        print("=====", self.algorithm_name)
        self.nClass = self.get_number_of_labels(y)

        self.label_frequency = self.estimate_label_frequency(y)

        for current_iter in (tqdm(range(self.num_iters)) if self.verbose else range(self.num_iters)):
            self.selected_unlabelled_index = []

            # fit to the current labeled + pseudo-labeled data
            self.model.fit(X, y)

            # evaluate the performance on the test set after fitting
            self.evaluate_performance()

            # predictive probability on the unlabeled data
            pseudo_labels_prob = self.get_predictive_prob_for_unlabelled_data(self.model)

            X, y = self.label_assignment_and_post_processing(pseudo_labels_prob, X, y, current_iter)

            if self.verbose:
                print("#augmented:", self.num_augmented_per_class, " no training data ", len(y))

            if np.sum(self.num_augmented_per_class) == 0:  # no data point was augmented
                return

        # evaluate at the last iteration for reporting purposes
        self.model.fit(X, y)
        self.evaluate_performance()

    def predict(self, X):
        """Predict labels for X with the current fitted model."""
        return self.model.predict(X)

    def predict_proba(self, X):
        """Predict class probabilities for X with the current fitted model."""
        return self.model.predict_proba(X)
confident-sinkhorn-allocation | confident-sinkhorn-allocation-master/algorithm/csa.py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 15 14:19:22 2021
@author: Vu Nguyen
"""
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from scipy import stats
import time
from .pseudo_labeling import Pseudo_Labeling
# Confident Sinkhorn Allocation==================================================================================================
class CSA(Pseudo_Labeling):
    def __init__(self, unlabelled_data, x_test,y_test,num_iters=5,num_XGB_models=20,confidence_choice="ttest",verbose = False,IsMultiLabel=False):
        """
        Confident Sinkhorn Allocation (CSA).

        Args:
            unlabelled_data  : [N x d] where N is the number of unlabeled data, d is the feature dimension
            x_test           : [N_test x d]
            y_test           : [N_test x 1] for multiclassification or [N_test x K] for multilabel classification
            num_iters        : number of pseudo-iterations, recommended = 5 as in the paper
            num_XGB_models   : ensemble size M used for the confidence estimate, recommended = 20
            confidence_choice: criterion used to score confidence before the Sinkhorn
                               allocation, e.g. "ttest" (default) or "variance";
                               None (or the string "None") disables the confidence
                               filter and runs plain Sinkhorn Label Allocation (SLA)
            verbose          : verbose
            IsMultiLabel     : False => Multiclassification or True => Multilabel classification
        """
        super().__init__( unlabelled_data, x_test,y_test,num_iters=num_iters,num_XGB_models=num_XGB_models,verbose=verbose,IsMultiLabel=IsMultiLabel)

        self.confidence_choice=confidence_choice

        if self.IsMultiLabel==True:
            # by default, we use total_variance as the main criteria for multilabel classification
            if self.confidence_choice is not None:
                self.confidence_choice="variance"

        # the algorithm name also drives behavior elsewhere: the base class
        # checks for the "CSA" substring when book-keeping and thresholding
        if self.confidence_choice is None or self.confidence_choice=="None":
            self.algorithm_name="SLA"
        else:
            self.algorithm_name="CSA_" + self.confidence_choice

        # timing containers for profiling the three main stages
        self.elapse_xgb=[]
        self.elapse_ttest=[]
        self.elapse_sinkhorn=[]

        if self.verbose:
            print("number of used XGB models M=",self.num_XGB_models)
def predict(self, X):
super().predict(X)
def predict_proba(self, X):
super().predict_proba(X)
    def evaluate_performance(self):
        # Delegates to the base class, which computes test accuracy (multiclass)
        # or sample-wise precision (multilabel) and appends it to self.test_acc.
        super().evaluate_performance()
    def get_max_pseudo_point(self,class_freq,current_iter):
        # Per-class budget of pseudo-labels for this iteration; the base-class
        # linear ramp allocates more points early and fewer at later iterations.
        return super().get_max_pseudo_point(class_freq,current_iter)
def set_ot_regularizer(self,nRow,nCol):
"""
We set the Sinkhorn regularization parameter based on the ratio of Row/Column
Args:
nRow: number of rows in our cost matrix for Sinkhorn algorithm
nCol: number of columns
Output:
regularization
"""
if nRow/nCol>=300:
regulariser=1
if nRow/nCol>=200:
regulariser=0.5
elif nRow/nCol>=100:
regulariser=0.2
elif nRow/nCol>=50:
regulariser=0.1
else:
regulariser=0.05
if self.IsMultiLabel:
if self.nClass>20:
regulariser=regulariser*5
else:
regulariser=regulariser*200
return regulariser
def data_uncertainty(self,pseudo_labels_prob_list):
"""
Args:
pseudo_labels_prob_list: [M x N x K]
Output:
entropy: [N x 1]
"""
ent=np.zeros((pseudo_labels_prob_list.shape[0],pseudo_labels_prob_list.shape[1]))
for mm in range(pseudo_labels_prob_list.shape[0]):
ent[mm,:]= self.entropy_prediction(pseudo_labels_prob_list[mm,:,:])
return np.mean(ent,axis=0)
def entropy_prediction(self,ave_pred,atClass=None):
"""
Args:
ave_pred: [N x K]
Output:
entropy: [N x 1]
"""
ent=[0]*ave_pred.shape[0]
for ii in range(ave_pred.shape[0]):
ent[ii]= - np.sum( ave_pred[ii,:]*np.log(ave_pred[ii,:]))
return np.asarray(ent)
def total_entropy(self,pseudo_labels_prob_list, atClass=None):
"""
calculate total entropy
Args:
pseudo_labels_prob_list: [M x N x K]: M #XGB, N #unlabels, K #class
Output:
total_entropy score [N x 1]
"""
ave_pred=np.mean(pseudo_labels_prob_list,axis=0) # average over model
total_uncertainty=self.entropy_prediction(ave_pred,atClass)
return total_uncertainty
def knowledge_uncertainty(self,pred):
total_uncertainty=self.total_uncertainty(pred)
data_uncertainty=self.data_uncertainty(pred)
knowledge_uncertainty = total_uncertainty-data_uncertainty
return knowledge_uncertainty
def total_variance(self,pseudo_labels_prob_list):
"""
calculate total variance
Args:
pseudo_labels_prob_list: [M x N x K]: M #XGB, N #unlabels, K #class
Output:
standard deviation score [N x 1]
"""
# [nModel, nPoint, nClass]
std_pred = np.std( pseudo_labels_prob_list, axis=0) # std over models
total_std = np.sum(std_pred, axis=1) # sum of std over classes
return total_std
    def calculate_ttest(self,pseudo_labels_prob_list):
        """
        Confidence via a Welch-style t-test between the top-1 and top-2 class
        probabilities across the M ensemble members.

        For each unlabeled point the statistic compares the ensemble-mean
        probability of the best class against the second-best class, scaled by
        their across-model variances; the 95% critical value (computed with a
        Welch–Satterthwaite degree of freedom) is subtracted, so a positive
        result means "top-1 is significantly larger than top-2".

        Args:
            pseudo_labels_prob_list: [M x N x K]: M #XGB, N #unlabels, K #class
        Output:
            t-test score [N x 1] (positive => confident point)
        """

        num_points=pseudo_labels_prob_list.shape[1]

        var_rows_argmax=[0]*num_points
        var_rows_arg2ndmax=[0]*num_points

        t_test=[0]*num_points
        t_value=[0]*num_points

        # ensemble-mean probabilities, then the top-2 classes per point
        pseudo_labels_prob= np.mean(pseudo_labels_prob_list,axis=0)

        temp=np.argsort(-pseudo_labels_prob,axis=1) # decreasing
        idxargmax=temp[:,0]
        idx2nd_argmax= temp[:,1]

        for jj in range(num_points):# go over each row (data points)

            idxmax =idxargmax[jj]
            idx2ndmax=idx2nd_argmax[jj]

            # across-model variance of the top-1 and top-2 class probabilities
            var_rows_argmax[jj]=np.var(pseudo_labels_prob_list[:,jj,idxmax ])
            var_rows_arg2ndmax[jj]=np.var(pseudo_labels_prob_list[:,jj,idx2ndmax])

            # t statistic: mean gap over pooled standard error; the constant
            # 0.1 smooths the variance so the denominator never collapses to 0
            nominator=pseudo_labels_prob[jj, idxmax]-pseudo_labels_prob[jj, idx2ndmax]
            temp=(0.1 + var_rows_argmax[jj] + var_rows_arg2ndmax[jj] )/self.num_XGB_models
            denominator=np.sqrt(temp)
            t_test[jj] = nominator/denominator

            # compute degree of freedom=========================================
            # Welch–Satterthwaite approximation from the two variances
            nominator = (var_rows_argmax[jj] + var_rows_arg2ndmax[jj])**2
            denominator= var_rows_argmax[jj]**2 + var_rows_arg2ndmax[jj]**2
            denominator=denominator/(self.num_XGB_models-1)
            dof=nominator/denominator

            # two-sided 95% critical value; subtracting it makes the score
            # positive exactly when the gap is statistically significant
            t_value[jj]=stats.t.ppf(1-0.025, dof)

            t_test[jj]=t_test[jj]-t_value[jj]

        return t_test
    def label_assignment_and_post_processing_for_CSA(self, assignment_matrix,pseudo_labels_prob,X,y, current_iter=0):
        """
        Turn the Sinkhorn assignment matrix into hard pseudo-labels and augment
        the training set.

        Args:
            assignment_matrix: OT assignment mass Q [N x K], N #unlabels, K #class
            pseudo_labels_prob: predictive prob [N x K] where N is #unlabels, K is #class
            X: existing pseudo_labeled + labeled data [ N' x d ]
            y: existing pseudo_labeled + labeled data [ N' x 1 ] for multiclassification
            y: existing pseudo_labeled + labeled data [ N' x K ] for multilabel classification
            current_iter: pseudo-iteration index, used to grow the per-class budget
        Output:
            Augmented X = augmented_X + X
            Augmented y = augmented_y + Y
        """
        if self.IsMultiLabel==False:
            #go over each row (data point), only keep the argmax prob
            # because we only allow a single data point to a single class
            max_prob_matrix=self.get_prob_at_max_class(pseudo_labels_prob)
        else:
            # we dont need to get prob at max class for multi-label
            # because a single data point can be assigned to multiple classes
            max_prob_matrix=pseudo_labels_prob
        # keep only each point's argmax assignment mass (zero elsewhere)
        assignment_matrix=self.get_prob_at_max_class(assignment_matrix)
        assigned_pseudo_labels=np.zeros((max_prob_matrix.shape[0],self.nClass)).astype(int)
        MaxPseudoPoint=[0]*self.nClass
        for cc in range(self.nClass): # loop over each class
            # per-class budget of pseudo-labels for this iteration
            MaxPseudoPoint[cc]=self.get_max_pseudo_point(self.label_frequency[cc],current_iter)
            idx_sorted = np.argsort( assignment_matrix[:,cc])[::-1] # decreasing
            # NOTE: because idx_sorted is a *descending* sort, the entries with
            # mass > 0 occupy a prefix of it, so idx_assignment == arange(k) and
            # positions within the filtered prefix coincide with positions in
            # idx_sorted; the idx_sorted[idx_satisfied] lookup below relies on
            # this prefix property.
            idx_assignment = np.where(assignment_matrix[idx_sorted,cc] > 0 )[0]
            # we dont accept labels with less than 0.5 prediction, this works well for multilabel classification
            idx_satisfied = np.where(pseudo_labels_prob[idx_sorted[idx_assignment],cc] > 0.5 )[0]
            # only select upto MaxPseudoPoint[cc] points
            labels_satisfied_threshold=idx_sorted[idx_satisfied][:MaxPseudoPoint[cc]]
            assigned_pseudo_labels[labels_satisfied_threshold, cc]=1
        if self.verbose:
            print("MaxPseudoPoint",MaxPseudoPoint)
        return self.post_processing_and_augmentation(assigned_pseudo_labels,X,y)
def fit(self, X, y):
"""
main algorithm to perform pseudo labelling
Args:
X: train features [N x d]
y: train targets [N x 1]
Output:
we record the test_accuracy a vector of test accuracy per pseudo-iteration
"""
print("=====",self.algorithm_name)
self.nClass=self.get_number_of_labels(y)
self.label_frequency=self.estimate_label_frequency(y)
for current_iter in (tqdm(range(self.num_iters)) if self.verbose else range(self.num_iters)):
# Fit to data
self.model.fit(X, y)
self.evaluate_performance()
num_points=self.unlabelled_data.shape[0]
pseudo_labels_prob_list=[0]*self.num_XGB_models
tic = time.perf_counter()
# estimate prob using unlabelled data on M XGB models
pseudo_labels_prob_list=[0]*self.num_XGB_models
for mm in range(self.num_XGB_models):
self.XGBmodels_list[mm].fit(X, y)
pseudo_labels_prob_list[mm] = self.get_predictive_prob_for_unlabelled_data(self.XGBmodels_list[mm])
toc = time.perf_counter()
self.elapse_xgb.append(toc-tic)
pseudo_labels_prob_list=np.asarray(pseudo_labels_prob_list) # P [M x N x K]
pseudo_labels_prob= np.mean(pseudo_labels_prob_list,axis=0) # \bar{P} [N x K]
tic = time.perf_counter() # Start Time
# estimate confidence level here====================================
if self.confidence_choice=="variance":
tot_variance=self.total_variance(pseudo_labels_prob_list)
confidence=1-tot_variance
confidence=confidence-np.mean(confidence)
elif self.confidence_choice=="neg_variance":
confidence=self.total_variance(pseudo_labels_prob_list)
confidence=confidence-np.mean(confidence)
elif self.confidence_choice=='entropy':
tot_ent=self.total_entropy(pseudo_labels_prob_list)
confidence=1-tot_ent
confidence=confidence-0.5*np.mean(confidence)
elif self.confidence_choice=='neg_entropy':
confidence=self.total_entropy(pseudo_labels_prob_list)
confidence=confidence-np.mean(confidence)
elif self.confidence_choice=="ttest":
confidence=self.calculate_ttest(pseudo_labels_prob_list)
elif self.confidence_choice=="neg_ttest":
confidence=self.calculate_ttest(pseudo_labels_prob_list)
confidence=-np.asarray(confidence)
elif self.confidence_choice==None or self.confidence_choice=="None": # not using any confidence score, accepting all data point similar to SLA
confidence=np.ones((1,num_points))
confidence=np.clip(confidence, a_min=0,a_max=np.max(confidence))
toc = time.perf_counter() # End Time
self.elapse_ttest.append(toc-tic)
# for numerical stability of OT, select the nonzero entry only
idxNoneZero=np.where( confidence>0 )[0]
#idxNoneZero=np.where( (confidence>0) & (confidence<0.9*np.max(confidence)) )[0]
num_points= len(idxNoneZero)
if self.verbose:
print("num_points accepted= ",num_points, " total num_points=",len(self.unlabelled_data))
if len(idxNoneZero)==0: # terminate if could not find any point satisfying constraints
return self.test_acc
# Sinkhorn's algorithm ======================================================================
# fraction of label being assigned.
max_allocation_point= self.get_max_pseudo_point(class_freq=1,current_iter=current_iter)
rho=max_allocation_point/ len(self.unlabelled_data)
# regulariser for Sinkhorn's algorithm
regulariser=self.set_ot_regularizer(num_points, self.nClass)
tic = time.perf_counter()
# this is w_{+} and w_{-} in the paper
upper_b_per_class=self.label_frequency*1.1
lower_b_per_class=self.label_frequency*0.9
# we define row marginal distribution =============================
row_marginal=np.ones(num_points)
temp=num_points*rho*(np.sum(upper_b_per_class)-np.sum(lower_b_per_class))
row_marginal = np.append(row_marginal,temp)
if self.verbose:
print("#unlabel={:d} #points/#classes={:d}/{:d}={:.2f} reg={:.2f}".format(
len(self.unlabelled_data),num_points,self.nClass,num_points/self.nClass,regulariser))
C=1-pseudo_labels_prob # cost # expand Cost matrix
C=C[idxNoneZero,:]
C=np.vstack((C,np.zeros((1,self.nClass))))
C=np.hstack((C,np.zeros((len(idxNoneZero)+1,1))))
K=np.exp(-C/regulariser)
# define column marginal distribution ==============================
col_marginal = rho*upper_b_per_class*num_points # frequency of the class label
temp=num_points*(1-rho*np.sum(lower_b_per_class))
col_marginal = np.append(col_marginal,temp)
# checking the total mass of column marginal ~ row marginal
if np.abs( np.sum(col_marginal) - np.sum(row_marginal) ) > 0.001 :
print("np.sum(dist_labels) - np.sum(dist_points) > 0.001")
# initialize uu and perform Sinkhorn algorithm
uu=np.ones( (num_points+1,))
for jj in range(100):
vv= col_marginal / np.dot(K.T, uu)
uu= row_marginal / np.dot(K, vv)
# compute label assignment matrix Q'
Q_prime= np.atleast_2d(uu).T*(K*vv.T)
toc = time.perf_counter()
self.elapse_sinkhorn.append(toc-tic)
# this is the final Q matrix
assignment_matrix_Q=np.zeros((pseudo_labels_prob.shape))
assignment_matrix_Q[idxNoneZero,:]=Q_prime[:-1,:-1]
X,y=self.label_assignment_and_post_processing_for_CSA(assignment_matrix_Q,pseudo_labels_prob,X,y,current_iter)
if self.verbose:
print("#augmented:", self.num_augmented_per_class, " len of training data ", len(y))
# evaluate_performance at the last iteration for reporting purpose
self.model.fit(X, y)
self.evaluate_performance() | 16,573 | 38.368171 | 155 | py |
confident-sinkhorn-allocation | confident-sinkhorn-allocation-master/algorithm/ups.py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 15 14:19:22 2021
@author: Vu Nguyen
"""
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from .pseudo_labeling import Pseudo_Labeling
# UPS: ===========================================================================================
# Rizve, Mamshad Nayeem, Kevin Duarte, Yogesh S. Rawat, and Mubarak Shah.
# "In Defense of Pseudo-Labeling: An Uncertainty-Aware Pseudo-label Selection Framework for Semi-Supervised Learning."
# ICLR. 2020.
# https://arxiv.org/pdf/2101.06329.pdf
class UPS(Pseudo_Labeling):
    """
    UPS: uncertainty-aware pseudo-label selection (Rizve et al., ICLR 2021,
    https://arxiv.org/pdf/2101.06329.pdf).

    A point is pseudo-labelled for a class only when the ensemble-averaged
    probability is high (> upper_threshold) AND the disagreement across the
    XGB ensemble (std of predictions) is low (< lower_threshold).
    """
    # adaptive thresholding
    def __init__(self, unlabelled_data, x_test,y_test,num_iters=5,upper_threshold = 0.8, lower_threshold = 0.2,\
            num_XGB_models=10,verbose = False,IsMultiLabel=False):
        """
        unlabelled_data      : [N x d] where N is the number of unlabeled data, d is the feature dimension
        x_test               :[N_test x d]
        y_test               :[N_test x 1] for multiclassification or [N_test x K] for multilabel classification
        num_iters            : number of pseudo-iterations, recommended = 5 as in the paper
        upper_threshold      : the upper threshold used for pseudo-labeling, e.g., we assign label if the prob > 0.8
        lower_threshold      : lower threshold on the uncertainty score, used for UPS
        num_XGB_models       : number of XGB models used for UPS and CSA, recommended = 10
        verbose              : verbose
        IsMultiLabel         : False => Multiclassification or True => Multilabel classification
        """
        super().__init__( unlabelled_data, x_test,y_test,num_iters=num_iters,upper_threshold=upper_threshold,\
            lower_threshold=lower_threshold,num_XGB_models=num_XGB_models,verbose=verbose,IsMultiLabel=IsMultiLabel)
        self.algorithm_name="UPS"
    def predict(self, X):
        # BUG FIX: the base-class prediction result used to be dropped
        # (no return statement); return it, consistent with predict_proba.
        return super().predict(X)
    def predict_proba(self, X):
        return super().predict_proba(X)
    def evaluate_performance(self):
        super().evaluate_performance()
    def uncertainty_score(self, matrix_prob):
        return super().uncertainty_score(matrix_prob)
    def get_prob_at_max_class(self,pseudo_labels_prob):
        return super().get_prob_at_max_class(pseudo_labels_prob)
    def get_max_pseudo_point(self,class_freq,current_iter):
        return super().get_max_pseudo_point(class_freq,current_iter)
    def label_assignment_and_post_processing_UPS(self, pseudo_labels_prob,uncertainty_scores,X,y, current_iter=0,upper_threshold=None):
        """
        Given the thresholds, perform label assignment and post-processing.

        Args:
            pseudo_labels_prob: predictive prob [N x K] where N is #unlabels, K is #class
            uncertainty_scores : uncertainty_score of each data point at each class [N x K]
            X: existing pseudo_labeled + labeled data [ N' x d ]
            y: existing pseudo_labeled + labeled data [ N' x 1 ] for multiclassification
            y: existing pseudo_labeled + labeled data [ N' x K ] for multilabel classification
        Output:
            Augmented X = augmented_X + X
            Augmented y = augmented_y + Y
        """
        if self.IsMultiLabel==False:
            #go over each row (data point), only keep the argmax prob
            # because we only allow a single data point to a single class
            max_prob_matrix=self.get_prob_at_max_class(pseudo_labels_prob)
        else:
            # we dont need to get prob at max class for multi-label
            # because a single data point can be assigned to multiple classes
            max_prob_matrix=pseudo_labels_prob
        assigned_pseudo_labels=np.zeros((max_prob_matrix.shape[0],self.nClass)).astype(int)
        MaxPseudoPoint=[0]*self.nClass
        for cc in range(self.nClass): # loop over each class
            MaxPseudoPoint[cc]=self.get_max_pseudo_point(self.label_frequency[cc],current_iter)
            idx_sorted = np.argsort( max_prob_matrix[:,cc])[::-1] # decreasing
            # descending sort => points above the prob threshold occupy a
            # prefix of idx_sorted, so positions within that prefix coincide
            # with positions in idx_sorted (idx_sorted[idx_within_prob_uncertainty]
            # below is valid because of this prefix property)
            idx_within_prob = np.where( max_prob_matrix[idx_sorted,cc] > self.upper_threshold )[0]
            idx_within_prob_uncertainty = np.where( uncertainty_scores[idx_sorted[idx_within_prob],cc] < self.lower_threshold)[0]
            # only select upto MaxPseudoPoint[cc] points
            labels_satisfied_threshold=idx_sorted[idx_within_prob_uncertainty][:MaxPseudoPoint[cc]]
            assigned_pseudo_labels[labels_satisfied_threshold, cc]=1
        if self.verbose:
            print("MaxPseudoPoint",MaxPseudoPoint)
        return self.post_processing_and_augmentation(assigned_pseudo_labels,X,y)
    def fit(self, X, y):
        """
        main algorithm to perform pseudo labelling

        Args:
            X: train features [N x d]
            y: train targets [N x 1]
        Output:
            we record the test_accuracy a vector of test accuracy per pseudo-iteration
        """
        print("=====",self.algorithm_name)
        self.nClass=self.get_number_of_labels(y)
        self.label_frequency=self.estimate_label_frequency(y)

        for current_iter in (tqdm(range(self.num_iters)) if self.verbose else range(self.num_iters)):
            # Fit to data
            self.model.fit(X, y)
            self.evaluate_performance()

            # estimate prob using unlabelled data on M XGB models
            pseudo_labels_prob_list=[0]*self.num_XGB_models
            for mm in range(self.num_XGB_models):
                self.XGBmodels_list[mm].fit(X, y) # fit an XGB model
                pseudo_labels_prob_list[mm] = self.get_predictive_prob_for_unlabelled_data(self.XGBmodels_list[mm])
            pseudo_labels_prob_list=np.asarray(pseudo_labels_prob_list)
            pseudo_labels_prob= np.mean(pseudo_labels_prob_list,axis=0)

            # uncertainty (std across the ensemble) at the argmax class only;
            # all other entries stay at 1 so they always fail the < threshold test
            uncertainty_scores=np.ones((pseudo_labels_prob.shape))
            for ii in range(pseudo_labels_prob.shape[0]):# go over each row (data points)
                idxMax=np.argmax( pseudo_labels_prob[ii,:] )
                uncertainty_scores[ii,idxMax]=np.std(pseudo_labels_prob_list[:,ii,idxMax])

            X,y=self.label_assignment_and_post_processing_UPS(pseudo_labels_prob,uncertainty_scores,X,y,current_iter)

            if np.sum(self.num_augmented_per_class)==0: # no data point is augmented
                return
            if self.verbose:
                print("#added:", self.num_augmented_per_class, " no train data", len(y))

        # evaluate_performance at the last iteration for reporting purpose
        self.model.fit(X, y)
        self.evaluate_performance()
| 7,123 | 42.175758 | 135 | py |
Unimer | Unimer-master/lr_scheduler_wrapper.py | # coding=utf8
from typing import Dict, Any
from overrides import overrides
from torch.optim.lr_scheduler import MultiStepLR
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
class PyTorchMultiStepLearningRateSchedulerWrapper(LearningRateScheduler):
    """Adapter exposing torch's MultiStepLR through AllenNLP's LearningRateScheduler API.

    Every method simply delegates to the wrapped scheduler.
    """

    def __init__(self, lr_scheduler: MultiStepLR) -> None:
        # the wrapped torch scheduler; all calls below delegate to it
        self.lr_scheduler = lr_scheduler

    def get_values(self):
        # NOTE(review): MultiStepLR.get_lr() is deprecated in newer torch in
        # favour of get_last_lr() -- confirm against the pinned torch version.
        return self.lr_scheduler.get_lr()

    @overrides
    def step(self, metric: float = None, epoch: int = None) -> None:
        # `metric` is ignored; MultiStepLR steps purely by epoch
        self.lr_scheduler.step(epoch)

    @overrides
    def state_dict(self) -> Dict[str, Any]:
        return self.lr_scheduler.state_dict()

    @overrides
    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        self.lr_scheduler.load_state_dict(state_dict)
| 816 | 27.172414 | 76 | py |
Unimer | Unimer-master/custom_trainer.py | # coding=utf8
import math
import time
import torch
import logging
from typing import Dict, List, Tuple, Optional, Iterable, Union, Callable, NoReturn
from allennlp.data import Instance
from allennlp.data.iterators.data_iterator import TensorDict, DataIterator
from allennlp.models import Model
from allennlp.training.checkpointer import Checkpointer
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.momentum_schedulers import MomentumScheduler
from allennlp.training.moving_average import MovingAverage
from allennlp.training.tensorboard_writer import TensorboardWriter
from overrides import overrides
from allennlp.training import Trainer
from allennlp.nn import util as nn_util
from allennlp.common.util import (dump_metrics, gpu_memory_mb, peak_memory_mb,
lazy_groups_of)
from allennlp.common.tqdm import Tqdm
from allennlp.training import util as training_util
logger = logging.getLogger(__name__)
def record_loss(outputs_dict: Dict, tensorboard: TensorboardWriter):
    """Log every loss-like entry of a model's output dict to tensorboard.

    Any key ending in "loss" is written as a train scalar under the
    "loss/" prefix; all other entries are ignored.
    """
    for name, scalar in outputs_dict.items():
        if not name.endswith("loss"):
            continue
        tensorboard.add_train_scalar("loss/%s" % name, scalar)
class CustomTrainer(Trainer):
    """AllenNLP Trainer variant with two extra hooks:

    * ``tensorboard_log_batch_callback`` -- called after every logged training
      batch with the model's raw output dict and the tensorboard writer, so
      per-component losses can be recorded (defaults to :func:`record_loss`).
    * ``loss_fn`` -- optional replacement for reading ``output_dict["loss"]``;
      it receives the output dict and the global batch counter, allowing e.g.
      loss scheduling/annealing.

    All other behavior mirrors the stock ``Trainer``.
    """
    def __init__(self,
                 model: Model,
                 optimizer: torch.optim.Optimizer,
                 iterator: DataIterator,
                 train_dataset: Iterable[Instance],
                 validation_dataset: Optional[Iterable[Instance]] = None,
                 patience: Optional[int] = None,
                 validation_metric: str = "-loss",
                 validation_iterator: DataIterator = None,
                 shuffle: bool = True,
                 num_epochs: int = 20,
                 serialization_dir: Optional[str] = None,
                 num_serialized_models_to_keep: int = 20,
                 keep_serialized_model_every_num_seconds: int = None,
                 checkpointer: Checkpointer = None,
                 model_save_interval: float = None,
                 cuda_device: Union[int, List] = -1,
                 grad_norm: Optional[float] = None,
                 grad_clipping: Optional[float] = None,
                 learning_rate_scheduler: Optional[LearningRateScheduler] = None,
                 momentum_scheduler: Optional[MomentumScheduler] = None,
                 summary_interval: int = 100,
                 histogram_interval: int = None,
                 should_log_parameter_statistics: bool = True,
                 should_log_learning_rate: bool = False,
                 log_batch_size_period: Optional[int] = None,
                 moving_average: Optional[MovingAverage] = None,
                 tensorboard_log_batch_callback: Optional[Callable[[Dict, TensorboardWriter], NoReturn]] = record_loss,
                 loss_fn: Optional[Callable[[Dict, int], torch.Tensor]] = None) -> None:
        # forward everything except the two custom hooks to the base Trainer
        super().__init__(model, optimizer, iterator, train_dataset, validation_dataset, patience,
                         validation_metric, validation_iterator, shuffle, num_epochs, serialization_dir,
                         num_serialized_models_to_keep, keep_serialized_model_every_num_seconds, checkpointer,
                         model_save_interval, cuda_device, grad_norm, grad_clipping, learning_rate_scheduler,
                         momentum_scheduler, summary_interval, histogram_interval, should_log_parameter_statistics,
                         should_log_learning_rate, log_batch_size_period, moving_average)
        # hook invoked on logged batches with (output_dict, tensorboard)
        self.tensorboard_log_batch_callback = tensorboard_log_batch_callback
        # optional custom loss extractor: (output_dict, batch_num_total) -> Tensor
        self.loss_fn = loss_fn

    def get_output_dict(self, batch_group: List[TensorDict], for_training: bool) -> Dict[str, torch.Tensor]:
        """
        Does a forward pass on the given batches and returns the ``loss`` value in the result.
        If ``for_training`` is `True` also applies regularization penalty.
        """
        if self._multiple_gpu:
            output_dict = training_util.data_parallel(batch_group, self.model, self._cuda_devices)
        else:
            assert len(batch_group) == 1
            batch = batch_group[0]
            batch = nn_util.move_to_device(batch, self._cuda_devices[0])
            output_dict = self.model(**batch)
        return output_dict

    def get_batch_loss(self, output_dict: Dict[str, torch.Tensor], for_training: bool):
        # Extract the scalar loss, either via the custom loss_fn hook or from
        # the conventional "loss" key; returns None when absent at eval time.
        try:
            if self.loss_fn is None:
                loss = output_dict["loss"]
            else:
                loss = self.loss_fn(output_dict, self._batch_num_total)
            if for_training:
                loss += self.model.get_regularization_penalty()
        except KeyError:
            if for_training:
                raise RuntimeError("The model you are trying to optimize does not contain a"
                                   " 'loss' key in the output of model.forward(inputs).")
            loss = None
        return loss

    @overrides
    def _train_epoch(self, epoch: int) -> Dict[str, float]:
        """
        Trains one epoch and returns metrics.
        """
        logger.info("Epoch %d/%d", epoch, self._num_epochs - 1)
        peak_cpu_usage = peak_memory_mb()
        logger.info(f"Peak CPU memory usage MB: {peak_cpu_usage}")
        gpu_usage = []
        for gpu, memory in gpu_memory_mb().items():
            gpu_usage.append((gpu, memory))
            logger.info(f"GPU {gpu} memory usage MB: {memory}")

        train_loss = 0.0
        # Set the model to "train" mode.
        self.model.train()

        num_gpus = len(self._cuda_devices)

        # Get tqdm for the training batches
        raw_train_generator = self.iterator(self.train_data,
                                            num_epochs=1,
                                            shuffle=self.shuffle)
        train_generator = lazy_groups_of(raw_train_generator, num_gpus)
        num_training_batches = math.ceil(self.iterator.get_num_batches(self.train_data)/num_gpus)
        self._last_log = time.time()
        last_save_time = time.time()

        batches_this_epoch = 0
        if self._batch_num_total is None:
            self._batch_num_total = 0

        histogram_parameters = set(self.model.get_parameters_for_histogram_tensorboard_logging())

        logger.info("Training")
        train_generator_tqdm = Tqdm.tqdm(train_generator,
                                         total=num_training_batches)
        cumulative_batch_size = 0
        for batch_group in train_generator_tqdm:
            batches_this_epoch += 1
            self._batch_num_total += 1
            batch_num_total = self._batch_num_total

            self.optimizer.zero_grad()

            # custom hooks: forward pass and loss extraction are factored out
            # so subclasses (and loss_fn) can intervene
            output_dict = self.get_output_dict(batch_group, for_training=True)
            loss = self.get_batch_loss(output_dict, for_training=True)

            if torch.isnan(loss):
                raise ValueError("nan loss encountered")

            loss.backward()

            train_loss += loss.item()

            batch_grad_norm = self.rescale_gradients()

            # This does nothing if batch_num_total is None or you are using a
            # scheduler which doesn't update per batch.
            if self._learning_rate_scheduler:
                self._learning_rate_scheduler.step_batch(batch_num_total)
            if self._momentum_scheduler:
                self._momentum_scheduler.step_batch(batch_num_total)

            if self._tensorboard.should_log_histograms_this_batch():
                # get the magnitude of parameter updates for logging
                # We need a copy of current parameters to compute magnitude of updates,
                # and copy them to CPU so large models won't go OOM on the GPU.
                param_updates = {name: param.detach().cpu().clone()
                                 for name, param in self.model.named_parameters()}
                self.optimizer.step()
                for name, param in self.model.named_parameters():
                    param_updates[name].sub_(param.detach().cpu())
                    update_norm = torch.norm(param_updates[name].view(-1, ))
                    param_norm = torch.norm(param.view(-1, )).cpu()
                    self._tensorboard.add_train_scalar("gradient_update/" + name,
                                                       update_norm / (param_norm + 1e-7))
            else:
                self.optimizer.step()

            # Update moving averages
            if self._moving_average is not None:
                self._moving_average.apply(batch_num_total)

            # Update the description with the latest metrics
            metrics = training_util.get_metrics(self.model, train_loss, batches_this_epoch)
            description = training_util.description_from_metrics(metrics)

            train_generator_tqdm.set_description(description, refresh=False)

            # Log parameter values to Tensorboard
            if self._tensorboard.should_log_this_batch():
                self._tensorboard.log_parameter_and_gradient_statistics(self.model, batch_grad_norm)
                self._tensorboard.log_learning_rates(self.model, self.optimizer)

                self._tensorboard.add_train_scalar("loss/loss_train", metrics["loss"])
                self._tensorboard.log_metrics({"epoch_metrics/" + k: v for k, v in metrics.items()})
                # custom hook: let the callback log extra per-batch scalars
                if self.tensorboard_log_batch_callback:
                    self.tensorboard_log_batch_callback(output_dict, self._tensorboard)

            if self._tensorboard.should_log_histograms_this_batch():
                self._tensorboard.log_histograms(self.model, histogram_parameters)

            if self._log_batch_size_period:
                cur_batch = sum([training_util.get_batch_size(batch) for batch in batch_group])
                cumulative_batch_size += cur_batch
                if (batches_this_epoch - 1) % self._log_batch_size_period == 0:
                    average = cumulative_batch_size/batches_this_epoch
                    logger.info(f"current batch size: {cur_batch} mean batch size: {average}")
                    self._tensorboard.add_train_scalar("current_batch_size", cur_batch)
                    self._tensorboard.add_train_scalar("mean_batch_size", average)

            # Save model if needed.
            if self._model_save_interval is not None and (
                    time.time() - last_save_time > self._model_save_interval
            ):
                last_save_time = time.time()
                self._save_checkpoint(
                    '{0}.{1}'.format(epoch, training_util.time_to_str(int(last_save_time)))
                )
        metrics = training_util.get_metrics(self.model, train_loss, batches_this_epoch, reset=True)
        metrics['cpu_memory_MB'] = peak_cpu_usage
        for (gpu_num, memory) in gpu_usage:
            metrics['gpu_'+str(gpu_num)+'_memory_MB'] = memory
        return metrics

    @overrides
    def _validation_loss(self) -> Tuple[float, int]:
        """
        Computes the validation loss. Returns it and the number of batches.
        """
        logger.info("Validating")

        self.model.eval()

        # Replace parameter values with the shadow values from the moving averages.
        if self._moving_average is not None:
            self._moving_average.assign_average_value()

        if self._validation_iterator is not None:
            val_iterator = self._validation_iterator
        else:
            val_iterator = self.iterator

        num_gpus = len(self._cuda_devices)

        raw_val_generator = val_iterator(self._validation_data,
                                         num_epochs=1,
                                         shuffle=False)
        val_generator = lazy_groups_of(raw_val_generator, num_gpus)
        num_validation_batches = math.ceil(val_iterator.get_num_batches(self._validation_data)/num_gpus)
        val_generator_tqdm = Tqdm.tqdm(val_generator,
                                       total=num_validation_batches)
        batches_this_epoch = 0
        val_loss = 0
        for batch_group in val_generator_tqdm:

            output_dict = self.get_output_dict(batch_group, for_training=False)
            loss = self.get_batch_loss(output_dict, for_training=False)
            if loss is not None:
                # You shouldn't necessarily have to compute a loss for validation, so we allow for
                # `loss` to be None.  We need to be careful, though - `batches_this_epoch` is
                # currently only used as the divisor for the loss function, so we can safely only
                # count those batches for which we actually have a loss.  If this variable ever
                # gets used for something else, we might need to change things around a bit.
                batches_this_epoch += 1
                val_loss += loss.detach().cpu().numpy()

            # Update the description with the latest metrics
            val_metrics = training_util.get_metrics(self.model, val_loss, batches_this_epoch)
            description = training_util.description_from_metrics(val_metrics)
            val_generator_tqdm.set_description(description, refresh=False)

        # Now restore the original parameter values.
        if self._moving_average is not None:
            self._moving_average.restore()

        return val_loss, batches_this_epoch
| 13,380 | 46.282686 | 119 | py |
Unimer | Unimer-master/model_builder.py | # coding=utf8
import numpy
import torch
from typing import Dict, List, Callable
from overrides import overrides
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
from allennlp.training.metrics import Metric
from allennlp.models.model import Model
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules import Embedding
from allennlp.data.dataset_readers import DatasetReader
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.models.encoder_decoders.simple_seq2seq import SimpleSeq2Seq
from allennlp.modules.attention import BilinearAttention, DotProductAttention
from allennlp.predictors import Seq2SeqPredictor
from allennlp.common import Params
from allennlp.nn import Activation, InitializerApplicator
from grammars.grammar import Grammar
from grammars.parse_ast import AST
from neural_models.seq2seq_model import Seq2SeqModel
from neural_models.recombination_seq2seq import RecombinationSeq2Seq
from neural_models.recombination_seq2seq_copy import RecombinationSeq2SeqWithCopy
from neural_models.grammar_based_models import GrammarModel
from neural_models.modules.grammar_decoder import LSTMGrammarDecoder
from neural_models.modules.grammar_copy_decoder import LSTMGrammarCopyDecoder
from neural_models.modules.grammar_copy_decoder_2 import LSTMGrammarCopyDecoder as LSTMGrammarCopyDecoder2
from neural_models.GNN import GNNCopyTransformer
from neural_models.GNN2 import GNNCopyTransformer2
from metrics.sequency_accuracy import SequenceAccuracy
def get_predictor(model, reader) -> Seq2SeqPredictor:
    """Wrap *model* and its dataset *reader* in an AllenNLP Seq2SeqPredictor."""
    predictor = Seq2SeqPredictor(dataset_reader=reader, model=model)
    return predictor
def build_grammar_model(
        flags,
        data_reader: DatasetReader,
        vocab: Vocabulary,
        grammar: Grammar,
        source_namespace: str = 'source_tokens',
) -> Model:
    """Assemble a grammar-based parser: token embedder + BiLSTM encoder +
    LSTM grammar decoder, wrapped in a GrammarModel with sequence accuracy."""
    token_embedding = Embedding(vocab.get_vocab_size(namespace=source_namespace),
                                embedding_dim=flags.source_embedding_dim)
    embedder = BasicTextFieldEmbedder({'tokens': token_embedding})
    encoder = PytorchSeq2SeqWrapper(
        torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim, batch_first=True,
                      bidirectional=flags.encoder_bidirectional))
    grammar_decoder = LSTMGrammarDecoder(grammar, AST, lstm_hidden_dim=flags.decoder_hidden_dim,
                                         num_lstm_layers=flags.decoder_num_layers,
                                         rule_pad_index=data_reader.rule_pad_index,
                                         rule_embedding_dim=flags.rule_embedding_dim,
                                         nonterminal_pad_index=data_reader.nonterminal_pad_index,
                                         nonterminal_end_index=data_reader.nonterminal_end_index,
                                         nonterminal_embedding_dim=flags.nonterminal_embedding_dim,
                                         source_encoding_dim=flags.encoder_hidden_dim * 2,
                                         dropout=flags.dropout, max_target_length=flags.max_decode_length)
    seq_accuracy = SequenceAccuracy()
    return GrammarModel(vocab, embedder, encoder, grammar_decoder, seq_accuracy, flags, regularizer=None)
def build_grammar_copy_model(
        flags,
        data_reader: DatasetReader,
        vocab: Vocabulary,
        grammar: Grammar,
        source_namespace: str = 'source_tokens',
) -> Model:
    """Same wiring as build_grammar_model, but with the copy-enabled
    LSTMGrammarCopyDecoder as the decoding component."""
    token_embedding = Embedding(vocab.get_vocab_size(namespace=source_namespace),
                                embedding_dim=flags.source_embedding_dim)
    embedder = BasicTextFieldEmbedder({'tokens': token_embedding})
    encoder = PytorchSeq2SeqWrapper(
        torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim, batch_first=True,
                      bidirectional=flags.encoder_bidirectional))
    copy_decoder = LSTMGrammarCopyDecoder(grammar, AST, lstm_hidden_dim=flags.decoder_hidden_dim,
                                          num_lstm_layers=flags.decoder_num_layers,
                                          rule_pad_index=data_reader.rule_pad_index,
                                          rule_embedding_dim=flags.rule_embedding_dim,
                                          nonterminal_pad_index=data_reader.nonterminal_pad_index,
                                          nonterminal_end_index=data_reader.nonterminal_end_index,
                                          nonterminal_embedding_dim=flags.nonterminal_embedding_dim,
                                          source_encoding_dim=flags.encoder_hidden_dim * 2,
                                          dropout=flags.dropout, max_target_length=flags.max_decode_length)
    seq_accuracy = SequenceAccuracy()
    return GrammarModel(vocab, embedder, encoder, copy_decoder, seq_accuracy, flags, regularizer=None)
def build_grammar_copy_model_2(
        flags,
        data_reader: DatasetReader,
        vocab: Vocabulary,
        grammar: Grammar,
        source_namespace: str = 'source_tokens',
) -> Model:
    """Variant of build_grammar_copy_model that uses the alternative
    LSTMGrammarCopyDecoder2 implementation; wiring is otherwise identical."""
    token_embedding = Embedding(vocab.get_vocab_size(namespace=source_namespace),
                                embedding_dim=flags.source_embedding_dim)
    embedder = BasicTextFieldEmbedder({'tokens': token_embedding})
    encoder = PytorchSeq2SeqWrapper(
        torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim, batch_first=True,
                      bidirectional=flags.encoder_bidirectional))
    copy_decoder = LSTMGrammarCopyDecoder2(grammar, AST, lstm_hidden_dim=flags.decoder_hidden_dim,
                                           num_lstm_layers=flags.decoder_num_layers,
                                           rule_pad_index=data_reader.rule_pad_index,
                                           rule_embedding_dim=flags.rule_embedding_dim,
                                           nonterminal_pad_index=data_reader.nonterminal_pad_index,
                                           nonterminal_end_index=data_reader.nonterminal_end_index,
                                           nonterminal_embedding_dim=flags.nonterminal_embedding_dim,
                                           source_encoding_dim=flags.encoder_hidden_dim * 2,
                                           dropout=flags.dropout, max_target_length=flags.max_decode_length)
    seq_accuracy = SequenceAccuracy()
    return GrammarModel(vocab, embedder, encoder, copy_decoder, seq_accuracy, flags, regularizer=None)
def build_parsing_seq2seq_model(
        flags,
        data_reader,
        vocab: Vocabulary,
        source_namespace: str = 'source_tokens',
        target_namespace: str = 'target_tokens'
) -> Model:
    """Plain attention-based seq2seq parser: BiLSTM encoder with dot-product
    attention, evaluated with exact sequence accuracy."""
    token_embedding = Embedding(vocab.get_vocab_size(namespace=source_namespace),
                                embedding_dim=flags.source_embedding_dim)
    embedder = BasicTextFieldEmbedder({'tokens': token_embedding})
    encoder = PytorchSeq2SeqWrapper(
        torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim, batch_first=True,
                      bidirectional=flags.encoder_bidirectional))
    dot_attention = DotProductAttention()
    seq_accuracy = SequenceAccuracy()
    return Seq2SeqModel(vocab, embedder, encoder, flags.max_decode_length,
                        target_embedding_dim=flags.decoder_hidden_dim,
                        target_namespace=target_namespace,
                        attention=dot_attention,
                        beam_size=flags.beam_size,
                        use_bleu=False,
                        seq_metrics=seq_accuracy)
def build_parsing_recombination_seq2seq_model(
        flags,
        data_reader,
        vocab: Vocabulary,
        source_namespace: str = 'source_tokens',
        target_namespace: str = 'target_tokens'
) -> Model:
    """Recombination-style seq2seq parser (Jia & Liang): BiLSTM encoder,
    bilinear output attention fed back into the decoder, uniform(-0.1, 0.1)
    weight init with zero biases."""
    token_embedding = Embedding(vocab.get_vocab_size(namespace=source_namespace),
                                embedding_dim=flags.source_embedding_dim)
    encoder = PytorchSeq2SeqWrapper(torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim, batch_first=True,
                                                  bidirectional=flags.encoder_bidirectional))
    bilinear_attention = BilinearAttention(flags.attention_hidden_dim, flags.attention_hidden_dim)
    embedder = BasicTextFieldEmbedder({'tokens': token_embedding})
    weight_init = InitializerApplicator.from_params([(".*bias", Params({"type": "constant", "val": 0})),
                                                     ('.*', Params({"type": "uniform", "a": -0.1, "b": 0.1}))])
    seq_accuracy = SequenceAccuracy()
    return RecombinationSeq2Seq(vocab, embedder, encoder, flags.max_decode_length,
                                seq_metrics=seq_accuracy,
                                target_embedding_dim=flags.target_embedding_dim,
                                target_namespace=target_namespace,
                                output_attention=bilinear_attention,
                                beam_size=flags.beam_size,
                                use_bleu=False,
                                encoder_input_dropout=flags.encoder_input_dropout,
                                encoder_output_dropout=flags.encoder_output_dropout,
                                dropout=flags.dropout,
                                feed_output_attention_to_decoder=True,
                                keep_decoder_output_dim_same_as_encoder=True,
                                initializer=weight_init)
def build_parsing_recombination_seq2seq_copy_model(
        flags,
        data_reader,
        vocab: Vocabulary,
        source_namespace: str = 'source_tokens',
        target_namespace: str = 'target_tokens'
) -> Model:
    """Assemble a ``RecombinationSeq2SeqWithCopy`` parser.

    Same shape as the non-copy variant, except the bilinear attention is
    built with ``normalize=False`` and the source namespace is passed so the
    model can resolve copied tokens.
    """
    # Source side: token embedding wrapped in a text-field embedder.
    token_embedding = Embedding(vocab.get_vocab_size(namespace=source_namespace),
                                embedding_dim=flags.source_embedding_dim)
    embedder = BasicTextFieldEmbedder({'tokens': token_embedding})
    # Sequence encoder over the embedded utterance.
    encoder = PytorchSeq2SeqWrapper(
        torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim,
                      batch_first=True, bidirectional=flags.encoder_bidirectional))
    # Unnormalized bilinear attention (raw scores, no softmax inside).
    copy_attention = BilinearAttention(flags.attention_hidden_dim, flags.attention_hidden_dim,
                                       normalize=False)
    # Biases start at zero; every other weight is uniform in [-0.1, 0.1].
    init_applicator = InitializerApplicator.from_params(
        [(".*bias", Params({"type": "constant", "val": 0})),
         ('.*', Params({"type": "uniform", "a": -0.1, "b": 0.1}))])
    return RecombinationSeq2SeqWithCopy(vocab, embedder, encoder, flags.max_decode_length,
                                        seq_metrics=SequenceAccuracy(),
                                        source_namespace=source_namespace,
                                        target_namespace=target_namespace,
                                        target_embedding_dim=flags.target_embedding_dim,
                                        attention=copy_attention,
                                        beam_size=flags.beam_size,
                                        use_bleu=False,
                                        encoder_input_dropout=flags.encoder_input_dropout,
                                        encoder_output_dropout=flags.encoder_output_dropout,
                                        dropout=flags.dropout,
                                        feed_output_attention_to_decoder=True,
                                        keep_decoder_output_dim_same_as_encoder=True,
                                        initializer=init_applicator)
def build_gnn_parsing_model(
        flags,
        data_reader: DatasetReader,
        vocab: Vocabulary,
        is_test: bool = False,
        source_namespace: str = 'source_tokens',
        target_namespace: str = 'target_tokens',
        segment_namespace: str = 'segment_tokens',
) -> Model:
    """Instantiate the GNN-augmented copy transformer (``GNNCopyTransformer``)
    with greedy decoding (beam_size fixed to 1)."""
    # Transformer stack hyper-parameters, straight from the flags.
    transformer_config = dict(
        source_embedding_dim=flags.source_embedding_dim,
        target_embedding_dim=flags.target_embedding_dim,
        encoder_d_model=flags.transformer_encoder_hidden_dim,
        decoder_d_model=flags.transformer_decoder_hidden_dim,
        encoder_nhead=flags.transformer_encoder_nhead,
        decoder_nhead=flags.transformer_decoder_nhead,
        num_decoder_layers=flags.transformer_num_decoder_layers,
        num_encoder_layers=flags.transformer_num_encoder_layers,
        encoder_dim_feedforward=flags.transformer_encoder_feedforward_dim,
        decoder_dim_feedforward=flags.transformer_decoder_feedforward_dim,
        dropout=flags.dropout,
    )
    # Graph/edge-label specific settings.
    gnn_config = dict(
        nlabels=flags.gnn_transformer_num_edge_labels,
        max_decode_clip_range=flags.gnn_max_decode_clip_range,
        encode_edge_label_with_matrix=flags.gnn_encode_edge_label_with_matrix,
    )
    return GNNCopyTransformer(
        vocab=vocab,
        source_namespace=source_namespace,
        target_namespace=target_namespace,
        segment_namespace=segment_namespace,
        max_decoding_step=flags.max_decode_length,
        token_based_metric=SequenceAccuracy(),
        beam_size=1,
        is_test=is_test,
        **transformer_config,
        **gnn_config,
    )
def build_gnn_parsing_model2(
        flags,
        data_reader: DatasetReader,
        vocab: Vocabulary,
        is_test: bool = False,
        source_namespace: str = 'source_tokens',
        target_namespace: str = 'target_tokens',
        segment_namespace: str = 'segment_tokens',
) -> Model:
    """Instantiate the second GNN copy transformer variant
    (``GNNCopyTransformer2``); configured identically to variant 1."""
    # Transformer stack hyper-parameters, straight from the flags.
    transformer_config = dict(
        source_embedding_dim=flags.source_embedding_dim,
        target_embedding_dim=flags.target_embedding_dim,
        encoder_d_model=flags.transformer_encoder_hidden_dim,
        decoder_d_model=flags.transformer_decoder_hidden_dim,
        encoder_nhead=flags.transformer_encoder_nhead,
        decoder_nhead=flags.transformer_decoder_nhead,
        num_decoder_layers=flags.transformer_num_decoder_layers,
        num_encoder_layers=flags.transformer_num_encoder_layers,
        encoder_dim_feedforward=flags.transformer_encoder_feedforward_dim,
        decoder_dim_feedforward=flags.transformer_decoder_feedforward_dim,
        dropout=flags.dropout,
    )
    # Graph/edge-label specific settings.
    gnn_config = dict(
        nlabels=flags.gnn_transformer_num_edge_labels,
        max_decode_clip_range=flags.gnn_max_decode_clip_range,
        encode_edge_label_with_matrix=flags.gnn_encode_edge_label_with_matrix,
    )
    return GNNCopyTransformer2(
        vocab=vocab,
        source_namespace=source_namespace,
        target_namespace=target_namespace,
        segment_namespace=segment_namespace,
        max_decoding_step=flags.max_decode_length,
        token_based_metric=SequenceAccuracy(),
        beam_size=1,
        is_test=is_test,
        **transformer_config,
        **gnn_config,
    )
def build_seq2seq_model(
        flags,
        data_reader,
        vocab: Vocabulary,
        source_namespace: str = 'source_tokens',
        target_namespace: str = 'target_tokens'
) -> Model:
    """Build the plain translation baseline: ``SimpleSeq2Seq`` with
    dot-product attention and BLEU reporting enabled."""
    # Source side: embedding layer wrapped in a text-field embedder.
    embedder = BasicTextFieldEmbedder({
        'tokens': Embedding(vocab.get_vocab_size(namespace=source_namespace),
                            embedding_dim=flags.source_embedding_dim)
    })
    # LSTM encoder over the embedded source tokens.
    encoder = PytorchSeq2SeqWrapper(
        torch.nn.LSTM(flags.source_embedding_dim, flags.encoder_hidden_dim,
                      batch_first=True, bidirectional=flags.encoder_bidirectional))
    return SimpleSeq2Seq(vocab, embedder, encoder, flags.max_decode_length,
                         target_embedding_dim=flags.decoder_hidden_dim,
                         target_namespace=target_namespace,
                         attention=DotProductAttention(),
                         beam_size=flags.beam_size,
                         use_bleu=True)
| 15,631 | 50.084967 | 120 | py |
Unimer | Unimer-master/run_parser.py | # coding=utf-8
import re
import os
import json
import copy
import random
import torch
import itertools
from typing import Dict, Any
from overrides import overrides
from absl import app
from absl import flags
import numpy as np
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
from allennlp.data.iterators import BucketIterator
from allennlp.training.util import evaluate as model_evaluate
from allennlp.data.vocabulary import Vocabulary
from allennlp.training.learning_rate_schedulers import LearningRateScheduler, NoamLR
from allennlp.data.tokenizers import WordTokenizer
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from grammars.grammar import get_grammar
from grammars.entity_matcher import get_entity_matcher, get_seq2seq_entity_matcher
from grammars.gnn_entity_matcher import get_gnn_entity_replacer, get_gnn_entity_matcher
from grammars.utils import get_logical_form_preprocessor, get_logical_form_postprocessor,\
get_logical_form_tokenizer, get_utterance_preprocessor
from data_readers.grammar_based_reader import GrammarBasedDataReader
from data_readers.grammar_copy_based_reader import GrammarCopyBasedDataReader
from data_readers.seq2seq_data_reader import Seq2SeqDataReader
from data_readers.gnn_data_reader import GNNCopyTransformerDataReader
from model_builder import build_grammar_model, get_predictor, build_seq2seq_model, build_parsing_seq2seq_model, \
build_grammar_copy_model, build_grammar_copy_model_2, build_parsing_recombination_seq2seq_model,\
build_parsing_recombination_seq2seq_copy_model, build_gnn_parsing_model, build_gnn_parsing_model2
from custom_trainer import CustomTrainer
from lr_scheduler_wrapper import PyTorchMultiStepLearningRateSchedulerWrapper
import evaluations
# Command-line flags controlling training/evaluation, data, model choice,
# hyper-parameters, optimization and output locations.
flags.DEFINE_bool('do_train', False, 'whether to do training')
flags.DEFINE_integer('seed', 100, 'random seed')
# Model Type
flags.DEFINE_enum(
    'model', 'parsing',
    ['parsing', 'copy_parsing', 'copy_parsing_2',
     'seq_parsing', 'recombination_seq_parsing',
     'recombination_copy_seq_parsing',
     'translation', 'gnn_parsing', 'gnn_parsing2'],
    'Specifying parsing models and translation models'
)
# Data
flags.DEFINE_enum('task', 'geo', ['geo', 'atis', 'job'], 'task')
flags.DEFINE_string('train_data', os.path.join(
    'data', 'geo', 'geo_prolog_train.tsv'), 'training data path')
# NOTE(review): help text below says 'training data path' but this flag
# holds the *test* split — likely a copy-paste slip in the description.
flags.DEFINE_string('test_data', os.path.join(
    'data', 'geo', 'geo_prolog_test.tsv'), 'training data path')
flags.DEFINE_enum('language', 'prolog', [
    'funql', 'typed_funql', 'prolog', 'prolog2', 'lambda',
    'lambda2', 'lambda3', 'lambda4', 'sql', 'sql2', 'sql3'], 'target language to generate')
flags.DEFINE_integer('min_count', 1, 'Minimum counts for vocabulary')
# Model Hyper-parameters
flags.DEFINE_integer('source_embedding_dim', 128, 'Embedding size of source')
flags.DEFINE_integer('encoder_hidden_dim', 128, 'Hidden size of lstm encoder')
flags.DEFINE_bool('encoder_bidirectional', True,
                  'Whether to use birdirectional lstm')
# NOTE(review): the two dropout help strings appear swapped — the *output*
# dropout is described as 'Input dropout' and vice versa.
flags.DEFINE_float('encoder_output_dropout', 0.2,
                   'Input dropout rate of encoder')
flags.DEFINE_float('encoder_input_dropout', 0.2,
                   'Output dropout rate of encoder')
# Grammar Decoder
flags.DEFINE_integer('target_embedding_dim', 128, 'Hidden size of lstm decoder')
flags.DEFINE_integer('decoder_hidden_dim', 128, 'Hidden size of lstm decoder')
flags.DEFINE_integer('decoder_num_layers', 1, 'Number of layers in decoder')
flags.DEFINE_integer('rule_embedding_dim', 64, 'Embedding size of rule')
flags.DEFINE_integer('nonterminal_embedding_dim', 64,
                     'Embedding size of non-terminal')
flags.DEFINE_integer('max_decode_length', 100, 'Maximum decode steps')
flags.DEFINE_integer('attention_hidden_dim', 100, 'Attention hidden dim for Bilinear Attention')
flags.DEFINE_float('dropout', 0.2, 'Dropout rate')
# GNN Hyperparameters
flags.DEFINE_integer('transformer_encoder_hidden_dim', 128, 'hidden dimension of encoder of transformer')
flags.DEFINE_integer('transformer_decoder_hidden_dim', 128, 'hidden dimension of decoder of transformer')
# NOTE(review): a default of 128 attention heads looks copied from the
# hidden-dim defaults above; typical head counts are 4-8 — confirm intent.
flags.DEFINE_integer('transformer_encoder_nhead', 128, 'number of head in self attention')
flags.DEFINE_integer('transformer_decoder_nhead', 128, 'number of head in self attention')
flags.DEFINE_integer('transformer_num_encoder_layers', 3, 'number of encoder layer in transformer')
flags.DEFINE_integer('transformer_num_decoder_layers', 3, 'number of decoder layer in transformer')
flags.DEFINE_integer('transformer_encoder_feedforward_dim', 256, 'dimension of feed forward layer in transformer')
flags.DEFINE_integer('transformer_decoder_feedforward_dim', 256, 'dimension of feed forward layer in transformer')
flags.DEFINE_integer('gnn_transformer_num_edge_labels', 20, 'number of edge labels in gnn transformer')
flags.DEFINE_bool('gnn_encode_edge_label_with_matrix', True, 'whether to encode edge label with matrix')
flags.DEFINE_integer('gnn_relative_position_clipped_range', 8, 'clip distance of relative position representations')
flags.DEFINE_integer('gnn_max_decode_clip_range', 8, 'clip distance of decode sequence')
# Optimization
flags.DEFINE_bool('use_scheduler', False, 'whether to use learning rate scheduler')
flags.DEFINE_float('lr', 0.001, 'learning rate')
flags.DEFINE_enum('optimizer', 'adam', [
    'adam', 'sgd', 'rmsprop', 'adadelta'], 'optimizer to use')
flags.DEFINE_integer('warmup_steps', 800, 'number of steps to increase learning rate')
flags.DEFINE_float('adam_beta_1', 0.9, 'hyper-parameter beta_1 of adam')
flags.DEFINE_float('adam_beta_2', 0.999, 'hyper-parameter beta_2 of adam')
flags.DEFINE_float('adam_eps', 1e-8, 'hyper-parameter epsilon of adam')
flags.DEFINE_enum('scheduler', 'noam', ['noam', 'bert', 'finetune_bert_noam'], 'scheduler for transformer based models')
flags.DEFINE_integer('batch_size', 32, 'batch size')
flags.DEFINE_integer(
    'patient', 10, 'Number of epochs to be patient before early stopping')
flags.DEFINE_integer('epoch', 1, 'Number of epoch to train')
flags.DEFINE_integer('model_save_interval', 1, 'Interval to save model')
flags.DEFINE_float('gradient_clip', 5.0, 'Clip gradient')
flags.DEFINE_string('validation_metric', '+accuracy', 'validation metric')
# Utils
flags.DEFINE_string('serialization_dir', os.path.join(
    'trained_models', 'geo'), 'Path to save trained models')
# Evaluation
flags.DEFINE_bool('save_prediction_result', False,
                  'Whether to save prediction result')
flags.DEFINE_string('checkpoint', 'best.th', 'Checkpoint to evaluate')
flags.DEFINE_integer('beam_size', 1, 'Beam Search Size')
# Module-level handle used throughout this file.
FLAGS = flags.FLAGS
def set_random_seed(seed):
    """Seed every RNG used by the pipeline: python's ``random``, numpy,
    torch's CPU generator and, when present, all CUDA devices."""
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        # Mirror the same seed onto every visible GPU.
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
def save_flags(FLAGs):
    """Serialize flag values to ``<serialization_dir>/config.txt``.

    Args:
        FLAGs: the absl flags object to persist. (Bug fix: the previous
            version ignored this parameter and always read the module-level
            ``FLAGS`` global; behavior at the existing call site, which
            passes ``FLAGS``, is unchanged.)
    """
    with open(os.path.join(FLAGs.serialization_dir, 'config.txt'), 'w') as f:
        f.write(FLAGs.flags_into_string())
def build_data_reader(FLAGS):
    """Construct the dataset reader matching ``FLAGS.model``.

    All readers share the same spaCy-based question tokenizer; the remaining
    components (grammar, preprocessors, entity matchers) depend on the
    selected model, task and target language.

    Fixes over the previous version: the unused ``splitter`` local is
    removed, the per-branch ``max_target_length`` computation is hoisted
    (it was duplicated four times), and the function now has a single
    return point instead of a mix of mid-function returns and fall-through.

    Returns:
        The dataset reader for the chosen model, or ``None`` if
        ``FLAGS.model`` is not recognized.
    """
    question_tokenizer = WordTokenizer(SpacyWordSplitter(pos_tags=True))
    # Target sequences are clipped only while training; 0 disables clipping
    # so evaluation always sees the full logical form.
    max_target_length = FLAGS.max_decode_length if FLAGS.do_train else 0
    reader = None
    if FLAGS.model == 'parsing':
        # Grammar-based parsing: the target is a sequence of production rules.
        grammar = get_grammar(FLAGS.task, FLAGS.language)
        assert grammar is not None
        logical_form_preprocessor = get_logical_form_preprocessor(
            FLAGS.task, FLAGS.language)
        reader = GrammarBasedDataReader(
            question_tokenizer, grammar, logical_form_preprocessor=logical_form_preprocessor,
            maximum_target_length=max_target_length)
    elif FLAGS.model in ['copy_parsing', 'copy_parsing_2']:
        # Grammar-based parsing plus a copy mechanism over matched entities.
        grammar = get_grammar(FLAGS.task, FLAGS.language)
        assert grammar is not None
        logical_form_preprocessor = get_logical_form_preprocessor(
            FLAGS.task, FLAGS.language)
        entity_matcher = get_entity_matcher(FLAGS.task, FLAGS.language)
        utterance_preprocessor = get_utterance_preprocessor(FLAGS.task, FLAGS.language)
        reader = GrammarCopyBasedDataReader(
            question_tokenizer, grammar, logical_form_preprocessor=logical_form_preprocessor,
            utterance_preprocessor=utterance_preprocessor,
            copy_link_finder=entity_matcher, maximum_target_length=max_target_length)
    elif FLAGS.model == 'translation':
        # Plain seq2seq translation; no parsing-specific handling.
        logical_form_tokenizer = get_logical_form_tokenizer(FLAGS.task, FLAGS.language)
        reader = Seq2SeqDataReader(
            question_tokenizer=question_tokenizer,
            logical_form_tokenizer=logical_form_tokenizer,
            is_parsing=False)
    elif FLAGS.model == 'seq_parsing':
        # Token-level parsing without a grammar; note: no target-length
        # clipping for this reader (matches previous behavior).
        logical_form_tokenizer = get_logical_form_tokenizer(FLAGS.task, FLAGS.language)
        reader = Seq2SeqDataReader(
            question_tokenizer=question_tokenizer,
            logical_form_tokenizer=logical_form_tokenizer,
            is_parsing=True)
    elif FLAGS.model == 'recombination_seq_parsing':
        # Recombination model: variables are normalized with De Bruijn
        # indices before tokenization.
        logical_form_preprocessor = get_logical_form_preprocessor(
            FLAGS.task, FLAGS.language, normalize_var_with_de_brujin_index=True)
        logical_form_tokenizer = get_logical_form_tokenizer(FLAGS.task, FLAGS.language)
        reader = Seq2SeqDataReader(
            question_tokenizer=question_tokenizer,
            logical_form_tokenizer=logical_form_tokenizer,
            logical_form_preprocessor=logical_form_preprocessor,
            is_parsing=True,
            maximum_target_length=max_target_length
        )
    elif FLAGS.model == 'recombination_copy_seq_parsing':
        # Recombination model with copying of matched entities.
        logical_form_preprocessor = get_logical_form_preprocessor(
            FLAGS.task, FLAGS.language, normalize_var_with_de_brujin_index=True)
        logical_form_tokenizer = get_logical_form_tokenizer(FLAGS.task, FLAGS.language)
        entity_matcher = get_seq2seq_entity_matcher(FLAGS.task, FLAGS.language)
        # For SQL targets, keywords must never be produced via copying.
        if FLAGS.language.startswith('sql'):
            exclude_target_words = ['select', 'from', 'and', 'in', 'where', 'group', 'order', 'having', 'limit', 'not']
        else:
            exclude_target_words = None
        reader = Seq2SeqDataReader(
            question_tokenizer=question_tokenizer,
            logical_form_tokenizer=logical_form_tokenizer,
            logical_form_preprocessor=logical_form_preprocessor,
            is_parsing=True,
            enable_copy=True,
            maximum_target_length=max_target_length,
            entity_matcher=entity_matcher,
            exclude_target_words=exclude_target_words
        )
    elif FLAGS.model in ['gnn_parsing', 'gnn_parsing2']:
        # GNN copy-transformer reader; instances may be dropped only while
        # training (evaluation must keep every instance).
        logical_form_preprocessor = get_logical_form_preprocessor(
            FLAGS.task, FLAGS.language, normalize_var_with_de_brujin_index=True)
        logical_form_tokenizer = get_logical_form_tokenizer(FLAGS.task, FLAGS.language)
        allow_drop = FLAGS.do_train
        grammar = get_grammar(FLAGS.task, FLAGS.language)
        entity_matcher = get_gnn_entity_matcher(FLAGS.task, FLAGS.language)
        entity_replacer = get_gnn_entity_replacer(FLAGS.task, FLAGS.language)
        reader = GNNCopyTransformerDataReader(
            entity_matcher=entity_matcher,
            entity_replacer=entity_replacer,
            target_grammar=grammar,
            source_tokenizer=question_tokenizer,
            target_tokenizer=logical_form_tokenizer,
            logical_form_preprocessor=logical_form_preprocessor,
            relative_position_clipped_range=FLAGS.gnn_relative_position_clipped_range,
            nlabels=FLAGS.gnn_transformer_num_edge_labels,
            allow_drop=allow_drop
        )
    return reader
def build_optimizer(FLAGS, parameters) -> optim.Optimizer:
    """Create the optimizer named by ``FLAGS.optimizer`` over ``parameters``
    with learning rate ``FLAGS.lr``; returns ``None`` for unknown names."""
    name = FLAGS.optimizer
    if name == 'adam':
        return optim.Adam(parameters, lr=FLAGS.lr)
    if name == 'sgd':
        # Plain SGD: no momentum, dampening, weight decay or Nesterov.
        return optim.SGD(parameters, lr=FLAGS.lr, momentum=0,
                         dampening=0, weight_decay=0, nesterov=False)
    if name == 'rmsprop':
        return optim.RMSprop(parameters, lr=FLAGS.lr, alpha=0.95)
    if name == 'adadelta':
        return optim.Adadelta(parameters, lr=FLAGS.lr)
    return None
def build_lr_scheduler(FLAGS, optimizer) -> LearningRateScheduler:
    """Return the learning-rate scheduler for ``optimizer``.

    ``None`` when scheduling is disabled, when the optimizer has no
    associated schedule, or when an adam run uses a scheduler other
    than 'noam'.
    """
    if not FLAGS.use_scheduler:
        return None
    name = FLAGS.optimizer
    if name == 'rmsprop':
        # Decay by 2% every epoch from epoch 5 onwards.
        torch_scheduler = MultiStepLR(optimizer, milestones=[5] + list(range(6, 200)), gamma=0.98)
        return PyTorchMultiStepLearningRateSchedulerWrapper(torch_scheduler)
    if name == 'sgd':
        # Halve the learning rate at a few fixed epochs.
        torch_scheduler = MultiStepLR(optimizer, milestones=[15, 20, 25, 30], gamma=0.5)
        return PyTorchMultiStepLearningRateSchedulerWrapper(torch_scheduler)
    if name == 'adam' and FLAGS.scheduler == 'noam':
        print('Use Noam Scheduler')
        return NoamLR(optimizer, model_size=FLAGS.transformer_encoder_hidden_dim,
                      warmup_steps=FLAGS.warmup_steps)
    return None
def main(argv):
    """Entry point: train or evaluate the model selected by the absl flags.

    With --do_train: read train/test splits, build the vocabulary from the
    training data, construct the model and run the trainer. Otherwise: load
    a saved vocabulary and checkpoint, evaluate on the test split and
    optionally dump per-instance predictions to predictions.json.
    """
    set_random_seed(FLAGS.seed)
    print(FLAGS.flags_into_string())
    reader = build_data_reader(FLAGS)
    assert reader is not None
    # ---- Data & vocabulary ----
    if FLAGS.do_train:
        # NOTE(review): is_test is assigned in both branches but never read
        # below — the gnn builders recompute it as `not FLAGS.do_train`.
        is_test = False
        save_flags(FLAGS)
        train_dataset, test_dataset = reader.read(
            FLAGS.train_data), reader.read(FLAGS.test_data)
        # Vocabulary is built from the training instances only.
        vocab = Vocabulary.from_instances(
            train_dataset, min_count={'source_tokens': FLAGS.min_count})
    else:
        is_test = True
        test_dataset = reader.read(FLAGS.test_data)
        # Evaluation reuses the vocabulary saved during training.
        vocab = Vocabulary.from_files(os.path.join(
            FLAGS.serialization_dir, 'vocabulary'))
    # ---- Model construction, dispatched on FLAGS.model ----
    if FLAGS.model == 'parsing':
        model = build_grammar_model(FLAGS, reader, vocab, reader.grammar)
    elif FLAGS.model == 'copy_parsing':
        model = build_grammar_copy_model(FLAGS, reader, vocab, reader.grammar)
    elif FLAGS.model == 'copy_parsing_2':
        model = build_grammar_copy_model_2(FLAGS, reader, vocab, reader.grammar)
    elif FLAGS.model == 'translation':
        model = build_seq2seq_model(FLAGS, reader, vocab)
    elif FLAGS.model == 'recombination_seq_parsing':
        model = build_parsing_recombination_seq2seq_model(FLAGS, reader, vocab)
    elif FLAGS.model == 'recombination_copy_seq_parsing':
        model = build_parsing_recombination_seq2seq_copy_model(FLAGS, reader, vocab)
    elif FLAGS.model == 'gnn_parsing':
        model = build_gnn_parsing_model(FLAGS, reader, vocab, is_test=not FLAGS.do_train)
    elif FLAGS.model == 'gnn_parsing2':
        model = build_gnn_parsing_model2(FLAGS, reader, vocab, is_test=not FLAGS.do_train)
    else:
        # Fallback (covers 'seq_parsing').
        model = build_parsing_seq2seq_model(FLAGS, reader, vocab)
    print(model)
    assert model is not None
    # ---- Device placement ----
    print("Cuda Available: ", torch.cuda.is_available())
    if torch.cuda.is_available():
        cuda_device = list(range(torch.cuda.device_count()))
        print("Cuda device: ", cuda_device)
        if len(cuda_device) > 1:
            print("Enable Multiple GPU: ", cuda_device)
            # Enable Multiple GPU
            # cuda_device stays a list here; the trainer interprets a list
            # as a multi-GPU request.
            model = model.cuda(cuda_device[0])
        else:
            cuda_device = cuda_device[0]
            model = model.cuda(cuda_device)
    else:
        cuda_device = -1
    if FLAGS.do_train:
        # ---- Training ----
        with torch.autograd.set_detect_anomaly(False):
            model.train()
            optimizer = build_optimizer(FLAGS, model.parameters())
            assert optimizer is not None
            allen_scheduler = build_lr_scheduler(FLAGS, optimizer)
            # Persist the vocabulary so evaluation runs can reload it.
            vocab.save_to_files(os.path.join(
                FLAGS.serialization_dir, 'vocabulary'))
            iterator = BucketIterator(batch_size=FLAGS.batch_size, sorting_keys=[
                ("source_tokens", "num_tokens")])
            iterator.index_with(vocab)
            trainer = CustomTrainer(model=model,
                                    optimizer=optimizer,
                                    iterator=iterator,
                                    train_dataset=train_dataset,
                                    validation_dataset=test_dataset,
                                    patience=FLAGS.patient,
                                    num_epochs=FLAGS.epoch,
                                    cuda_device=cuda_device,
                                    serialization_dir=FLAGS.serialization_dir,
                                    grad_clipping=FLAGS.gradient_clip,
                                    validation_metric=FLAGS.validation_metric,
                                    should_log_learning_rate=True,
                                    summary_interval=5,
                                    num_serialized_models_to_keep=5,
                                    learning_rate_scheduler=allen_scheduler,
                                    loss_fn=None)
            trainer.train()
    else:
        # ---- Evaluation ----
        # Load Model
        with open(os.path.join(FLAGS.serialization_dir, FLAGS.checkpoint), 'rb') as f:
            model.load_state_dict(torch.load(f))
        model.eval()
        iterator = BucketIterator(batch_size=FLAGS.batch_size, sorting_keys=[
            ("source_tokens", "num_tokens")])
        iterator.index_with(vocab)
        metrics = model_evaluate(
            model, test_dataset, iterator, cuda_device, batch_weight_key='')
        for key, metric in metrics.items():
            print("%s: %s" % (key, str(metric)))
        if FLAGS.save_prediction_result:
            # Re-run prediction instance-by-instance so each result can be
            # checked and written out with the model-specific evaluator.
            results = list()
            predictor = get_predictor(model, reader)
            total, correct = 0, 0
            preprocessor = get_logical_form_preprocessor(FLAGS.task, FLAGS.language)
            postprocessor = get_logical_form_postprocessor(
                FLAGS.task, FLAGS.language)
            # Iterate over the test set in batches of FLAGS.batch_size.
            for idx in itertools.islice(range(len(test_dataset)), 0, len(test_dataset), FLAGS.batch_size):
                instances = test_dataset[idx:idx + FLAGS.batch_size]
                total += len(instances)
                predictions = predictor.predict_batch_instance(instances)
                for inst, pred in zip(instances, predictions):
                    # Each model family has its own evaluation routine.
                    if FLAGS.model == 'parsing':
                        is_correct, result = evaluations.evaluate_grammar_based_prediction(
                            inst, pred, reader.grammar, preprocessor, postprocessor)
                    elif FLAGS.model in ['copy_parsing', 'copy_parsing_2']:
                        is_correct, result = evaluations.evaluate_grammar_copy_based_prediction(
                            inst, pred, reader.grammar, preprocessor, postprocessor)
                    elif FLAGS.model in ['seq_parsing', 'recombination_seq_parsing']:
                        is_correct, result = evaluations.evaluate_seq_parsing_prediction(
                            inst, pred, FLAGS.language)
                    elif FLAGS.model in ['recombination_copy_seq_parsing']:
                        is_correct, result = evaluations.evaluate_seq_copy_parsing_prediction(
                            inst, pred, FLAGS.language
                        )
                    elif FLAGS.model in ['gnn_parsing', 'gnn_parsing2']:
                        is_correct, result = evaluations.evaluate_gnn_parsing_prediction(
                            inst, pred, FLAGS.language
                        )
                    else:
                        # Translation
                        is_correct, result = evaluations.evaluate_translation_prediction(
                            inst, pred, FLAGS.language)
                    if is_correct:
                        correct += 1
                    results.append(result)
            assert total == len(test_dataset)
            print('Total: %d, Correct: %d, Accuracy: %f' %
                  (total, correct, correct / total))
            with open(os.path.join(FLAGS.serialization_dir, 'predictions.json'), 'w') as f:
                f.write(json.dumps(results, indent=4))
if __name__ == '__main__':
    # Parse absl flags from sys.argv, then dispatch to main().
    app.run(main)
| 21,059 | 46.432432 | 120 | py |
Unimer | Unimer-master/neural_models/recombination_seq2seq_copy.py | # coding=utf8
from typing import Dict, List, Tuple
import numpy
from overrides import overrides
import torch
import torch.nn.functional as F
from torch.nn.modules.linear import Linear
from torch.nn.modules.rnn import LSTMCell
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.tokenizers import Token
from allennlp.modules import Attention, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.models.model import Model
from allennlp.modules.token_embedders import Embedding
from allennlp.training.metrics import Metric
from allennlp.nn.beam_search import BeamSearch
from allennlp.training.metrics import BLEU
from allennlp.nn import util, InitializerApplicator
class RecombinationSeq2SeqWithCopy(Model):
    def __init__(self,
                 vocab: Vocabulary,
                 source_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 max_decoding_steps: int,
                 seq_metrics: Metric,
                 attention: Attention,
                 beam_size: int = None,
                 source_namespace: str = 'source_tokens',
                 target_namespace: str = "tokens",
                 target_embedding_dim: int = None,
                 scheduled_sampling_ratio: float = 0.,
                 use_bleu: bool = False,
                 encoder_input_dropout: float = 0.0,
                 encoder_output_dropout: float = 0.0,
                 dropout: float = 0.0,
                 feed_output_attention_to_decoder: bool = False,
                 keep_decoder_output_dim_same_as_encoder: bool = True,
                 initializer: InitializerApplicator = InitializerApplicator()) -> None:
        """
        Build an encoder-decoder parser whose decoder can either generate a
        vocabulary token or copy a token from the source sequence.

        Parameters
        ----------
        vocab : vocabulary holding both source and target namespaces.
        source_embedder : embeds the source token ids.
        encoder : Seq2Seq encoder run over the embedded source.
        max_decoding_steps : maximum number of decoding time steps.
        seq_metrics : sequence-level metric updated in ``forward``.
        attention : attention module scoring encoder states each step.
        beam_size : beam width at prediction time (``None`` -> 1, greedy).
        source_namespace / target_namespace : vocabulary namespaces used
            for index lookups (the copy mechanism maps between them).
        target_embedding_dim : target embedding size; defaults to the
            source embedder's output dim when ``None``.
        scheduled_sampling_ratio : stored on the instance; not used within
            this method — presumably consumed by decoding logic elsewhere,
            TODO confirm.
        use_bleu : when True, a BLEU metric is tracked as well.
        encoder_input_dropout / encoder_output_dropout / dropout : dropout
            probabilities for the corresponding dropout layers below.
        feed_output_attention_to_decoder : when True, the previous step's
            attention context is concatenated to the decoder input.
        keep_decoder_output_dim_same_as_encoder : when False, the decoder
            hidden size is halved for bidirectional encoders.
        initializer : applied to all parameters at the end of construction.

        NOTE(review): the default ``InitializerApplicator()`` is evaluated
        once at class-definition time and shared across instances.
        """
        super(RecombinationSeq2SeqWithCopy, self).__init__(vocab)
        self._source_namespace = source_namespace
        self._target_namespace = target_namespace
        self._scheduled_sampling_ratio = scheduled_sampling_ratio
        # We need the start symbol to provide as the input at the first timestep of decoding, and
        # end symbol as a way to indicate the end of the decoded sequence.
        self._start_index = self.vocab.get_token_index(START_SYMBOL, self._target_namespace)
        self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)
        self._pad_index = self.vocab.get_token_index(self.vocab._padding_token,
                                                     self._target_namespace)  # pylint: disable=protected-access
        # Evaluation Metrics
        if use_bleu:
            # Padding, start and end tokens are excluded from BLEU.
            pad_index = self.vocab.get_token_index(self.vocab._padding_token, self._target_namespace)  # pylint: disable=protected-access
            self._bleu = BLEU(exclude_indices={pad_index, self._end_index, self._start_index})
        else:
            self._bleu = None
        self._seq_metric = seq_metrics
        # At prediction time, we use a beam search to find the most likely sequence of target tokens.
        beam_size = beam_size or 1
        self._max_decoding_steps = max_decoding_steps
        self._beam_search = BeamSearch(self._end_index, max_steps=max_decoding_steps, beam_size=beam_size)
        # Dense embedding of source vocab tokens.
        self._source_embedder = source_embedder
        # Encoder
        # Encodes the sequence of source embeddings into a sequence of hidden states.
        self._encoder = encoder
        self._encoder_output_dim = self._encoder.get_output_dim()
        # Attention mechanism applied to the encoder output for each step.
        self._attention = attention
        self._feed_output_attention_to_decoder = feed_output_attention_to_decoder
        if self._feed_output_attention_to_decoder:
            # If using attention, a weighted average over encoder outputs will be concatenated
            # to the previous target embedding to form the input to the decoder at each
            # time step.
            # NOTE(review): target_embedding_dim is used here BEFORE the
            # `or`-defaulting below — passing None with this flag set would
            # raise a TypeError; confirm callers always supply a value.
            self._decoder_input_dim = self._encoder_output_dim + target_embedding_dim
        else:
            # Otherwise, the input to the decoder is just the previous target embedding.
            self._decoder_input_dim = target_embedding_dim
        # Decoder
        # Dense embedding of vocab words in the target space.
        num_classes = self.vocab.get_vocab_size(self._target_namespace)
        self._num_classes = num_classes
        target_embedding_dim = target_embedding_dim or source_embedder.get_output_dim()
        self._target_embedder = Embedding(num_classes, target_embedding_dim)
        # TODO: relax this assumption
        # Decoder output dim needs to be the same as the encoder output dim since we initialize the
        # hidden state of the decoder with the final hidden state of the encoder.
        self._keep_decoder_output_dim_same_as_encoder = keep_decoder_output_dim_same_as_encoder
        if not self._keep_decoder_output_dim_same_as_encoder:
            # Halve the decoder size when the encoder doubled it by being
            # bidirectional.
            self._decoder_output_dim = int(self._encoder_output_dim / 2) if encoder.is_bidirectional() \
                else self._encoder_output_dim
        else:
            self._decoder_output_dim = self._encoder_output_dim
        self._decoder_cell = LSTMCell(self._decoder_input_dim, self._decoder_output_dim)
        # Projects the encoder's final state into the decoder's initial state.
        self._transform_decoder_init_state = torch.nn.Sequential(
            torch.nn.Linear(self._encoder_output_dim, self._decoder_output_dim),
            torch.nn.Tanh()
        )
        # Generate Score
        # Maps [decoder_hidden; attention_context] to vocabulary logits.
        self._output_projection_layer = Linear(self._decoder_output_dim + self._encoder_output_dim, num_classes)
        # Dropout Layers
        self._encoder_input_dropout = torch.nn.Dropout(p=encoder_input_dropout)
        self._encoder_output_dropout = torch.nn.Dropout(p=encoder_output_dropout)
        self._output_dropout = torch.nn.Dropout(p=dropout)
        self._embedded_dropout = torch.nn.Dropout(p=dropout)
        initializer(self)
    def _prepare_output_projections(self,
                                    last_predictions: torch.Tensor,
                                    state: Dict[str, torch.Tensor])\
            -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        # pylint: disable=line-too-long
        """
        Decode one step: advance the decoder state from the last prediction
        and produce unnormalized scores over the extended output space.

        Indices in ``last_predictions`` below ``self._num_classes`` are
        ordinary target-vocabulary tokens; indices at or above it mean
        "copy the source token at position ``index - num_classes``".

        Dropout is applied before the softmax classifier (following
        "Language to Logical Form with Neural Attention").

        Inputs are the same as for `take_step()`.
        last_predictions: (group_size,)

        Returns the projections of shape
        ``(group_size, num_classes + max_input_sequence_length)`` — the
        first ``num_classes`` columns are generation scores, the remainder
        are the copy (attention) scores — plus the updated state.
        """
        # shape: (group_size, max_input_sequence_length, encoder_output_dim)
        encoder_outputs = state["encoder_outputs"]
        # shape: (group_size, max_input_sequence_length)
        source_mask = state["source_mask"]
        # shape: (group_size, decoder_output_dim)
        decoder_hidden = state["decoder_hidden"]
        # shape: (group_size, decoder_output_dim)
        decoder_context = state["decoder_context"]
        # copy_mask is 1 for generated tokens, 0 for copied ones; multiplying
        # zeroes out copy indices so the embedding lookup stays in range
        # (copy positions are temporarily embedded as index 0).
        # shape: (group_size, target_embedding_dim)
        copy_mask = (last_predictions < self._num_classes).long()
        embedded_input = self._target_embedder(last_predictions * copy_mask)
        if not self.training and copy_mask.sum() < copy_mask.size(0):
            # Copy, Retrieve target token
            # Map each copied source position back to its source token id and
            # look that token up in the *target* vocabulary, so the copied
            # token can be embedded like a generated one.
            # NOTE(review): this branch only runs at inference time; it
            # assumes training targets never contain copy indices — confirm.
            mapped_indices = list()
            source_token_ids = state['source_token_ids']
            for gidx, idx in enumerate(last_predictions):
                if idx >= self._num_classes:
                    source_idx = idx - self._num_classes
                    source_token_id = int(source_token_ids[gidx,source_idx])
                    token = self.vocab.get_token_from_index(source_token_id, self._source_namespace)
                    tid = self.vocab.get_token_index(token, self._target_namespace)
                    mapped_indices.append(tid)
                else:
                    # Generated positions keep a padding embedding here; they
                    # are masked back to the original embedding below.
                    mapped_indices.append(self._pad_index)
            # mapped_indices to tensor
            mapped_indices = torch.from_numpy(numpy.array(mapped_indices))
            mapped_indices = mapped_indices.to(last_predictions.device)
            copyed_embedded_input = self._target_embedder(mapped_indices)
            # Blend: generated rows keep their embedding, copied rows take
            # the remapped one.
            unsqueezed_copy_mask = copy_mask.unsqueeze(dim=1).float()
            embedded_input = embedded_input * unsqueezed_copy_mask + copyed_embedded_input * (1 - unsqueezed_copy_mask)
        embedded_input = self._embedded_dropout(embedded_input)
        if self._feed_output_attention_to_decoder:
            # shape: (group_size, decoder_output_dim + target_embedding_dim)
            decoder_input = torch.cat((embedded_input, state["attention_context"]), -1)
        else:
            # shape: (group_size, target_embedding_dim)
            decoder_input = embedded_input
        # shape (decoder_hidden): (group_size, decoder_output_dim)
        # shape (decoder_context): (group_size, decoder_output_dim)
        decoder_hidden, decoder_context = self._decoder_cell(
            decoder_input,
            (decoder_hidden, decoder_context))
        state["decoder_hidden"] = decoder_hidden
        state["decoder_context"] = decoder_context
        # output_attended_input: shape: (group_size, encoder_output_dim)
        # attention_weights shape: (group_size, max_input_sequence_length)
        output_attended_input, attention_weights = self._prepare_output_attended_input(
            decoder_hidden,
            encoder_outputs,
            source_mask
        )
        if self._feed_output_attention_to_decoder:
            # Remember the attention context for the next step's input.
            state["attention_context"] = output_attended_input
        output_projection_input = torch.cat((decoder_hidden, output_attended_input), -1)
        dropped_output_projection_input = self._output_dropout(output_projection_input)
        # shape: (group_size, num_classes)
        output_projections = self._output_projection_layer(dropped_output_projection_input)
        # Copy scores are the raw attention weights, appended after the
        # generation scores.
        # shape: (group_size, num_classes + max_input_sequence_length)
        output_projections = torch.cat((output_projections, attention_weights), -1)
        return output_projections, state
def take_step(self,
last_predictions: torch.Tensor,
state: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Take a decoding step. This is called by the beam search class.
Parameters
----------
last_predictions : ``torch.Tensor``
A tensor of shape ``(group_size,)``, which gives the indices of the predictions
during the last time step.
state : ``Dict[str, torch.Tensor]``
A dictionary of tensors that contain the current state information
needed to predict the next step, which includes the encoder outputs,
the source mask, and the decoder hidden state and context. Each of these
tensors has shape ``(group_size, *)``, where ``*`` can be any other number
of dimensions.
Returns
-------
Tuple[torch.Tensor, Dict[str, torch.Tensor]]
A tuple of ``(log_probabilities, updated_state)``, where ``log_probabilities``
is a tensor of shape ``(group_size, num_classes)`` containing the predicted
log probability of each class for the next step, for each item in the group,
while ``updated_state`` is a dictionary of tensors containing the encoder outputs,
source mask, and updated decoder hidden state and context.
Notes
-----
We treat the inputs as a batch, even though ``group_size`` is not necessarily
equal to ``batch_size``, since the group may contain multiple states
for each source sentence in the batch.
"""
# shape: (group_size, num_classes + max_input_sequence_length)
output_projections, state = self._prepare_output_projections(last_predictions, state)
source_mask = state['source_mask']
group_size = source_mask.size(0)
# (batch_size, num_classes + max_input_sequence_length)
normalization_mask = torch.cat([source_mask.new_ones((group_size, self._num_classes)),
source_mask], dim=-1)
# shape: (group_size, num_classes + max_input_sequence_length)
class_log_probabilities = util.masked_log_softmax(output_projections, normalization_mask, dim=-1)
return class_log_probabilities, state
    @overrides
    def forward(self, # type: ignore
                source_tokens: Dict[str, torch.LongTensor],
                target_tokens: Dict[str, torch.LongTensor] = None,
                target_source_token_map: torch.Tensor = None,
                meta_field: List[Dict] = None,
                ) -> Dict[str, torch.Tensor]:
        # pylint: disable=arguments-differ
        """
        Make forward pass with decoder logic for producing the entire target sequence.

        Parameters
        ----------
        source_tokens : ``Dict[str, torch.LongTensor]``
            The output of `TextField.as_array()` applied on the source `TextField`. This will be
            passed through a `TextFieldEmbedder` and then through an encoder.
        target_tokens : ``Dict[str, torch.LongTensor]``, optional (default = None)
            Output of `Textfield.as_array()` applied on target `TextField`. We assume that the
            target tokens are also represented as a `TextField`.
        target_source_token_map : ``torch.Tensor``, optional (default = None)
            Shape ``(batch_size, target_length, source_length)``; marks which source positions
            hold each target token (used by the copy-aware loss).
        meta_field : ``List[Dict]``, optional (default = None)
            Per-instance metadata; used to map copy indices back to target-vocabulary ids
            when computing metrics.

        Returns
        -------
        Dict[str, torch.Tensor]
            Contains ``loss`` when targets are given, plus beam-search ``predictions``
            and ``source_token_ids`` at evaluation time.
        """
        state = self._encode(source_tokens)
        if target_tokens:
            state = self._init_decoder_state(state)
            # The `_forward_loop` decodes the input sequence and computes the loss during training
            # and validation.
            output_dict = self._forward_loop(state, target_tokens, target_source_token_map)
        else:
            output_dict = {}
        if not self.training:
            # Re-initialize the decoder state so beam search starts from scratch
            # even when a teacher-forced pass already ran above.
            state = self._init_decoder_state(state)
            predictions = self._forward_beam_search(state)
            output_dict.update(predictions)
            # Keep the raw source ids so `decode` / downstream code can resolve copies.
            output_dict.update({"source_token_ids": source_tokens['tokens']})
            if target_tokens:
                # shape: (batch_size, beam_size, max_sequence_length)
                top_k_predictions = output_dict["predictions"]
                # shape: (batch_size, max_predicted_sequence_length)
                # Copy indices are mapped back to target-vocab ids before scoring.
                best_predictions = self.map_predictions(top_k_predictions[:, 0, :], source_tokens['tokens'], meta_field)
                if self._bleu:
                    self._bleu(best_predictions, target_tokens["tokens"])
                if self._seq_metric:
                    # Gold labels drop the leading start symbol to align with predictions.
                    self._seq_metric(
                        best_predictions.float(),
                        gold_labels=target_tokens["tokens"][:, 1:].float(),
                        mask=util.get_text_field_mask(
                            target_tokens).float()[:, 1:]
                    )
        return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Finalize predictions.
This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test
time, to finalize predictions. The logic for the decoder part of the encoder-decoder lives
within the ``forward`` method.
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.
"""
predicted_indices = output_dict["predictions"]
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
all_predicted_tokens = []
for indices in predicted_indices:
# Beam search gives us the top k results for each source sentence in the batch
# but we just want the single best.
if len(indices.shape) > 1:
indices = indices[0]
indices = list(indices)
# Collect indices till the first end_symbol
if self._end_index in indices:
indices = indices[:indices.index(self._end_index)]
predicted_tokens = list()
for x in indices:
if x < self._num_classes:
predicted_tokens.append(self.vocab.get_token_from_index(x, namespace=self._target_namespace))
else:
source_idx = x - self._num_classes
text = "@@copy@@%d" % int(source_idx)
token = Token(text)
# source_token_id = int(output_dict['source_token_ids'][0][source_idx])
# token = self.vocab.get_token_from_index(source_token_id, self._source_namespace)
predicted_tokens.append(token)
all_predicted_tokens.append(predicted_tokens)
output_dict["predicted_tokens"] = all_predicted_tokens
return output_dict
def _encode(self, source_tokens: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
# shape: (batch_size, max_input_sequence_length, encoder_input_dim)
embedded_input = self._source_embedder(source_tokens)
# shape: (batch_size, max_input_sequence_length)
source_mask = util.get_text_field_mask(source_tokens)
# shape: (batch_size, max_input_sequence_length, encoder_output_dim)
embedded_input = self._encoder_input_dropout(embedded_input)
encoder_outputs = self._encoder(embedded_input, source_mask)
encoder_outputs = self._encoder_output_dropout(encoder_outputs)
return {
"source_token_ids": source_tokens['tokens'],
"source_mask": source_mask,
"encoder_outputs": encoder_outputs,
}
def _init_decoder_state(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
batch_size = state["source_mask"].size(0)
# shape: (batch_size, encoder_output_dim)
final_encoder_output = util.get_final_encoder_states(
state["encoder_outputs"],
state["source_mask"],
self._encoder.is_bidirectional())
# Initialize the decoder hidden state with the final output of the encoder.
# shape: (batch_size, decoder_output_dim)
state["decoder_hidden"] = self._transform_decoder_init_state(final_encoder_output)
# shape: (batch_size, decoder_output_dim)
state["decoder_context"] = state["encoder_outputs"].new_zeros(batch_size, self._decoder_output_dim)
if self._feed_output_attention_to_decoder:
state["attention_context"] = state["encoder_outputs"].new_zeros(batch_size, self._encoder_output_dim)
return state
    def _forward_loop(self,
                      state: Dict[str, torch.Tensor],
                      target_tokens: Dict[str, torch.LongTensor] = None,
                      target_source_token_map: torch.Tensor = None
                      ) -> Dict[str, torch.Tensor]:
        """
        Make forward pass during training or do greedy search during prediction.

        With targets, runs teacher forcing (optionally mixed with scheduled
        sampling) for ``target_length - 1`` steps and computes the loss; without
        targets, greedily decodes for ``self._max_decoding_steps`` steps.

        Notes
        -----
        We really only use the predictions from the method to test that beam search
        with a beam size of 1 gives the same results.
        """
        # shape: (batch_size, max_input_sequence_length)
        source_mask = state["source_mask"]
        batch_size = source_mask.size()[0]
        if target_tokens:
            # shape: (batch_size, max_target_sequence_length)
            targets = target_tokens["tokens"]
            _, target_sequence_length = targets.size()
            # The last input from the target is either padding or the end symbol.
            # Either way, we don't have to process it.
            num_decoding_steps = target_sequence_length - 1
        else:
            num_decoding_steps = self._max_decoding_steps
        # Initialize target predictions with the start index.
        # shape: (batch_size,)
        last_predictions = source_mask.new_full((batch_size,), fill_value=self._start_index)
        step_logits: List[torch.Tensor] = []
        step_predictions: List[torch.Tensor] = []
        for timestep in range(num_decoding_steps):
            if self.training and torch.rand(1).item() < self._scheduled_sampling_ratio:
                # Scheduled sampling: with probability _scheduled_sampling_ratio,
                # feed back the model's own previous prediction instead of the
                # gold token.
                # shape: (batch_size,)
                input_choices = last_predictions
            elif not target_tokens:
                # Greedy decoding: always feed back the previous prediction.
                # shape: (batch_size,)
                input_choices = last_predictions
            else:
                # Teacher forcing: feed the gold token.
                # shape: (batch_size,)
                input_choices = targets[:, timestep]
            # shape: (batch_size, num_classes + max_input_sequence_length)
            output_projections, state = self._prepare_output_projections(input_choices, state)
            # list of tensors, shape: (batch_size, 1, num_classes + max_input_sequence_length)
            step_logits.append(output_projections.unsqueeze(1))
            # Generation slots are always valid; copy slots follow the source mask.
            # (batch_size, num_classes + max_input_sequence_length)
            normalization_mask = torch.cat([source_mask.new_ones((batch_size, self._num_classes)),
                                            source_mask], dim=-1)
            class_probabilities = util.masked_softmax(output_projections, normalization_mask, dim=-1)
            # shape (predicted_classes): (batch_size,)
            _, predicted_classes = torch.max(class_probabilities, 1)
            # shape (predicted_classes): (batch_size,)
            last_predictions = predicted_classes
            step_predictions.append(last_predictions.unsqueeze(1))
        # shape: (batch_size, num_decoding_steps)
        predictions = torch.cat(step_predictions, 1)
        output_dict = {"predictions": predictions}
        if target_tokens:
            # shape: (batch_size, num_decoding_steps, num_classes + max_input_sequence_length)
            logits = torch.cat(step_logits, 1)
            # Compute loss.
            target_mask = util.get_text_field_mask(target_tokens)
            loss = self._get_loss(logits, targets, target_mask, target_source_token_map)
            output_dict["loss"] = loss
        return output_dict
def _forward_beam_search(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""Make forward pass during prediction using a beam search."""
batch_size = state["source_mask"].size()[0]
start_predictions = state["source_mask"].new_full((batch_size,), fill_value=self._start_index)
# shape (all_top_k_predictions): (batch_size, beam_size, num_decoding_steps)
# shape (log_probabilities): (batch_size, beam_size)
all_top_k_predictions, log_probabilities = self._beam_search.search(
start_predictions, state, self.take_step)
output_dict = {
"class_log_probabilities": log_probabilities,
"predictions": all_top_k_predictions,
}
return output_dict
def _prepare_output_attended_input(self,
decoder_hidden_state: torch.Tensor = None,
encoder_outputs: torch.Tensor = None,
encoder_outputs_mask: torch.LongTensor = None) \
-> Tuple[torch.Tensor, torch.Tensor]:
"""Apply ouput attention over encoder outputs and decoder state."""
# Ensure mask is also a FloatTensor. Or else the multiplication within
# attention will complain.
# shape: (batch_size, max_input_sequence_length)
encoder_outputs_mask = encoder_outputs_mask.float()
# shape: (batch_size, max_input_sequence_length)
input_weights = self._attention(
decoder_hidden_state, encoder_outputs, encoder_outputs_mask)
normalized_weights = util.masked_softmax(input_weights, encoder_outputs_mask)
# shape: (batch_size, encoder_output_dim)
attended_input = util.weighted_sum(encoder_outputs, normalized_weights)
return attended_input, input_weights
    def _get_loss(self,
                  logits: torch.FloatTensor,
                  targets: torch.LongTensor,
                  target_mask: torch.LongTensor,
                  target_source_token_map: torch.Tensor) -> torch.Tensor:
        """
        Compute the pointer-generator (generate-or-copy) loss.

        Takes logits (unnormalized outputs from the decoder) of size (batch_size,
        num_decoding_steps, num_classes), target indices of size (batch_size, num_decoding_steps+1)
        and corresponding masks of size (batch_size, num_decoding_steps+1) steps and computes cross
        entropy loss while taking the mask into account.

        The length of ``targets`` is expected to be greater than that of ``logits`` because the
        decoder does not need to compute the output corresponding to the last timestep of
        ``targets``. This method aligns the inputs appropriately to compute the loss.

        ``target_source_token_map``: (batch_size, target_length, source_length)

        During training, we want the logit corresponding to timestep i to be similar to the target
        token from timestep i + 1. That is, the targets should be shifted by one timestep for
        appropriate comparison. Consider a single example where the target has 3 words, and
        padding is to 7 tokens.
           The complete sequence would correspond to <S> w1  w2  w3  <E> <P> <P>
           and the mask would be                     1   1   1   1   1   0   0
           and let the logits be                     l1  l2  l3  l4  l5  l6
        We actually need to compare:
           the sequence           w1  w2  w3  <E> <P> <P>
           with masks             1   1   1   1   0   0
           against                l1  l2  l3  l4  l5  l6
           (where the input was)  <S> w1  w2  w3  <E> <P>
        """
        # Drop the leading start symbol so targets align with the logits.
        # shape: (batch_size, num_decoding_steps)
        relevant_targets = targets[:, 1:].contiguous()
        batch_size, num_decoding_steps = relevant_targets.size()
        # shape: (batch_size, num_decoding_steps)
        relevant_mask = target_mask[:, 1:].contiguous()
        # shape: (batch_size, num_decoding_steps, source_length)
        target_source_token_map = target_source_token_map[:, 1:, :]
        # Softmax over the joint generate+copy slot axis.
        probs = F.softmax(logits, dim=-1)
        # (batch_size * num_decoding_steps, num_classes)
        generate_probs_flat = probs[:, :, :self._num_classes].view(-1, self._num_classes)
        relevant_targets_flat = relevant_targets.view(-1, 1).long()
        # Probability of generating the gold token from the vocabulary.
        # (batch_size, num_decoding_steps)
        generate_probs = torch.gather(generate_probs_flat, dim=1, index=relevant_targets_flat).reshape(batch_size,
                                                                                                       num_decoding_steps)
        # Probability mass of copying the gold token from any matching source position.
        # (batch_size, num_decoding_steps)
        copy_probs = (probs[:, :, self._num_classes:] * target_source_token_map).sum(dim=-1)
        # The two routes are mutually exclusive events under one softmax, so
        # their probabilities add; 1e-13 guards log(0).
        target_log_probs = torch.log(generate_probs + copy_probs + 1e-13)
        target_log_probs *= relevant_mask.float()
        negative_log_likelihood = -1 * target_log_probs
        # Average per sequence over its real (unmasked) steps, then over the
        # non-empty sequences in the batch.
        weights_batch_sum = relevant_mask.sum(-1).float()
        per_batch_loss = negative_log_likelihood.sum(dim=1) / (weights_batch_sum + 1e-13)
        num_non_empty_sequences = ((weights_batch_sum > 0).float().sum() + 1e-13)
        return per_batch_loss.sum() / num_non_empty_sequences
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics: Dict[str, float] = {}
if not self.training:
if self._bleu:
all_metrics.update(self._bleu.get_metric(reset=reset))
if self._seq_metric:
all_metrics.update(
{"accuracy": self._seq_metric.get_metric(reset)['accuracy']})
return all_metrics
def map_predictions(self, predictions: torch.LongTensor,
source_token_ids: torch.LongTensor,
meta_field: List[Dict]) -> torch.LongTensor:
"""
Map those copy indices to target idx
:return:
"""
batch_size, max_length = predictions.size()
mapped_predictions = predictions.new_full((batch_size,max_length), fill_value=self._pad_index)
for i in range(batch_size):
source_tokens_to_copy = meta_field[i]['source_tokens_to_copy']
for j in range(max_length):
idx = predictions[i, j]
if idx < self._num_classes:
mapped_predictions[i, j] = idx
else:
# Copy
source_idx = idx - self._num_classes
if source_idx > len(source_tokens_to_copy):
tid = self._pad_index
else:
token = source_tokens_to_copy[source_idx]
# source_token_id = int(source_token_ids[i, source_idx])
# token = self.vocab.get_token_from_index(source_token_id, self._source_namespace)
tid = self.vocab.get_token_index(token, self._target_namespace)
mapped_predictions[i, j] = tid
return mapped_predictions.long()
| 29,535 | 47.182708 | 137 | py |
Unimer | Unimer-master/neural_models/seq2seq_model.py | # coding=utf8
import torch
from overrides import overrides
from typing import Dict, List, Tuple
from allennlp.training.metrics import Metric
from allennlp.models.model import Model
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn import util
from allennlp.modules import Attention, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.models.encoder_decoders.simple_seq2seq import SimpleSeq2Seq
from allennlp.modules.similarity_functions import SimilarityFunction
class Seq2SeqModel(SimpleSeq2Seq):
    """
    A thin wrapper around AllenNLP's ``SimpleSeq2Seq`` that additionally
    tracks a token-level sequence metric (e.g. exact-match accuracy)
    alongside BLEU during evaluation.
    """

    def __init__(self,
                 vocab: Vocabulary,
                 source_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 max_decoding_steps: int,
                 attention: Attention = None,
                 attention_function: SimilarityFunction = None,
                 beam_size: int = None,
                 target_namespace: str = "tokens",
                 target_embedding_dim: int = None,
                 scheduled_sampling_ratio: float = 0.,
                 use_bleu: bool = True,
                 seq_metrics=None) -> None:
        # NOTE: the metric is stashed before the superclass constructor runs,
        # matching the original initialization order.
        self._seq_metric = seq_metrics
        super(Seq2SeqModel, self).__init__(
            vocab, source_embedder, encoder, max_decoding_steps, attention,
            attention_function, beam_size, target_namespace,
            target_embedding_dim, scheduled_sampling_ratio, use_bleu)

    @overrides
    def forward(self,  # type: ignore
                source_tokens: Dict[str, torch.LongTensor],
                target_tokens: Dict[str, torch.LongTensor] = None) -> Dict[str, torch.Tensor]:
        """Encode the source, decode (with loss when targets are given), and
        update evaluation metrics on the best beam hypothesis."""
        state = self._encode(source_tokens)
        output_dict: Dict[str, torch.Tensor] = {}
        if target_tokens:
            # Teacher-forced decoding computes the training/validation loss.
            output_dict = self._forward_loop(self._init_decoder_state(state), target_tokens)
        if not self.training:
            # Beam search starts from a freshly initialized decoder state.
            output_dict.update(self._forward_beam_search(self._init_decoder_state(state)))
            if target_tokens:
                # shape: (batch_size, beam_size, max_sequence_length)
                top_k_predictions = output_dict["predictions"]
                # shape: (batch_size, max_predicted_sequence_length)
                best_predictions = top_k_predictions[:, 0, :]
                if self._bleu:
                    self._bleu(best_predictions, target_tokens["tokens"])
                if self._seq_metric:
                    # Gold labels drop the leading start symbol.
                    gold = target_tokens["tokens"][:, 1:].float()
                    mask = util.get_text_field_mask(target_tokens).float()[:, 1:]
                    self._seq_metric(best_predictions.float(), gold_labels=gold, mask=mask)
        return output_dict

    @overrides
    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        """Expose BLEU and accuracy at evaluation time only."""
        metrics: Dict[str, float] = {}
        if self.training:
            return metrics
        if self._bleu:
            metrics.update(self._bleu.get_metric(reset=reset))
        if self._seq_metric:
            metrics["accuracy"] = self._seq_metric.get_metric(reset)['accuracy']
        return metrics
| 3,441 | 37.244444 | 98 | py |
Unimer | Unimer-master/neural_models/utils.py | # coding=utf8
import numpy
import torch
from typing import List
def has_nan(x: torch.Tensor) -> bool:
    """Return True if any element of ``x`` is NaN.

    Bug fix: ``torch.isnan(x).any()`` yields a 0-dim ``BoolTensor`` rather
    than the Python ``bool`` the annotation promises; cast it so callers can
    safely use identity checks (``is True``) or serialize the result.
    """
    return bool(torch.isnan(x).any())
def matrix_cosine_similarity(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8):
    """
    Pairwise cosine similarity between two batched sets of vectors.

    :param x: (batch_size, length_1, dim)
    :param y: (batch_size, length_2, dim)
    :param eps: lower bound applied to each vector norm for numerical stability
    :return:
        (batch_size, length_1, length_2)
    """
    # shape: (batch_size, length_1, length_2)
    dot = torch.bmm(x, y.transpose(1, 2))
    # Clamp the norms away from zero before forming the denominator.
    # shape: (batch_size, length_1) and (batch_size, length_2)
    x_norm = x.norm(dim=-1, p=None).clamp(min=eps)
    y_norm = y.norm(dim=-1, p=None).clamp(min=eps)
    # Outer product of the norms via broadcasting.
    # shape: (batch_size, length_1, length_2)
    return dot / (x_norm.unsqueeze(-1) * y_norm.unsqueeze(1))
def get_one_hot_mask(num_classes: int, ids: List):
    """
    Sum of one-hot vectors for ``ids`` over ``num_classes`` classes.

    Duplicated ids accumulate, so the result holds occurrence counts per
    class (a float64 tensor of shape ``(num_classes,)``).
    """
    index_array = numpy.asarray(ids, dtype=int)
    counts = numpy.eye(num_classes)[index_array].sum(axis=0)
    return torch.from_numpy(counts)
Unimer | Unimer-master/neural_models/GNN.py | # coding=utf8
import numpy
import torch
import torch.nn as nn
from allennlp.models.model import Model
from allennlp.data.tokenizers import Token
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules import Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.training.metrics import Metric
from allennlp.nn import util
from overrides import overrides
from typing import Dict, List, Union, Tuple
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
from . import utils as nn_utils
from .modules.gnn_encoder import GNNTransformerEncoderLayer, GNNTransformerEncoder, \
GNNTransformerDecoderLayer, GNNTransformerDecoder, get_decode_edge_mask
class GNNCopyTransformer(Model):
"""
Transformer-based Seq2Seq Model
"""
    def __init__(self, vocab: Vocabulary,
                 source_namespace: str,
                 target_namespace: str,
                 segment_namespace: str,
                 max_decoding_step: int,
                 token_based_metric: Metric,
                 source_embedding_dim: int = 256,
                 target_embedding_dim: int = 256,
                 encoder_d_model: int = 512,
                 decoder_d_model: int = 512,
                 encoder_nhead: int = 8,
                 decoder_nhead: int = 8,
                 num_encoder_layers: int = 6,
                 num_decoder_layers: int = 6,
                 encoder_dim_feedforward: int = 2048,
                 decoder_dim_feedforward: int = 2048,
                 dropout: float = 0.1,
                 beam_size: int = 1,
                 token_symbol: str = '@token@',
                 non_func_symbol: str = "@nonfunc@",
                 nlabels: int = 0,
                 max_decode_clip_range: int = 0,
                 encode_edge_label_with_matrix: bool = True,
                 is_test: bool = False,
                 ):
        """
        Build the GNN copy-transformer.

        ``nlabels`` is the number of edge labels; 0 selects a plain
        ``nn.TransformerEncoder``, otherwise a GNN-augmented encoder is used.
        ``max_decode_clip_range`` similarly selects between a plain
        transformer decoder (0) and a GNN decoder with relative positions
        clipped to that range.  ``token_symbol`` / ``non_func_symbol`` name
        the segment labels that mark plain-token and non-functional
        positions, which are excluded from the copyable-entity mask.
        """
        super().__init__(vocab)
        self._source_namespace = source_namespace
        self._target_namespace = target_namespace
        self._segment_namespace = segment_namespace
        # Special-symbol indices resolved once from the vocabulary.
        self._src_start_index = self.vocab.get_token_index(START_SYMBOL, self._source_namespace)
        self._src_end_index = self.vocab.get_token_index(END_SYMBOL, self._source_namespace)
        self._start_index = self.vocab.get_token_index(START_SYMBOL, self._target_namespace)
        self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)
        self._oov_index = self.vocab.get_token_index(self.vocab._oov_token,
                                                     self._target_namespace)  # pylint: disable=protected-access
        self._pad_index = self.vocab.get_token_index(self.vocab._padding_token,
                                                     self._target_namespace)
        # Segment-label indices used to build the copyable-entity mask.
        self._token_index = self.vocab.get_token_index(token_symbol, self._segment_namespace)
        self._non_func_symbol_index = self.vocab.get_token_index(non_func_symbol, self._segment_namespace)
        self._segment_pad_index = self.vocab.get_token_index(self.vocab._padding_token, self._segment_namespace)
        # Source Embedding
        num_source_words = self.vocab.get_vocab_size(self._source_namespace)
        self._use_glove = False
        self._source_embedding = Embedding(num_source_words, source_embedding_dim)
        # Segments
        num_segment_types = self.vocab.get_vocab_size(self._segment_namespace)
        segment_embedding = Embedding(num_segment_types, source_embedding_dim)
        self._segment_embedder = BasicTextFieldEmbedder({'tokens': segment_embedding})
        num_classes = self.vocab.get_vocab_size(self._target_namespace)
        self._num_classes = num_classes
        self._target_embedder = Embedding(num_classes, target_embedding_dim)
        # Encoder: plain transformer when there are no edge labels, GNN otherwise.
        self._nlabels = nlabels  # number of edge labels
        if self._nlabels == 0:
            self._use_gnn_encoder = False
            encoder_layer = nn.TransformerEncoderLayer(encoder_d_model, encoder_nhead, encoder_dim_feedforward, dropout)
            encoder_norm = nn.LayerNorm(encoder_d_model)
            self._encoder = nn.TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
        else:
            self._use_gnn_encoder = True
            print("Use GNN Encoder")
            encoder_layer = GNNTransformerEncoderLayer(d_model=encoder_d_model, nhead=encoder_nhead,
                                                       dim_feedforward=encoder_dim_feedforward,
                                                       dropout=dropout, nlabels=self._nlabels,
                                                       is_matrix=encode_edge_label_with_matrix)
            encoder_norm = nn.LayerNorm(encoder_d_model)
            self._encoder = GNNTransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
        # Decoder: plain transformer decoder, or GNN decoder with relative
        # positions clipped to max_decode_clip_range (hence clip_range + 1 labels).
        self._max_decode_clip_range = max_decode_clip_range
        if max_decode_clip_range == 0:
            self._decode_nlabels = 0
            self._decode_use_relative_position = False
            decoder_layer = nn.TransformerDecoderLayer(decoder_d_model, decoder_nhead, decoder_dim_feedforward, dropout)
            decoder_norm = nn.LayerNorm(decoder_d_model)
            self._decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
        else:
            print("Use GNN Decoder")
            self._decode_nlabels = self._max_decode_clip_range + 1
            self._decode_use_relative_position = True
            decoder_layer = GNNTransformerDecoderLayer(d_model=decoder_d_model, nhead=decoder_nhead,
                                                       dim_feedforward=decoder_dim_feedforward,
                                                       dropout=dropout, nlabels=self._decode_nlabels,
                                                       is_matrix=encode_edge_label_with_matrix)
            decoder_norm = nn.LayerNorm(decoder_d_model)
            self._decoder = GNNTransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
        # Decode Gate
        self.gate_linear = nn.Linear(decoder_d_model, 1)
        # Projects decoder states into the encoder space to score copy actions.
        self.copy_word_prj = nn.Linear(decoder_d_model, encoder_d_model, bias=False)
        self._source_embedding_dim = source_embedding_dim
        self._target_embedding_dim = target_embedding_dim
        self._encoder_d_model = encoder_d_model
        self._decoder_d_model = decoder_d_model
        self._encoder_nhead = encoder_nhead
        self._decoder_nhead = decoder_nhead
        self._max_decoding_step = max_decoding_step
        self._token_based_metric = token_based_metric
        self._beam_size = beam_size
        self._is_test = is_test
        self._reset_parameters()
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
    @overrides
    def forward(self,
                source_tokens: Dict[str, torch.LongTensor],
                segments: Dict[str, torch.LongTensor],
                source_entity_length: torch.LongTensor,
                edge_mask: torch.Tensor,
                copy_targets: torch.Tensor = None,
                generate_targets: torch.Tensor = None,
                target_tokens: Dict[str, torch.LongTensor] = None,
                meta_field: Dict = None,
                ) -> Dict[str, torch.Tensor]:
        """
        Encode the source graph and decode the target sequence.

        Training uses teacher forcing and returns ``{"predictions", "loss"}``;
        evaluation runs ``_eval_decode`` and, when targets are available,
        updates the token-based metric.  ``edge_mask`` must provide exactly
        ``self._nlabels`` edge-label planes.
        """
        assert self._nlabels == edge_mask.size(1)
        state = self._encode(source_tokens, segments, source_entity_length, edge_mask)
        if self.training:
            state = self._train_decode(state, target_tokens, generate_targets)
            # shape: (batch_size, decode_length, d_model)
            generate_mask = state["generate_mask"]
            decoder_outputs = state["decoder_outputs"]
            decode_length = decoder_outputs.size(1)
            # Generate scores
            # shape: (batch_size, decode_length, num_classes)
            generate_scores = self.get_generate_scores(decoder_outputs)
            # Copyable positions: everything that is not a plain token,
            # a non-functional symbol, or padding.
            # shape: (batch_size, encode_length)
            entity_mask = 1 - ((segments['tokens'] == self._token_index) |
                               (segments['tokens'] == self._non_func_symbol_index) |
                               (segments['tokens'] == self._segment_pad_index)).float()
            entity_mask = entity_mask.unsqueeze(1).repeat(1, decode_length, 1)
            # shape: (batch_size, decode_length, encode_length)
            copy_scores = self.get_copy_scores(state, decoder_outputs)
            # shape: (batch_size, decode_length, 1)
            # generate_gate = F.sigmoid(self.gate_linear(decoder_outputs))
            # copy_gate = 1 - generate_gate
            scores = torch.cat((generate_scores, copy_scores), dim=-1)
            # scores = torch.cat((generate_scores, copy_scores), dim=-1)
            # One softmax over generation slots (always valid) plus copy slots
            # (restricted to entity positions).
            # shape: (batch_size, decode_length, num_classes + encode_length)
            score_mask = torch.cat((entity_mask.new_ones((copy_scores.size(0), decode_length, self._num_classes)),
                                    entity_mask), dim=-1)
            class_probabilities = util.masked_softmax(scores, mask=score_mask, dim=-1)
            _, predicted_classes = torch.max(class_probabilities, dim=-1, keepdim=False)
            targets = target_tokens["tokens"]
            target_mask = state["target_mask"]
            # shape: (batch_size, max_target_sequence_length)
            loss = self._get_loss(class_probabilities, targets, generate_mask, copy_targets, target_mask)
            output_dict = {"predictions": predicted_classes, "loss": loss}
            predictions = output_dict["predictions"]
            # Fold copy indices (>= num_classes) back onto source positions
            # before scoring against the gold labels.
            pmask = (predictions < self._num_classes).long()
            _predictions = predictions * pmask + (predictions - self._num_classes) * (1 - pmask)
            target_labels = self._get_target_labels(target_tokens["tokens"], generate_targets)
            target_mask = util.get_text_field_mask(target_tokens)
            # NOTE(review): training predictions are not sliced with [:, 1:]
            # (unlike the eval branch below) — presumably teacher-forced
            # decoding already excludes the start position; confirm against
            # _train_decode.
            self._token_based_metric(_predictions, gold_labels=target_labels[:, 1:], mask=target_mask[:, 1:])
        else:
            output_dict = self._eval_decode(state, segments)
            if target_tokens:
                predictions = output_dict["predictions"]
                pmask = (predictions < self._num_classes).long()
                _predictions = predictions * pmask + (predictions - self._num_classes) * (1 - pmask)
                target_labels = self._get_target_labels(target_tokens["tokens"], generate_targets)
                target_mask = util.get_text_field_mask(target_tokens)
                self._token_based_metric(_predictions[:, 1:], gold_labels=target_labels[:, 1:], mask=target_mask[:, 1:])
        return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Finalize predictions.
This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test
time, to finalize predictions. The logic for the decoder part of the encoder-decoder lives
within the ``forward`` method.
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.
"""
predicted_indices = output_dict["predictions"]
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
all_predicted_tokens = []
for indices in predicted_indices:
# Beam search gives us the top k results for each source sentence in the batch
# but we just want the single best.
if len(indices.shape) > 1:
indices = indices[0]
indices = list(indices)
if self._end_index in indices:
indices = indices[:indices.index(self._end_index)]
predicted_tokens = list()
for x in indices:
if x in [self._end_index, self._start_index, self._pad_index]:
continue
if x >= self._num_classes:
index = x - self._num_classes
predicted_tokens.append(Token("@entity_%d" % index))
else:
w = self.vocab.get_token_from_index(x, namespace=self._target_namespace)
predicted_tokens.append(w)
all_predicted_tokens.append(predicted_tokens)
output_dict["predicted_tokens"] = all_predicted_tokens
return output_dict
def _embed_source(self, source_tokens: Dict[str, torch.Tensor], source_entity_length: torch.LongTensor):
"""
:param source_tokens
:param source_entity_length: (batch_size, max_token_num)
:return
(batch_size, max_token_num, embedding_dim)
"""
token_ids = source_tokens['tokens']
embedded = self._source_embedding(token_ids)
batched_embedded = list()
embedding_dim = embedded.size(-1)
batch_size, max_token_num = source_entity_length.size()
for _embedded, _length in zip(embedded, source_entity_length.long()):
merged_embedded_input = list()
idx = 0
for length in _length:
if length > 0:
embedding = torch.mean(_embedded[idx:idx + length, :], dim=0)
merged_embedded_input.append(embedding)
idx += length
else:
break
merged_embedded_input = torch.stack(merged_embedded_input, dim=0)
pad_num = max_token_num - merged_embedded_input.size(0)
if pad_num > 0:
merged_embedded_input = torch.cat((merged_embedded_input,
merged_embedded_input.new_zeros([pad_num, embedding_dim])), dim=0)
batched_embedded.append(merged_embedded_input)
# shape: (batch_size, max_token_num, embedding_dim)
batched_embedded = torch.stack(batched_embedded, dim=0)
assert batched_embedded.size(0) == embedded.size(0) and batched_embedded.size(1) == source_entity_length.size(1)
# TODO: Dropout
return batched_embedded
def _encode(self, source_tokens: Dict[str, torch.Tensor], segments: Dict[str, torch.Tensor],
source_entity_length: torch.Tensor, edge_mask: torch.Tensor, ) -> Dict[str, torch.Tensor]:
"""
:param source_tokens:
:param segments:
:param merge_indicators:
:return:
"""
# shape: (batch_size, encode_length, embedding_dim)
source_embedded_input = self._embed_source(source_tokens, source_entity_length)
# shape: (batch_size, encode_length, embedding_dim)
segments_embedded_input = self._segment_embedder(segments)
encode_length = segments_embedded_input.size(1)
assert source_embedded_input.size(1) == segments_embedded_input.size(1)
# token_mask = (segments['tokens'] == self._token_index).unsqueeze(-1).float()
# valid_token_embedded_input = batched_embedded_input * token_mask
# valid_token_embedded_input = util.add_positional_features(valid_token_embedded_input)
# valid_token_embedded_input = batched_embedded_input * (1 - token_mask) + valid_token_embedded_input * token_mask
if self._source_embedding_dim == self._encoder_d_model:
batched_embedded_input = segments_embedded_input + source_embedded_input
final_embedded_input = util.add_positional_features(batched_embedded_input)
else:
batched_embedded_input = torch.cat([source_embedded_input, segments_embedded_input], dim=-1)
final_embedded_input = util.add_positional_features(batched_embedded_input)
# shape: (encode_length, batch_size, d_model)
final_embedded_input = final_embedded_input.permute(1, 0, 2)
# shape: (batch_size, encode_length)
source_mask = util.get_text_field_mask(segments)
source_key_padding_mask = (1 - source_mask.byte()).bool()
if not self._use_gnn_encoder:
# shape: (encode_length, batch_size, d_model)
encoder_outputs = self._encoder(final_embedded_input, src_key_padding_mask=source_key_padding_mask)
else:
# GNN encoders
encoder_outputs = self._encoder(src=final_embedded_input, edge_mask=edge_mask.permute(0, 2, 3, 1),
padding_mask=source_key_padding_mask)
source_token_mask = (segments['tokens'] == self._token_index).float()
return {
"source_mask": source_mask,
"source_key_padding_mask": source_key_padding_mask,
"source_token_mask": source_token_mask,
"encoder_outputs": encoder_outputs,
"source_embedded": batched_embedded_input,
"source_raw_embedded": source_embedded_input,
}
    def _train_decode(self, state: Dict[str, torch.Tensor],
                      target_tokens: Dict[str, torch.Tensor],
                      generate_targets: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Run the decoder with teacher forcing over the gold target sequence.

        :param state: encoder state produced by ``_encode``
        :param target_tokens: indexed gold target tokens
        :param generate_targets: (batch_size, max_target_length); 0 means the
            step is generated from the target vocabulary, a non-zero value is
            the source position the token is copied from
        :return: ``state`` updated with ``decoder_outputs``,
            ``target_key_padding_mask``, ``target_mask`` and ``generate_mask``
        """
        encoder_outputs = state["encoder_outputs"]
        source_key_padding_mask = state["source_key_padding_mask"]
        # shape: (batch_size, encode_length, d_model)
        source_embedded = state["source_raw_embedded"]
        batch_size, _, _ = source_embedded.size()
        basic_index = torch.arange(batch_size).to(source_embedded.device).long()
        generate_targets = generate_targets.long()
        # for copied steps, feed the embedding of the copied source position
        retrieved_target_embedded_input = source_embedded[basic_index.unsqueeze(1), generate_targets][:, :-1, :]
        target_embedded_input = self._target_embedder(target_tokens['tokens'])[:, :-1, :]
        # shape: (batch_size, max_decode_length)
        # where 1 indicates that the target token is generated rather than copied
        generate_mask = (generate_targets == 0).float()
        # mix vocabulary embeddings (generated steps) with source embeddings (copied steps)
        target_embedded_input = target_embedded_input * generate_mask[:, :-1].unsqueeze(-1) \
                                + retrieved_target_embedded_input * (1 - generate_mask)[:, :-1].unsqueeze(-1)
        target_embedded_input = util.add_positional_features(target_embedded_input)
        # shape: (max_target_sequence_length - 1, batch_size, d_model)
        target_embedded_input = target_embedded_input.permute(1, 0, 2)
        # shape: (batch_size, max_target_sequence_length - 1)
        """
        key_padding_mask should be a ByteTensor where True values are positions
        that should be masked with float('-inf') and False values will be unchanged.
        """
        target_mask = util.get_text_field_mask(target_tokens)[:, 1:]
        target_key_padding_mask = (1 - target_mask.byte()).bool()
        assert target_key_padding_mask.size(1) == target_embedded_input.size(0) and \
               target_embedded_input.size(1) == target_key_padding_mask.size(0)
        max_target_seq_length = target_key_padding_mask.size(1)
        # causal (lower-triangular) additive mask: -inf above the diagonal, 0 elsewhere
        target_additive_mask = (torch.triu(
            target_mask.new_ones(max_target_seq_length, max_target_seq_length)) == 1).transpose(0, 1)
        target_additive_mask = target_additive_mask.float().masked_fill(target_additive_mask == 0, float('-inf'))
        target_additive_mask = target_additive_mask.masked_fill(target_additive_mask == 1, float(0.0))
        assert target_embedded_input.size(1) == encoder_outputs.size(1)
        source_token_mask = state["source_token_mask"]
        # cross-attention only attends to plain word tokens of the source
        memory_key_padding_mask = (1 - source_token_mask).bool()
        # memory_key_padding_mask = source_key_padding_mask
        if not self._decode_use_relative_position:
            # shape: (max_target_sequence_length, batch_size, d_model)
            decoder_outputs = self._decoder(target_embedded_input, memory=encoder_outputs,
                                            tgt_mask=target_additive_mask, tgt_key_padding_mask=None,
                                            memory_key_padding_mask=memory_key_padding_mask)
        else:
            # gnn decoder: relative positions are encoded as edge labels
            edge_mask = get_decode_edge_mask(target_embedded_input,
                                             max_decode_clip_range=self._max_decode_clip_range)
            batch_size = edge_mask.size(0)
            tgt_padding_mask = torch.tril(edge_mask.new_ones([max_target_seq_length, max_target_seq_length]),
                                          diagonal=0)
            tgt_padding_mask = (1 - (tgt_padding_mask.unsqueeze(0).repeat(batch_size, 1, 1))).float()
            decoder_outputs = self._decoder(target_embedded_input, edge_mask=edge_mask.permute(0, 2, 3, 1),
                                            memory=encoder_outputs, tgt_padding_mask=tgt_padding_mask,
                                            memory_key_padding_mask=memory_key_padding_mask)
        # shape: (batch_size, max_target_sequence_length, d_model)
        decoder_outputs = decoder_outputs.permute(1, 0, 2)
        state.update({
            "decoder_outputs": decoder_outputs,
            "target_key_padding_mask": target_key_padding_mask,
            "target_mask": target_mask,
            "generate_mask": generate_mask
        })
        return state
    def _eval_decode(self, state: Dict[str, torch.Tensor],
                     segments: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """
        Greedy auto-regressive decoding for evaluation.

        At each step the next token is either generated from the target
        vocabulary (joint index ``< self._num_classes``) or copied from a
        source position (joint index ``>= self._num_classes``); the returned
        ``predictions`` keep this joint index space.

        :param state: encoder state produced by ``_encode``
        :param segments: segment-type ids, used to restrict the copy
            distribution to entity positions
        :return: dict with ``predictions`` of shape (batch_size, decoded_len)
        """
        encoder_outputs = state["encoder_outputs"]
        source_key_padding_mask = state["source_key_padding_mask"]
        source_embedded = state["source_raw_embedded"]
        source_token_mask = state["source_token_mask"]
        memory_key_padding_mask = (1 - source_token_mask).bool()
        # memory_key_padding_mask = source_key_padding_mask
        batch_size = source_key_padding_mask.size(0)
        encode_length = source_key_padding_mask.size(1)
        # once a sequence has emitted @end@, force all further mass onto @end@
        log_probs_after_end = encoder_outputs.new_full((batch_size, self._num_classes + encode_length),
                                                       fill_value=float("-inf"))
        log_probs_after_end[:, self._end_index] = 0.
        start_predictions = state["source_mask"].new_full((batch_size, 1), fill_value=self._start_index)
        partial_generate_predictions = start_predictions
        partial_copy_predictions = state["source_mask"].new_zeros((batch_size, 1))
        basic_index = torch.arange(batch_size).to(source_embedded.device).unsqueeze(1).long()
        # 1.0 where the step was generated, 0.0 where it was copied
        generate_mask = state["source_mask"].new_ones((batch_size, 1)).float()
        # shape: (batch_size)
        last_prediction = start_predictions.squeeze(1)
        for _ in range(self._max_decoding_step):
            # re-embed the whole partial sequence each step (no incremental cache)
            # shape: (batch_size, partial_len, d_model)
            partial_source_embedded_input = source_embedded[basic_index, partial_copy_predictions]
            partial_target_embedded_input = self._target_embedder(partial_generate_predictions)
            partial_embedded_input = partial_target_embedded_input * generate_mask.unsqueeze(-1) \
                                     + partial_source_embedded_input * (1 - generate_mask).unsqueeze(-1)
            partial_embedded_input = util.add_positional_features(partial_embedded_input)
            partial_len = partial_embedded_input.size(1)
            partial_embedded_input = partial_embedded_input.permute(1, 0, 2)
            # causal additive mask over the partial sequence
            mask = (torch.triu(state["source_mask"].new_ones(partial_len, partial_len)) == 1).transpose(0, 1)
            mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
            if not self._decode_use_relative_position:
                # shape: (partial_len, batch_size, d_model)
                outputs = self._decoder(partial_embedded_input, memory=encoder_outputs,
                                        tgt_mask=mask, memory_key_padding_mask=memory_key_padding_mask)
            else:
                # gnn decoder: relative positions are encoded as edge labels
                edge_mask = get_decode_edge_mask(partial_embedded_input,
                                                 max_decode_clip_range=self._max_decode_clip_range)
                tgt_padding_mask = torch.tril(edge_mask.new_ones([partial_len, partial_len]), diagonal=0)
                tgt_padding_mask = (1 - tgt_padding_mask.unsqueeze(0).repeat(batch_size, 1, 1)).float()
                # shape: (partial_len, batch_size, d_model)
                outputs = self._decoder(partial_embedded_input, edge_mask=edge_mask.permute(0, 2, 3, 1),
                                        memory=encoder_outputs, tgt_padding_mask=tgt_padding_mask,
                                        memory_key_padding_mask=memory_key_padding_mask)
            outputs = outputs.permute(1, 0, 2)
            # only the last position is needed to predict the next token
            # shape: (batch_size, d_model)
            curr_outputs = outputs[:, -1, :]
            # shape: (batch_size, num_classes)
            generate_scores = self.get_generate_scores(curr_outputs)
            # shape: (batch_size, encode_length)
            copy_scores = self.get_copy_scores(state, curr_outputs.unsqueeze(1)).squeeze(1)
            # Gate
            # shape: (batch_size, 1)
            # generate_gate = F.sigmoid(self.gate_linear(curr_outputs))
            # copy_gate = 1 - generate_gate
            scores = torch.cat((generate_scores, copy_scores), dim=-1)
            # scores = torch.cat((generate_scores, copy_scores), dim=-1)
            # copy candidates exclude plain tokens, non-func symbols and padding
            # shape: (batch_size, encode_length)
            entity_mask = 1 - ((segments['tokens'] == self._token_index) |
                               (segments['tokens'] == self._non_func_symbol_index) |
                               (segments['tokens'] == self._segment_pad_index)).float()
            # shape: (batch_size, num_classes + encode_length)
            score_mask = torch.cat((entity_mask.new_ones((batch_size, self._num_classes)), entity_mask), dim=-1)
            # shape: (batch_size, num_classes + encode_length)
            normalized_scores = util.masked_softmax(scores, mask=score_mask, dim=-1)
            last_prediction_expanded = last_prediction.unsqueeze(-1).expand(
                batch_size, self._num_classes + encode_length
            )
            # finished sequences can only predict @end@ again
            # shape: (batch_size, num_classes + encode_length)
            cleaned_logits = torch.where(
                last_prediction_expanded == self._end_index,
                log_probs_after_end,
                normalized_scores
            )
            # shape: (batch_size)
            _, predicted = torch.max(input=cleaned_logits, dim=1, keepdim=False)
            # split the joint index back into generate / copy components
            copy_mask = (predicted >= self._num_classes).long()
            generate_predicted = predicted * (1 - copy_mask)
            copy_predicted = (predicted - self._num_classes) * copy_mask
            partial_copy_predictions = torch.cat((partial_copy_predictions, copy_predicted.unsqueeze(1)), dim=1)
            partial_generate_predictions = torch.cat((partial_generate_predictions, generate_predicted.unsqueeze(1)),
                                                     dim=1)
            generate_mask = torch.cat((generate_mask, (1 - copy_mask).unsqueeze(1).float()), dim=1)
            last_prediction = predicted
            # stop early once every sequence in the batch has emitted @end@
            if (last_prediction == self._end_index).sum() == batch_size:
                break
        # merge generate / copy predictions back into the joint index space
        predictions = partial_generate_predictions * generate_mask.long() + \
                      (1 - generate_mask).long() * (partial_copy_predictions + self._num_classes)
        # shape: (batch_size, partial_len)
        output_dict = {
            "predictions": predictions
        }
        return output_dict
def get_copy_scores(self, state: Dict[str, torch.Tensor],
query: torch.Tensor) -> torch.Tensor:
"""
:param state:
:param query: (batch_size, length, d_model)
:return:
"""
# shape: (batch_size, encode_length, d_model)
encoder_outputs = state["encoder_outputs"].permute(1, 0, 2)
return self.copy_word_prj(query).bmm(encoder_outputs.permute(0, 2, 1))
def get_generate_scores(self, query: torch.Tensor) -> torch.Tensor:
"""
:param query: (batch_size, length, d_model)
:return:
"""
return F.linear(query, self._target_embedder.weight)
def _get_loss(self, scores: torch.Tensor,
targets: torch.LongTensor,
generate_mask: torch.LongTensor,
copy_mask: torch.LongTensor,
target_mask: torch.LongTensor) -> torch.Tensor:
"""
:param scores: (batch_size, decode_length, num_class + encode_length)
:param targets: (batch_size, decode_length + 1)
:param generate_mask: (batch_size, decode_length + 1), where 1.0 indicates the target word is selected from target
vocabulary, 0.0 indicates the target is copied from entity candidates
:param copy_mask: (batch_size, decode_length + 1, encode_length), where 1.0 indicates that the target word
is copied from this source word
:param target_mask: (batch_size, decode_length)
:return:
"""
batch_size, decode_length, _ = scores.size()
# (batch_size, decode_length, num_class)
generate_scores = scores[:, :, :self._num_classes]
# (batch_size, decode_length, encode_length)
copy_scores = scores[:, :, self._num_classes:]
# shape: (batch_size * decode_length, 1)
relevant_targets = targets[:, 1:].contiguous().view(-1, 1)
target_generate_scores = torch.gather(
generate_scores.view(-1, self._num_classes), dim=1, index=relevant_targets)
target_scores = target_generate_scores.view(batch_size, decode_length)
target_scores = target_scores * generate_mask[:, 1:]
target_scores += (copy_scores * copy_mask[:, 1:, :].float()).sum(dim=-1)
# shape: (batch_size, decode_length)
relevant_mask = target_mask.contiguous().float()
loss = - target_scores.log() * relevant_mask
loss = loss.sum(dim=-1) / relevant_mask.sum(dim=-1)
loss = loss.sum() / batch_size
return loss
def _get_target_labels(self, target_token_ids: torch.Tensor, generate_targets: torch.Tensor):
"""
:param target_token_ids: [batch_size, decode_length]
:param generate_targets: [batch_size, decode_length]
:return:
[batch_size, decode_length]
"""
generate_mask = (generate_targets == 0.0).long()
labels = target_token_ids * generate_mask + generate_targets.long() * (1 - generate_mask)
return labels
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return self._token_based_metric.get_metric(reset)
def _get_predicted_tokens(self,
source_tokens: Dict[str, torch.LongTensor],
predicted_indices: Union[torch.Tensor, numpy.ndarray],
meta_field: List[Dict]):
"""
Convert predicted indices into tokens.
If `n_best = 1`, the result type will be `List[List[str]]`. Otherwise the result
type will be `List[List[List[str]]]`.
"""
# shape: (batch_size, encode_length)
source_token_ids = source_tokens['tokens']
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
predicted_tokens: List[Union[List[List[str]], List[str]]] = []
predicted_abstract_tokens: List[Union[List[List[str]], List[str]]] = []
for bidx, top_k_predictions in enumerate(predicted_indices):
batch_predicted_tokens: List[List[str]] = []
batch_predicted_abstract_tokens: List[List[str]] = []
pseudo_tokens = meta_field[bidx]['pseudo_tokens']
for indices in top_k_predictions:
indices = list(indices)
if self._end_index in indices:
indices = indices[:indices.index(self._end_index)]
tokens = list()
abstract_tokens = list()
for x in indices:
if x in [self._end_index, self._start_index, self._pad_index]:
continue
if x >= self._num_classes:
index = x - self._num_classes
# source_word = "@entity_%d" % index
source_word = pseudo_tokens[index]
tokens.append(source_word)
abstract_tokens.append("@entity_%d" % index)
else:
w = self.vocab.get_token_from_index(x, namespace=self._target_namespace)
tokens.append(w)
abstract_tokens.append(w)
batch_predicted_tokens.append(tokens)
batch_predicted_abstract_tokens.append(abstract_tokens)
predicted_tokens.append(batch_predicted_tokens)
predicted_abstract_tokens.append(batch_predicted_abstract_tokens)
return predicted_tokens, predicted_abstract_tokens
def _get_target_tokens(self, target_token_ids: Union[torch.Tensor, numpy.ndarray]) -> List[List[str]]:
if not isinstance(target_token_ids, numpy.ndarray):
_target_token_ids = target_token_ids.detach().cpu().numpy()
else:
_target_token_ids = target_token_ids
tokens = list()
for ids in _target_token_ids:
_tokens = [self.vocab.get_token_from_index(x, namespace=self._target_namespace) for x in ids
if x not in [self._end_index, self._start_index, self._pad_index]]
tokens.append(_tokens)
return tokens
| 33,952 | 50.057143 | 122 | py |
Unimer | Unimer-master/neural_models/GNN2.py | # coding=utf8
import numpy
import torch
import torch.nn as nn
from allennlp.models.model import Model
from allennlp.data.tokenizers import Token
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules import Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.training.metrics import Metric
from allennlp.nn import util
from overrides import overrides
from typing import Dict, List, Union, Tuple
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
from . import utils as nn_utils
from .modules.gnn_encoder import GNNTransformerEncoderLayer, GNNTransformerEncoder, \
GNNTransformerDecoderLayer, GNNTransformerDecoder, get_decode_edge_mask
class GNNCopyTransformer2(Model):
    """
    Transformer-based seq2seq model with a copy mechanism.

    The encoder (vanilla Transformer, or GNN-Transformer when labelled edges
    are supplied) reads the source; at every decoding step the model either
    generates a target-vocabulary token or copies a source entity, mixing the
    two score distributions with a learned gate (see ``forward``).
    """

    def __init__(self, vocab: Vocabulary,
                 source_namespace: str,
                 target_namespace: str,
                 segment_namespace: str,
                 max_decoding_step: int,
                 token_based_metric: Metric,
                 source_embedding_dim: int = 256,
                 target_embedding_dim: int = 256,
                 encoder_d_model: int = 512,
                 decoder_d_model: int = 512,
                 encoder_nhead: int = 8,
                 decoder_nhead: int = 8,
                 num_encoder_layers: int = 6,
                 num_decoder_layers: int = 6,
                 encoder_dim_feedforward: int = 2048,
                 decoder_dim_feedforward: int = 2048,
                 dropout: float = 0.1,
                 beam_size: int = 1,
                 token_symbol: str = '@token@',
                 non_func_symbol: str = "@nonfunc@",
                 nlabels: int = 0,
                 max_decode_clip_range: int = 0,
                 encode_edge_label_with_matrix: bool = True,
                 is_test: bool = False,
                 ):
        """
        :param vocab: model vocabulary
        :param source_namespace: vocab namespace for source tokens
        :param target_namespace: vocab namespace for target tokens
        :param segment_namespace: vocab namespace for segment-type labels
        :param max_decoding_step: maximum number of greedy decoding steps
        :param token_based_metric: metric updated with token-level predictions
        :param nlabels: number of encoder edge labels; 0 selects a plain
            Transformer encoder instead of the GNN variant
        :param max_decode_clip_range: relative-position clip range for the GNN
            decoder; 0 selects a plain Transformer decoder
        :param encode_edge_label_with_matrix: forwarded to the GNN layers as
            ``is_matrix``
        """
        super().__init__(vocab)
        self._source_namespace = source_namespace
        self._target_namespace = target_namespace
        self._segment_namespace = segment_namespace
        # special-symbol indices in the source / target / segment vocabularies
        self._src_start_index = self.vocab.get_token_index(START_SYMBOL, self._source_namespace)
        self._src_end_index = self.vocab.get_token_index(END_SYMBOL, self._source_namespace)
        self._start_index = self.vocab.get_token_index(START_SYMBOL, self._target_namespace)
        self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)
        self._oov_index = self.vocab.get_token_index(self.vocab._oov_token,
                                                     self._target_namespace)  # pylint: disable=protected-access
        self._pad_index = self.vocab.get_token_index(self.vocab._padding_token,
                                                     self._target_namespace)
        self._token_index = self.vocab.get_token_index(token_symbol, self._segment_namespace)
        self._non_func_symbol_index = self.vocab.get_token_index(non_func_symbol, self._segment_namespace)
        self._segment_pad_index = self.vocab.get_token_index(self.vocab._padding_token, self._segment_namespace)
        # Source Embedding
        num_source_words = self.vocab.get_vocab_size(self._source_namespace)
        self._use_glove = False
        self._source_embedding = Embedding(num_source_words, source_embedding_dim)
        # Segments
        num_segment_types = self.vocab.get_vocab_size(self._segment_namespace)
        segment_embedding = Embedding(num_segment_types, source_embedding_dim)
        self._segment_embedder = BasicTextFieldEmbedder({'tokens': segment_embedding})
        num_classes = self.vocab.get_vocab_size(self._target_namespace)
        self._num_classes = num_classes
        self._target_embedder = Embedding(num_classes, target_embedding_dim)
        # Encoder
        self._nlabels = nlabels  # number of edge labels
        if self._nlabels == 0:
            # no edge labels: fall back to a vanilla Transformer encoder
            self._use_gnn_encoder = False
            encoder_layer = nn.TransformerEncoderLayer(encoder_d_model, encoder_nhead, encoder_dim_feedforward, dropout)
            encoder_norm = nn.LayerNorm(encoder_d_model)
            self._encoder = nn.TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
        else:
            self._use_gnn_encoder = True
            print("Use GNN Encoder")
            encoder_layer = GNNTransformerEncoderLayer(d_model=encoder_d_model, nhead=encoder_nhead,
                                                       dim_feedforward=encoder_dim_feedforward,
                                                       dropout=dropout, nlabels=self._nlabels,
                                                       is_matrix=encode_edge_label_with_matrix)
            encoder_norm = nn.LayerNorm(encoder_d_model)
            self._encoder = GNNTransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
        # Decoder
        self._max_decode_clip_range = max_decode_clip_range
        if max_decode_clip_range == 0:
            # vanilla Transformer decoder with absolute positions
            self._decode_nlabels = 0
            self._decode_use_relative_position = False
            decoder_layer = nn.TransformerDecoderLayer(decoder_d_model, decoder_nhead, decoder_dim_feedforward, dropout)
            decoder_norm = nn.LayerNorm(decoder_d_model)
            self._decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
        else:
            print("Use GNN Decoder")
            # relative offsets are bucketed into clip_range + 1 edge labels
            self._decode_nlabels = self._max_decode_clip_range + 1
            self._decode_use_relative_position = True
            decoder_layer = GNNTransformerDecoderLayer(d_model=decoder_d_model, nhead=decoder_nhead,
                                                       dim_feedforward=decoder_dim_feedforward,
                                                       dropout=dropout, nlabels=self._decode_nlabels,
                                                       is_matrix=encode_edge_label_with_matrix)
            decoder_norm = nn.LayerNorm(decoder_d_model)
            self._decoder = GNNTransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
        # Decode Gate
        self.gate_linear = nn.Linear(decoder_d_model, 1)
        self.copy_word_prj = nn.Linear(decoder_d_model, encoder_d_model, bias=False)
        self._source_embedding_dim = source_embedding_dim
        self._target_embedding_dim = target_embedding_dim
        self._encoder_d_model = encoder_d_model
        self._decoder_d_model = decoder_d_model
        self._encoder_nhead = encoder_nhead
        self._decoder_nhead = decoder_nhead
        self._max_decoding_step = max_decoding_step
        self._token_based_metric = token_based_metric
        self._beam_size = beam_size
        self._is_test = is_test
        self._reset_parameters()
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
    @overrides
    def forward(self,
                source_tokens: Dict[str, torch.LongTensor],
                segments: Dict[str, torch.LongTensor],
                source_entity_length: torch.LongTensor,
                edge_mask: torch.Tensor,
                copy_targets: torch.Tensor = None,
                generate_targets: torch.Tensor = None,
                target_tokens: Dict[str, torch.LongTensor] = None,
                meta_field: Dict = None,
                ) -> Dict[str, torch.Tensor]:
        """
        Encode the source, then decode with teacher forcing (training) or
        greedy search (evaluation).

        :param source_tokens: indexed source tokens
        :param segments: segment-type ids aligned with the merged source tokens
        :param source_entity_length: (batch_size, max_token_num) sub-token
            counts used by ``_embed_source`` to merge entity pieces
        :param edge_mask: (batch_size, nlabels, encode_length, encode_length)
        :param copy_targets: (batch_size, decode_length + 1, encode_length)
            gold copy indicators, used as ``copy_mask`` in ``_get_loss``
        :param generate_targets: gold copy positions (0 = generate from vocab)
        :param target_tokens: indexed gold target tokens (optional at eval)
        :param meta_field: per-example metadata (not used in this method)
        :return: dict with ``predictions`` (joint vocab/copy index space) and,
            during training, ``loss``
        """
        assert self._nlabels == edge_mask.size(1)
        state = self._encode(source_tokens, segments, source_entity_length, edge_mask)
        if self.training:
            state = self._train_decode(state, target_tokens, generate_targets)
            # shape: (batch_size, decode_length, d_model)
            generate_mask = state["generate_mask"]
            decoder_outputs = state["decoder_outputs"]
            decode_length = decoder_outputs.size(1)
            # Generate scores
            # shape: (batch_size, decode_length, num_classes)
            generate_scores = self.get_generate_scores(decoder_outputs)
            # copy candidates exclude plain tokens, non-func symbols and padding
            # shape: (batch_size, encode_length)
            entity_mask = 1 - ((segments['tokens'] == self._token_index) |
                               (segments['tokens'] == self._non_func_symbol_index) |
                               (segments['tokens'] == self._segment_pad_index)).float()
            entity_mask = entity_mask.unsqueeze(1).repeat(1, decode_length, 1)
            # shape: (batch_size, decode_length, encode_length)
            copy_scores = self.get_copy_scores(state, decoder_outputs)
            # soft gate interpolating between generating and copying
            # shape: (batch_size, decode_length, 1)
            generate_gate = F.sigmoid(self.gate_linear(decoder_outputs))
            copy_gate = 1 - generate_gate
            scores = torch.cat((generate_scores * generate_gate, copy_scores * copy_gate), dim=-1)
            # scores = torch.cat((generate_scores, copy_scores), dim=-1)
            # shape: (batch_size, decode_length, num_classes + encode_length)
            score_mask = torch.cat((entity_mask.new_ones((copy_scores.size(0), decode_length, self._num_classes)),
                                    entity_mask), dim=-1)
            class_probabilities = util.masked_softmax(scores, mask=score_mask, dim=-1)
            _, predicted_classes = torch.max(class_probabilities, dim=-1, keepdim=False)
            targets = target_tokens["tokens"]
            target_mask = state["target_mask"]
            # shape: (batch_size, max_target_sequence_length)
            loss = self._get_loss(class_probabilities, targets, generate_mask, copy_targets, target_mask)
            output_dict = {"predictions": predicted_classes, "loss": loss}
            predictions = output_dict["predictions"]
            # map copy indices back into source-position space for the metric
            pmask = (predictions < self._num_classes).long()
            _predictions = predictions * pmask + (predictions - self._num_classes) * (1 - pmask)
            target_labels = self._get_target_labels(target_tokens["tokens"], generate_targets)
            target_mask = util.get_text_field_mask(target_tokens)
            self._token_based_metric(_predictions, gold_labels=target_labels[:, 1:], mask=target_mask[:, 1:])
        else:
            output_dict = self._eval_decode(state, segments)
            if target_tokens:
                predictions = output_dict["predictions"]
                # same joint-index unmapping as in the training branch
                pmask = (predictions < self._num_classes).long()
                _predictions = predictions * pmask + (predictions - self._num_classes) * (1 - pmask)
                target_labels = self._get_target_labels(target_tokens["tokens"], generate_targets)
                target_mask = util.get_text_field_mask(target_tokens)
                self._token_based_metric(_predictions[:, 1:], gold_labels=target_labels[:, 1:], mask=target_mask[:, 1:])
        return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Finalize predictions.
This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test
time, to finalize predictions. The logic for the decoder part of the encoder-decoder lives
within the ``forward`` method.
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.
"""
predicted_indices = output_dict["predictions"]
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
all_predicted_tokens = []
for indices in predicted_indices:
# Beam search gives us the top k results for each source sentence in the batch
# but we just want the single best.
if len(indices.shape) > 1:
indices = indices[0]
indices = list(indices)
if self._end_index in indices:
indices = indices[:indices.index(self._end_index)]
predicted_tokens = list()
for x in indices:
if x in [self._end_index, self._start_index, self._pad_index]:
continue
if x >= self._num_classes:
index = x - self._num_classes
predicted_tokens.append(Token("@entity_%d" % index))
else:
w = self.vocab.get_token_from_index(x, namespace=self._target_namespace)
predicted_tokens.append(w)
all_predicted_tokens.append(predicted_tokens)
output_dict["predicted_tokens"] = all_predicted_tokens
return output_dict
def _embed_source(self, source_tokens: Dict[str, torch.Tensor], source_entity_length: torch.LongTensor):
"""
:param source_tokens
:param source_entity_length: (batch_size, max_token_num)
:return
(batch_size, max_token_num, embedding_dim)
"""
token_ids = source_tokens['tokens']
embedded = self._source_embedding(token_ids)
batched_embedded = list()
embedding_dim = embedded.size(-1)
batch_size, max_token_num = source_entity_length.size()
for _embedded, _length in zip(embedded, source_entity_length.long()):
merged_embedded_input = list()
idx = 0
for length in _length:
if length > 0:
embedding = torch.mean(_embedded[idx:idx + length, :], dim=0)
merged_embedded_input.append(embedding)
idx += length
else:
break
merged_embedded_input = torch.stack(merged_embedded_input, dim=0)
pad_num = max_token_num - merged_embedded_input.size(0)
if pad_num > 0:
merged_embedded_input = torch.cat((merged_embedded_input,
merged_embedded_input.new_zeros([pad_num, embedding_dim])), dim=0)
batched_embedded.append(merged_embedded_input)
# shape: (batch_size, max_token_num, embedding_dim)
batched_embedded = torch.stack(batched_embedded, dim=0)
assert batched_embedded.size(0) == embedded.size(0) and batched_embedded.size(1) == source_entity_length.size(1)
# TODO: Dropout
return batched_embedded
def _encode(self, source_tokens: Dict[str, torch.Tensor], segments: Dict[str, torch.Tensor],
source_entity_length: torch.Tensor, edge_mask: torch.Tensor, ) -> Dict[str, torch.Tensor]:
"""
:param source_tokens:
:param segments:
:param merge_indicators:
:return:
"""
# shape: (batch_size, encode_length, embedding_dim)
source_embedded_input = self._embed_source(source_tokens, source_entity_length)
# shape: (batch_size, encode_length, embedding_dim)
segments_embedded_input = self._segment_embedder(segments)
encode_length = segments_embedded_input.size(1)
assert source_embedded_input.size(1) == segments_embedded_input.size(1)
# token_mask = (segments['tokens'] == self._token_index).unsqueeze(-1).float()
# valid_token_embedded_input = batched_embedded_input * token_mask
# valid_token_embedded_input = util.add_positional_features(valid_token_embedded_input)
# valid_token_embedded_input = batched_embedded_input * (1 - token_mask) + valid_token_embedded_input * token_mask
if self._source_embedding_dim == self._encoder_d_model:
batched_embedded_input = segments_embedded_input + source_embedded_input
final_embedded_input = util.add_positional_features(batched_embedded_input)
else:
batched_embedded_input = torch.cat([source_embedded_input, segments_embedded_input], dim=-1)
final_embedded_input = util.add_positional_features(batched_embedded_input)
# shape: (encode_length, batch_size, d_model)
final_embedded_input = final_embedded_input.permute(1, 0, 2)
# shape: (batch_size, encode_length)
source_mask = util.get_text_field_mask(segments)
source_key_padding_mask = (1 - source_mask.byte()).bool()
if not self._use_gnn_encoder:
# shape: (encode_length, batch_size, d_model)
encoder_outputs = self._encoder(final_embedded_input, src_key_padding_mask=source_key_padding_mask)
else:
# GNN encoders
encoder_outputs = self._encoder(src=final_embedded_input, edge_mask=edge_mask.permute(0, 2, 3, 1),
padding_mask=source_key_padding_mask)
source_token_mask = (segments['tokens'] == self._token_index).float()
return {
"source_mask": source_mask,
"source_key_padding_mask": source_key_padding_mask,
"source_token_mask": source_token_mask,
"encoder_outputs": encoder_outputs,
"source_embedded": batched_embedded_input,
"source_raw_embedded": source_embedded_input,
}
    def _train_decode(self, state: Dict[str, torch.Tensor],
                      target_tokens: Dict[str, torch.Tensor],
                      generate_targets: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Run the decoder with teacher forcing over the gold target sequence.

        :param state: encoder state produced by ``_encode``
        :param target_tokens: indexed gold target tokens
        :param generate_targets: (batch_size, max_target_length); 0 means the
            step is generated from the target vocabulary, a non-zero value is
            the source position the token is copied from
        :return: ``state`` updated with ``decoder_outputs``,
            ``target_key_padding_mask``, ``target_mask`` and ``generate_mask``
        """
        encoder_outputs = state["encoder_outputs"]
        source_key_padding_mask = state["source_key_padding_mask"]
        # shape: (batch_size, encode_length, d_model)
        source_embedded = state["source_raw_embedded"]
        batch_size, _, _ = source_embedded.size()
        basic_index = torch.arange(batch_size).to(source_embedded.device).long()
        generate_targets = generate_targets.long()
        # for copied steps, feed the embedding of the copied source position
        retrieved_target_embedded_input = source_embedded[basic_index.unsqueeze(1), generate_targets][:, :-1, :]
        target_embedded_input = self._target_embedder(target_tokens['tokens'])[:, :-1, :]
        # shape: (batch_size, max_decode_length)
        # where 1 indicates that the target token is generated rather than copied
        generate_mask = (generate_targets == 0).float()
        # mix vocabulary embeddings (generated steps) with source embeddings (copied steps)
        target_embedded_input = target_embedded_input * generate_mask[:, :-1].unsqueeze(-1) \
                                + retrieved_target_embedded_input * (1 - generate_mask)[:, :-1].unsqueeze(-1)
        target_embedded_input = util.add_positional_features(target_embedded_input)
        # shape: (max_target_sequence_length - 1, batch_size, d_model)
        target_embedded_input = target_embedded_input.permute(1, 0, 2)
        # shape: (batch_size, max_target_sequence_length - 1)
        """
        key_padding_mask should be a ByteTensor where True values are positions
        that should be masked with float('-inf') and False values will be unchanged.
        """
        target_mask = util.get_text_field_mask(target_tokens)[:, 1:]
        target_key_padding_mask = (1 - target_mask.byte()).bool()
        assert target_key_padding_mask.size(1) == target_embedded_input.size(0) and \
               target_embedded_input.size(1) == target_key_padding_mask.size(0)
        max_target_seq_length = target_key_padding_mask.size(1)
        # causal (lower-triangular) additive mask: -inf above the diagonal, 0 elsewhere
        target_additive_mask = (torch.triu(
            target_mask.new_ones(max_target_seq_length, max_target_seq_length)) == 1).transpose(0, 1)
        target_additive_mask = target_additive_mask.float().masked_fill(target_additive_mask == 0, float('-inf'))
        target_additive_mask = target_additive_mask.masked_fill(target_additive_mask == 1, float(0.0))
        assert target_embedded_input.size(1) == encoder_outputs.size(1)
        source_token_mask = state["source_token_mask"]
        # cross-attention only attends to plain word tokens of the source
        memory_key_padding_mask = (1 - source_token_mask).bool()
        # memory_key_padding_mask = source_key_padding_mask
        if not self._decode_use_relative_position:
            # shape: (max_target_sequence_length, batch_size, d_model)
            decoder_outputs = self._decoder(target_embedded_input, memory=encoder_outputs,
                                            tgt_mask=target_additive_mask, tgt_key_padding_mask=None,
                                            memory_key_padding_mask=memory_key_padding_mask)
        else:
            # gnn decoder: relative positions are encoded as edge labels
            edge_mask = get_decode_edge_mask(target_embedded_input,
                                             max_decode_clip_range=self._max_decode_clip_range)
            batch_size = edge_mask.size(0)
            tgt_padding_mask = torch.tril(edge_mask.new_ones([max_target_seq_length, max_target_seq_length]),
                                          diagonal=0)
            tgt_padding_mask = (1 - (tgt_padding_mask.unsqueeze(0).repeat(batch_size, 1, 1))).float()
            decoder_outputs = self._decoder(target_embedded_input, edge_mask=edge_mask.permute(0, 2, 3, 1),
                                            memory=encoder_outputs, tgt_padding_mask=tgt_padding_mask,
                                            memory_key_padding_mask=memory_key_padding_mask)
        # shape: (batch_size, max_target_sequence_length, d_model)
        decoder_outputs = decoder_outputs.permute(1, 0, 2)
        state.update({
            "decoder_outputs": decoder_outputs,
            "target_key_padding_mask": target_key_padding_mask,
            "target_mask": target_mask,
            "generate_mask": generate_mask
        })
        return state
def _eval_decode(self, state: Dict[str, torch.Tensor],
segments: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
encoder_outputs = state["encoder_outputs"]
source_key_padding_mask = state["source_key_padding_mask"]
source_embedded = state["source_raw_embedded"]
source_token_mask = state["source_token_mask"]
memory_key_padding_mask = (1 - source_token_mask).bool()
# memory_key_padding_mask = source_key_padding_mask
batch_size = source_key_padding_mask.size(0)
encode_length = source_key_padding_mask.size(1)
log_probs_after_end = encoder_outputs.new_full((batch_size, self._num_classes + encode_length),
fill_value=float("-inf"))
log_probs_after_end[:, self._end_index] = 0.
start_predictions = state["source_mask"].new_full((batch_size, 1), fill_value=self._start_index)
partial_generate_predictions = start_predictions
partial_copy_predictions = state["source_mask"].new_zeros((batch_size, 1))
basic_index = torch.arange(batch_size).to(source_embedded.device).unsqueeze(1).long()
generate_mask = state["source_mask"].new_ones((batch_size, 1)).float()
# shape: (batch_size)
last_prediction = start_predictions.squeeze(1)
for _ in range(self._max_decoding_step):
# shape: (batch_size, partial_len, d_model)
partial_source_embedded_input = source_embedded[basic_index, partial_copy_predictions]
partial_target_embedded_input = self._target_embedder(partial_generate_predictions)
partial_embedded_input = partial_target_embedded_input * generate_mask.unsqueeze(-1) \
+ partial_source_embedded_input * (1 - generate_mask).unsqueeze(-1)
partial_embedded_input = util.add_positional_features(partial_embedded_input)
partial_len = partial_embedded_input.size(1)
partial_embedded_input = partial_embedded_input.permute(1, 0, 2)
mask = (torch.triu(state["source_mask"].new_ones(partial_len, partial_len)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
if not self._decode_use_relative_position:
# shape: (partial_len, batch_size, d_model)
outputs = self._decoder(partial_embedded_input, memory=encoder_outputs,
tgt_mask=mask, memory_key_padding_mask=memory_key_padding_mask)
else:
# gnn decoder
edge_mask = get_decode_edge_mask(partial_embedded_input,
max_decode_clip_range=self._max_decode_clip_range)
tgt_padding_mask = torch.tril(edge_mask.new_ones([partial_len, partial_len]), diagonal=0)
tgt_padding_mask = (1 - tgt_padding_mask.unsqueeze(0).repeat(batch_size, 1, 1)).float()
# shape: (partial_len, batch_size, d_model)
outputs = self._decoder(partial_embedded_input, edge_mask=edge_mask.permute(0, 2, 3, 1),
memory=encoder_outputs, tgt_padding_mask=tgt_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
outputs = outputs.permute(1, 0, 2)
# shape: (batch_size, d_model)
curr_outputs = outputs[:, -1, :]
# shape: (batch_size, num_classes)
generate_scores = self.get_generate_scores(curr_outputs)
# shape: (batch_size, encode_length)
copy_scores = self.get_copy_scores(state, curr_outputs.unsqueeze(1)).squeeze(1)
# Gate
# shape: (batch_size, 1)
generate_gate = F.sigmoid(self.gate_linear(curr_outputs))
copy_gate = 1 - generate_gate
scores = torch.cat((generate_scores * generate_gate, copy_scores * copy_gate), dim=-1)
# scores = torch.cat((generate_scores, copy_scores), dim=-1)
# shape: (batch_size, encode_length)
entity_mask = 1 - ((segments['tokens'] == self._token_index) |
(segments['tokens'] == self._non_func_symbol_index) |
(segments['tokens'] == self._segment_pad_index)).float()
# shape: (batch_size, num_classes + encode_length)
score_mask = torch.cat((entity_mask.new_ones((batch_size, self._num_classes)), entity_mask), dim=-1)
# shape: (batch_size, num_classes + encode_length)
normalized_scores = util.masked_softmax(scores, mask=score_mask, dim=-1)
last_prediction_expanded = last_prediction.unsqueeze(-1).expand(
batch_size, self._num_classes + encode_length
)
# shape: (batch_size, num_classes + encode_length)
cleaned_logits = torch.where(
last_prediction_expanded == self._end_index,
log_probs_after_end,
normalized_scores
)
# shape: (batch_size)
_, predicted = torch.max(input=cleaned_logits, dim=1, keepdim=False)
copy_mask = (predicted >= self._num_classes).long()
generate_predicted = predicted * (1 - copy_mask)
copy_predicted = (predicted - self._num_classes) * copy_mask
partial_copy_predictions = torch.cat((partial_copy_predictions, copy_predicted.unsqueeze(1)), dim=1)
partial_generate_predictions = torch.cat((partial_generate_predictions, generate_predicted.unsqueeze(1)),
dim=1)
generate_mask = torch.cat((generate_mask, (1 - copy_mask).unsqueeze(1).float()), dim=1)
last_prediction = predicted
if (last_prediction == self._end_index).sum() == batch_size:
break
predictions = partial_generate_predictions * generate_mask.long() + \
(1 - generate_mask).long() * (partial_copy_predictions + self._num_classes)
# shape: (batch_size, partial_len)
output_dict = {
"predictions": predictions
}
return output_dict
def get_copy_scores(self, state: Dict[str, torch.Tensor],
query: torch.Tensor) -> torch.Tensor:
"""
:param state:
:param query: (batch_size, length, d_model)
:return:
"""
# shape: (batch_size, encode_length, d_model)
encoder_outputs = state["encoder_outputs"].permute(1, 0, 2)
return self.copy_word_prj(query).bmm(encoder_outputs.permute(0, 2, 1))
def get_generate_scores(self, query: torch.Tensor) -> torch.Tensor:
"""
:param query: (batch_size, length, d_model)
:return:
"""
return F.linear(query, self._target_embedder.weight)
def _get_loss(self, scores: torch.Tensor,
targets: torch.LongTensor,
generate_mask: torch.LongTensor,
copy_mask: torch.LongTensor,
target_mask: torch.LongTensor) -> torch.Tensor:
"""
:param scores: (batch_size, decode_length, num_class + encode_length)
:param targets: (batch_size, decode_length + 1)
:param generate_mask: (batch_size, decode_length + 1), where 1.0 indicates the target word is selected from target
vocabulary, 0.0 indicates the target is copied from entity candidates
:param copy_mask: (batch_size, decode_length + 1, encode_length), where 1.0 indicates that the target word
is copied from this source word
:param target_mask: (batch_size, decode_length)
:return:
"""
batch_size, decode_length, _ = scores.size()
# (batch_size, decode_length, num_class)
generate_scores = scores[:, :, :self._num_classes]
# (batch_size, decode_length, encode_length)
copy_scores = scores[:, :, self._num_classes:]
# shape: (batch_size * decode_length, 1)
relevant_targets = targets[:, 1:].contiguous().view(-1, 1)
target_generate_scores = torch.gather(
generate_scores.view(-1, self._num_classes), dim=1, index=relevant_targets)
target_scores = target_generate_scores.view(batch_size, decode_length)
target_scores = target_scores * generate_mask[:, 1:]
target_scores += (copy_scores * copy_mask[:, 1:, :].float()).sum(dim=-1)
# shape: (batch_size, decode_length)
relevant_mask = target_mask.contiguous().float()
loss = - target_scores.log() * relevant_mask
loss = loss.sum(dim=-1) / relevant_mask.sum(dim=-1)
loss = loss.sum() / batch_size
return loss
def _get_target_labels(self, target_token_ids: torch.Tensor, generate_targets: torch.Tensor):
"""
:param target_token_ids: [batch_size, decode_length]
:param generate_targets: [batch_size, decode_length]
:return:
[batch_size, decode_length]
"""
generate_mask = (generate_targets == 0.0).long()
labels = target_token_ids * generate_mask + generate_targets.long() * (1 - generate_mask)
return labels
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return self._token_based_metric.get_metric(reset)
def _get_predicted_tokens(self,
source_tokens: Dict[str, torch.LongTensor],
predicted_indices: Union[torch.Tensor, numpy.ndarray],
meta_field: List[Dict]):
"""
Convert predicted indices into tokens.
If `n_best = 1`, the result type will be `List[List[str]]`. Otherwise the result
type will be `List[List[List[str]]]`.
"""
# shape: (batch_size, encode_length)
source_token_ids = source_tokens['tokens']
if not isinstance(predicted_indices, numpy.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
predicted_tokens: List[Union[List[List[str]], List[str]]] = []
predicted_abstract_tokens: List[Union[List[List[str]], List[str]]] = []
for bidx, top_k_predictions in enumerate(predicted_indices):
batch_predicted_tokens: List[List[str]] = []
batch_predicted_abstract_tokens: List[List[str]] = []
pseudo_tokens = meta_field[bidx]['pseudo_tokens']
for indices in top_k_predictions:
indices = list(indices)
if self._end_index in indices:
indices = indices[:indices.index(self._end_index)]
tokens = list()
abstract_tokens = list()
for x in indices:
if x in [self._end_index, self._start_index, self._pad_index]:
continue
if x >= self._num_classes:
index = x - self._num_classes
# source_word = "@entity_%d" % index
source_word = pseudo_tokens[index]
tokens.append(source_word)
abstract_tokens.append("@entity_%d" % index)
else:
w = self.vocab.get_token_from_index(x, namespace=self._target_namespace)
tokens.append(w)
abstract_tokens.append(w)
batch_predicted_tokens.append(tokens)
batch_predicted_abstract_tokens.append(abstract_tokens)
predicted_tokens.append(batch_predicted_tokens)
predicted_abstract_tokens.append(batch_predicted_abstract_tokens)
return predicted_tokens, predicted_abstract_tokens
def _get_target_tokens(self, target_token_ids: Union[torch.Tensor, numpy.ndarray]) -> List[List[str]]:
if not isinstance(target_token_ids, numpy.ndarray):
_target_token_ids = target_token_ids.detach().cpu().numpy()
else:
_target_token_ids = target_token_ids
tokens = list()
for ids in _target_token_ids:
_tokens = [self.vocab.get_token_from_index(x, namespace=self._target_namespace) for x in ids
if x not in [self._end_index, self._start_index, self._pad_index]]
tokens.append(_tokens)
return tokens
| 34,001 | 50.130827 | 122 | py |
Unimer | Unimer-master/neural_models/grammar_based_models.py | # coding=utf8
import numpy
import torch
import torch.nn as nn
from typing import Dict, List
from overrides import overrides
from allennlp.training.metrics import Metric
from allennlp.models.model import Model
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn import util
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from .modules.grammar_copy_decoder import LSTMGrammarCopyDecoder
from .modules.grammar_copy_decoder_2 import LSTMGrammarCopyDecoder as LSTMGrammarCopyDecoder2
class GrammarModel(Model):
    """
    Sequence-to-grammar semantic parser: a Seq2Seq encoder reads the source
    utterance and a grammar-based decoder predicts a production-rule sequence.
    """
    def __init__(self, vocab: Vocabulary, source_embedder: BasicTextFieldEmbedder, encoder, decoder, metric, flags, regularizer=None):
        super().__init__(vocab, regularizer=regularizer)
        self._source_embedder = source_embedder
        self._encoder = encoder
        self._decoder = decoder
        self._metric = metric
        # Dropout applied to encoder inputs/outputs, configured via `flags`.
        self._encoder_input_dropout = nn.Dropout(p=flags.encoder_input_dropout)
        self._encoder_output_dropout = nn.Dropout(
            p=flags.encoder_output_dropout)
    @overrides
    def forward(self,
                source_tokens: Dict[str, torch.LongTensor],
                source_token_copy_indices: torch.Tensor = None,
                target_rules: torch.LongTensor = None,
                target_nonterminals: torch.LongTensor = None,
                target_mask: torch.LongTensor=None,
                target_allow_copy_mask: torch.Tensor = None,
                meta_field: List[Dict] = None,):
        """Encode the utterance, decode a rule sequence, and update metrics."""
        state = self.encode(source_tokens)
        decoder_kwargs = {
            'encodings': state['encoder_outputs'],
            'source_mask': state['source_mask'],
            'target_rules': target_rules,
            'target_nonterminals': target_nonterminals,
            'target_mask': target_mask,
            'meta_field': meta_field,
        }
        # Copy-augmented decoders additionally consume the copy indices/mask.
        if isinstance(self._decoder, (LSTMGrammarCopyDecoder, LSTMGrammarCopyDecoder2)):
            decoder_kwargs['source_token_copy_indices'] = source_token_copy_indices
            decoder_kwargs['target_allow_copy_mask'] = target_allow_copy_mask
        output_dict = self._decoder(**decoder_kwargs)
        # In training the decoder predicts steps 1..T (teacher forcing), so
        # gold labels are shifted; at eval time the full sequence is compared.
        if self.training:
            gold_rules, gold_mask = target_rules[:, 1:], target_mask[:, 1:]
        else:
            gold_rules, gold_mask = target_rules, target_mask
        self._metric(output_dict['predicted_rules'].float(),
                     gold_labels=gold_rules.float(), mask=gold_mask.float())
        return output_dict
    def encode(self, source_tokens: Dict[str, torch.LongTensor]):
        """Embed and encode the source tokens with input/output dropout."""
        # shape: (batch_size, max_input_sequence_length, encoder_input_dim)
        embedded = self._encoder_input_dropout(self._source_embedder(source_tokens))
        # shape: (batch_size, max_input_sequence_length)
        mask = util.get_text_field_mask(source_tokens)
        # shape: (batch_size, max_input_sequence_length, encoder_output_dim)
        encoded = self._encoder_output_dropout(self._encoder(embedded, mask))
        return {
            "source_mask": mask,
            "encoder_outputs": encoded,
        }
    @overrides
    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        """Report the rule-sequence accuracy tracked by the metric."""
        return {"accuracy": self._metric.get_metric(reset)['accuracy']}
| 3,744 | 42.546512 | 134 | py |
Unimer | Unimer-master/neural_models/recombination_seq2seq.py | # coding=utf8
import numpy
import torch
from typing import Dict, Tuple, Union, List, Any
from allennlp.models import SimpleSeq2Seq
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder, Attention, SimilarityFunction
from allennlp.nn import util, InitializerApplicator
from allennlp.training.metrics import Metric
from overrides import overrides
from torch.nn import Linear, LSTMCell
class RecombinationSeq2Seq(SimpleSeq2Seq):
    """
    Neural Architecture taken from "Data Recombination for Neural Semantic Parsing"
    (Jia & Liang). Extends AllenNLP's SimpleSeq2Seq with encoder/embedding
    dropout, an optional output-side attention used for the final projection,
    optional input feeding of the attention context, and an option to shrink
    the decoder hidden size relative to a bidirectional encoder.
    """
    def __init__(self,
                 vocab: Vocabulary,
                 source_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 max_decoding_steps: int,
                 seq_metrics: Metric,
                 input_attention: Attention = None,
                 input_attention_function: SimilarityFunction = None,
                 beam_size: int = None,
                 target_namespace: str = "tokens",
                 target_embedding_dim: int = None,
                 scheduled_sampling_ratio: float = 0.,
                 use_bleu: bool = True,
                 encoder_input_dropout: int = 0.0,
                 encoder_output_dropout: int = 0.0,
                 dropout=0.0,
                 output_attention: Attention = None,
                 feed_output_attention_to_decoder: bool = False,
                 keep_decoder_output_dim_same_as_encoder: bool = True,
                 initializer: InitializerApplicator = InitializerApplicator()) -> None:
        # Basic encoder/decoder setup comes from SimpleSeq2Seq; everything
        # below layers on the recombination-specific options.
        super().__init__(vocab, source_embedder, encoder, max_decoding_steps, input_attention,
                         input_attention_function, beam_size, target_namespace, target_embedding_dim,
                         scheduled_sampling_ratio, use_bleu)
        self._seq_metric = seq_metrics
        self._pad_index = self.vocab.get_token_index(self.vocab._padding_token,
                                                     self._target_namespace)  # pylint: disable=protected-access
        self._output_attention = output_attention
        self._encoder_input_dropout = torch.nn.Dropout(p=encoder_input_dropout)
        self._encoder_output_dropout = torch.nn.Dropout(p=encoder_output_dropout)
        self._output_dropout = torch.nn.Dropout(p=dropout)
        self._embedded_dropout = torch.nn.Dropout(p=dropout)
        self._feed_output_attention_to_decoder = feed_output_attention_to_decoder
        self._keep_decoder_output_dim_same_as_encoder = keep_decoder_output_dim_same_as_encoder
        if not self._keep_decoder_output_dim_same_as_encoder:
            # Halve the decoder width for bidirectional encoders so it matches
            # a single encoder direction.
            self._decoder_output_dim = int(self._encoder_output_dim / 2) if encoder.is_bidirectional() \
                else self._encoder_output_dim
        # Projects the final encoder state down to the decoder hidden size.
        self._transform_decoder_init_state = torch.nn.Sequential(
            torch.nn.Tanh(),
            torch.nn.Linear(self._encoder_output_dim, self._decoder_output_dim)
        )
        if self._feed_output_attention_to_decoder:
            # Input feeding: the previous attention context is concatenated to
            # the decoder input at every step.
            self._decoder_input_dim = target_embedding_dim + self._encoder_output_dim
            self._decoder_cell = LSTMCell(self._decoder_input_dim, self._decoder_output_dim)
        else:
            self._decoder_cell = LSTMCell(self._decoder_input_dim, self._decoder_output_dim)
        num_classes = self.vocab.get_vocab_size(self._target_namespace)
        if self._output_attention:
            # self._fuse_decoder_hidden_attention_layout = torch.nn.Sequential(torch.nn.Tanh(), Linear(
            #     self._decoder_output_dim * 2, self._decoder_output_dim
            # ))
            # With output attention, the projection sees [hidden; context].
            self._output_projection_layer = Linear(self._decoder_output_dim + self._encoder_output_dim, num_classes)
        else:
            self._output_projection_layer = Linear(self._decoder_output_dim, num_classes)
        initializer(self)
    def _prepare_output_attended_input(self,
                                       decoder_hidden_state: torch.LongTensor = None,
                                       encoder_outputs: torch.LongTensor = None,
                                       encoder_outputs_mask: torch.LongTensor = None) -> torch.Tensor:
        """Apply output attention over encoder outputs and decoder state."""
        # Ensure mask is also a FloatTensor. Or else the multiplication within
        # attention will complain.
        # shape: (batch_size, max_input_sequence_length)
        encoder_outputs_mask = encoder_outputs_mask.float()
        # shape: (batch_size, max_input_sequence_length)
        input_weights = self._output_attention(
            decoder_hidden_state, encoder_outputs, encoder_outputs_mask)
        # shape: (batch_size, encoder_output_dim)
        attended_input = util.weighted_sum(encoder_outputs, input_weights)
        return attended_input
    @overrides
    def _prepare_output_projections(self,
                                    last_predictions: torch.Tensor,
                                    state: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        # pylint: disable=line-too-long
        """
        Decode current state and last prediction to produce produce projections
        into the target space, which can then be used to get probabilities of
        each target token for the next step.
        Add dropout before the softmax classifier (Following "Language to Logical Form with Neural Attention")
        Inputs are the same as for `take_step()`.
        """
        # shape: (group_size, max_input_sequence_length, encoder_output_dim)
        encoder_outputs = state["encoder_outputs"]
        # shape: (group_size, max_input_sequence_length)
        source_mask = state["source_mask"]
        # shape: (group_size, decoder_output_dim)
        decoder_hidden = state["decoder_hidden"]
        # shape: (group_size, decoder_output_dim)
        decoder_context = state["decoder_context"]
        # shape: (group_size, target_embedding_dim)
        embedded_input = self._target_embedder(last_predictions)
        embedded_input = self._embedded_dropout(embedded_input)
        if self._attention:
            # Input attention conditions the decoder input on the source.
            # shape: (group_size, encoder_output_dim)
            attended_input = self._prepare_attended_input(decoder_hidden, encoder_outputs, source_mask)
            # shape: (group_size, decoder_output_dim + target_embedding_dim)
            decoder_input = torch.cat((attended_input, embedded_input), -1)
        else:
            # shape: (group_size, target_embedding_dim)
            decoder_input = embedded_input
        if self._feed_output_attention_to_decoder:
            # Input feeding: append the previous step's attention context.
            decoder_input = torch.cat((decoder_input, state["attention_context"]), -1)
        # shape (decoder_hidden): (batch_size, decoder_output_dim)
        # shape (decoder_context): (batch_size, decoder_output_dim)
        decoder_hidden, decoder_context = self._decoder_cell(
            decoder_input,
            (decoder_hidden, decoder_context))
        state["decoder_hidden"] = decoder_hidden
        state["decoder_context"] = decoder_context
        if self._output_attention:
            # Output attention feeds the classifier, and (optionally) the next
            # decoder step via `attention_context`.
            # shape: (group_size, encoder_output_dim)
            output_attended_input = self._prepare_output_attended_input(decoder_hidden, encoder_outputs, source_mask)
            if self._feed_output_attention_to_decoder:
                state["attention_context"] = output_attended_input
            # output_projection_input = self._fuse_decoder_hidden_attention_layout(torch.cat((decoder_hidden,
            #                                                                                 output_attended_input), -1))
            output_projection_input = torch.cat((decoder_hidden, output_attended_input), -1)
        else:
            output_projection_input = decoder_hidden
        # dropped_output_projection_input = self._input_dropout(output_projection_input)
        # Dropout before the softmax classifier.
        dropped_output_projection_input = self._output_dropout(output_projection_input)
        # shape: (group_size, num_classes)
        output_projections = self._output_projection_layer(dropped_output_projection_input)
        return output_projections, state
    def _encode(self, source_tokens: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """Embed and encode the source, applying input/output dropout."""
        # shape: (batch_size, max_input_sequence_length, encoder_input_dim)
        embedded_input = self._source_embedder(source_tokens)
        # shape: (batch_size, max_input_sequence_length)
        source_mask = util.get_text_field_mask(source_tokens)
        # shape: (batch_size, max_input_sequence_length, encoder_output_dim)
        embedded_input = self._encoder_input_dropout(embedded_input)
        encoder_outputs = self._encoder(embedded_input, source_mask)
        encoder_outputs = self._encoder_output_dropout(encoder_outputs)
        return {
            "source_mask": source_mask,
            "encoder_outputs": encoder_outputs,
        }
    @overrides
    def _init_decoder_state(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """Initialise decoder hidden/context (and attention context) from the encoder."""
        batch_size = state["source_mask"].size(0)
        # shape: (batch_size, encoder_output_dim)
        final_encoder_output = util.get_final_encoder_states(
            state["encoder_outputs"],
            state["source_mask"],
            self._encoder.is_bidirectional())
        # Initialize the decoder hidden state with the final output of the encoder.
        # shape: (batch_size, decoder_output_dim)
        state["decoder_hidden"] = self._transform_decoder_init_state(final_encoder_output)
        # shape: (batch_size, decoder_output_dim)
        state["decoder_context"] = state["encoder_outputs"].new_zeros(batch_size, self._decoder_output_dim)
        if self._feed_output_attention_to_decoder:
            # Input feeding starts with an all-zero attention context.
            state["attention_context"] = state["encoder_outputs"].new_zeros(batch_size, self._encoder_output_dim)
        return state
    @overrides
    def forward(self,  # type: ignore
                source_tokens: Dict[str, torch.LongTensor],
                target_tokens: Dict[str, torch.LongTensor] = None) -> Dict[str, torch.Tensor]:
        """Encode, compute the training/validation loss when targets are given,
        and run beam search plus metrics at evaluation time."""
        state = self._encode(source_tokens)
        if target_tokens:
            state = self._init_decoder_state(state)
            # The `_forward_loop` decodes the input sequence and computes the loss during training
            # and validation.
            output_dict = self._forward_loop(state, target_tokens)
        else:
            output_dict = {}
        if not self.training:
            state = self._init_decoder_state(state)
            predictions = self._forward_beam_search(state)
            output_dict.update(predictions)
            if target_tokens:
                # shape: (batch_size, beam_size, max_sequence_length)
                top_k_predictions = output_dict["predictions"]
                # shape: (batch_size, max_predicted_sequence_length)
                best_predictions = top_k_predictions[:, 0, :]
                if self._bleu:
                    self._bleu(best_predictions, target_tokens["tokens"])
                if self._seq_metric:
                    # Gold tokens are shifted by one to drop the start symbol.
                    self._seq_metric(
                        best_predictions.float(),
                        gold_labels=target_tokens["tokens"][:, 1:].float(),
                        mask=util.get_text_field_mask(
                            target_tokens).float()[:, 1:]
                    )
        return output_dict
    @overrides
    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        """Report BLEU and sequence accuracy (evaluation mode only)."""
        all_metrics: Dict[str, float] = {}
        if not self.training:
            if self._bleu:
                all_metrics.update(self._bleu.get_metric(reset=reset))
            if self._seq_metric:
                all_metrics.update(
                    {"accuracy": self._seq_metric.get_metric(reset)['accuracy']})
        return all_metrics
| 11,830 | 47.093496 | 122 | py |
Unimer | Unimer-master/neural_models/modules/grammar_decoder.py | # coding=utf-8
import torch
import copy
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from overrides import overrides
from allennlp.modules import Embedding
from typing import Tuple, List, Dict
from .. import utils as nn_utils
class LSTMGrammarDecoder(nn.Module):
    def __init__(self,
                 grammar,
                 ast_class,
                 lstm_hidden_dim: int,
                 num_lstm_layers: int,
                 rule_pad_index: int,
                 rule_embedding_dim: int,
                 nonterminal_pad_index: int,
                 nonterminal_end_index: int,
                 nonterminal_embedding_dim: int,
                 source_encoding_dim: int,
                 dropout: float,
                 max_target_length: int,
                 ):
        """
        LSTM decoder that emits grammar production rules one step at a time.

        :param grammar: grammar object providing rule/non-terminal id lookups
        :param ast_class: AST class instantiated per example during eval decoding
        :param lstm_hidden_dim: decoder LSTM hidden size
        :param num_lstm_layers: number of stacked LSTM layers
        :param rule_pad_index: id reserved for the padding rule
        :param rule_embedding_dim: production-rule embedding size
        :param nonterminal_pad_index: id reserved for the padding non-terminal
        :param nonterminal_end_index: id marking a finished derivation
        :param nonterminal_embedding_dim: non-terminal embedding size
        :param source_encoding_dim: dimensionality of the encoder outputs
        :param dropout: dropout rate on attention/decoder layers
        :param max_target_length: maximum decoding steps at evaluation time
        """
        super().__init__()
        self._grammar = grammar
        self._root_rule = grammar.get_production_rule_by_id(grammar.root_rule_id)
        self._ast_class = ast_class
        self._lstm_hidden_dim = lstm_hidden_dim
        self._num_lstm_layers = num_lstm_layers
        # Production Rules + PAD Rule
        self._rule_pad_index = rule_pad_index
        self._num_rules = grammar.num_rules + 1
        self._rule_embedding_dim = rule_embedding_dim
        print("Rule Pad Index: ", self._rule_pad_index)
        # Non-Terminals + PAD Node
        self._nonterminal_end_index = nonterminal_end_index
        self._nonterminal_pad_index = nonterminal_pad_index
        self._num_nonterminals = grammar.num_non_terminals + 2
        self._nonterminal_embedding_dim = nonterminal_embedding_dim
        print("Non-Terminal Pad Index: ", self._nonterminal_pad_index)
        print("Non-Terminal End Index: ", self._nonterminal_end_index)
        self._source_encoding_dim = source_encoding_dim
        self._max_target_length = max_target_length
        # Separate key/value projections of the encoder outputs for attention.
        self._transform_encodings_key = nn.Linear(source_encoding_dim, self._lstm_hidden_dim)
        self._transform_encodings_value = nn.Linear(source_encoding_dim, self._lstm_hidden_dim)
        # Input: (Attention Context + Previous Rule Embedding + Current Nonterminal Embedding)
        decode_lstm_input_dim = lstm_hidden_dim + rule_embedding_dim + nonterminal_embedding_dim
        self._decoder_lstm = nn.LSTM(
            input_size=decode_lstm_input_dim,
            hidden_size=lstm_hidden_dim,
            num_layers=num_lstm_layers,
            batch_first=False
        )
        self._attn_dropout = nn.Dropout(p=dropout)
        self._decode_dropout = nn.Dropout(p=dropout)
        self._rule_embedder = Embedding(self._num_rules, rule_embedding_dim)
        self._nonterminal_embedder = Embedding(self._num_nonterminals, nonterminal_embedding_dim)
        self._attention_hidden_layer = nn.Sequential(
            nn.Linear(lstm_hidden_dim + lstm_hidden_dim, lstm_hidden_dim),
            nn.Tanh(),
        )
        # Rule Predictions
        # Rule scores are computed against the (tied) rule embedding matrix.
        self._rule_prediction_layer = nn.Sequential(
            nn.Linear(lstm_hidden_dim, rule_embedding_dim),
            # nn.Tanh()
        )
        self._rule_prediction_bias = nn.Parameter(torch.FloatTensor(self._num_rules).zero_())
@overrides
def forward(self,
encodings: torch.Tensor,
source_mask: torch.Tensor,
target_rules: torch.Tensor,
target_nonterminals: torch.Tensor,
target_mask: torch.Tensor,
meta_field: List[Dict] = None,
):
"""
:param encodings: (batch_size, length, hidden_dim)
:param source_mask: (batch_size, length)
:param column_mask: (batch_size, length)
:param target_rules: (batch_size, target_length)
:param target_nonterminals: (batch_size, target_length)
:param target_mask: (batch_size, target_length)
"""
if self.training:
output_dict = self.train_decode(encodings, source_mask, target_rules, target_nonterminals, target_mask)
else:
output_dict = self.eval_decode(encodings, source_mask)
return output_dict
    def train_decode(self, encodings, source_mask, target_rules, target_nonterminals, target_mask):
        """
        Teacher-forced decoding: at each step the gold previous rule and the
        gold current non-terminal are embedded, fed through the LSTM with the
        previous attention vector, and scored against all production rules.
        Rules that cannot expand the current non-terminal are masked to -inf
        before the softmax.

        :param encodings: (batch_size, length, hidden_dim)
        :param source_mask: (batch_size, length)
        :param target_rules: gold rule ids, (batch_size, target_length)
        :param target_nonterminals: gold non-terminal ids, (batch_size, target_length)
        :param target_mask: (batch_size, target_length)
        :return: dict with "loss" and "predicted_rules"
        """
        source_length = encodings.size(1)
        batch_size, target_length = target_rules.size()
        prev_attention_context = encodings.new_zeros((batch_size, 1, self._lstm_hidden_dim))
        source_encoding_key, source_encoding_value = self._transform_encodings_key(encodings), self._transform_encodings_value(encodings)
        h = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
        c = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
        decoder_hidden_state = (h, c)
        rule_probs = list()
        for ts in range(target_length - 1):
            # Input
            # (batch_size, 1, rule_embedding_size)
            prev_rule_embedded = self._rule_embedder(target_rules[:, ts].unsqueeze(1).long())
            prev_embedded = prev_rule_embedded
            # (batch_size, 1, nonterminal_embedding_size)
            curr_nonterminal_embedded = self._nonterminal_embedder(target_nonterminals[:, ts].unsqueeze(1).long())
            decoder_inputs = torch.cat([prev_embedded, curr_nonterminal_embedded, prev_attention_context], dim=-1)
            # Step
            decoder_outputs, context, attention_vector, decoder_hidden_state = self.take_decode_step(
                source_encoding_key,
                source_encoding_value,
                source_mask,
                decoder_inputs,
                decoder_hidden_state
            )
            # (batch_size, ts + 1, length)
            prev_attention_context = attention_vector
            # Production Rules
            # Scores against the tied rule-embedding matrix.
            # (batch_size, num_rules)
            rule_scores = F.linear(
                self._rule_prediction_layer(attention_vector.squeeze(1)),
                weight=self._rule_embedder.weight,
                bias=self._rule_prediction_bias
            )
            for bidx in range(batch_size):
                # Keep Valid Rule
                # Only rules whose LHS is the current non-terminal stay active;
                # padded / finished positions fall back to rule id 0.
                nonterminal_id = int(target_nonterminals[bidx, ts])
                if nonterminal_id == self._nonterminal_pad_index or nonterminal_id == self._nonterminal_end_index:
                    active_rule_ids = [0]
                else:
                    active_rule_ids = self._grammar.get_production_rule_ids_by_nonterminal_id(nonterminal_id)
                active_rule_mask = nn_utils.get_one_hot_mask(self._num_rules, active_rule_ids).to(rule_scores.device)
                rule_scores[bidx, :].masked_fill_((1 - active_rule_mask).bool(), float('-inf'))
            curr_rule_probs = F.softmax(rule_scores, dim=-1)
            rule_probs.append(curr_rule_probs)
        # (batch_size, target_length - 1, num_rules)
        rule_probs = torch.stack(rule_probs, dim=0).permute(1, 0, 2)
        # Loss
        loss = self.get_loss(rule_probs=rule_probs, target_rules=target_rules[:, 1:].long(), target_mask=target_mask[:, 1:].float())
        # Predicted Labels
        _, predicted_rules = rule_probs.max(dim=-1)
        output_dict = {"loss": loss, "predicted_rules": predicted_rules}
        return output_dict
    def eval_decode(self, encodings, source_mask):
        """
        Greedy grammar-constrained decoding: one AST is grown per batch
        element, and at each step the highest-scoring production rule that can
        expand the AST's current non-terminal is applied. Finished ASTs feed
        PAD embeddings so the batch can keep stepping together; decoding stops
        when all ASTs are complete or `max_target_length` is reached.

        :param encodings: (batch_size, length, hidden_dim)
        :param source_mask: (batch_size, length)
        :return: dict with a zero "loss" and padded "predicted_rules"
        """
        batch_size, source_length, _ = encodings.size()
        prev_attention_context = encodings.new_zeros((batch_size, 1, self._lstm_hidden_dim))
        source_encoding_key, source_encoding_value = self._transform_encodings_key(encodings), self._transform_encodings_value(encodings)
        h = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
        c = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
        decoder_hidden_state = (h, c)
        rule_pad_index_tensor = torch.Tensor([self._rule_pad_index]).long().to(encodings.device)
        nonterminal_pad_index_tensor = torch.Tensor([self._nonterminal_pad_index]).long().to(encodings.device)
        # One partial AST per batch element, all rooted at the grammar's root rule.
        ast_results, is_complete = list(), list()
        for i in range(batch_size):
            ast_results.append(self._ast_class(root_rule=self._root_rule))
            is_complete.append(False)
        for ts in range(self._max_target_length):
            prev_embedded = list()
            curr_nonterminal_embedded = list()
            for bidx, ast in enumerate(ast_results):
                if is_complete[bidx]:
                    # PAD
                    prev_embedded.append(self._rule_embedder(rule_pad_index_tensor))
                    curr_nonterminal_embedded.append(self._nonterminal_embedder(nonterminal_pad_index_tensor))
                else:
                    last_production_rule = ast.get_last_production_rule()
                    # Rule
                    rule_index_tensor = torch.Tensor([last_production_rule.rule_id]).long().to(encodings.device)
                    prev_embedded.append(self._rule_embedder(rule_index_tensor))
                    # Curr Non-Terminal
                    curr_non_terminal_id = self._grammar.get_non_terminal_id(ast.get_curr_non_terminal())
                    nonterminal_index_tensor = torch.Tensor([curr_non_terminal_id]).long().to(encodings.device)
                    curr_nonterminal_embedded.append(
                        self._nonterminal_embedder(nonterminal_index_tensor)
                    )
            # (batch_size, 1, rule_embedding_size)
            prev_embedded = torch.stack(prev_embedded, dim=0)
            # (batch_size, 1, type_embedding_size)
            curr_nonterminal_embedded = torch.stack(curr_nonterminal_embedded, dim=0)
            decoder_inputs = torch.cat([prev_embedded, curr_nonterminal_embedded, prev_attention_context], dim=-1)
            # Step
            decoder_outputs, context, attention_vector, decoder_hidden_state = self.take_decode_step(
                source_encoding_key,
                source_encoding_value,
                source_mask,
                decoder_inputs,
                decoder_hidden_state
            )
            prev_attention_context = attention_vector
            # Production Rules
            # (batch_size, num_rules)
            rule_scores = F.linear(
                self._rule_prediction_layer(attention_vector.squeeze(1)),
                weight=self._rule_embedder.weight,
                bias=self._rule_prediction_bias
            )
            is_finish = True
            for bidx, ast in enumerate(ast_results):
                if not is_complete[bidx]:
                    curr_non_terminal = ast.get_curr_non_terminal()
                    # Rule
                    # Restrict the argmax to rules that expand this non-terminal.
                    active_rule_ids = self._grammar.get_production_rule_ids_by_nonterminal(curr_non_terminal)
                    active_rule_mask = nn_utils.get_one_hot_mask(self._num_rules, active_rule_ids).to(rule_scores.device)
                    brule_scores = rule_scores[bidx, :].masked_fill((1 - active_rule_mask).bool(), float('-inf'))
                    curr_rule_probs = F.softmax(brule_scores, dim=-1)
                    rule_id = int(torch.argmax(curr_rule_probs))
                    production_rule = self._grammar.get_production_rule_by_id(rule_id)
                    ast.add_rule(production_rule)
                    if ast.is_complete:
                        is_complete[bidx] = True
                    else:
                        is_finish = False
            if is_finish:
                break
        # Pad For evaluation
        predicted_rules = list()
        max_length = 0
        for ast in ast_results:
            rules = ast.get_production_rules()
            rule_ids = [rule.rule_id for rule in rules]
            predicted_rules.append(np.array(rule_ids, dtype=int))
            if len(rules) > max_length:
                max_length = len(rules)
        # Pad
        # Right-pad every sequence with the rule PAD id to a common length.
        for i in range(batch_size):
            if len(predicted_rules[i]) < max_length:
                predicted_rules[i] = np.concatenate(
                    [predicted_rules[i], np.ones(max_length - len(predicted_rules[i])) * self._rule_pad_index],
                    axis=0
                )
        predicted_rules = torch.from_numpy(np.array(predicted_rules, dtype=int)).to(encodings.device)
        output_dict = {
            "loss": torch.Tensor([0.0]).to(encodings.device),
            "predicted_rules": predicted_rules.long()
        }
        return output_dict
def take_decode_step(self,
                     source_encoding_key: torch.Tensor,
                     source_encoding_value: torch.Tensor,
                     source_mask: torch.Tensor,
                     decoder_inputs: torch.Tensor,
                     decoder_hidden_state: Tuple[torch.Tensor, torch.Tensor],
                     ):
    """
    Run a single LSTM decode step followed by dot-product attention over the
    source encodings.

    :param source_encoding_key: (batch_size, length, hidden_dim) attention keys
    :param source_encoding_value: (batch_size, length, hidden_dim) attention values
    :param source_mask: (batch_size, length); positions where the mask is 0 are
        excluded from attention
    :param decoder_inputs: (batch_size, 1, lstm_hidden_dim + rule_embedding_dim + nonterminal_embedding_dim)
    :param decoder_hidden_state: (h, c) LSTM state carried between steps
    :return:
        decoder_outputs: (batch_size, 1, lstm_hidden_dim) raw LSTM output
        context: (batch_size, 1, hidden_dim) attention-weighted source values
        att: (batch_size, 1, lstm_hidden_dim) attention vector after the hidden
            projection and dropout
        decoder_hidden_state: (h, c) updated LSTM state
    """
    # nn.LSTM expects (seq_len, batch, input), hence the permutes around it.
    decoder_outputs, (h, c) = self._decoder_lstm(decoder_inputs.permute(1, 0, 2), decoder_hidden_state)
    decoder_hidden_state = (h, c)
    # (batch_size, 1, lstm_hidden_dim)
    decoder_outputs = decoder_outputs.permute(1, 0, 2)
    # Attention: dot product between the decoder output and every source key.
    # (batch_size, 1, length)
    weights = decoder_outputs.bmm(source_encoding_key.permute(0, 2, 1))
    # Mask padded source positions (mask == 0) before normalizing.
    weights = weights.masked_fill((1 - source_mask.unsqueeze(1)).bool(), float('-inf'))
    weights = F.softmax(weights, dim=-1)
    # (batch_size, 1, hidden_dim)
    context = weights.bmm(source_encoding_value)
    # Fuse decoder state and context into the attention vector.
    att = self._attention_hidden_layer(torch.cat([decoder_outputs, context], dim=-1))
    att = self._attn_dropout(att)
    return decoder_outputs, context, att, decoder_hidden_state
def get_loss(self,
             rule_probs: torch.FloatTensor,
             target_rules: torch.LongTensor,
             target_mask: torch.FloatTensor,
             ):
    """
    Compute the length-normalized negative log-likelihood of the gold rule
    sequences.

    :param rule_probs: (batch_size, target_length, num_rules) rule probabilities
        (already normalized over the rule vocabulary).
    :param target_rules: (batch_size, target_length) gold rule ids.
    :param target_mask: (batch_size, target_length) 1 for real decoding steps,
        0 for padding.
    :return: scalar loss tensor averaged over the batch.
    """
    batch_size, target_length = target_rules.size()
    # Pick out, for every step, the probability assigned to the gold rule.
    rule_probs = torch.gather(
        rule_probs.reshape(-1, self._num_rules),
        dim=1,
        index=target_rules.reshape(-1).unsqueeze(-1).long()
    )
    rule_probs = rule_probs.reshape(batch_size, target_length)
    # Epsilon guards against log(0) on zero-probability gold rules.
    rule_log_probs = (rule_probs + 1e-10).log()
    # Zero out padded decoding steps.
    rule_log_probs *= target_mask.float()
    # Normalize by each sequence's true length; avoid dividing by zero for
    # fully-padded rows.
    rule_normalize_factor = target_mask.sum(-1)
    rule_normalize_factor[rule_normalize_factor == 0] = 1
    rule_loss = rule_log_probs.sum(-1) / rule_normalize_factor.float()
    rule_loss = -1 * (rule_loss.sum() / batch_size)
    return rule_loss
Unimer | Unimer-master/neural_models/modules/gnn_multi_head_attention.py | # coding=utf8
import math
import torch
import numpy as np
import torch.nn as nn
from allennlp.nn import util
from torch.nn import Parameter
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
class GNNMatrixMultiHeadAttention(nn.Module):
    """Multi-head attention over a labeled graph in which every edge label owns
    a full d_model x d_model key projection matrix (one slice of ``_w_ks``).

    Scores for each label are computed separately, weighted by the label's
    indicator in ``edge_mask``, and summed before softmax.
    """

    def __init__(self, d_model: int, nhead: int, nlabels: int,
                 dropout: float = 0.1):
        super().__init__()
        assert d_model % nhead == 0
        self._d_model = d_model
        self._nhead = nhead
        self._nlabels = nlabels
        # Per-head query/key dimension.
        self._d_q = int(d_model / nhead)
        self._w_q = nn.Linear(d_model, d_model)
        # Scaled dot-product temperature: sqrt(d_q).
        self._attention_temperature = np.power(self._d_q, 0.5)
        # One key projection matrix per edge label.
        self._w_ks = Parameter(torch.Tensor(nlabels, d_model, d_model))
        self._w_h = nn.Linear(d_model, d_model)
        self._dropout = nn.Dropout(dropout)
        self._attn_dropout = nn.Dropout(dropout)
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init; the per-label matrices are initialized jointly as one
        # 3-D tensor.
        xavier_uniform_(self._w_q.weight)
        xavier_uniform_(self._w_h.weight)
        xavier_uniform_(self._w_ks)

    def forward(self, q: torch.Tensor, k: torch.Tensor, edge_mask: torch.Tensor,
                padding_mask: torch.Tensor):
        """
        q and k must have the same dimension
        :param q: (batch_size, len_q, d_model)
        :param k: (batch_size, len_k, d_model)
        :param edge_mask: (batch_size, len_q, len_k, nlabels) label indicators
        :param padding_mask: (batch_size, len_q, len_k); True (1) positions are
            masked with float('-inf') before softmax
        :return:
            shape: (batch_size, len_q, d_model)
        """
        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        # shape: (nlabels, batch_size, len_q, len_k)
        mask = edge_mask.permute(3, 0, 1, 2)
        query = self._w_q(q).view(sz_b, len_q, self._nhead, self._d_q)
        # shape: (nhead * sz_b, len_q, d_q) -- head-major flattening.
        query = query.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self._d_q)
        # Per-label key projections and per-label raw scores.
        edge_values = list()
        attention_weights = list()
        for i in range(self._nlabels):
            w = self._w_ks[i]
            ek = F.linear(k, w).view(sz_b, len_k, self._nhead, self._d_q)
            # shape: (nhead * sz_b, len_k, d_q)
            ek = ek.permute(2, 0, 1, 3).contiguous().view(-1, len_k, self._d_q)
            edge_values.append(ek)
            aw = query.bmm(ek.permute(0, 2, 1))
            attention_weights.append(aw / self._attention_temperature)
        # (nlabels, sz_b * nhead, len_q, len_k)
        attention_weights = torch.stack(attention_weights, dim=0)
        # Zero out scores for labels absent on each edge, then sum over labels.
        attention_weights = attention_weights * mask.repeat(1, self._nhead, 1, 1)
        attention_weights = attention_weights.sum(dim=0)
        # shape: (nhead * sz_b, len_q, len_k)
        attention_weights = attention_weights.masked_fill(
            padding_mask.repeat(self._nhead, 1, 1).bool(),
            float('-inf'),
        )
        attention_weights = F.softmax(attention_weights, dim=-1)
        attention_weights = self._attn_dropout(attention_weights)
        # Mix values label by label, restricted to that label's edges.
        output = attention_weights.new_zeros((self._nhead * sz_b, len_q, self._d_q))
        for i in range(self._nlabels):
            v, m = edge_values[i], mask[i]
            _m = m.repeat(self._nhead, 1, 1)
            output += (attention_weights * _m).bmm(v)
        # Merge heads back into d_model and apply the output projection.
        output = output.view(self._nhead, sz_b, len_q, self._d_q)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
        output = self._w_h(output)
        return output
class GNNVectorMultiHeadAttention(nn.Module):
    """Graph multi-head attention where each edge label contributes an additive
    bias vector to the keys (cf. relative-position attention), instead of a
    full projection matrix per label as in ``GNNMatrixMultiHeadAttention``.

    Values intentionally reuse the edge-biased key tensor (see forward).
    """

    def __init__(self, d_model: int, nhead: int, nlabels: int,
                 dropout: float = 0.1):
        super().__init__()
        assert d_model % nhead == 0
        self._d_model = d_model
        self._nhead = nhead
        self._nlabels = nlabels
        # Per-head dimension.
        self._d_q = int(d_model / nhead)
        self._w_q = nn.Linear(d_model, d_model)
        # Scaled dot-product temperature: sqrt(d_q).
        self._attention_temperature = np.power(self._d_q, 0.5)
        self._w_k = Parameter(torch.Tensor(d_model, d_model))
        # NOTE: _w_v and _b_vs are unused by forward() (values share the key
        # tensor); they are kept so existing checkpoints still load.
        self._w_v = Parameter(torch.Tensor(d_model, d_model))
        self._b_ks = Parameter(torch.Tensor(self._nlabels, d_model))
        self._b_vs = Parameter(torch.Tensor(self._nlabels, d_model))
        self._w_h = nn.Linear(d_model, d_model)
        self._dropout = nn.Dropout(dropout)
        self._attn_dropout = nn.Dropout(dropout)
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for every projection and label-embedding table.
        xavier_uniform_(self._w_q.weight)
        xavier_uniform_(self._w_h.weight)
        xavier_uniform_(self._w_k)
        xavier_uniform_(self._w_v)
        xavier_uniform_(self._b_ks)
        xavier_uniform_(self._b_vs)

    def forward(self, q: torch.Tensor, k: torch.Tensor, edge_mask: torch.Tensor,
                padding_mask: torch.Tensor):
        """
        q and k must have the same dimension
        :param q: (batch_size, len_q, d_model)
        :param k: (batch_size, len_k, d_model)
        :param edge_mask: (batch_size, len_q, len_k, nlabels) label indicators
        :param padding_mask: (batch_size, len_q, len_k), where True values are
            positions that should be masked with float('-inf') and False values
            will be unchanged.
        :return:
            shape: (batch_size, len_q, d_model)
        """
        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        # (Fixed: removed a stray `self._w_k.to(k.device)` -- `.to()` returns a
        # new tensor and its result was discarded, so the call had no effect;
        # parameters move with the module.)
        query = self._w_q(q).view(sz_b, len_q, self._nhead, self._d_q)
        # shape: (nhead * sz_b, len_q, d_q)
        query = query.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self._d_q)
        # Edge bias: one d_model vector per (q, k) pair, the sum of the
        # embeddings of the labels present on that edge.
        edge_vectors = torch.mm(edge_mask.reshape(-1, self._nlabels), self._b_ks).reshape(sz_b, len_q, len_k,
                                                                                          self._d_model)
        # shape: (sz_b, len_k, d_model)
        key = F.linear(k, self._w_k)
        # shape: (sz_b, len_q, len_k, d_model) -- keys broadcast per query, then
        # shifted by the edge vectors.
        key = key.unsqueeze(1).repeat(1, len_q, 1, 1)
        key = edge_vectors + key
        key = key.view(sz_b, len_q, len_k, self._nhead, self._d_q).permute(3, 0, 1, 2, 4)
        # shape: (nhead * sz_b, len_q, len_k, d_q)
        key = key.contiguous().view(-1, len_q, len_k, self._d_q)
        # Positions connected by at least one labeled edge.
        mask = (edge_mask.sum(-1) > 0).float().repeat(self._nhead, 1, 1)
        # shape: (nhead * sz_b, len_q, len_k)
        attention_weights = torch.mul(query.unsqueeze(2).repeat(1, 1, len_k, 1), key).sum(-1)
        attention_weights = attention_weights / self._attention_temperature
        # Zero (not -inf) the scores of label-free pairs; the output sum below
        # multiplies by `mask` again, so they contribute nothing.
        attention_weights = attention_weights * mask
        attention_weights = attention_weights.masked_fill(
            padding_mask.repeat(self._nhead, 1, 1).bool(),
            float('-inf'),
        )
        attention_weights = F.softmax(attention_weights, dim=-1)
        attention_weights = self._attn_dropout(attention_weights)
        # Values intentionally reuse the (edge-biased) key tensor.
        value = key
        output = ((attention_weights * mask).unsqueeze(-1) * value).sum(2)
        # Merge heads back into d_model and apply the output projection.
        output = output.view(self._nhead, sz_b, len_q, self._d_q)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
        output = self._w_h(output)
        return output
class GNNVectorMultiHeadAttention2(nn.Module):
    """
    Implementation based on "Self-Attention with Relative Position Representations"
    According to Tensor2Tensor
    https://github.com/tensorflow/tensor2tensor/blob/ab918e0d9592394614aa2e10cfc8f23e8cb24dfc/tensor2tensor/layers/common_attention.py

    Each edge label owns a per-head key bias vector (``_b_ks``) and value bias
    vector (``_b_vs``) of size ``d_q``; the attention score is the sum of the
    plain query-key dot product and a query-relation dot product.
    """

    def __init__(self, d_model: int, nhead: int, nlabels: int,
                 dropout: float = 0.1):
        super().__init__()
        assert d_model % nhead == 0
        self._d_model = d_model
        self._nhead = nhead
        self._nlabels = nlabels
        # Per-head dimension.
        self._d_q = int(d_model / nhead)
        # Scaled dot-product temperature: sqrt(d_q).
        self._attention_temperature = np.power(self._d_q, 0.5)
        self._w_q = nn.Linear(d_model, d_model)
        self._w_k = Parameter(torch.Tensor(d_model, d_model))
        self._w_v = Parameter(torch.Tensor(d_model, d_model))
        self._w_h = nn.Linear(d_model, d_model)
        # Relation embeddings, shared across heads (sized d_q, not d_model).
        self._b_ks = Parameter(torch.Tensor(self._nlabels, self._d_q))
        self._b_vs = Parameter(torch.Tensor(self._nlabels, self._d_q))
        self._dropout = nn.Dropout(dropout)
        self._attn_dropout = nn.Dropout(dropout)
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for every projection and relation-embedding table.
        xavier_uniform_(self._w_q.weight)
        xavier_uniform_(self._w_h.weight)
        xavier_uniform_(self._w_k)
        xavier_uniform_(self._w_v)
        xavier_uniform_(self._b_ks)
        xavier_uniform_(self._b_vs)

    def forward(self, q: torch.Tensor, k: torch.Tensor, edge_mask: torch.Tensor,
                padding_mask: torch.Tensor):
        """
        q and k must have the same dimension
        :param q: (batch_size, len_q, d_model)
        :param k: (batch_size, len_k, d_model)
        :param edge_mask: (batch_size, len_q, len_k, nlabels) label indicators
        :param padding_mask: (batch_size, len_q, len_k).  Per the masking
            formula below, a pair is attended iff it carries at least one edge
            label OR its padding_mask entry is 0; i.e. a labeled edge overrides
            padding.  NOTE(review): confirm this interaction is intended -- it
            differs from the plain masked_fill used by the sibling variants.
        :return:
            (output, scores): output is (batch_size, len_q, d_model); scores is
            (nhead * batch_size, len_q, len_k), the temperature-scaled logits
            before masking and softmax.
        """
        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        # NOTE(review): `.to()` returns a new tensor and the result is
        # discarded, so this statement is a no-op; parameters already move
        # with the module.
        self._w_k.to(k.device)
        query = self._w_q(q).view(sz_b, len_q, self._nhead, self._d_q)
        # shape: (nhead * sz_b, len_q, d_q) -- head-major flattening.
        query = query.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self._d_q)
        # shape: (nhead * sz_b, len_q, len_k, d_q)
        expanded_query = query.unsqueeze(2).repeat(1, 1, len_k, 1)
        # Relation Embeddings: sum of key-relation vectors of the labels on
        # each (q, k) edge.
        # shape: (sz_b, len_q, len_k, d_q)
        key_relation_embeded = torch.mm(edge_mask.reshape(-1, self._nlabels), self._b_ks).reshape(sz_b, len_q, len_k,
                                                                                                  self._d_q)
        # shape: (nhead * sz_b, len_q, len_k, d_q) -- identical for every head.
        key_relation_embeded = key_relation_embeded.repeat(self._nhead, 1, 1, 1)
        # shape: (sz_b, len_k, d_model)
        key = F.linear(k, self._w_k)
        # shape: (nhead * sz_b, len_k, d_q)
        key = key.view(sz_b, len_k, self._nhead, self._d_q).permute(2, 0, 1, 3).contiguous().view(-1, len_k, self._d_q)
        # Content-content scores: (nhead * sz_b, len_q, len_k)
        qk_weights = query.bmm(key.permute(0, 2, 1))
        # Content-relation scores: (nhead * sz_b, len_q, len_k)
        qkr_weights = torch.mul(expanded_query, key_relation_embeded).sum(-1)
        attention_weights = qk_weights + qkr_weights
        # Scaled logits are also returned to the caller (e.g. for inspection).
        output_attention_weights = attention_weights / self._attention_temperature
        # Mask pairs that have no edge label AND are flagged by padding_mask
        # (see docstring note).
        # shape: (nhead * sz_b, len_q, len_k)
        relation_mask = ((edge_mask.sum(-1) + (1 - padding_mask)) == 0).repeat(self._nhead, 1, 1)
        attention_weights = output_attention_weights.masked_fill(
            relation_mask.bool(),
            float('-inf'),
        )
        attention_weights = F.softmax(attention_weights, dim=-1)
        # Re-zero masked entries: a fully-masked row softmaxes to NaN, and this
        # replaces those NaNs (and any masked mass) with exact zeros.
        attention_weights = attention_weights.masked_fill(
            relation_mask.bool(),
            0.0
        )
        attention_weights = self._attn_dropout(attention_weights)
        # Value Relation Embeddings
        # shape: (sz_b, len_q, len_k, d_q)
        value_relation_embeded = torch.mm(edge_mask.reshape(-1, self._nlabels), self._b_vs).reshape(sz_b, len_q, len_k,
                                                                                                    self._d_q)
        # shape: (nhead * sz_b, len_q, len_k, d_q)
        value_relation_embeded = value_relation_embeded.repeat(self._nhead, 1, 1, 1)
        # shape: (sz_b, len_k, d_model)
        value = F.linear(k, self._w_v)
        # shape: (nhead * sz_b, len_k, d_q)
        value = value.view(sz_b, len_k, self._nhead, self._d_q).permute(2, 0, 1, 3).contiguous().view(-1, len_k,
                                                                                                      self._d_q)
        # Content part of the weighted sum: (nhead * sz_b, len_q, d_q)
        qv_output = attention_weights.bmm(value)
        # Relation part of the weighted sum: (nhead * sz_b, len_q, d_q)
        qvr_output = torch.mul(attention_weights.unsqueeze(-1), value_relation_embeded).sum(2)
        output = qv_output + qvr_output
        # Merge heads back into d_model and apply the output projection.
        output = output.view(self._nhead, sz_b, len_q, self._d_q)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
        output = self._w_h(output)
        return output, output_attention_weights
class GNNVectorContinuousMultiHeadAttention(nn.Module):
    """Multi-head attention whose edges carry continuous d_model-valued
    features: the edge feature of each (query, key) pair is added to both the
    projected key and the projected value before attending."""

    def __init__(self, d_model: int, nhead: int, dropout: float = 0.1):
        super().__init__()
        assert d_model % nhead == 0
        self._d_model = d_model
        self._nhead = nhead
        # Per-head dimension.
        self._d_q = int(d_model / nhead)
        self._w_q = nn.Linear(d_model, d_model)
        # Scaled dot-product temperature: sqrt(d_q).
        self._attention_temperature = np.power(self._d_q, 0.5)
        self._w_k = Parameter(torch.Tensor(d_model, d_model))
        self._w_v = Parameter(torch.Tensor(d_model, d_model))
        self._w_h = nn.Linear(d_model, d_model)
        self._dropout = nn.Dropout(dropout)
        self._attn_dropout = nn.Dropout(dropout)
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for all four projections.
        xavier_uniform_(self._w_q.weight)
        xavier_uniform_(self._w_h.weight)
        xavier_uniform_(self._w_k)
        xavier_uniform_(self._w_v)

    def forward(self, q: torch.Tensor, k: torch.Tensor, edge_mask: torch.Tensor,
                padding_mask: torch.Tensor):
        """
        q and k must have the same dimension
        :param q: (batch_size, len_q, d_model)
        :param k: (batch_size, len_k, d_model)
        :param edge_mask: (batch_size, len_q, len_k, d_model) continuous edge
            features, added to both keys and values
        :param padding_mask: (batch_size, len_q, len_k); True positions are
            masked with float('-inf') before softmax
        :return:
            shape: (batch_size, len_q, d_model)
        """
        batch_size, len_q, _ = q.size()
        len_k = k.size(1)

        def _edge_biased(projection: torch.Tensor) -> torch.Tensor:
            # Broadcast the projected keys/values over the query axis, add the
            # continuous edge features, and split into per-head slices of shape
            # (nhead * batch_size, len_q, len_k, d_q).
            expanded = projection.unsqueeze(1).repeat(1, len_q, 1, 1) + edge_mask
            per_head = expanded.view(batch_size, len_q, len_k, self._nhead, self._d_q)
            per_head = per_head.permute(3, 0, 1, 2, 4)
            return per_head.contiguous().view(-1, len_q, len_k, self._d_q)

        # Per-head queries: (nhead * batch_size, len_q, d_q), head-major.
        heads_q = self._w_q(q).view(batch_size, len_q, self._nhead, self._d_q)
        heads_q = heads_q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self._d_q)

        keys = _edge_biased(F.linear(k, self._w_k))
        # Scaled dot product between every query and its edge-biased keys:
        # (nhead * batch_size, len_q, len_k).
        scores = (heads_q.unsqueeze(2) * keys).sum(-1) / self._attention_temperature
        scores = scores.masked_fill(
            padding_mask.repeat(self._nhead, 1, 1).bool(),
            float('-inf'),
        )
        probs = self._attn_dropout(F.softmax(scores, dim=-1))

        values = _edge_biased(F.linear(k, self._w_v))
        # Weighted sum over keys: (nhead * batch_size, len_q, d_q).
        mixed = (probs.unsqueeze(-1) * values).sum(2)
        # Merge heads back into d_model and apply the output projection.
        mixed = mixed.view(self._nhead, batch_size, len_q, self._d_q)
        mixed = mixed.permute(1, 2, 0, 3).contiguous().view(batch_size, len_q, -1)
        return self._w_h(mixed)
| 17,043 | 40.77451 | 134 | py |
Unimer | Unimer-master/neural_models/modules/gnn_encoder.py | # coding=utf8
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import MultiheadAttention
from .gnn_multi_head_attention import GNNMatrixMultiHeadAttention, GNNVectorMultiHeadAttention, \
GNNVectorContinuousMultiHeadAttention, GNNVectorMultiHeadAttention2
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def get_decode_edge_mask(tgt, max_decode_clip_range):
    """Build relative-offset edge masks for decoder self-attention.

    Channel ``d`` (0 <= d <= max_decode_clip_range) marks the key positions at
    relative offset ``-d`` from each query.  When the sequence is longer than
    the clip range, the last channel is widened to a lower-triangular mask
    covering every offset of at least ``max_decode_clip_range``; when it is
    shorter, the unused channels stay all-zero.

    :param tgt: (tgt_length, batch_size, d_model); only shape/device are used
    :param max_decode_clip_range: largest explicitly-tracked relative offset
    :return: (batch_size, max_decode_clip_range + 1, tgt_length, tgt_length)
    """
    tgt_length, batch_size, _ = tgt.size()
    num_offsets = min(tgt_length, max_decode_clip_range + 1)
    channels = []
    for offset in range(num_offsets):
        # Ones on the sub-diagonal at distance `offset`.
        band = torch.diag(tgt.new_ones(tgt_length - offset))
        if band.size(0) != tgt_length:
            # Shift into an (L, L) canvas: pad `offset` columns on the right
            # and `offset` rows on top.
            band = F.pad(band, [0, offset, offset, 0], mode='constant', value=0)
        channels.append(band)
    if num_offsets < max_decode_clip_range + 1:
        # Sequence shorter than the clip range: zero-fill the remaining
        # channels so the channel count is always max_decode_clip_range + 1.
        stacked = torch.cat(
            (torch.stack(channels, dim=0),
             tgt.new_zeros([max_decode_clip_range - num_offsets + 1, tgt_length, tgt_length])),
            dim=0)
    else:
        if num_offsets < tgt_length:
            # Catch-all channel for offsets >= max_decode_clip_range.
            channels[-1] = torch.tril(tgt.new_ones([tgt_length, tgt_length]),
                                      diagonal=-1 * max_decode_clip_range)
        stacked = torch.stack(channels, dim=0)
    return stacked.unsqueeze(0).repeat(batch_size, 1, 1, 1)
class GNNTransformerEncoderLayer(nn.Module):
    """One graph-transformer encoder layer: labeled-edge self-attention plus a
    position-wise feed-forward sublayer, each followed by a residual
    connection and LayerNorm (batch-first tensors)."""

    def __init__(self, d_model: int, nhead: int, dim_feedforward: int, nlabels: int,
                 dropout=0.1, is_matrix=True, is_discrete: bool = True):
        super(GNNTransformerEncoderLayer, self).__init__()
        # Select the self-attention variant; see the NOTE in forward() about
        # which variants match the tuple unpacking there.
        if is_matrix:
            self.self_attn = GNNMatrixMultiHeadAttention(d_model, nhead, nlabels, dropout)
        else:
            print("GNN Vector Multi Head Attention")
            if is_discrete:
                # self.self_attn = GNNVectorMultiHeadAttention(d_model, nhead, nlabels, dropout)
                self.self_attn = GNNVectorMultiHeadAttention2(d_model, nhead, nlabels, dropout)
            else:
                self.self_attn = GNNVectorContinuousMultiHeadAttention(d_model, nhead, dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, src, edge_mask, padding_mask):
        """
        Each sub-layer is followed by a residual connection and layer normalization
        :param src: (batch_size, src_length, d_model)
        :param edge_mask: per-edge label mask passed through to the attention
            variant (shape depends on the variant)
        :param padding_mask: (batch_size, src_length, src_length); interpreted
            by the attention variant
        :return: (src, attention_weights)

        NOTE(review): the unpacking below expects self_attn to return an
        (output, weights) pair, which matches GNNVectorMultiHeadAttention2;
        GNNMatrixMultiHeadAttention and the continuous variant return a single
        tensor, so is_matrix=True / is_discrete=False would break here --
        confirm which configuration is actually used.
        """
        src2, attention_weights = self.self_attn.forward(q=src, k=src, edge_mask=edge_mask, padding_mask=padding_mask)
        # Attention sublayer: residual + norm.
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        # Feed-forward sublayer: residual + norm.
        src2 = self.linear2(self.dropout(F.relu(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src, attention_weights
class GNNTransformerEncoderWithMemoryLayer(nn.Module):
    """Encoder layer that attends jointly over an external memory sequence and
    the source sequence, with separate edge-label vocabularies for
    memory-to-source and source-to-source edges."""

    def __init__(self, d_model: int, nhead: int, dim_feedforward: int, memory_nlabels: int, self_nlabels: int,
                 dropout: float = 0.1, is_matrix: bool = True, kdim: int = None, vdim: int = None):
        super(GNNTransformerEncoderWithMemoryLayer, self).__init__()
        # Joint attention over the concatenated label vocabulary.
        if is_matrix:
            self.attn = GNNMatrixMultiHeadAttention(d_model, nhead, memory_nlabels + self_nlabels, dropout)
        else:
            print("GNN Vector Multi Head Attention")
            self.attn = GNNVectorMultiHeadAttention2(d_model, nhead, memory_nlabels + self_nlabels, dropout)
        self._memory_nlabels = memory_nlabels
        self._self_nlabels = self_nlabels
        # Position-wise feed-forward sublayer.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        # norm3/dropout3 are unused by forward(); kept so existing checkpoints
        # still load.
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

    def forward(self, memory, memory_edge_mask, memory_padding_mask, src, src_edge_mask, src_padding_mask):
        r"""Attend from src over [memory; src], then apply the feed-forward
        sublayer; each sublayer uses a residual connection + LayerNorm.

        Shape:
            :param memory: (batch_size, memory_length, d_model)
            :param memory_edge_mask: (batch_size, src_length, memory_length, memory_nlabels)
            :param memory_padding_mask: (batch_size, src_length, memory_length)
            :param src: (batch_size, src_length, d_model)
            :param src_edge_mask: (batch_size, src_length, src_length, self_nlabels)
            :param src_padding_mask: (batch_size, src_length, src_length)
        :return: (batch_size, src_length, d_model)
        """
        # Keys: memory tokens followed by source tokens.
        # shape: (batch_size, memory_length + src_length, d_model)
        key = torch.cat([memory, src], dim=1)
        batch_size, src_length, memory_length, memory_nlabels = memory_edge_mask.size()
        self_nlabels = src_edge_mask.size(-1)
        # Lift both edge tensors into the shared label space: memory labels
        # occupy the first memory_nlabels slots, self labels the rest.
        # shape: (batch_size, src_length, memory_length, memory_nlabels + self_nlabels,)
        extended_memory_edge_mask = torch.cat([memory_edge_mask, memory_edge_mask.new_zeros((batch_size, src_length, memory_length, self_nlabels,))], dim=-1)
        # shape: (batch_size, src_length, src_length, memory_nlabels + self_nlabels)
        extended_src_edge_mask = torch.cat([src_edge_mask.new_zeros((batch_size, src_length, src_length, memory_nlabels)), src_edge_mask], dim=-1)
        # shape: (batch_size, src_length, memory_length + src_length, memory_nlabels + self_nlabels)
        edge_mask = torch.cat([extended_memory_edge_mask, extended_src_edge_mask], dim=2)
        # shape: (batch_size, src_length, memory_length + src_length)
        padding_mask = torch.cat([memory_padding_mask, src_padding_mask], dim=-1)
        src2 = self.attn(q=src, k=key, edge_mask=edge_mask, padding_mask=padding_mask)
        # FIX: GNNVectorMultiHeadAttention2 returns (output, attention_weights)
        # while GNNMatrixMultiHeadAttention returns the output alone; the
        # original code passed the raw result into dropout1, which crashed
        # whenever is_matrix=False.  Normalize to the output tensor here.
        if isinstance(src2, tuple):
            src2 = src2[0]
        # Attention sublayer: residual + norm.
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        # Feed-forward sublayer: residual + norm.
        src2 = self.linear2(self.dropout(F.relu(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src
class GNNTransformerDecoderLayer(nn.Module):
    """Decoder layer with labeled-edge self-attention over the target sequence
    and standard multi-head cross-attention into the encoder memory."""

    def __init__(self, d_model: int, nhead: int, dim_feedforward: int, nlabels: int,
                 dropout: float = 0.1, is_matrix: bool = True, kdim: int = None, vdim: int = None):
        super(GNNTransformerDecoderLayer, self).__init__()
        if is_matrix:
            self.self_attn = GNNMatrixMultiHeadAttention(d_model, nhead, nlabels, dropout)
        else:
            print("GNN Vector Multi Head Attention")
            self.self_attn = GNNVectorMultiHeadAttention2(d_model, nhead, nlabels, dropout)
        # Cross-attention into the encoder memory.
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, kdim=kdim, vdim=vdim)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

    def forward(self, tgt, edge_mask, tgt_padding_mask, memory, memory_mask=None, memory_key_padding_mask=None):
        r"""Pass the inputs (and mask) through the decoder layer.

        Shape:
            :param tgt: (tgt_length, batch_size, d_model)
            :param edge_mask: labeled-edge mask forwarded to the GNN
                self-attention (layout depends on the chosen variant)
            :param tgt_padding_mask: (batch_size, tgt_length, tgt_length)
            :param memory: (src_length, batch_size, d_model)
            :param memory_mask: (src_length, src_length)
            :param memory_key_padding_mask: (batch_size, src_length)
        :return: (tgt_length, batch_size, d_model)

        NOTE(review): the `tgt2, _ = self.self_attn(...)` unpacking matches
        GNNVectorMultiHeadAttention2's (output, weights) return; with
        is_matrix=True the matrix attention returns a single tensor and this
        unpack would misbehave -- confirm which configuration is used.
        """
        # The GNN attention is batch-first; callers use (length, batch, dim).
        # shape: (batch_size, tgt_length, d_model)
        permuted_tgt = tgt.permute(1, 0, 2)
        tgt2, _ = self.self_attn(q=permuted_tgt, k=permuted_tgt, edge_mask=edge_mask, padding_mask=tgt_padding_mask)
        tgt2 = tgt2.permute(1, 0, 2)
        # Self-attention sublayer: residual + norm.
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        # Cross-attention into the encoder memory: residual + norm.
        tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        # Feed-forward sublayer: residual + norm.
        tgt2 = self.linear2(self.dropout(F.relu(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt
class GNNTransformerEncoder(nn.Module):
    """Stack of ``GNNTransformerEncoderLayer`` modules; optionally returns the
    per-layer attention score maps."""

    def __init__(self, encoder_layer: GNNTransformerEncoderLayer,
                 num_layers: int, norm=None, output_weights: bool = False):
        super(GNNTransformerEncoder, self).__init__()
        # Independent deep copies of the prototype layer.
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        # When True, forward() also returns the stacked attention scores.
        self._output_weights = output_weights

    def forward(self, src, edge_mask, padding_mask=None):
        """
        :param src: (src_length, batch_size, encoder_d_model)
        :param edge_mask: (batch_size, src_length, src_length, nlabels) shared
            by all layers, or (batch_size, num_layers, src_length, src_length, nlabels)
            with one mask per layer
        :param padding_mask: (batch_size, src_length) or None.
            NOTE(review): when None an all-ones mask is built; how a 1 is
            interpreted depends on the attention variant inside the layers
            (GNNVectorMultiHeadAttention2 treats it as maskable-unless-edged) --
            confirm against the layer type in use.
        :return:
            (src_length, batch_size, d_model); plus, when output_weights is
            set, attention scores of shape
            (batch_size, num_layers, nhead, src_length, src_length)
        """
        # shape: (batch_size, src_length, d_model)
        length, batch_size, _ = src.size()
        if padding_mask is None:
            padding_mask = edge_mask.new_ones((batch_size, length, length)).float()
        else:
            # Broadcast the per-token mask over the query dimension.
            padding_mask = padding_mask.unsqueeze(1).expand(batch_size, length, length).float()
        # Layers run batch-first; callers use (length, batch, dim).
        # shape: (batch_size, src_length, d_model)
        output = src.permute(1, 0, 2)
        layer_weights = list()
        for i in range(self.num_layers):
            if len(edge_mask.size()) == 4:
                # Shared edge mask for every layer.
                # (nhead * batch_size, src_length, src_length)
                output, attention_weights = self.layers[i](output, edge_mask=edge_mask, padding_mask=padding_mask)
                layer_weights.append(attention_weights)
            else:
                # Per-layer edge mask: slice out layer i.
                # (nhead * batch_size, src_length, src_length)
                output, attention_weights = self.layers[i](output, edge_mask=edge_mask[:, i, :, :, :], padding_mask=padding_mask)
                layer_weights.append(attention_weights)
        if self.norm:
            output = self.norm(output)
        output = output.permute(1, 0, 2)
        if self._output_weights:
            # (num_layers, nhead * batch_size, src_length, src_length)
            layer_weights = torch.stack(layer_weights, dim=0)
            # (nhead, batch_size, num_layers, src_length, src_length)
            layer_weights = layer_weights.permute(1, 0, 2, 3).contiguous().reshape(-1, batch_size, self.num_layers, length, length)
            # (batch_size, num_layers, nhead, src_length, src_length)
            layer_weights = layer_weights.permute(1, 2, 0, 3, 4)
            return output, layer_weights
        return output
class GNNTransformerEncoderWithMemory(nn.Module):
    """Stack of ``GNNTransformerEncoderWithMemoryLayer`` modules, all attending
    over the same external memory sequence."""

    def __init__(self, encoder_layer: GNNTransformerEncoderWithMemoryLayer,
                 num_layers: int, norm=None):
        super(GNNTransformerEncoderWithMemory, self).__init__()
        # Independent deep copies of the prototype layer.
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, memory, memory_edge_mask, memory_padding_mask, src, src_edge_mask, src_padding_mask):
        """Run the layer stack over ``src`` with a shared ``memory``.

        :param memory: (memory_length, batch_size, d_model)
        :param memory_edge_mask: (batch_size, src_length, memory_length, memory_nlabels)
        :param memory_padding_mask: (batch_size, src_length, memory_length)
        :param src: (src_length, batch_size, d_model)
        :param src_edge_mask: (batch_size, src_length, src_length, nlabels)
        :param src_padding_mask: (batch_size, src_length, src_length)
        :return: (src_length, batch_size, d_model)
        """
        # Layers run batch-first; callers use (length, batch, dim).
        hidden = src.permute(1, 0, 2)
        batch_first_memory = memory.permute(1, 0, 2)
        for layer in self.layers:
            hidden = layer(batch_first_memory, memory_edge_mask, memory_padding_mask,
                           hidden, src_edge_mask, src_padding_mask)
        if self.norm:
            hidden = self.norm(hidden)
        return hidden.permute(1, 0, 2)
class GNNTransformerDecoder(nn.Module):
    r"""TransformerDecoder is a stack of N decoder layers

    Args:
        decoder_layer: an instance of the TransformerDecoderLayer() class (required).
        num_layers: the number of sub-decoder-layers in the decoder (required).
        norm: the layer normalization component (optional).
    """

    def __init__(self, decoder_layer, num_layers, norm=None):
        super(GNNTransformerDecoder, self).__init__()
        # Independent deep copies of the prototype layer.
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, tgt, edge_mask, memory, tgt_padding_mask=None,
                memory_mask=None,
                memory_key_padding_mask=None):
        r"""Pass the inputs (and mask) through the decoder layer in turn.

        :param memory_key_padding_mask: (batch_size, src_length)
        :param memory_mask: (src_length, src_length)
        :param memory: (src_length, batch_size, d_model)
        :param tgt: (tgt_length, batch_size, d_model)
        :param edge_mask: labeled-edge mask passed through to the layers.
            NOTE(review): the original doc said
            (batch_size, nlabels, tgt_length, tgt_length), but
            GNNVectorMultiHeadAttention2 expects labels in the LAST dimension --
            confirm the layout produced by the caller.
        :param tgt_padding_mask: (batch_size, tgt_length, tgt_length) or None;
            when None an all-ones mask is substituted, whose interpretation is
            decided by the layers' attention variant.
        :return:
            (src_length, batch_size, d_model)
        """
        output = tgt
        tgt_length, batch_size, _ = tgt.size()
        if tgt_padding_mask is None:
            # Default mask when the caller does not supply one.
            _tgt_padding_mask = tgt.new_ones((batch_size, tgt_length, tgt_length))
        else:
            _tgt_padding_mask = tgt_padding_mask
        for i in range(self.num_layers):
            output = self.layers[i](output, memory=memory, tgt_padding_mask=_tgt_padding_mask,
                                    edge_mask=edge_mask, memory_mask=memory_mask,
                                    memory_key_padding_mask=memory_key_padding_mask)
        if self.norm:
            output = self.norm(output)
        return output
class TransformerDecoderLayer(nn.Module):
    r"""One decoder layer: masked self-attention, encoder-decoder attention
    and a position-wise feed-forward network, each wrapped in a residual
    connection followed by layer normalisation ("post-norm"), following
    "Attention Is All You Need" (Vaswani et al., 2017).

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multiheadattention models (required).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        kdim, vdim: optional key/value feature sizes for the encoder-decoder
            attention (default: ``d_model``).
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, kdim=None, vdim=None):
        super(TransformerDecoderLayer, self).__init__()
        # Sub-layer 1: masked self-attention over the target sequence.
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        # Sub-layer 2: attention over the encoder memory (keys/values may have
        # their own feature sizes via kdim/vdim).
        self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout, kdim=kdim, vdim=vdim)
        # Sub-layer 3: position-wise feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        # One LayerNorm and one residual-dropout per sub-layer.
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

    def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
                tgt_key_padding_mask=None, memory_key_padding_mask=None):
        r"""Pass the inputs (and mask) through the decoder layer.

        Args:
            tgt: the sequence to the decoder layer (required).
            memory: the sequence from the last layer of the encoder (required).
            tgt_mask: the mask for the tgt sequence (optional).
            memory_mask: the mask for the memory sequence (optional).
            tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
            memory_key_padding_mask: the mask for the memory keys per batch (optional).
        """
        # Residual + norm around masked self-attention.
        sa_out = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
                                key_padding_mask=tgt_key_padding_mask)[0]
        tgt = self.norm1(tgt + self.dropout1(sa_out))
        # Residual + norm around encoder-decoder attention.
        ca_out = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,
                                     key_padding_mask=memory_key_padding_mask)[0]
        tgt = self.norm2(tgt + self.dropout2(ca_out))
        # Residual + norm around the feed-forward network.
        ff_out = self.linear2(self.dropout(F.relu(self.linear1(tgt))))
        return self.norm3(tgt + self.dropout3(ff_out))
| 20,183 | 48.349633 | 179 | py |
Unimer | Unimer-master/neural_models/modules/grammar_copy_decoder_2.py | # coding=utf-8
import torch
import copy
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from overrides import overrides
from allennlp.modules import Embedding
from typing import Tuple, List, Dict
from .. import utils as nn_utils
class LSTMGrammarCopyDecoder(nn.Module):
    """Grammar-constrained LSTM decoder with a gated soft-copy mechanism.

    At each step the decoder consumes the previous production rule, the current
    non-terminal to expand and the previous attention context, and predicts a
    distribution over grammar production rules.  A sigmoid copy gate mixes that
    distribution with one derived from attention over the source tokens.
    """

    def __init__(self,
                 grammar,
                 ast_class,
                 lstm_hidden_dim: int,
                 num_lstm_layers: int,
                 rule_pad_index: int,
                 rule_embedding_dim: int,
                 nonterminal_pad_index: int,
                 nonterminal_end_index: int,
                 nonterminal_embedding_dim: int,
                 source_encoding_dim: int,
                 dropout: float,
                 max_target_length: int,
                 ):
        """
        :param grammar: grammar object providing rule/non-terminal lookups.
        :param ast_class: class used to incrementally build ASTs at decode time.
        :param max_target_length: hard cap on decoding steps in eval mode.
        """
        super().__init__()
        self._grammar = grammar
        # Decoding always starts from the grammar's designated root rule.
        self._root_rule = grammar.get_production_rule_by_id(grammar.root_rule_id)
        self._ast_class = ast_class
        self._lstm_hidden_dim = lstm_hidden_dim
        self._num_lstm_layers = num_lstm_layers
        # Production Rules + PAD Rule
        self._rule_pad_index = rule_pad_index
        self._num_rules = grammar.num_rules + 1
        self._rule_embedding_dim = rule_embedding_dim
        print("Rule Pad Index: ", self._rule_pad_index)
        # Non-Terminals + PAD Node
        self._nonterminal_end_index = nonterminal_end_index
        self._nonterminal_pad_index = nonterminal_pad_index
        self._num_nonterminals = grammar.num_non_terminals + 2
        self._nonterminal_embedding_dim = nonterminal_embedding_dim
        print("Non-Terminal Pad Index: ", self._nonterminal_pad_index)
        print("Non-Terminal End Index: ", self._nonterminal_end_index)
        self._source_encoding_dim = source_encoding_dim
        self._max_target_length = max_target_length
        # Separate key/value projections of the encoder outputs for attention.
        self._transform_encodings_key = nn.Linear(source_encoding_dim, self._lstm_hidden_dim)
        self._transform_encodings_value = nn.Linear(source_encoding_dim, self._lstm_hidden_dim)
        # Input: (Attention Context + Previous Rule Embedding + Current Nonterminal Embedding)
        decode_lstm_input_dim = lstm_hidden_dim + rule_embedding_dim + nonterminal_embedding_dim
        self._decoder_lstm = nn.LSTM(
            input_size=decode_lstm_input_dim,
            hidden_size=lstm_hidden_dim,
            num_layers=num_lstm_layers,
            batch_first=False
        )
        self._attn_dropout = nn.Dropout(p=dropout)
        self._decode_dropout = nn.Dropout(p=dropout)
        self._rule_embedder = Embedding(self._num_rules, rule_embedding_dim)
        self._nonterminal_embedder = Embedding(self._num_nonterminals, nonterminal_embedding_dim)
        self._attention_hidden_layer = nn.Sequential(
            nn.Linear(lstm_hidden_dim + lstm_hidden_dim, lstm_hidden_dim),
            nn.Tanh(),
        )
        # Rule Predictions
        # Rule scores are produced by projecting the attention vector into the
        # rule-embedding space and taking dot products with the rule embeddings
        # (weight tying with self._rule_embedder).
        self._rule_prediction_layer = nn.Sequential(
            nn.Linear(lstm_hidden_dim, rule_embedding_dim),
            # nn.Tanh()
        )
        self._rule_prediction_bias = nn.Parameter(
            torch.FloatTensor(self._num_rules).zero_())
        # Scalar gate in [0, 1] mixing generation vs. copy distributions.
        self._copy_gate_layer = nn.Sequential(
            nn.Linear(lstm_hidden_dim, 1),
            nn.Sigmoid()
        )
        self._transform_for_copy_layer = nn.Sequential(
            nn.Linear(lstm_hidden_dim, source_encoding_dim)
        )
@overrides
def forward(self,
            encodings: torch.Tensor,
            source_mask: torch.Tensor,
            source_token_copy_indices: torch.Tensor,
            target_rules: torch.Tensor,
            target_nonterminals: torch.Tensor,
            target_mask: torch.Tensor,
            target_allow_copy_mask: torch.Tensor,
            meta_field: List[Dict] = None,
            ):
    """Dispatch to teacher-forced decoding (training) or greedy decoding (eval).

    :param encodings: (batch_size, length, hidden_dim) encoder outputs
    :param source_mask: (batch_size, length)
    :param source_token_copy_indices: (batch_size, length, max_linked_rule_num)
    :param target_rules: (batch_size, target_length)
    :param target_nonterminals: (batch_size, target_length)
    :param target_mask: (batch_size, target_length)
    :param target_allow_copy_mask: (batch_size, target_length)
    """
    if not self.training:
        return self.eval_decode(encodings, source_mask, source_token_copy_indices)
    return self.train_decode(encodings, source_mask, source_token_copy_indices,
                             target_rules, target_nonterminals, target_mask,
                             target_allow_copy_mask)
def compute_copy_probs(self, encodings, source_mask, attention_vector):
    """Softmax distribution over source tokens for copying, for one example.

    :param encodings: (length, hidden_dim)
    :param source_mask: (length,)
    :param attention_vector: (hidden_dim,)
    :return: (length,) copy probabilities (padded positions get probability 0)
    """
    # Project the attention vector into encoder space, then score each
    # source position by dot product.
    projected = self._transform_for_copy_layer(attention_vector).unsqueeze(0)
    scores = projected.mm(encodings.permute(1, 0)).squeeze(0)
    # Mask out padding before normalising.
    scores = scores.masked_fill((1 - source_mask).bool(), float('-inf'))
    return F.softmax(scores, dim=-1)
def train_decode(self, encodings, source_mask, source_token_copy_indices, target_rules, target_nonterminals, target_mask, target_allow_copy_mask):
    """Teacher-forced decoding over the gold rule sequence.

    :param encodings: (batch_size, source_length, hidden_dim) encoder outputs
    :param source_mask: (batch_size, source_length)
    :param source_token_copy_indices: (batch_size, source_length, max_linked_rule_num)
        rule ids that each source token may be copied as
    :param target_rules: (batch_size, target_length) gold production-rule ids
    :param target_nonterminals: (batch_size, target_length) non-terminal expanded per step
    :param target_mask: (batch_size, target_length)
    :param target_allow_copy_mask: (batch_size, target_length) 1 where copying is permitted
    :return: dict with "loss" and "predicted_rules"
    """
    source_length = encodings.size(1)
    batch_size, target_length = target_rules.size()
    # Attention context is fed back into the LSTM input at the next step.
    prev_attention_context = encodings.new_zeros((batch_size, 1, self._lstm_hidden_dim))
    source_encoding_key, source_encoding_value = self._transform_encodings_key(encodings), self._transform_encodings_value(encodings)
    h = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
    c = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
    decoder_hidden_state = (h, c)
    rule_probs = list()
    for ts in range(target_length - 1):
        # Input
        # (batch_size, 1, rule_embedding_size)
        prev_rule_embedded = self._rule_embedder(target_rules[:, ts].unsqueeze(1).long())
        prev_embedded = prev_rule_embedded
        # (batch_size, 1, nonterminal_embedding_size)
        curr_nonterminal_embedded = self._nonterminal_embedder(target_nonterminals[:, ts].unsqueeze(1).long())
        decoder_inputs = torch.cat([prev_embedded, curr_nonterminal_embedded, prev_attention_context], dim=-1)
        # Step
        decoder_outputs, context, attention_vector, decoder_hidden_state, attention_weights = self.take_decode_step(
            source_encoding_key,
            source_encoding_value,
            source_mask,
            decoder_inputs,
            decoder_hidden_state
        )
        # (batch_size, ts + 1, length)
        prev_attention_context = attention_vector
        # Production Rules
        # Dot product against rule embeddings (weights tied with the embedder).
        # (batch_size, num_rules)
        rule_scores = F.linear(
            self._rule_prediction_layer(attention_vector.squeeze(1)),
            weight=self._rule_embedder.weight,
            bias=self._rule_prediction_bias
        )
        # Copy Gate
        # (batch_size, 1)
        copy_gate = self._copy_gate_layer(attention_vector.squeeze(1))
        curr_rule_probs = list()
        for bidx in range(batch_size):
            # Keep Valid Rule: restrict the softmax to rules that can expand
            # the current non-terminal (grammar constraint).
            nonterminal_id = int(target_nonterminals[bidx, ts])
            if nonterminal_id == self._nonterminal_pad_index or nonterminal_id == self._nonterminal_end_index:
                active_rule_ids = [0]
            else:
                active_rule_ids = self._grammar.get_production_rule_ids_by_nonterminal_id(nonterminal_id)
            # (num_rules)
            active_rule_mask = nn_utils.get_one_hot_mask(self._num_rules, active_rule_ids).to(rule_scores.device)
            probs = F.softmax(rule_scores[bidx, :].masked_fill(
                (1 - active_rule_mask).bool(), float('-inf')), dim=-1)
            if target_allow_copy_mask[bidx, ts] == 1:
                # (source_length, max_linked_rule_num)
                token_copy_indices = source_token_copy_indices[bidx]
                # (source_length, num_rules)
                one_hot_token_copy_indices = (torch.sum(
                    torch.nn.functional.one_hot(token_copy_indices, self._num_rules), dim=1) > 0).float()
                if torch.sum((torch.sum(one_hot_token_copy_indices, dim=0) > 0).float() * active_rule_mask.float()) > 0:
                    # allow soft copy
                    copy_score_gate = copy_gate.squeeze(-1)[bidx]
                    # (source_length)
                    # NOTE(review): these are the raw (pre-softmax, unmasked)
                    # attention scores returned by take_decode_step — confirm
                    # this is intended vs. the normalized compute_copy_probs
                    # variant kept commented out below.
                    copy_scores = attention_weights[bidx, 0, :]
                    # copy_scores = self.compute_copy_probs(
                    #     encodings[bidx, :, :], source_mask[bidx, :], attention_vector.squeeze(1)[bidx, :])
                    # There is a chance that we can copy from source
                    # num_rules
                    copy_scores = torch.sum(
                        copy_scores.unsqueeze(-1) * one_hot_token_copy_indices.float(),
                        dim=0
                    )
                    copy_scores.masked_fill_(
                        (1 - active_rule_mask).bool(), float('-inf'))
                    normalized_copy_scores = F.softmax(copy_scores, dim=-1)
                    # Score: gate-weighted mixture of copy and generation.
                    probs = copy_score_gate * normalized_copy_scores + \
                        (1 - copy_score_gate) * probs
            curr_rule_probs.append(probs)
        curr_rule_probs = torch.stack(curr_rule_probs, dim=0)
        rule_probs.append(curr_rule_probs)
    rule_probs = torch.stack(rule_probs, dim=0).permute(1, 0, 2)
    # Loss: negative masked mean log-likelihood, shifted by one step.
    loss = self.get_loss(rule_probs=rule_probs, target_rules=target_rules[:, 1:].long(), target_mask=target_mask[:, 1:].float())
    # Predicted Labels
    _, predicted_rules = rule_probs.max(dim=-1)
    output_dict = {"loss": loss, "predicted_rules": predicted_rules}
    return output_dict
def eval_decode(self, encodings, source_mask, source_token_copy_indices):
    """Greedy grammar-constrained decoding, building one AST per batch element.

    Decoding stops when every AST is complete or after self._max_target_length
    steps.  Completed examples keep stepping with PAD inputs so the batch stays
    aligned.

    :param encodings: (batch_size, source_length, hidden_dim)
    :param source_mask: (batch_size, source_length)
    :param source_token_copy_indices: (batch_size, source_length, max_linked_rule_num)
    :return: dict with zero "loss", padded "predicted_rules",
        "recorded_copy_gates" and "recorded_copy_weights" (for copy analysis)
    """
    batch_size, source_length, _ = encodings.size()
    prev_attention_context = encodings.new_zeros((batch_size, 1, self._lstm_hidden_dim))
    source_encoding_key, source_encoding_value = self._transform_encodings_key(encodings), self._transform_encodings_value(encodings)
    h = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
    c = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
    decoder_hidden_state = (h, c)
    rule_pad_index_tensor = torch.Tensor([self._rule_pad_index]).long().to(encodings.device)
    nonterminal_pad_index_tensor = torch.Tensor([self._nonterminal_pad_index]).long().to(encodings.device)
    ast_results, is_complete, recorded_copy_gates, recorded_copy_weights = list(), list(), list(), list()
    for i in range(batch_size):
        ast_results.append(self._ast_class(root_rule=self._root_rule))
        is_complete.append(False)
    for ts in range(self._max_target_length):
        prev_embedded = list()
        curr_nonterminal_embedded = list()
        for bidx, ast in enumerate(ast_results):
            if is_complete[bidx]:
                # PAD: finished examples feed dummy inputs to keep the batch aligned.
                prev_embedded.append(self._rule_embedder(rule_pad_index_tensor))
                curr_nonterminal_embedded.append(self._nonterminal_embedder(nonterminal_pad_index_tensor))
            else:
                last_production_rule = ast.get_last_production_rule()
                # Rule
                rule_index_tensor = torch.Tensor([last_production_rule.rule_id]).long().to(encodings.device)
                prev_embedded.append(self._rule_embedder(rule_index_tensor))
                # Curr Non-Terminal
                curr_non_terminal_id = self._grammar.get_non_terminal_id(ast.get_curr_non_terminal())
                nonterminal_index_tensor = torch.Tensor([curr_non_terminal_id]).long().to(encodings.device)
                curr_nonterminal_embedded.append(
                    self._nonterminal_embedder(nonterminal_index_tensor)
                )
        # (batch_size, 1, rule_embedding_size)
        prev_embedded = torch.stack(prev_embedded, dim=0)
        # (batch_size, 1, type_embedding_size)
        curr_nonterminal_embedded = torch.stack(curr_nonterminal_embedded, dim=0)
        decoder_inputs = torch.cat([prev_embedded, curr_nonterminal_embedded, prev_attention_context], dim=-1)
        # Step
        decoder_outputs, context, attention_vector, decoder_hidden_state, attention_weights = self.take_decode_step(
            source_encoding_key,
            source_encoding_value,
            source_mask,
            decoder_inputs,
            decoder_hidden_state
        )
        prev_attention_context = attention_vector
        # Production Rules
        # (batch_size, num_rules)
        rule_scores = F.linear(
            self._rule_prediction_layer(attention_vector.squeeze(1)),
            weight=self._rule_embedder.weight,
            bias=self._rule_prediction_bias
        )
        # Copy Gate
        # (batch_size, 1)
        copy_gate = self._copy_gate_layer(attention_vector.squeeze(1))
        recorded_copy_gates.append(copy_gate.squeeze(1))
        # (batch_size, source_length)
        # NOTE(review): raw (pre-softmax, unmasked) attention scores from
        # take_decode_step are recorded and used as copy scores below.
        batch_copy_scores = attention_weights.squeeze(dim=1)
        recorded_copy_weights.append(batch_copy_scores)
        is_finish = True
        for bidx, ast in enumerate(ast_results):
            if not is_complete[bidx]:
                curr_non_terminal = ast.get_curr_non_terminal()
                # Rule: restrict to expansions of the current non-terminal.
                active_rule_ids = self._grammar.get_production_rule_ids_by_nonterminal(curr_non_terminal)
                active_rule_mask = nn_utils.get_one_hot_mask(self._num_rules, active_rule_ids).to(rule_scores.device)
                brule_scores = rule_scores[bidx, :].masked_fill((1 - active_rule_mask).bool(), float('-inf'))
                curr_rule_probs = F.softmax(brule_scores, dim=-1)
                if curr_non_terminal in self._grammar.copy_terminal_set:
                    # TODO: examine
                    # Copy
                    # (source_length, max_linked_rule_num)
                    token_copy_indices = source_token_copy_indices[bidx]
                    # (source_length, num_rules)
                    one_hot_token_copy_indices = (torch.sum(
                        torch.nn.functional.one_hot(token_copy_indices, self._num_rules), dim=1) > 0).float()
                    if torch.sum((torch.sum(one_hot_token_copy_indices, dim=0) > 0).float() * active_rule_mask.float()) > 0:
                        # allow soft copy
                        copy_score_gate = copy_gate.squeeze(-1)[bidx]
                        # (source_length)
                        copy_scores = attention_weights[bidx, 0, :]
                        # copy_scores = self.compute_copy_probs(
                        #     encodings[bidx, :, :], source_mask[bidx, :], attention_vector.squeeze(1)[bidx, :])
                        # There is a chance that we can copy from source
                        # (num_rules)
                        copy_scores = torch.sum(
                            copy_scores.unsqueeze(-1) *
                            one_hot_token_copy_indices.float(),
                            dim=0
                        )
                        copy_scores.masked_fill_(
                            (1 - active_rule_mask).bool(), float('-inf'))
                        normalized_copy_scores = F.softmax(copy_scores, dim=-1)
                        # Score: gate-weighted mixture of copy and generation.
                        curr_rule_probs = copy_score_gate * normalized_copy_scores + \
                            (1 - copy_score_gate) * curr_rule_probs
                rule_id = int(torch.argmax(curr_rule_probs))
                production_rule = self._grammar.get_production_rule_by_id(rule_id)
                ast.add_rule(production_rule)
                if ast.is_complete:
                    is_complete[bidx] = True
                else:
                    is_finish = False
        if is_finish:
            break
    # Pad For evaluation
    predicted_rules = list()
    max_length = 0
    for ast in ast_results:
        rules = ast.get_production_rules()
        rule_ids = [rule.rule_id for rule in rules]
        predicted_rules.append(np.array(rule_ids, dtype=int))
        if len(rules) > max_length:
            max_length = len(rules)
    # Pad
    for i in range(batch_size):
        if len(predicted_rules[i]) < max_length:
            predicted_rules[i] = np.concatenate(
                [predicted_rules[i], np.ones(max_length - len(predicted_rules[i])) * self._rule_pad_index],
                axis=0
            )
    predicted_rules = torch.from_numpy(np.array(predicted_rules, dtype=int)).to(encodings.device)
    recorded_copy_gates = torch.stack(recorded_copy_gates, dim=0).transpose(dim0=1, dim1=0)
    recorded_copy_weights = torch.stack(recorded_copy_weights, dim=0).permute(1, 0, 2)
    output_dict = {
        "loss": torch.Tensor([0.0]).to(encodings.device),
        "predicted_rules": predicted_rules.long(),
        "recorded_copy_gates": recorded_copy_gates,
        "recorded_copy_weights": recorded_copy_weights
    }
    return output_dict
def take_decode_step(self,
                     source_encoding_key: torch.Tensor,
                     source_encoding_value: torch.Tensor,
                     source_mask: torch.Tensor,
                     decoder_inputs: torch.Tensor,
                     decoder_hidden_state: Tuple[torch.Tensor, torch.Tensor],
                     ):
    """Run one LSTM step plus dot-product attention over the source.

    :param source_encoding_key: (batch_size, length, hidden_dim)
    :param source_encoding_value: (batch_size, length, hidden_dim)
    :param source_mask: (batch_size, length)
    :param decoder_inputs: (batch_size, 1, lstm_hidden_dim + rule_embedding_dim + nonterminal_embedding_dim)
    :param decoder_hidden_state: (h, c)
    :return
        decoder_outputs: (batch_size, 1, lstm_hidden_dim)
        context: (batch_size, 1, hidden_dim)
        att: (batch_size, 1, lstm_hidden_dim) attention-fused hidden state
        decoder_hidden_state: (h, c)
        raw_weights: (batch_size, 1, length) raw attention scores — note these
            are returned BEFORE padding-masking and softmax; only the local
            `weights` used to build `context` are masked and normalized.
    """
    # LSTM expects (seq_len, batch, input) — permute the batch-first input.
    decoder_outputs, (h, c) = self._decoder_lstm(decoder_inputs.permute(1, 0, 2), decoder_hidden_state)
    decoder_hidden_state = (h, c)
    # (batch_size, 1, lstm_hidden_dim)
    decoder_outputs = decoder_outputs.permute(1, 0, 2)
    # Attention
    # (batch_size, 1, length)
    raw_weights = decoder_outputs.bmm(source_encoding_key.permute(0, 2, 1))
    weights = raw_weights.masked_fill((1 - source_mask.unsqueeze(1)).bool(), float('-inf'))
    weights = F.softmax(weights, dim=-1)
    # (batch_size, 1, hidden_dim)
    context = weights.bmm(source_encoding_value)
    att = self._attention_hidden_layer(torch.cat([decoder_outputs, context], dim=-1))
    att = self._attn_dropout(att)
    return decoder_outputs, context, att, decoder_hidden_state, raw_weights
def get_loss(self,
             rule_probs: torch.FloatTensor,
             target_rules: torch.LongTensor,
             target_mask: torch.FloatTensor,
             ):
    """Negative masked mean log-likelihood of the gold rule sequence.

    :param rule_probs: (batch_size, target_length, num_rules) per-step rule probabilities
    :param target_rules: (batch_size, target_length) gold rule ids
    :param target_mask: (batch_size, target_length) 1 for real steps, 0 for padding
    :return: scalar loss, averaged per example then over the batch
    """
    batch_size, target_length = target_rules.size()
    # Pick the probability assigned to each gold rule.
    flat_probs = rule_probs.reshape(-1, self._num_rules)
    flat_targets = target_rules.reshape(-1).unsqueeze(-1).long()
    picked = torch.gather(flat_probs, dim=1, index=flat_targets).reshape(batch_size, target_length)
    # Epsilon guards against log(0); padding steps are zeroed by the mask.
    log_probs = (picked + 1e-10).log() * target_mask.float()
    # Normalize per example by its unpadded length (avoid division by zero).
    lengths = target_mask.sum(-1)
    lengths[lengths == 0] = 1
    per_example = log_probs.sum(-1) / lengths.float()
    return -1 * (per_example.sum() / batch_size)
| 20,773 | 46.429224 | 150 | py |
Unimer | Unimer-master/neural_models/modules/grammar_copy_decoder.py | # coding=utf-8
import torch
import copy
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from overrides import overrides
from allennlp.modules import Embedding
from typing import Tuple, List, Dict
from .. import utils as nn_utils
class LSTMGrammarCopyDecoder(nn.Module):
    """Grammar-constrained LSTM decoder with a gated soft-copy mechanism.

    At each step the decoder consumes the previous production rule, the current
    non-terminal to expand and the previous attention context, and predicts a
    distribution over grammar production rules.  A sigmoid copy gate mixes that
    distribution with a separately-normalized copy distribution over source
    tokens (see compute_copy_probs).
    """

    def __init__(self,
                 grammar,
                 ast_class,
                 lstm_hidden_dim: int,
                 num_lstm_layers: int,
                 rule_pad_index: int,
                 rule_embedding_dim: int,
                 nonterminal_pad_index: int,
                 nonterminal_end_index: int,
                 nonterminal_embedding_dim: int,
                 source_encoding_dim: int,
                 dropout: float,
                 max_target_length: int,
                 ):
        """
        :param grammar: grammar object providing rule/non-terminal lookups.
        :param ast_class: class used to incrementally build ASTs at decode time.
        :param max_target_length: hard cap on decoding steps in eval mode.
        """
        super().__init__()
        self._grammar = grammar
        # Decoding always starts from the grammar's designated root rule.
        self._root_rule = grammar.get_production_rule_by_id(grammar.root_rule_id)
        self._ast_class = ast_class
        self._lstm_hidden_dim = lstm_hidden_dim
        self._num_lstm_layers = num_lstm_layers
        # Production Rules + PAD Rule
        self._rule_pad_index = rule_pad_index
        self._num_rules = grammar.num_rules + 1
        self._rule_embedding_dim = rule_embedding_dim
        print("Rule Pad Index: ", self._rule_pad_index)
        # Non-Terminals + PAD Node
        self._nonterminal_end_index = nonterminal_end_index
        self._nonterminal_pad_index = nonterminal_pad_index
        self._num_nonterminals = grammar.num_non_terminals + 2
        self._nonterminal_embedding_dim = nonterminal_embedding_dim
        print("Non-Terminal Pad Index: ", self._nonterminal_pad_index)
        print("Non-Terminal End Index: ", self._nonterminal_end_index)
        self._source_encoding_dim = source_encoding_dim
        self._max_target_length = max_target_length
        # Separate key/value projections of the encoder outputs for attention.
        self._transform_encodings_key = nn.Linear(source_encoding_dim, self._lstm_hidden_dim)
        self._transform_encodings_value = nn.Linear(source_encoding_dim, self._lstm_hidden_dim)
        # Input: (Attention Context + Previous Rule Embedding + Current Nonterminal Embedding)
        decode_lstm_input_dim = lstm_hidden_dim + rule_embedding_dim + nonterminal_embedding_dim
        self._decoder_lstm = nn.LSTM(
            input_size=decode_lstm_input_dim,
            hidden_size=lstm_hidden_dim,
            num_layers=num_lstm_layers,
            batch_first=False
        )
        self._attn_dropout = nn.Dropout(p=dropout)
        self._decode_dropout = nn.Dropout(p=dropout)
        self._rule_embedder = Embedding(self._num_rules, rule_embedding_dim)
        self._nonterminal_embedder = Embedding(self._num_nonterminals, nonterminal_embedding_dim)
        self._attention_hidden_layer = nn.Sequential(
            nn.Linear(lstm_hidden_dim + lstm_hidden_dim, lstm_hidden_dim),
            nn.Tanh(),
        )
        # Rule Predictions
        # Rule scores are produced by projecting the attention vector into the
        # rule-embedding space and taking dot products with the rule embeddings
        # (weight tying with self._rule_embedder).
        self._rule_prediction_layer = nn.Sequential(
            nn.Linear(lstm_hidden_dim, rule_embedding_dim),
            # nn.Tanh()
        )
        self._rule_prediction_bias = nn.Parameter(
            torch.FloatTensor(self._num_rules).zero_())
        # Scalar gate in [0, 1] mixing generation vs. copy distributions.
        self._copy_gate_layer = nn.Sequential(
            nn.Linear(lstm_hidden_dim, 1),
            nn.Sigmoid()
        )
        self._transform_for_copy_layer = nn.Sequential(
            nn.Linear(lstm_hidden_dim, source_encoding_dim)
        )
@overrides
def forward(self,
            encodings: torch.Tensor,
            source_mask: torch.Tensor,
            source_token_copy_indices: torch.Tensor,
            target_rules: torch.Tensor,
            target_nonterminals: torch.Tensor,
            target_mask: torch.Tensor,
            target_allow_copy_mask: torch.Tensor,
            meta_field: List[Dict] = None,
            ):
    """Dispatch to teacher-forced decoding (training) or greedy decoding (eval).

    :param encodings: (batch_size, length, hidden_dim) encoder outputs
    :param source_mask: (batch_size, length)
    :param source_token_copy_indices: (batch_size, length, max_linked_rule_num)
    :param target_rules: (batch_size, target_length)
    :param target_nonterminals: (batch_size, target_length)
    :param target_mask: (batch_size, target_length)
    :param target_allow_copy_mask: (batch_size, target_length)
    """
    if not self.training:
        return self.eval_decode(encodings, source_mask, source_token_copy_indices)
    return self.train_decode(encodings, source_mask, source_token_copy_indices,
                             target_rules, target_nonterminals, target_mask,
                             target_allow_copy_mask)
def compute_copy_probs(self, encodings, source_mask, attention_vector):
    """Softmax distribution over source tokens for copying, for one example.

    :param encodings: (length, hidden_dim)
    :param source_mask: (length,)
    :param attention_vector: (hidden_dim,)
    :return: (length,) copy probabilities (padded positions get probability 0)
    """
    # Project the attention vector into encoder space, then score each
    # source position by dot product.
    projected = self._transform_for_copy_layer(attention_vector).unsqueeze(0)
    scores = projected.mm(encodings.permute(1, 0)).squeeze(0)
    # Mask out padding before normalising.
    scores = scores.masked_fill((1 - source_mask).bool(), float('-inf'))
    return F.softmax(scores, dim=-1)
def train_decode(self, encodings, source_mask, source_token_copy_indices, target_rules, target_nonterminals, target_mask, target_allow_copy_mask):
    """Teacher-forced decoding over the gold rule sequence.

    :param encodings: (batch_size, source_length, hidden_dim) encoder outputs
    :param source_mask: (batch_size, source_length)
    :param source_token_copy_indices: (batch_size, source_length, max_linked_rule_num)
        rule ids that each source token may be copied as
    :param target_rules: (batch_size, target_length) gold production-rule ids
    :param target_nonterminals: (batch_size, target_length) non-terminal expanded per step
    :param target_mask: (batch_size, target_length)
    :param target_allow_copy_mask: (batch_size, target_length) 1 where copying is permitted
    :return: dict with "loss" and "predicted_rules"
    """
    source_length = encodings.size(1)
    batch_size, target_length = target_rules.size()
    # Attention context is fed back into the LSTM input at the next step.
    prev_attention_context = encodings.new_zeros((batch_size, 1, self._lstm_hidden_dim))
    source_encoding_key, source_encoding_value = self._transform_encodings_key(encodings), self._transform_encodings_value(encodings)
    h = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
    c = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
    decoder_hidden_state = (h, c)
    rule_probs = list()
    for ts in range(target_length - 1):
        # Input
        # (batch_size, 1, rule_embedding_size)
        prev_rule_embedded = self._rule_embedder(target_rules[:, ts].unsqueeze(1).long())
        prev_embedded = prev_rule_embedded
        # (batch_size, 1, nonterminal_embedding_size)
        curr_nonterminal_embedded = self._nonterminal_embedder(target_nonterminals[:, ts].unsqueeze(1).long())
        decoder_inputs = torch.cat([prev_embedded, curr_nonterminal_embedded, prev_attention_context], dim=-1)
        # Step
        decoder_outputs, context, attention_vector, decoder_hidden_state = self.take_decode_step(
            source_encoding_key,
            source_encoding_value,
            source_mask,
            decoder_inputs,
            decoder_hidden_state
        )
        # (batch_size, ts + 1, length)
        prev_attention_context = attention_vector
        # Production Rules
        # Dot product against rule embeddings (weights tied with the embedder).
        # (batch_size, num_rules)
        rule_scores = F.linear(
            self._rule_prediction_layer(attention_vector.squeeze(1)),
            weight=self._rule_embedder.weight,
            bias=self._rule_prediction_bias
        )
        # Copy Gate
        # (batch_size, 1)
        copy_gate = self._copy_gate_layer(attention_vector.squeeze(1))
        curr_rule_probs = list()
        for bidx in range(batch_size):
            # Keep Valid Rule: restrict the softmax to rules that can expand
            # the current non-terminal (grammar constraint).
            nonterminal_id = int(target_nonterminals[bidx, ts])
            if nonterminal_id == self._nonterminal_pad_index or nonterminal_id == self._nonterminal_end_index:
                active_rule_ids = [0]
            else:
                active_rule_ids = self._grammar.get_production_rule_ids_by_nonterminal_id(nonterminal_id)
            # (num_rules)
            active_rule_mask = nn_utils.get_one_hot_mask(self._num_rules, active_rule_ids).to(rule_scores.device)
            probs = F.softmax(rule_scores[bidx, :].masked_fill(
                (1 - active_rule_mask).bool(), float('-inf')), dim=-1)
            if target_allow_copy_mask[bidx, ts] == 1:
                # (source_length, max_linked_rule_num)
                token_copy_indices = source_token_copy_indices[bidx]
                # (source_length, num_rules)
                one_hot_token_copy_indices = (torch.sum(
                    torch.nn.functional.one_hot(token_copy_indices, self._num_rules), dim=1) > 0).float()
                if torch.sum((torch.sum(one_hot_token_copy_indices, dim=0) > 0).float() * active_rule_mask.float()) > 0:
                    # allow soft copy
                    copy_score_gate = copy_gate.squeeze(-1)[bidx]
                    # (source_length) — normalized copy distribution per example
                    copy_scores = self.compute_copy_probs(
                        encodings[bidx, :, :], source_mask[bidx, :], attention_vector.squeeze(1)[bidx, :])
                    # There is a chance that we can copy from source
                    # num_rules
                    copy_scores = torch.sum(
                        copy_scores.unsqueeze(-1) * one_hot_token_copy_indices.float(),
                        dim=0
                    )
                    copy_scores.masked_fill_(
                        (1 - active_rule_mask).bool(), float('-inf'))
                    normalized_copy_scores = F.softmax(copy_scores, dim=-1)
                    # Score: gate-weighted mixture of copy and generation.
                    probs = copy_score_gate * normalized_copy_scores + \
                        (1 - copy_score_gate) * probs
            curr_rule_probs.append(probs)
        curr_rule_probs = torch.stack(curr_rule_probs, dim=0)
        rule_probs.append(curr_rule_probs)
    rule_probs = torch.stack(rule_probs, dim=0).permute(1, 0, 2)
    # Loss: negative masked mean log-likelihood, shifted by one step.
    loss = self.get_loss(rule_probs=rule_probs, target_rules=target_rules[:, 1:].long(), target_mask=target_mask[:, 1:].float())
    # Predicted Labels
    _, predicted_rules = rule_probs.max(dim=-1)
    output_dict = {"loss": loss, "predicted_rules": predicted_rules}
    return output_dict
def eval_decode(self, encodings, source_mask, source_token_copy_indices):
    """Greedy grammar-constrained decoding, building one AST per batch element.

    Decoding stops when every AST is complete or after self._max_target_length
    steps.  Completed examples keep stepping with PAD inputs so the batch stays
    aligned.

    :param encodings: (batch_size, source_length, hidden_dim)
    :param source_mask: (batch_size, source_length)
    :param source_token_copy_indices: (batch_size, source_length, max_linked_rule_num)
    :return: dict with zero "loss", padded "predicted_rules",
        "recorded_copy_gates" and "recorded_copy_weights" (for copy analysis)
    """
    batch_size, source_length, _ = encodings.size()
    prev_attention_context = encodings.new_zeros((batch_size, 1, self._lstm_hidden_dim))
    source_encoding_key, source_encoding_value = self._transform_encodings_key(encodings), self._transform_encodings_value(encodings)
    h = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
    c = encodings.new_zeros([self._num_lstm_layers, batch_size, self._lstm_hidden_dim])
    decoder_hidden_state = (h, c)
    rule_pad_index_tensor = torch.Tensor([self._rule_pad_index]).long().to(encodings.device)
    nonterminal_pad_index_tensor = torch.Tensor([self._nonterminal_pad_index]).long().to(encodings.device)
    ast_results, is_complete, recorded_copy_gates, recorded_copy_weights = list(), list(), list(), list()
    for i in range(batch_size):
        ast_results.append(self._ast_class(root_rule=self._root_rule))
        is_complete.append(False)
    for ts in range(self._max_target_length):
        prev_embedded = list()
        curr_nonterminal_embedded = list()
        for bidx, ast in enumerate(ast_results):
            if is_complete[bidx]:
                # PAD: finished examples feed dummy inputs to keep the batch aligned.
                prev_embedded.append(self._rule_embedder(rule_pad_index_tensor))
                curr_nonterminal_embedded.append(self._nonterminal_embedder(nonterminal_pad_index_tensor))
            else:
                last_production_rule = ast.get_last_production_rule()
                # Rule
                rule_index_tensor = torch.Tensor([last_production_rule.rule_id]).long().to(encodings.device)
                prev_embedded.append(self._rule_embedder(rule_index_tensor))
                # Curr Non-Terminal
                curr_non_terminal_id = self._grammar.get_non_terminal_id(ast.get_curr_non_terminal())
                nonterminal_index_tensor = torch.Tensor([curr_non_terminal_id]).long().to(encodings.device)
                curr_nonterminal_embedded.append(
                    self._nonterminal_embedder(nonterminal_index_tensor)
                )
        # (batch_size, 1, rule_embedding_size)
        prev_embedded = torch.stack(prev_embedded, dim=0)
        # (batch_size, 1, type_embedding_size)
        curr_nonterminal_embedded = torch.stack(curr_nonterminal_embedded, dim=0)
        decoder_inputs = torch.cat([prev_embedded, curr_nonterminal_embedded, prev_attention_context], dim=-1)
        # Step
        decoder_outputs, context, attention_vector, decoder_hidden_state = self.take_decode_step(
            source_encoding_key,
            source_encoding_value,
            source_mask,
            decoder_inputs,
            decoder_hidden_state
        )
        prev_attention_context = attention_vector
        # Production Rules
        # (batch_size, num_rules)
        rule_scores = F.linear(
            self._rule_prediction_layer(attention_vector.squeeze(1)),
            weight=self._rule_embedder.weight,
            bias=self._rule_prediction_bias
        )
        # Copy Gate
        # (batch_size, 1)
        copy_gate = self._copy_gate_layer(attention_vector.squeeze(1))
        recorded_copy_gates.append(copy_gate.squeeze(1))
        # (batch_size, source_length) — filled per example in the copy branch
        # below; rows stay zero when no copy was possible at this step.
        batch_copy_scores = copy_gate.new_zeros((batch_size, source_length))
        recorded_copy_weights.append(batch_copy_scores)
        is_finish = True
        for bidx, ast in enumerate(ast_results):
            if not is_complete[bidx]:
                curr_non_terminal = ast.get_curr_non_terminal()
                # Rule: restrict to expansions of the current non-terminal.
                active_rule_ids = self._grammar.get_production_rule_ids_by_nonterminal(curr_non_terminal)
                active_rule_mask = nn_utils.get_one_hot_mask(self._num_rules, active_rule_ids).to(rule_scores.device)
                brule_scores = rule_scores[bidx, :].masked_fill((1 - active_rule_mask).bool(), float('-inf'))
                curr_rule_probs = F.softmax(brule_scores, dim=-1)
                if curr_non_terminal in self._grammar.copy_terminal_set:
                    # TODO: examine
                    # Copy
                    # (source_length, max_linked_rule_num)
                    token_copy_indices = source_token_copy_indices[bidx]
                    # (source_length, num_rules)
                    one_hot_token_copy_indices = (torch.sum(
                        torch.nn.functional.one_hot(token_copy_indices, self._num_rules), dim=1) > 0).float()
                    if torch.sum((torch.sum(one_hot_token_copy_indices, dim=0) > 0).float() * active_rule_mask.float()) > 0:
                        # allow soft copy
                        copy_score_gate = copy_gate.squeeze(-1)[bidx]
                        # (source_length) — normalized copy distribution
                        copy_scores = self.compute_copy_probs(
                            encodings[bidx, :, :], source_mask[bidx, :], attention_vector.squeeze(1)[bidx, :])
                        # For Copy Analysis
                        batch_copy_scores[bidx, :] = copy_scores
                        # There is a chance that we can copy from source
                        # (num_rules)
                        copy_scores = torch.sum(
                            copy_scores.unsqueeze(-1) *
                            one_hot_token_copy_indices.float(),
                            dim=0
                        )
                        copy_scores.masked_fill_(
                            (1 - active_rule_mask).bool(), float('-inf'))
                        normalized_copy_scores = F.softmax(copy_scores, dim=-1)
                        # Score: gate-weighted mixture of copy and generation.
                        curr_rule_probs = copy_score_gate * normalized_copy_scores + \
                            (1 - copy_score_gate) * curr_rule_probs
                rule_id = int(torch.argmax(curr_rule_probs))
                production_rule = self._grammar.get_production_rule_by_id(rule_id)
                ast.add_rule(production_rule)
                if ast.is_complete:
                    is_complete[bidx] = True
                else:
                    is_finish = False
        if is_finish:
            break
    # Pad For evaluation
    predicted_rules = list()
    max_length = 0
    for ast in ast_results:
        rules = ast.get_production_rules()
        rule_ids = [rule.rule_id for rule in rules]
        predicted_rules.append(np.array(rule_ids, dtype=int))
        if len(rules) > max_length:
            max_length = len(rules)
    # Pad
    for i in range(batch_size):
        if len(predicted_rules[i]) < max_length:
            predicted_rules[i] = np.concatenate(
                [predicted_rules[i], np.ones(max_length - len(predicted_rules[i])) * self._rule_pad_index],
                axis=0
            )
    predicted_rules = torch.from_numpy(np.array(predicted_rules, dtype=int)).to(encodings.device)
    recorded_copy_gates = torch.stack(recorded_copy_gates, dim=0).transpose(dim0=1, dim1=0)
    recorded_copy_weights = torch.stack(recorded_copy_weights, dim=0).permute(1, 0, 2)
    output_dict = {
        "loss": torch.Tensor([0.0]).to(encodings.device),
        "predicted_rules": predicted_rules.long(),
        "recorded_copy_gates": recorded_copy_gates,
        "recorded_copy_weights": recorded_copy_weights
    }
    return output_dict
def take_decode_step(self,
                     source_encoding_key: torch.Tensor,
                     source_encoding_value: torch.Tensor,
                     source_mask: torch.Tensor,
                     decoder_inputs: torch.Tensor,
                     decoder_hidden_state: Tuple[torch.Tensor, torch.Tensor],
                     ):
    """Run one LSTM step plus masked dot-product attention over the source.

    :param source_encoding_key: (batch_size, length, hidden_dim)
    :param source_encoding_value: (batch_size, length, hidden_dim)
    :param source_mask: (batch_size, length)
    :param decoder_inputs: (batch_size, 1, lstm_hidden_dim + rule_embedding_dim + nonterminal_embedding_dim)
    :param decoder_hidden_state: (h, c)
    :return
        decoder_outputs: (batch_size, 1, lstm_hidden_dim)
        context: (batch_size, 1, hidden_dim)
        att: (batch_size, 1, lstm_hidden_dim) attention-fused hidden state
        decoder_hidden_state: (h, c)
    """
    # LSTM expects (seq_len, batch, input) — permute the batch-first input.
    decoder_outputs, (h, c) = self._decoder_lstm(decoder_inputs.permute(1, 0, 2), decoder_hidden_state)
    decoder_hidden_state = (h, c)
    # (batch_size, 1, lstm_hidden_dim)
    decoder_outputs = decoder_outputs.permute(1, 0, 2)
    # Attention: mask padded positions before the softmax.
    # (batch_size, 1, length)
    weights = decoder_outputs.bmm(source_encoding_key.permute(0, 2, 1))
    weights = weights.masked_fill((1 - source_mask.unsqueeze(1)).bool(), float('-inf'))
    weights = F.softmax(weights, dim=-1)
    # (batch_size, 1, hidden_dim)
    context = weights.bmm(source_encoding_value)
    att = self._attention_hidden_layer(torch.cat([decoder_outputs, context], dim=-1))
    att = self._attn_dropout(att)
    return decoder_outputs, context, att, decoder_hidden_state
def get_loss(self,
rule_probs: torch.FloatTensor,
target_rules: torch.LongTensor,
target_mask: torch.FloatTensor,
):
"""
:param rule_probs (batch_size, target_length, num_rules)
:param target_mask (batch_size, target_length)
"""
batch_size, target_length = target_rules.size()
rule_probs = torch.gather(
rule_probs.reshape(-1, self._num_rules),
dim=1,
index=target_rules.reshape(-1).unsqueeze(-1).long()
)
rule_probs = rule_probs.reshape(batch_size, target_length)
rule_log_probs = (rule_probs + 1e-10).log()
rule_log_probs *= target_mask.float()
rule_normalize_factor = target_mask.sum(-1)
rule_normalize_factor[rule_normalize_factor == 0] = 1
rule_loss = rule_log_probs.sum(-1) / rule_normalize_factor.float()
rule_loss = -1 * (rule_loss.sum() / batch_size)
return rule_loss
| 20,697 | 46.363844 | 150 | py |
Unimer | Unimer-master/metrics/sequency_accuracy.py | # coding=utf8
import torch
from overrides import overrides
from allennlp.training.metrics import Metric
from typing import Union, Tuple, Dict, List, Optional
class SequenceAccuracy(Metric):
    """Exact-match accuracy over (possibly padded) prediction/gold sequences."""
    def __init__(self) -> None:
        # Running counts of exactly-matched sequences and of all sequences seen.
        self._correct_counts = 0.
        self._total_counts = 0.
        self._pad_index = -1
    def __call__(self, predictions: torch.Tensor, gold_labels: torch.Tensor, mask: torch.Tensor) -> None:
        batch_size, p_len = predictions.size()
        batch_size, g_len = gold_labels.size()
        # Bring predictions to the gold length: pad with the pad index, or truncate.
        if p_len < g_len:
            padding = predictions.new_ones(batch_size, g_len - p_len) * self._pad_index
            _predictions = torch.cat((predictions, padding), dim=-1)
        else:
            _predictions = predictions[:, :g_len]
        assert _predictions.size(1) == g_len
        # Zero out masked positions on both sides, then require every one of
        # the g_len positions to agree for an exact match.
        eqs = (gold_labels * mask).eq(_predictions * mask).int()
        result = (eqs.sum(-1) == g_len).int()
        self._correct_counts += result.sum()
        self._total_counts += batch_size
    @overrides
    def get_metric(self, reset: bool) -> Union[float, Tuple[float, ...], Dict[str, float], Dict[str, List[float]]]:
        """
        Returns
        -------
        The accumulated exact-match accuracy.
        """
        accuracy = float(self._correct_counts) / float(self._total_counts) if self._total_counts > 0 else 0
        if reset:
            self.reset()
        return {'accuracy': accuracy}
    @overrides
    def reset(self) -> None:
        self._correct_counts = 0.
        self._total_counts = 0.
| 1,684 | 29.636364 | 118 | py |
seld-dcase2023 | seld-dcase2023-main/visualize_seldnet_output.py | #
# A wrapper script that trains the SELDnet. The training stops when the early stopping metric - SELD error stops improving.
#
import numpy as np
import os
import sys
import cls_data_generator
import seldnet_model
import parameters
import torch
from IPython import embed
import matplotlib
matplotlib.use('Agg')
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plot
plot.rcParams.update({'font.size': 22})
def main(argv):
    """Visualize SELDnet DOA output for a few test recordings: plot the input
    mel spectrogram / FOA intensity vectors next to the reference and predicted
    per-axis DOA trajectories, and save (or show) the figures.

    :param argv: command-line arguments; argv[1] (optional) selects the
        parameter set from parameters.py (defaults to '1').
    """
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    # NOTE(review): kwargs is built here but never used below.
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    # use parameter set defined by user
    task_id = '1' if len(argv) < 2 else argv[1]
    params = parameters.get_params(task_id)
    print('\nLoading the best model and predicting results on the testing split')
    print('\tLoading testing dataset:')
    data_gen_test = cls_data_generator.DataGenerator(
        params=params, split=1, shuffle=False, is_eval=True if params['mode']=='eval' else False
    )
    data_in, data_out = data_gen_test.get_data_sizes()
    # True -> save figures under dump_dir/<checkpoint-name>/; False -> show interactively.
    dump_figures = True
    # CHOOSE THE MODEL WHOSE OUTPUT YOU WANT TO VISUALIZE
    checkpoint_name = "models/1_1_foa_dev_split6_model.h5"
    model = seldnet_model.SeldModel(data_in, data_out, params)
    model.eval()
    # Weights are loaded onto CPU first, then the module is moved to `device`.
    model.load_state_dict(torch.load(checkpoint_name, map_location=torch.device('cpu')))
    model = model.to(device)
    if dump_figures:
        # One dump folder per checkpoint, named after the checkpoint file.
        dump_folder = os.path.join('dump_dir', os.path.basename(checkpoint_name).split('.')[0])
        os.makedirs(dump_folder, exist_ok=True)
    with torch.no_grad():
        file_cnt = 0
        for data, target in data_gen_test.generate():
            data, target = torch.tensor(data).to(device).float(), torch.tensor(target).to(device).float()
            output = model(data)
            # (batch, sequence, max_nb_doas*3) to (batch, sequence, 3, max_nb_doas)
            max_nb_doas = output.shape[2]//3
            output = output.view(output.shape[0], output.shape[1], 3, max_nb_doas).transpose(-1, -2)
            target = target.view(target.shape[0], target.shape[1], 3, max_nb_doas).transpose(-1, -2)
            # get pair-wise distance matrix between predicted and reference.
            output, target = output.view(-1, output.shape[-2], output.shape[-1]), target.view(-1, target.shape[-2], target.shape[-1])
            output = output.cpu().detach().numpy()
            target = target.cpu().detach().numpy()
            use_activity_detector = False
            if use_activity_detector:
                # NOTE(review): `activity_out` is not defined anywhere in this
                # function; enabling this flag as-is raises a NameError.
                activity = (torch.sigmoid(activity_out).cpu().detach().numpy() >0.5)
            # First batch item: channel 0 appears to be the mel spectrogram and
            # the last channel the FOA intensity vector -- TODO confirm against
            # the feature extraction in cls_feature_class.
            mel_spec = data[0][0].cpu()
            foa_iv = data[0][-1].cpu()
            # Zero out-of-range reference values before plotting.
            target[target > 1] =0
            plot.figure(figsize=(20,10))
            # Top row: input features; middle row: reference DOA (x=red, y=green,
            # z=blue) for the first two tracks; bottom row: predicted DOA.
            plot.subplot(321), plot.imshow(torch.transpose(mel_spec, -1, -2))
            plot.subplot(322), plot.imshow(torch.transpose(foa_iv, -1, -2))
            plot.subplot(323), plot.plot(target[:params['label_sequence_length'], 0, 0], 'r', lw=2)
            plot.subplot(323), plot.plot(target[:params['label_sequence_length'], 0, 1], 'g', lw=2)
            plot.subplot(323), plot.plot(target[:params['label_sequence_length'], 0, 2], 'b', lw=2)
            plot.grid()
            plot.ylim([-1.1, 1.1])
            plot.subplot(324), plot.plot(target[:params['label_sequence_length'], 1, 0], 'r', lw=2)
            plot.subplot(324), plot.plot(target[:params['label_sequence_length'], 1, 1], 'g', lw=2)
            plot.subplot(324), plot.plot(target[:params['label_sequence_length'], 1, 2], 'b', lw=2)
            plot.grid()
            plot.ylim([-1.1, 1.1])
            if use_activity_detector:
                # Gate predicted DOA vectors by the detected activity per track.
                output[:, 0, 0:3] = activity[:, 0][:, np.newaxis]*output[:, 0, 0:3]
                output[:, 1, 0:3] = activity[:, 1][:, np.newaxis]*output[:, 1, 0:3]
            plot.subplot(325), plot.plot(output[:params['label_sequence_length'], 0, 0], 'r', lw=2)
            plot.subplot(325), plot.plot(output[:params['label_sequence_length'], 0, 1], 'g', lw=2)
            plot.subplot(325), plot.plot(output[:params['label_sequence_length'], 0, 2], 'b', lw=2)
            plot.grid()
            plot.ylim([-1.1, 1.1])
            plot.subplot(326), plot.plot(output[:params['label_sequence_length'], 1, 0], 'r', lw=2)
            plot.subplot(326), plot.plot(output[:params['label_sequence_length'], 1, 1], 'g', lw=2)
            plot.subplot(326), plot.plot(output[:params['label_sequence_length'], 1, 2], 'b', lw=2)
            plot.grid()
            plot.ylim([-1.1, 1.1])
            if dump_figures:
                fig_name = '{}'.format(os.path.join(dump_folder, '{}.png'.format(file_cnt)))
                print('saving figure : {}'.format(fig_name))
                plot.savefig(fig_name, dpi=100)
                plot.close()
                file_cnt += 1
            else:
                plot.show()
            # Only visualize the first few recordings.
            if file_cnt>2:
                break
if __name__ == "__main__":
    # Run the visualization; argument/IO problems exit with the error message.
    try:
        sys.exit(main(sys.argv))
    except (ValueError, IOError) as e:
        sys.exit(e)
| 5,100 | 41.157025 | 133 | py |
seld-dcase2023 | seld-dcase2023-main/seldnet_model.py | # The SELDnet architecture
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from IPython import embed
class MSELoss_ADPIT(object):
    """
    Auxiliary Duplicating Permutation Invariant Training (ADPIT) MSE loss.

    The target provides, per class and frame, up to three simultaneous
    same-class sources split across six dummy tracks: A0 (no same-class
    overlap), B0/B1 (two sources) and C0/C1/C2 (three sources).  The loss
    evaluates all 13 (=1+6+6) valid track assignments and keeps, per class
    and frame, the cheapest one.
    """
    def __init__(self):
        super().__init__()
        self._each_loss = nn.MSELoss(reduction='none')
    def _each_calc(self, output, target):
        # Mean over the track*axis dimension -> class-wise, frame-level loss.
        return self._each_loss(output, target).mean(dim=(2))
    def __call__(self, output, target):
        """
        Args:
            output: [batch_size, frames, num_track*num_axis*num_class=3*3*12]
            target: [batch_size, frames, num_track_dummy=6, num_axis=4, num_class=12]
        Return:
            loss: scalar
        """
        # Activity (channel 0) gates the XYZ vector (channels 1:) per dummy track.
        A0, B0, B1, C0, C1, C2 = (
            target[:, :, i, 0:1, :] * target[:, :, i, 1:, :] for i in range(6))
        # All valid (track0, track1, track2) assignments for each scenario.
        perms_A = [(A0, A0, A0)]
        perms_B = [(B0, B0, B1), (B0, B1, B0), (B0, B1, B1),
                   (B1, B0, B0), (B1, B0, B1), (B1, B1, B0)]
        perms_C = [(C0, C1, C2), (C0, C2, C1), (C1, C0, C2),
                   (C1, C2, C0), (C2, C0, C1), (C2, C1, C0)]
        # Each candidate is padded with one representative of the other two
        # scenario families so that inactive scenarios never yield an all-zero
        # target (same fixed padding choice as the reference implementation).
        repr_A = torch.cat(perms_A[0], 2)   # A0A0A0
        repr_B = torch.cat(perms_B[0], 2)   # B0B0B1
        repr_C = torch.cat(perms_C[0], 2)   # C0C1C2
        pad4A = repr_B + repr_C
        pad4B = repr_A + repr_C
        pad4C = repr_A + repr_B
        candidates = (
            [torch.cat(p, 2) + pad4A for p in perms_A] +
            [torch.cat(p, 2) + pad4B for p in perms_B] +
            [torch.cat(p, 2) + pad4C for p in perms_C])
        # Reshape the flat output to match the candidate target layout:
        # [batch_size, frames, num_track*num_axis=3*3, num_class=12]
        output = output.reshape(output.shape[0], output.shape[1],
                                candidates[0].shape[2], candidates[0].shape[3])
        # Per class and frame, keep the cheapest assignment, then average.
        # (Equivalent to the indices+mask reconstruction, without the extra
        # 13 comparisons and multiply-adds.)
        losses = torch.stack([self._each_calc(output, c) for c in candidates], dim=0)
        return losses.min(dim=0).values.mean()
class ConvBlock(nn.Module):
    """A Conv2d -> BatchNorm2d -> ReLU unit used by the SELDnet encoder."""
    def __init__(self, in_channels, out_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)):
        super().__init__()
        # Attribute names (`conv`, `bn`) are kept for checkpoint compatibility.
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                              kernel_size=kernel_size, stride=stride, padding=padding)
        self.bn = nn.BatchNorm2d(out_channels)
    def forward(self, x):
        """Apply convolution, batch normalization and ReLU in sequence."""
        features = self.conv(x)
        normalized = self.bn(features)
        return F.relu(normalized)
class PositionalEmbedding(nn.Module):  # Not used in the baseline
    """Fixed sinusoidal positional encodings (sin on even dims, cos on odd).

    The table is precomputed once and registered as a non-trainable buffer;
    ``forward`` returns the slice matching the input's sequence length.
    """
    def __init__(self, d_model, max_len=512):
        super().__init__()
        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, d_model).float()
        # Fix: the original set the misspelled attribute `require_grad`, which
        # silently did nothing. Buffers never receive gradients anyway, but set
        # the real flag explicitly for clarity.
        pe.requires_grad = False
        position = torch.arange(0, max_len).float().unsqueeze(1)
        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)
    def forward(self, x):
        """Return positional encodings of shape (1, x.size(1), d_model)."""
        return self.pe[:, :x.size(1)]
class SeldModel(torch.nn.Module):
    """SELDnet: CNN feature extractor -> bidirectional GRU -> multi-head
    self-attention blocks -> fully connected ACCDOA output head.

    Module attribute names and construction order match the reference
    implementation so checkpoints remain loadable.
    """
    def __init__(self, in_feat_shape, out_shape, params):
        super().__init__()
        self.nb_classes = params['unique_classes']
        self.params=params
        # Conv blocks: [ConvBlock, MaxPool2d, Dropout2d] per pooling stage.
        self.conv_block_list = nn.ModuleList()
        if len(params['f_pool_size']):
            for idx, f_pool in enumerate(params['f_pool_size']):
                in_ch = params['nb_cnn2d_filt'] if idx else in_feat_shape[1]
                self.conv_block_list.append(ConvBlock(in_channels=in_ch, out_channels=params['nb_cnn2d_filt']))
                self.conv_block_list.append(nn.MaxPool2d((params['t_pool_size'][idx], f_pool)))
                self.conv_block_list.append(nn.Dropout2d(p=params['dropout_rate']))
        # GRU input: channels * remaining frequency bins after all poolings.
        self.gru_input_dim = params['nb_cnn2d_filt'] * int(np.floor(in_feat_shape[-1] / np.prod(params['f_pool_size'])))
        self.gru = torch.nn.GRU(input_size=self.gru_input_dim, hidden_size=params['rnn_size'],
                                num_layers=params['nb_rnn_layers'], batch_first=True,
                                dropout=params['dropout_rate'], bidirectional=True)
        # One (attention, layer-norm) pair per self-attention block.
        self.mhsa_block_list = nn.ModuleList()
        self.layer_norm_list = nn.ModuleList()
        for _ in range(params['nb_self_attn_layers']):
            self.mhsa_block_list.append(nn.MultiheadAttention(
                embed_dim=self.params['rnn_size'], num_heads=params['nb_heads'],
                dropout=params['dropout_rate'], batch_first=True))
            self.layer_norm_list.append(nn.LayerNorm(self.params['rnn_size']))
        # Fully connected head; the last layer maps to the ACCDOA output size.
        self.fnn_list = torch.nn.ModuleList()
        if params['nb_fnn_layers']:
            for fc_cnt in range(params['nb_fnn_layers']):
                in_dim = params['fnn_size'] if fc_cnt else self.params['rnn_size']
                self.fnn_list.append(nn.Linear(in_dim, params['fnn_size'], bias=True))
        last_in = params['fnn_size'] if params['nb_fnn_layers'] else self.params['rnn_size']
        self.fnn_list.append(nn.Linear(last_in, out_shape[-1], bias=True))
    def forward(self, x):
        """input: (batch_size, mic_channels, time_steps, mel_bins)"""
        for layer in self.conv_block_list:
            x = layer(x)
        # (batch, channels, time, freq) -> (batch, time, channels*freq)
        x = x.transpose(1, 2).contiguous()
        x = x.view(x.shape[0], x.shape[1], -1).contiguous()
        (x, _) = self.gru(x)
        x = torch.tanh(x)
        # GLU-style merge of the two GRU directions.
        half = x.shape[-1] // 2
        x = x[:, :, half:] * x[:, :, :half]
        # Residual self-attention blocks with post-layer-norm.
        for attn, norm in zip(self.mhsa_block_list, self.layer_norm_list):
            residual = x
            x, _ = attn(residual, residual, residual)
            x = norm(x + residual)
        for fc in self.fnn_list[:-1]:
            x = fc(x)
        return torch.tanh(self.fnn_list[-1](x))
| 9,178 | 48.085561 | 215 | py |
seld-dcase2023 | seld-dcase2023-main/train_seldnet.py | #
# A wrapper script that trains the SELDnet. The training stops when the early stopping metric - SELD error stops improving.
#
import os
import sys
import numpy as np
import matplotlib.pyplot as plot
import cls_feature_class
import cls_data_generator
import seldnet_model
import parameters
import time
from time import gmtime, strftime
import torch
import torch.nn as nn
import torch.optim as optim
plot.switch_backend('agg')
from IPython import embed
from cls_compute_seld_results import ComputeSELDResults, reshape_3Dto2D
from SELD_evaluation_metrics import distance_between_cartesian_coordinates
import seldnet_model
def get_accdoa_labels(accdoa_in, nb_classes):
    """Derive SED activity from ACCDOA vector magnitudes.

    Args:
        accdoa_in: [batch_size, frames, num_axis*num_class=3*nb_classes]
        nb_classes: number of sound classes
    Return:
        sed: boolean activity mask [batch_size, frames, nb_classes]
        accdoa_in: the input DOA vectors, unchanged
    """
    xyz = np.stack((accdoa_in[:, :, :nb_classes],
                    accdoa_in[:, :, nb_classes:2 * nb_classes],
                    accdoa_in[:, :, 2 * nb_classes:]), axis=0)
    # A class is active when its Cartesian DOA vector is longer than 0.5.
    sed = np.sqrt((xyz ** 2).sum(axis=0)) > 0.5
    return sed, accdoa_in
def get_multi_accdoa_labels(accdoa_in, nb_classes):
    """
    Args:
        accdoa_in: [batch_size, frames, num_track*num_axis*num_class=3*3*12]
        nb_classes: scalar
    Return:
        sedX: [batch_size, frames, num_class=12]
        doaX: [batch_size, frames, num_axis*num_class=3*12]
    """
    sed_list, doa_list = [], []
    # The three tracks occupy consecutive 3*nb_classes slabs of the last dim.
    for track in range(3):
        base = 3 * track * nb_classes
        doa = accdoa_in[:, :, base:base + 3 * nb_classes]
        x = doa[:, :, :nb_classes]
        y = doa[:, :, nb_classes:2 * nb_classes]
        z = doa[:, :, 2 * nb_classes:]
        # A track is active for a class when its DOA vector is longer than 0.5.
        sed_list.append(np.sqrt(x ** 2 + y ** 2 + z ** 2) > 0.5)
        doa_list.append(doa)
    return sed_list[0], doa_list[0], sed_list[1], doa_list[1], sed_list[2], doa_list[2]
def determine_similar_location(sed_pred0, sed_pred1, doa_pred0, doa_pred1, class_cnt, thresh_unify, nb_classes):
    """Return 1 when both tracks are active for the class and their DOA
    estimates lie within `thresh_unify` of each other (angular distance as
    computed by distance_between_cartesian_coordinates), else 0."""
    # Both tracks must report activity for this class.
    if not (sed_pred0 == 1 and sed_pred1 == 1):
        return 0
    angular_gap = distance_between_cartesian_coordinates(
        doa_pred0[class_cnt], doa_pred0[class_cnt + 1 * nb_classes], doa_pred0[class_cnt + 2 * nb_classes],
        doa_pred1[class_cnt], doa_pred1[class_cnt + 1 * nb_classes], doa_pred1[class_cnt + 2 * nb_classes])
    return 1 if angular_gap < thresh_unify else 0
def test_epoch(data_generator, model, criterion, dcase_output_folder, params, device):
    """Run inference over the generator, dump per-recording DCASE-format CSVs
    into `dcase_output_folder`, and return the average test loss.

    For multi-ACCDOA output, near-identical track predictions of the same class
    are unified (averaged) before dumping; otherwise each active track is
    written separately.
    """
    # Number of frames for a 60 second audio with 100ms hop length = 600 frames
    # Number of frames in one batch (batch_size* sequence_length) consists of all the 600 frames above with zero padding in the remaining frames
    test_filelist = data_generator.get_filelist()
    nb_test_batches, test_loss = 0, 0.
    model.eval()
    file_cnt = 0
    with torch.no_grad():
        for data, target in data_generator.generate():
            # load one batch of data
            data, target = torch.tensor(data).to(device).float(), torch.tensor(target).to(device).float()
            # process the batch of data based on chosen mode
            output = model(data)
            loss = criterion(output, target)
            if params['multi_accdoa'] is True:
                sed_pred0, doa_pred0, sed_pred1, doa_pred1, sed_pred2, doa_pred2 = get_multi_accdoa_labels(output.detach().cpu().numpy(), params['unique_classes'])
                sed_pred0 = reshape_3Dto2D(sed_pred0)
                doa_pred0 = reshape_3Dto2D(doa_pred0)
                sed_pred1 = reshape_3Dto2D(sed_pred1)
                doa_pred1 = reshape_3Dto2D(doa_pred1)
                sed_pred2 = reshape_3Dto2D(sed_pred2)
                doa_pred2 = reshape_3Dto2D(doa_pred2)
            else:
                sed_pred, doa_pred = get_accdoa_labels(output.detach().cpu().numpy(), params['unique_classes'])
                sed_pred = reshape_3Dto2D(sed_pred)
                doa_pred = reshape_3Dto2D(doa_pred)
            # dump SELD results to the corresponding file
            # (assumes the generator yields exactly one recording per batch,
            # in the order of get_filelist() -- per_file mode)
            output_file = os.path.join(dcase_output_folder, test_filelist[file_cnt].replace('.npy', '.csv'))
            file_cnt += 1
            output_dict = {}
            if params['multi_accdoa'] is True:
                for frame_cnt in range(sed_pred0.shape[0]):
                    for class_cnt in range(sed_pred0.shape[1]):
                        # determine whether track0 is similar to track1
                        flag_0sim1 = determine_similar_location(sed_pred0[frame_cnt][class_cnt], sed_pred1[frame_cnt][class_cnt], doa_pred0[frame_cnt], doa_pred1[frame_cnt], class_cnt, params['thresh_unify'], params['unique_classes'])
                        flag_1sim2 = determine_similar_location(sed_pred1[frame_cnt][class_cnt], sed_pred2[frame_cnt][class_cnt], doa_pred1[frame_cnt], doa_pred2[frame_cnt], class_cnt, params['thresh_unify'], params['unique_classes'])
                        flag_2sim0 = determine_similar_location(sed_pred2[frame_cnt][class_cnt], sed_pred0[frame_cnt][class_cnt], doa_pred2[frame_cnt], doa_pred0[frame_cnt], class_cnt, params['thresh_unify'], params['unique_classes'])
                        # unify or not unify according to flag
                        if flag_0sim1 + flag_1sim2 + flag_2sim0 == 0:
                            # No similar pairs: dump every active track separately.
                            if sed_pred0[frame_cnt][class_cnt]>0.5:
                                if frame_cnt not in output_dict:
                                    output_dict[frame_cnt] = []
                                output_dict[frame_cnt].append([class_cnt, doa_pred0[frame_cnt][class_cnt], doa_pred0[frame_cnt][class_cnt+params['unique_classes']], doa_pred0[frame_cnt][class_cnt+2*params['unique_classes']]])
                            if sed_pred1[frame_cnt][class_cnt]>0.5:
                                if frame_cnt not in output_dict:
                                    output_dict[frame_cnt] = []
                                output_dict[frame_cnt].append([class_cnt, doa_pred1[frame_cnt][class_cnt], doa_pred1[frame_cnt][class_cnt+params['unique_classes']], doa_pred1[frame_cnt][class_cnt+2*params['unique_classes']]])
                            if sed_pred2[frame_cnt][class_cnt]>0.5:
                                if frame_cnt not in output_dict:
                                    output_dict[frame_cnt] = []
                                output_dict[frame_cnt].append([class_cnt, doa_pred2[frame_cnt][class_cnt], doa_pred2[frame_cnt][class_cnt+params['unique_classes']], doa_pred2[frame_cnt][class_cnt+2*params['unique_classes']]])
                        elif flag_0sim1 + flag_1sim2 + flag_2sim0 == 1:
                            # Exactly one similar pair: average that pair, keep the third track if active.
                            if frame_cnt not in output_dict:
                                output_dict[frame_cnt] = []
                            if flag_0sim1:
                                if sed_pred2[frame_cnt][class_cnt]>0.5:
                                    output_dict[frame_cnt].append([class_cnt, doa_pred2[frame_cnt][class_cnt], doa_pred2[frame_cnt][class_cnt+params['unique_classes']], doa_pred2[frame_cnt][class_cnt+2*params['unique_classes']]])
                                doa_pred_fc = (doa_pred0[frame_cnt] + doa_pred1[frame_cnt]) / 2
                                output_dict[frame_cnt].append([class_cnt, doa_pred_fc[class_cnt], doa_pred_fc[class_cnt+params['unique_classes']], doa_pred_fc[class_cnt+2*params['unique_classes']]])
                            elif flag_1sim2:
                                if sed_pred0[frame_cnt][class_cnt]>0.5:
                                    output_dict[frame_cnt].append([class_cnt, doa_pred0[frame_cnt][class_cnt], doa_pred0[frame_cnt][class_cnt+params['unique_classes']], doa_pred0[frame_cnt][class_cnt+2*params['unique_classes']]])
                                doa_pred_fc = (doa_pred1[frame_cnt] + doa_pred2[frame_cnt]) / 2
                                output_dict[frame_cnt].append([class_cnt, doa_pred_fc[class_cnt], doa_pred_fc[class_cnt+params['unique_classes']], doa_pred_fc[class_cnt+2*params['unique_classes']]])
                            elif flag_2sim0:
                                if sed_pred1[frame_cnt][class_cnt]>0.5:
                                    output_dict[frame_cnt].append([class_cnt, doa_pred1[frame_cnt][class_cnt], doa_pred1[frame_cnt][class_cnt+params['unique_classes']], doa_pred1[frame_cnt][class_cnt+2*params['unique_classes']]])
                                doa_pred_fc = (doa_pred2[frame_cnt] + doa_pred0[frame_cnt]) / 2
                                output_dict[frame_cnt].append([class_cnt, doa_pred_fc[class_cnt], doa_pred_fc[class_cnt+params['unique_classes']], doa_pred_fc[class_cnt+2*params['unique_classes']]])
                        elif flag_0sim1 + flag_1sim2 + flag_2sim0 >= 2:
                            # All tracks mutually similar: average all three into one event.
                            if frame_cnt not in output_dict:
                                output_dict[frame_cnt] = []
                            doa_pred_fc = (doa_pred0[frame_cnt] + doa_pred1[frame_cnt] + doa_pred2[frame_cnt]) / 3
                            output_dict[frame_cnt].append([class_cnt, doa_pred_fc[class_cnt], doa_pred_fc[class_cnt+params['unique_classes']], doa_pred_fc[class_cnt+2*params['unique_classes']]])
            else:
                for frame_cnt in range(sed_pred.shape[0]):
                    for class_cnt in range(sed_pred.shape[1]):
                        if sed_pred[frame_cnt][class_cnt]>0.5:
                            if frame_cnt not in output_dict:
                                output_dict[frame_cnt] = []
                            output_dict[frame_cnt].append([class_cnt, doa_pred[frame_cnt][class_cnt], doa_pred[frame_cnt][class_cnt+params['unique_classes']], doa_pred[frame_cnt][class_cnt+2*params['unique_classes']]])
            data_generator.write_output_format_file(output_file, output_dict)
            test_loss += loss.item()
            nb_test_batches += 1
            if params['quick_test'] and nb_test_batches == 4:
                break
        test_loss /= nb_test_batches
    return test_loss
def train_epoch(data_generator, optimizer, model, criterion, params, device):
    """Run one training epoch and return the mean batch loss.

    :param data_generator: object whose generate() yields (features, labels) batches
    :param optimizer: torch optimizer stepping `model`'s parameters
    :param model: network mapping feature batches to output batches
    :param criterion: loss comparing model output with labels
    :param params: config dict; 'quick_test' truncates the epoch to 4 batches
    :param device: device the batches are moved to
    :return: average training loss over the processed batches
    """
    model.train()
    batch_count = 0
    loss_total = 0.
    for features, labels in data_generator.generate():
        # load one batch of data
        features = torch.tensor(features).to(device).float()
        labels = torch.tensor(labels).to(device).float()
        optimizer.zero_grad()
        # forward + backward + parameter update
        loss = criterion(model(features), labels)
        loss.backward()
        optimizer.step()
        loss_total += loss.item()
        batch_count += 1
        if params['quick_test'] and batch_count == 4:
            break
    return loss_total / batch_count
def main(argv):
    """
    Main wrapper for training sound event localization and detection network.

    Trains one model per test split: builds the data generators, trains with
    early stopping on the validation SELD score, then evaluates the best
    checkpoint on the unseen test split and prints (optionally jackknifed)
    SELD metrics.

    :param argv: expects two optional inputs.
        first input: task_id - (optional) To chose the system configuration in parameters.py.
                                (default) 1 - uses default parameters
        second input: job_id - (optional) all the output files will be uniquely represented with this.
                              (default) 1
    """
    print(argv)
    if len(argv) != 3:
        print('\n\n')
        print('-------------------------------------------------------------------------------------------------------')
        print('The code expected two optional inputs')
        print('\t>> python seld.py <task-id> <job-id>')
        print('\t\t<task-id> is used to choose the user-defined parameter set from parameter.py')
        print('Using default inputs for now')
        print('\t\t<job-id> is a unique identifier which is used for output filenames (models, training plots). '
              'You can use any number or string for this.')
        print('-------------------------------------------------------------------------------------------------------')
        print('\n\n')
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    # NOTE(review): anomaly detection slows training noticeably; presumably a
    # leftover debug switch -- confirm whether it should stay enabled.
    torch.autograd.set_detect_anomaly(True)
    # use parameter set defined by user
    task_id = '1' if len(argv) < 2 else argv[1]
    params = parameters.get_params(task_id)
    job_id = 1 if len(argv) < 3 else argv[-1]
    # Training setup: split assignment depends on which dataset year is used.
    train_splits, val_splits, test_splits = None, None, None
    if params['mode'] == 'dev':
        if '2020' in params['dataset_dir']:
            test_splits = [1]
            val_splits = [2]
            train_splits = [[3, 4, 5, 6]]
        elif '2021' in params['dataset_dir']:
            test_splits = [6]
            val_splits = [5]
            train_splits = [[1, 2, 3, 4]]
        elif '2022' in params['dataset_dir']:
            test_splits = [[4]]
            val_splits = [[4]]
            train_splits = [[1, 2, 3]]
        elif '2023' in params['dataset_dir']:
            # NOTE(review): validation and test use the same split here.
            test_splits = [[4]]
            val_splits = [[4]]
            train_splits = [[1, 2, 3]]
        else:
            print('ERROR: Unknown dataset splits')
            exit()
    for split_cnt, split in enumerate(test_splits):
        print('\n\n---------------------------------------------------------------------------------------------------')
        print('------------------------------------ SPLIT {} -----------------------------------------------'.format(split))
        print('---------------------------------------------------------------------------------------------------')
        # Unique name for the run
        loc_feat = params['dataset']
        if params['dataset'] == 'mic':
            if params['use_salsalite']:
                loc_feat = '{}_salsa'.format(params['dataset'])
            else:
                loc_feat = '{}_gcc'.format(params['dataset'])
        loc_output = 'multiaccdoa' if params['multi_accdoa'] else 'accdoa'
        cls_feature_class.create_folder(params['model_dir'])
        unique_name = '{}_{}_{}_split{}_{}_{}'.format(
            task_id, job_id, params['mode'], split_cnt, loc_output, loc_feat
        )
        model_name = '{}_model.h5'.format(os.path.join(params['model_dir'], unique_name))
        print("unique_name: {}\n".format(unique_name))
        # Load train and validation data
        print('Loading training dataset:')
        data_gen_train = cls_data_generator.DataGenerator(
            params=params, split=train_splits[split_cnt]
        )
        print('Loading validation dataset:')
        data_gen_val = cls_data_generator.DataGenerator(
            params=params, split=val_splits[split_cnt], shuffle=False, per_file=True
        )
        # Collect i/o data size and load model configuration
        data_in, data_out = data_gen_train.get_data_sizes()
        model = seldnet_model.SeldModel(data_in, data_out, params).to(device)
        if params['finetune_mode']:
            print('Running in finetuning mode. Initializing the model to the weights - {}'.format(params['pretrained_model_weights']))
            model.load_state_dict(torch.load(params['pretrained_model_weights'], map_location='cpu'))
        print('---------------- SELD-net -------------------')
        print('FEATURES:\n\tdata_in: {}\n\tdata_out: {}\n'.format(data_in, data_out))
        print('MODEL:\n\tdropout_rate: {}\n\tCNN: nb_cnn_filt: {}, f_pool_size{}, t_pool_size{}\n, rnn_size: {}\n, nb_attention_blocks: {}\n, fnn_size: {}\n'.format(
            params['dropout_rate'], params['nb_cnn2d_filt'], params['f_pool_size'], params['t_pool_size'], params['rnn_size'], params['nb_self_attn_layers'],
            params['fnn_size']))
        print(model)
        # Dump results in DCASE output format for calculating final scores
        dcase_output_val_folder = os.path.join(params['dcase_output_dir'], '{}_{}_val'.format(unique_name, strftime("%Y%m%d%H%M%S", gmtime())))
        cls_feature_class.delete_and_create_folder(dcase_output_val_folder)
        print('Dumping recording-wise val results in: {}'.format(dcase_output_val_folder))
        # Initialize evaluation metric class
        score_obj = ComputeSELDResults(params)
        # start training
        best_val_epoch = -1
        best_ER, best_F, best_LE, best_LR, best_seld_scr = 1., 0., 180., 0., 9999
        patience_cnt = 0
        nb_epoch = 2 if params['quick_test'] else params['nb_epochs']
        optimizer = optim.Adam(model.parameters(), lr=params['lr'])
        if params['multi_accdoa'] is True:
            criterion = seldnet_model.MSELoss_ADPIT()
        else:
            criterion = nn.MSELoss()
        for epoch_cnt in range(nb_epoch):
            # ---------------------------------------------------------------------
            # TRAINING
            # ---------------------------------------------------------------------
            start_time = time.time()
            train_loss = train_epoch(data_gen_train, optimizer, model, criterion, params, device)
            train_time = time.time() - start_time
            # ---------------------------------------------------------------------
            # VALIDATION
            # ---------------------------------------------------------------------
            start_time = time.time()
            val_loss = test_epoch(data_gen_val, model, criterion, dcase_output_val_folder, params, device)
            # Calculate the DCASE 2021 metrics - Location-aware detection and Class-aware localization scores
            val_ER, val_F, val_LE, val_LR, val_seld_scr, classwise_val_scr = score_obj.get_SELD_Results(dcase_output_val_folder)
            val_time = time.time() - start_time
            # Save model if loss is good
            if val_seld_scr <= best_seld_scr:
                best_val_epoch, best_ER, best_F, best_LE, best_LR, best_seld_scr = epoch_cnt, val_ER, val_F, val_LE, val_LR, val_seld_scr
                torch.save(model.state_dict(), model_name)
            # Print stats
            print(
                'epoch: {}, time: {:0.2f}/{:0.2f}, '
                # 'train_loss: {:0.2f}, val_loss: {:0.2f}, '
                'train_loss: {:0.4f}, val_loss: {:0.4f}, '
                'ER/F/LE/LR/SELD: {}, '
                'best_val_epoch: {} {}'.format(
                    epoch_cnt, train_time, val_time,
                    train_loss, val_loss,
                    '{:0.2f}/{:0.2f}/{:0.2f}/{:0.2f}/{:0.2f}'.format(val_ER, val_F, val_LE, val_LR, val_seld_scr),
                    best_val_epoch, '({:0.2f}/{:0.2f}/{:0.2f}/{:0.2f}/{:0.2f})'.format(best_ER, best_F, best_LE, best_LR, best_seld_scr))
            )
            # NOTE(review): patience_cnt is incremented every epoch and never
            # reset when the validation score improves, so training always
            # stops after params['patience'] + 1 epochs -- confirm intended.
            patience_cnt += 1
            if patience_cnt > params['patience']:
                break
        # ---------------------------------------------------------------------
        # Evaluate on unseen test data
        # ---------------------------------------------------------------------
        print('Load best model weights')
        model.load_state_dict(torch.load(model_name, map_location='cpu'))
        print('Loading unseen test dataset:')
        data_gen_test = cls_data_generator.DataGenerator(
            params=params, split=test_splits[split_cnt], shuffle=False, per_file=True
        )
        # Dump results in DCASE output format for calculating final scores
        dcase_output_test_folder = os.path.join(params['dcase_output_dir'], '{}_{}_test'.format(unique_name, strftime("%Y%m%d%H%M%S", gmtime())))
        cls_feature_class.delete_and_create_folder(dcase_output_test_folder)
        print('Dumping recording-wise test results in: {}'.format(dcase_output_test_folder))
        test_loss = test_epoch(data_gen_test, model, criterion, dcase_output_test_folder, params, device)
        # Jackknife resampling gives confidence intervals on the test metrics.
        use_jackknife=True
        test_ER, test_F, test_LE, test_LR, test_seld_scr, classwise_test_scr = score_obj.get_SELD_Results(dcase_output_test_folder, is_jackknife=use_jackknife )
        print('\nTest Loss')
        print('SELD score (early stopping metric): {:0.2f} {}'.format(test_seld_scr[0] if use_jackknife else test_seld_scr, '[{:0.2f}, {:0.2f}]'.format(test_seld_scr[1][0], test_seld_scr[1][1]) if use_jackknife else ''))
        print('SED metrics: Error rate: {:0.2f} {}, F-score: {:0.1f} {}'.format(test_ER[0]  if use_jackknife else test_ER, '[{:0.2f},  {:0.2f}]'.format(test_ER[1][0], test_ER[1][1]) if use_jackknife else '', 100* test_F[0]  if use_jackknife else 100* test_F, '[{:0.2f}, {:0.2f}]'.format(100* test_F[1][0], 100* test_F[1][1]) if use_jackknife else ''))
        print('DOA metrics: Localization error: {:0.1f} {}, Localization Recall: {:0.1f} {}'.format(test_LE[0] if use_jackknife else test_LE, '[{:0.2f} , {:0.2f}]'.format(test_LE[1][0], test_LE[1][1]) if use_jackknife else '', 100*test_LR[0]  if use_jackknife else 100*test_LR,'[{:0.2f}, {:0.2f}]'.format(100*test_LR[1][0], 100*test_LR[1][1]) if use_jackknife else '')) 
        if params['average']=='macro':
            print('Classwise results on unseen test data')
            print('Class\tER\tF\tLE\tLR\tSELD_score')
            for cls_cnt in range(params['unique_classes']):
                print('{}\t{:0.2f} {}\t{:0.2f} {}\t{:0.2f} {}\t{:0.2f} {}\t{:0.2f} {}'.format(
                    cls_cnt,
                    classwise_test_scr[0][0][cls_cnt] if use_jackknife else classwise_test_scr[0][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][0][cls_cnt][0], classwise_test_scr[1][0][cls_cnt][1]) if use_jackknife else '',
                    classwise_test_scr[0][1][cls_cnt] if use_jackknife else classwise_test_scr[1][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][1][cls_cnt][0], classwise_test_scr[1][1][cls_cnt][1]) if use_jackknife else '',
                    classwise_test_scr[0][2][cls_cnt] if use_jackknife else classwise_test_scr[2][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][2][cls_cnt][0], classwise_test_scr[1][2][cls_cnt][1]) if use_jackknife else '',
                    classwise_test_scr[0][3][cls_cnt] if use_jackknife else classwise_test_scr[3][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][3][cls_cnt][0], classwise_test_scr[1][3][cls_cnt][1]) if use_jackknife else '',
                    classwise_test_scr[0][4][cls_cnt] if use_jackknife else classwise_test_scr[4][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][4][cls_cnt][0], classwise_test_scr[1][4][cls_cnt][1]) if use_jackknife else ''))
if __name__ == "__main__":
    # Launch training; configuration/IO errors terminate with their message.
    try:
        sys.exit(main(sys.argv))
    except (ValueError, IOError) as e:
        sys.exit(e)
| 22,604 | 55.5125 | 369 | py |
AACVP-MVSNet | AACVP-MVSNet-main/train_AACVPMVSNet.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/8/3 0016 11:52
# @Author : Anzhu Yu
# @Site :
# @File : train_AACVPMVSNet.py
# @Software: PyCharm
# some packages used in this project
from argsParser import getArgsParser, checkArgs
import os
import logging
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from datasets import dtu_loader
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import time
from tensorboardX import SummaryWriter
from datasets import find_dataset_def
from models import *
from utils import *
import gc
import sys
import datetime
import torch.utils
import torch.utils.checkpoint
from torchscan import summary
# CUDA_LAUNCH_BLOCKING=1
parser = getArgsParser()
args = parser.parse_args()
assert args.mode == "train", 'HERE IS THE TRAINING MODE!'
checkArgs(args)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
cudnn.benchmark = True
# logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
curTime = time.strftime('%Y%m%d-%H%M', time.localtime(time.time()))
log_path = args.loggingdir + args.info.replace(" ", "_") + "/"
if not os.path.isdir(args.loggingdir):
os.mkdir(args.loggingdir)
if not os.path.isdir(log_path):
os.mkdir(log_path)
log_name = log_path + curTime + '.log'
logfile = log_name
formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
fileHandler = logging.FileHandler(logfile, mode='a')
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
logger.info("Logger initialized.")
logger.info("Writing logs to file:" + logfile)
settings_str = "All settings:\n"
line_width = 20
for k, v in vars(args).items():
settings_str += '{0}: {1}\n'.format(k, v)
logger.info(settings_str)
# Read the Data,
train_dataset = dtu_loader.MVSDataset(args, logger)
train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True, num_workers=16, drop_last=True)
# Build the model
model = AACVPMVSNet(args, group=args.groups, num_heads=args.num_heads)
# Use the cuda_ids to determine the GPUs used for experiments
device_id_list = [int(idd) for idd in args.cuda_ids.split(',')]
if len(device_id_list) == 1 and (device_id_list[0] != 666):
print("Now multi-GPUs mode activated!")
device_ids = [int(args.cuda_ids)]
elif (device_id_list[0] == 666):
model = model.cpu()
else:
device_ids = device_id_list
del device_id_list
# GPUs
if args.mode == "train" and torch.cuda.is_available():
model = nn.DataParallel(model, device_ids=device_ids, output_device=device_ids[0])
if torch.cuda.is_available():
model.cuda()
model.train()
if args.loss_function == "sl1":
logger.info("Using smoothed L1 loss")
model_loss = sL1_loss
else: # MSE
logger.info("Using MSE loss")
model_loss = MSE_loss
logger.info(">>> total params: {:.2f}M".format(sum(p.numel() for p in model.parameters()) / 1000000.0))
# model_loss = mvsnet_loss
optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999), weight_decay=args.wd)
# Start at a given checkpoint
sw_path = args.logckptdir + args.info + "/"
start_epoch = 0
if (args.mode == "train" and args.resume) or (args.mode == "test" and not args.loadckpt):
logger.info("Resuming or testing...")
saved_models = [fn for fn in os.listdir(sw_path) if fn.endswith(".ckpt")]
saved_models = sorted(saved_models, key=lambda x: int(x.split('_')[-1].split('.')[0]))
# use the latest checkpoint file
loadckpt = os.path.join(sw_path, saved_models[-1])
logger.info("Resuming " + loadckpt)
state_dict = torch.load(loadckpt)
model.load_state_dict(state_dict['model'])
optimizer.load_state_dict(state_dict['optimizer'])
start_epoch = state_dict['epoch'] + 1
elif args.loadckpt:
# load checkpoint file specified by args.loadckpt
logger.info("loading model {}".format(args.loadckpt))
state_dict = torch.load(args.loadckpt)
model.load_state_dict(state_dict['model'])
print("start at epoch {}".format(start_epoch))
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
# Training at each epoch
def train():
    """Main training loop over epochs and batches.

    Uses the module-level ``args``, ``optimizer``, ``model``, ``logger``,
    ``train_loader`` and ``start_epoch`` set up at import time. Applies a
    MultiStepLR schedule parsed from ``args.lrepochs`` ("e1,e2,...:gamma_div"),
    logs per-iteration loss, and checkpoints every ``args.save_freq`` epochs.
    """
    # args.lrepochs is "milestone1,milestone2,...:divisor"
    milestones = [int(epoch_idx) for epoch_idx in args.lrepochs.split(':')[0].split(',')]
    lr_gamma = 1 / float(args.lrepochs.split(':')[1])
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones, gamma=lr_gamma,
                                                        last_epoch=start_epoch - 1)
    # epoch stat
    last_loss = None
    this_loss = None
    for epoch_idx in range(start_epoch, args.epochs):
        logger.info('Epoch {}:'.format(epoch_idx))
        # NOTE: this value is overwritten inside the batch loop below.
        global_step = len(train_loader) * epoch_idx
        # On the first epoch there is no previous loss; use a sentinel.
        if last_loss is None:
            last_loss = 999999
        else:
            last_loss = this_loss
        this_loss = []
        for batch_idx, sample in enumerate(train_loader):
            start_time = time.time()
            global_step = len(train_loader) * epoch_idx + batch_idx
            do_summary = global_step % args.summary_freq == 0
            loss = train_sample(sample, detailed_summary=do_summary)
            this_loss.append(loss)
            logger.info(
                'Epoch {}/{}, Iter {}/{}, train loss = {:.3f}, time = {:.3f}'.format(epoch_idx, args.epochs, batch_idx,
                                                                                     len(train_loader), loss,
                                                                                     time.time() - start_time))
        # checkpoint: save model + optimizer state every args.save_freq epochs
        if (epoch_idx + 1) % args.save_freq == 0:
            torch.save({
                'epoch': epoch_idx,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict()},
                "{}/model_{:0>6}.ckpt".format(args.logckptdir + args.info.replace(" ", "_"), epoch_idx))
            logger.info("model_{:0>6}.ckpt saved".format(epoch_idx))
        this_loss = np.mean(this_loss)
        logger.info("Epoch loss: {:.5f} --> {:.5f}".format(last_loss, this_loss))
        lr_scheduler.step()
# Training for each batch
def train_sample(sample, detailed_summary=False):
    """
    Run one optimization step on a single batch.

    :param sample: each batch (dict of tensors produced by the data loader)
    :param detailed_summary: whether the detailed logs are needed.
    :return: the loss (as a Python float)
    """
    # model.train() is not needed here, however it is often used to state this script is not for evaluation.
    model.train()
    optimizer.zero_grad()
    sample_cuda = tocuda(sample)
    ref_depths = sample_cuda["ref_depths"]
    # forward
    outputs = model(sample_cuda["ref_img"].float(), sample_cuda["src_imgs"].float(), sample_cuda["ref_intrinsics"],
                    sample_cuda["src_intrinsics"], sample_cuda["ref_extrinsics"], sample_cuda["src_extrinsics"],
                    sample_cuda["depth_min"], sample_cuda["depth_max"])
    depth_est_list = outputs["depth_est_list"]
    dHeight = ref_depths.shape[2]
    dWidth = ref_depths.shape[3]
    # Multi-scale loss: one term per pyramid level, summed.
    loss = []
    for i in range(0, args.nscale):
        # generate the masks. Level i ground truth is cropped to 1/2**i resolution.
        depth_gt = ref_depths[:, i, :int(dHeight / 2 ** i), :int(dWidth / 2 ** i)]
        # 425 presumably filters invalid/empty DTU depth values — TODO confirm against dataset.
        mask = depth_gt > 425
        loss.append(model_loss(depth_est_list[i], depth_gt.float(), mask))
    loss = sum(loss)
    loss.backward()
    optimizer.step()
    return loss.data.cpu().item()
# main function, the start of this program
if __name__ == '__main__':
    if args.mode == "train":
        # args.mode == "train" is already asserted at import time, so this always runs.
        train()
| 7,685 | 33.466368 | 119 | py |
AACVP-MVSNet | AACVP-MVSNet-main/eval_AACVPMVSNet.py | # Evaluate AACVP-MVSNet
# Modified by: Bing Liu
import os, sys, time, logging, argparse, datetime, re
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from datasets import dtu_loader
from models import *
# from models.modules import *
from utils import *
from PIL import Image
from argsParser import getArgsParser
from plyfile import PlyData, PlyElement
# Debug import
import pdb
import matplotlib.pyplot as plt
cudnn.benchmark = True
# Arg parser
parser = getArgsParser()
args = parser.parse_args()
assert args.mode == "test"
# logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
curTime = time.strftime('%Y%m%d-%H%M', time.localtime(time.time()))
log_path = args.loggingdir + args.info.replace(" ", "_") + "/"
if not os.path.isdir(args.loggingdir):
os.mkdir(args.loggingdir)
if not os.path.isdir(log_path):
os.mkdir(log_path)
log_name = log_path + curTime + '.log'
logfile = log_name
formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
fileHandler = logging.FileHandler(logfile, mode='a')
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
logger.info("Logger initialized.")
logger.info("Writing logs to file:" + logfile)
settings_str = "All settings:\n"
line_width = 30
for k, v in vars(args).items():
settings_str += '{0}: {1}\n'.format(k, v)
logger.info(settings_str)
# Run AACVP-MVSNet to save depth maps and confidence maps
def save_depth():
    """Run AACVP-MVSNet over the test set and write, for every view, the
    finest-level depth map (.pfm + .png preview) and its confidence map
    (.pfm) under ``args.outdir``.

    Uses the module-level ``args`` and ``logger``; requires a CUDA device.
    """
    # dataset, dataloader
    test_dataset = dtu_loader.MVSDataset(args, logger)
    test_loader = DataLoader(test_dataset, args.batch_size, shuffle=args.eval_shuffle, num_workers=16, drop_last=True)
    model = AACVPMVSNet(args, group = args.groups, num_heads = args.num_heads)
    device_ids = [0]
    model = nn.DataParallel(model, device_ids=device_ids, output_device=device_ids[0])
    model.cuda()
    logger.info("loading model {}".format(args.loadckpt))
    state_dict = torch.load(args.loadckpt)
    # strict=False tolerates missing/extra keys in the checkpoint.
    model.load_state_dict(state_dict['model'], strict=False)
    with torch.no_grad():
        for batch_idx, sample in enumerate(test_loader):
            start_time = time.time()
            sample_cuda = tocuda(sample)
            torch.cuda.empty_cache()
            outputs = model( \
                sample_cuda["ref_img"].float(), \
                sample_cuda["src_imgs"].float(), \
                sample_cuda["ref_intrinsics"], \
                sample_cuda["src_intrinsics"], \
                sample_cuda["ref_extrinsics"], \
                sample_cuda["src_extrinsics"], \
                sample_cuda["depth_min"], \
                sample_cuda["depth_max"])
            depth_est_list = outputs["depth_est_list"]
            # index 0 is the finest pyramid level
            depth_est = depth_est_list[0].data.cpu().numpy()
            prob_confidence = outputs["prob_confidence"].data.cpu().numpy()
            del sample_cuda
            filenames = sample["filename"]
            logger.info('Iter {}/{}, time = {:.3f}'.format(batch_idx, len(test_loader), time.time() - start_time))
            # save depth maps and confidence maps
            for filename, est_depth, photometric_confidence in zip(filenames, depth_est, prob_confidence):
                # print(depth_est.shape, prob_confidence.shape)
                depth_filename = os.path.join(args.outdir, filename.format('depth_est', '.pfm'))
                confidence_filename = os.path.join(args.outdir, filename.format('confidence', '.pfm'))
                os.makedirs(depth_filename.rsplit('/', 1)[0], exist_ok=True)
                os.makedirs(confidence_filename.rsplit('/', 1)[0], exist_ok=True)
                # save depth maps
                save_pfm(depth_filename, est_depth)
                write_depth_img(depth_filename + ".png", est_depth)
                # Save prob maps
                save_pfm(confidence_filename, photometric_confidence)
def read_pfm(filename):
    """Read a PFM (Portable Float Map) file.

    Args:
        filename: path to a ``.pfm`` file ('PF' magic = color, 'Pf' = greyscale).

    Returns:
        (data, scale): ``data`` is a float32 ndarray of shape (H, W, 3) for
        color or (H, W) for greyscale, flipped to top-down row order;
        ``scale`` is the (positive) scale factor stored in the header.

    Raises:
        Exception: if the magic number or the dimension line is malformed.
    """
    # Context manager closes the handle even when parsing fails
    # (the original leaked the file descriptor on any exception).
    with open(filename, 'rb') as file:
        header = file.readline().decode('utf-8').rstrip()
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        scale = float(file.readline().rstrip())
        if scale < 0:  # negative scale marks little-endian data (PFM convention)
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian
        data = np.fromfile(file, endian + 'f')
    # PFM stores rows bottom-up; flip to the usual top-down order.
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    data = np.flipud(data)
    return data, scale
def read_camera_parameters(filename):
    """Parse a MVSNet-style camera text file.

    File layout: line 0 is a header, lines [1, 5) hold the 4x4 extrinsic
    matrix, lines [7, 10) hold the 3x3 intrinsic matrix, whitespace-separated.

    Args:
        filename: path to the camera file.

    Returns:
        (intrinsics, extrinsics): float32 arrays of shape (3, 3) and (4, 4).
    """
    with open(filename) as f:
        lines = [line.rstrip() for line in f.readlines()]
    # np.fromstring(text, sep=' ') is deprecated; split + np.array is the
    # supported equivalent.
    # extrinsics: line [1,5), 4x4 matrix
    extrinsics = np.array(' '.join(lines[1:5]).split(), dtype=np.float32).reshape((4, 4))
    # intrinsics: line [7-10), 3x3 matrix
    intrinsics = np.array(' '.join(lines[7:10]).split(), dtype=np.float32).reshape((3, 3))
    return intrinsics, extrinsics
def read_pair_file(filename):
    """Parse a view-pair file.

    The first line holds the number of viewpoints; for each viewpoint there
    follows a reference-view id line, then a line of the form
    "N id score id score ..." listing the N scored source views.

    Returns:
        list of (ref_view, [src_view, ...]) tuples.
    """
    pairs = []
    with open(filename) as f:
        total = int(f.readline())
        for _ in range(total):
            ref_view = int(f.readline().rstrip())
            tokens = f.readline().rstrip().split()
            # tokens alternate count, id, score, id, score, ... -> keep the ids
            src_views = [int(tok) for tok in tokens[1::2]]
            pairs.append((ref_view, src_views))
    return pairs
# read an image
def read_img(filename):
    """Read an image, crop it to the DTU evaluation size (1600x1184), and
    return it as a uint8 array.

    NOTE(review): despite the original "scale 0~255 to 0~1" comment below,
    no scaling is performed — values stay in [0, 255] as uint8.
    """
    img = Image.open(filename)
    # Crop image (For DTU only)
    left = 0
    top = 0
    right = 1600
    bottom = 1184
    img = img.crop((left, top, right, bottom))
    # scale 0~255 to 0~1
    np_img = np.array(img, dtype=np.uint8)
    return np_img
# read a binary mask
def read_mask(filename):
    # read_img returns uint8 in [0, 255], so "> 0.5" is effectively "non-zero".
    return read_img(filename) > 0.5
# save a binary mask
def save_mask(filename, mask):
    """Save a boolean mask as an 8-bit image (True -> 255, False -> 0).

    Args:
        filename: output path.
        mask: boolean ndarray.
    """
    # np.bool was removed in NumPy 1.24; compare against the scalar type.
    assert mask.dtype == np.bool_
    mask = mask.astype(np.uint8) * 255
    Image.fromarray(mask).save(filename)
def save_pfm(filename, image, scale=1):
    """Write *image* to *filename* in PFM format.

    Args:
        filename: output path; missing parent directories are created.
        image: float32 ndarray — (H, W) or (H, W, 1) greyscale, (H, W, 3) color.
        scale: positive scale factor; stored negated for little-endian data.

    Raises:
        Exception: if the dtype is not float32 or the shape is unsupported.
    """
    # exist_ok replaces the race-prone exists()/makedirs() dance; the
    # original's fallback also referenced the never-imported `errno` module,
    # which would raise NameError if the race actually happened.
    dirname = os.path.dirname(filename)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    # PFM stores rows bottom-up, so flip before writing.
    image = np.flipud(image)
    if image.dtype.name != 'float32':
        raise Exception('Image dtype must be float32.')
    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
    with open(filename, "wb") as file:
        file.write('PF\n'.encode('utf-8') if color else 'Pf\n'.encode('utf-8'))
        file.write('{} {}\n'.format(image.shape[1], image.shape[0]).encode('utf-8'))
        endian = image.dtype.byteorder
        # Negative scale marks little-endian float data (PFM convention).
        if endian == '<' or endian == '=' and sys.byteorder == 'little':
            scale = -scale
        file.write(('%f\n' % scale).encode('utf-8'))
        image.tofile(file)
def write_depth_img(filename, depth):
    """Save a depth map as an 8-bit greyscale preview image.

    The DTU depth values are remapped by (depth - 500) / 2 before the
    conversion to mode "L".

    Returns:
        1 on success (kept for backward compatibility).
    """
    # exist_ok avoids the race-prone exists()/makedirs() pattern; the original
    # fallback referenced the never-imported `errno` module (NameError).
    dirname = os.path.dirname(filename)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    image = Image.fromarray((depth - 500) / 2).convert("L")
    image.save(filename)
    return 1
# project the reference point cloud into the source view, then project back
def reproject_with_depth(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src):
    """Project the reference depth map into the source view and back.

    Args:
        depth_ref, depth_src: (H, W) depth maps of reference / source views.
        intrinsics_*: 3x3 camera intrinsics.
        extrinsics_*: 4x4 world-to-camera extrinsics.

    Returns:
        depth_reprojected: (H, W) round-trip depth seen from the reference view.
        x_reprojected, y_reprojected: (H, W) round-trip pixel coordinates.
        x_src, y_src: (H, W) coordinates of each reference pixel in the source view.

    NOTE(review): uses ``cv2`` which is not visibly imported at the top of
    this file — presumably pulled in via a star import; verify before running.
    """
    width, height = depth_ref.shape[1], depth_ref.shape[0]
    ## step1. project reference pixels to the source view
    # reference view x, y
    x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))
    x_ref, y_ref = x_ref.reshape([-1]), y_ref.reshape([-1])
    # reference 3D space
    xyz_ref = np.matmul(np.linalg.inv(intrinsics_ref),
                        np.vstack((x_ref, y_ref, np.ones_like(x_ref))) * depth_ref.reshape([-1]))
    # source 3D space
    xyz_src = np.matmul(np.matmul(extrinsics_src, np.linalg.inv(extrinsics_ref)),
                        np.vstack((xyz_ref, np.ones_like(x_ref))))[:3]
    # source view x, y
    K_xyz_src = np.matmul(intrinsics_src, xyz_src)
    xy_src = K_xyz_src[:2] / K_xyz_src[2:3]
    ## step2. reproject the source view points with source view depth estimation
    # find the depth estimation of the source view
    x_src = xy_src[0].reshape([height, width]).astype(np.float32)
    y_src = xy_src[1].reshape([height, width]).astype(np.float32)
    # bilinearly sample the source depth at the projected (sub-pixel) locations
    sampled_depth_src = cv2.remap(depth_src, x_src, y_src, interpolation=cv2.INTER_LINEAR)
    # mask = sampled_depth_src > 0
    # source 3D space
    # NOTE that we should use sampled source-view depth_here to project back
    xyz_src = np.matmul(np.linalg.inv(intrinsics_src),
                        np.vstack((xy_src, np.ones_like(x_ref))) * sampled_depth_src.reshape([-1]))
    # reference 3D space
    xyz_reprojected = np.matmul(np.matmul(extrinsics_ref, np.linalg.inv(extrinsics_src)),
                                np.vstack((xyz_src, np.ones_like(x_ref))))[:3]
    # source view x, y, depth
    depth_reprojected = xyz_reprojected[2].reshape([height, width]).astype(np.float32)
    K_xyz_reprojected = np.matmul(intrinsics_ref, xyz_reprojected)
    xy_reprojected = K_xyz_reprojected[:2] / K_xyz_reprojected[2:3]
    x_reprojected = xy_reprojected[0].reshape([height, width]).astype(np.float32)
    y_reprojected = xy_reprojected[1].reshape([height, width]).astype(np.float32)
    return depth_reprojected, x_reprojected, y_reprojected, x_src, y_src
def check_geometric_consistency(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src):
    """Two-view geometric consistency check.

    A reference pixel passes when its round-trip reprojection through the
    source view lands within 0.5 px of where it started AND the relative
    depth error of the round trip is below 1%.

    Returns:
        mask: boolean (H, W) consistency mask.
        depth_reprojected: round-trip depth, zeroed where the mask is False.
        x2d_src, y2d_src: coordinates of each reference pixel in the source view.
    """
    height, width = depth_ref.shape[0], depth_ref.shape[1]
    x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))
    reprojection = reproject_with_depth(
        depth_ref, intrinsics_ref, extrinsics_ref,
        depth_src, intrinsics_src, extrinsics_src)
    depth_reprojected, x2d_reprojected, y2d_reprojected, x2d_src, y2d_src = reprojection
    # pixel-space round-trip error: |p_reproj - p_ref| < 0.5
    pixel_err = np.sqrt((x2d_reprojected - x_ref) ** 2 + (y2d_reprojected - y_ref) ** 2)
    # relative depth round-trip error: |d_reproj - d_ref| / d_ref < 0.01
    rel_depth_err = np.abs(depth_reprojected - depth_ref) / depth_ref
    mask = np.logical_and(pixel_err < 0.5, rel_depth_err < 0.01)
    depth_reprojected[~mask] = 0
    return mask, depth_reprojected, x2d_src, y2d_src
def filter_depth(dataset_root, scan, out_folder, plyfilename):
    """Fuse per-view depth maps of one scan into a colored point cloud.

    For every reference view: keep pixels with photometric confidence > 0.9
    that are geometrically consistent with at least 3 source views, average
    the consistent depths, back-project the surviving pixels to world space,
    and write the union of all views as a PLY file.

    Args:
        dataset_root: DTU root holding Cameras/ and Rectified/.
        scan: scan folder name under Rectified/.
        out_folder: folder holding depth_est/ and confidence/ .pfm outputs.
        plyfilename: destination .ply path.
    """
    print("Starting fusion for:" + out_folder)
    # the pair file
    pair_file = os.path.join(dataset_root, 'Cameras/pair.txt')
    # for the final point cloud
    vertexs = []
    vertex_colors = []
    pair_data = read_pair_file(pair_file)
    nviews = len(pair_data)  # NOTE: currently unused
    # for each reference view and the corresponding source views
    for ref_view, src_views in pair_data:
        # load the camera parameters
        ref_intrinsics, ref_extrinsics = read_camera_parameters(
            os.path.join(dataset_root, 'Cameras/{:0>8}_cam.txt'.format(ref_view)))
        # load the reference image
        ref_img = read_img(os.path.join(dataset_root, "Rectified", scan,
                                        'rect_{:03d}_3_r5000.png'.format(ref_view + 1)))  # Image start from 1.
        # load the estimated depth of the reference view
        ref_depth_est, scale = read_pfm(os.path.join(out_folder, 'depth_est/{:0>8}.pfm'.format(ref_view)))
        # load the photometric mask of the reference view
        confidence, scale = read_pfm(os.path.join(out_folder, 'confidence/{:0>8}.pfm'.format(ref_view)))
        photo_mask = confidence > 0.9
        all_srcview_depth_ests = []
        all_srcview_x = []
        all_srcview_y = []
        all_srcview_geomask = []
        # compute the geometric mask: count source views consistent with each pixel
        geo_mask_sum = 0
        for src_view in src_views:
            # camera parameters of the source view
            src_intrinsics, src_extrinsics = read_camera_parameters(
                os.path.join(dataset_root, 'Cameras/{:0>8}_cam.txt'.format(src_view)))
            # the estimated depth of the source view
            src_depth_est, scale = read_pfm(os.path.join(out_folder, 'depth_est/{:0>8}.pfm'.format(src_view)))
            geo_mask, depth_reprojected, x2d_src, y2d_src = check_geometric_consistency(ref_depth_est, ref_intrinsics,
                                                                                       ref_extrinsics,
                                                                                       src_depth_est,
                                                                                       src_intrinsics, src_extrinsics)
            geo_mask_sum += geo_mask.astype(np.int32)
            all_srcview_depth_ests.append(depth_reprojected)
            all_srcview_x.append(x2d_src)
            all_srcview_y.append(y2d_src)
            all_srcview_geomask.append(geo_mask)
        # average consistent depths (reprojected depths are 0 where inconsistent)
        depth_est_averaged = (sum(all_srcview_depth_ests) + ref_depth_est) / (geo_mask_sum + 1)
        # at least 3 source views matched
        geo_mask = geo_mask_sum >= 3
        final_mask = np.logical_and(photo_mask, geo_mask)
        os.makedirs(os.path.join(out_folder, "mask"), exist_ok=True)
        save_mask(os.path.join(out_folder, "mask/{:0>8}_photo.png".format(ref_view)), photo_mask)
        save_mask(os.path.join(out_folder, "mask/{:0>8}_geo.png".format(ref_view)), geo_mask)
        save_mask(os.path.join(out_folder, "mask/{:0>8}_final.png".format(ref_view)), final_mask)
        print("processing {}, ref-view{:0>2}, photo/geo/final-mask:{}/{}/{}".format(scan, ref_view,
                                                                                    photo_mask.mean(),
                                                                                    geo_mask.mean(), final_mask.mean()))
        height, width = depth_est_averaged.shape[:2]
        x, y = np.meshgrid(np.arange(0, width), np.arange(0, height))
        # valid_points = np.logical_and(final_mask, ~used_mask[ref_view])
        valid_points = final_mask
        print("valid_points", valid_points.mean())
        x, y, depth = x[valid_points], y[valid_points], depth_est_averaged[valid_points]
        ref_img = np.array(ref_img)
        color = ref_img[valid_points]
        # back-project valid pixels to reference-camera space, then to world space
        xyz_ref = np.matmul(np.linalg.inv(ref_intrinsics),
                            np.vstack((x, y, np.ones_like(x))) * depth)
        xyz_world = np.matmul(np.linalg.inv(ref_extrinsics),
                              np.vstack((xyz_ref, np.ones_like(x))))[:3]
        vertexs.append(xyz_world.transpose((1, 0)))
        vertex_colors.append((color).astype(np.uint8))
    # assemble the structured vertex array expected by plyfile and write it
    vertexs = np.concatenate(vertexs, axis=0)
    vertex_colors = np.concatenate(vertex_colors, axis=0)
    vertexs = np.array([tuple(v) for v in vertexs], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    vertex_colors = np.array([tuple(v) for v in vertex_colors], dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
    vertex_all = np.empty(len(vertexs), vertexs.dtype.descr + vertex_colors.dtype.descr)
    for prop in vertexs.dtype.names:
        vertex_all[prop] = vertexs[prop]
    for prop in vertex_colors.dtype.names:
        vertex_all[prop] = vertex_colors[prop]
    el = PlyElement.describe(vertex_all, 'vertex')
    print("Saving the final model to", plyfilename)
    PlyData([el], comments=['Model created by AACVP-MVSNet.']).write(plyfilename)
    print("Model saved.")
if __name__ == '__main__':
    # Inference depth maps
    save_depth()
    # Next: using the fusibile toolbox for depth map fusion and 3D reconstruction
    # (filter_depth() above is a NumPy-based fusion alternative defined in this file.)
| 16,769 | 38.833729 | 120 | py |
AACVP-MVSNet | AACVP-MVSNet-main/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/6/28 0028 11:55
# @Author : Anzhu Yu
# @Site :
# @File : utils.py
# @Software: PyCharm
import numpy as np
import torchvision.utils as vutils
import torch
import torch.nn.functional as F
# print arguments
def print_args(args):
    """Pretty-print every attribute of an argparse namespace (or any object
    with a ``__dict__``) as aligned name / value / type rows."""
    header = "################################ args ################################"
    footer = "########################################################################"
    row = "{0: <10}\t{1: <30}\t{2: <20}"
    print(header)
    for name, value in args.__dict__.items():
        print(row.format(name, str(value), str(type(value))))
    print(footer)
# torch.no_grad warpper for functions
def make_nograd_func(func):
    """Decorator: execute ``func`` inside ``torch.no_grad()`` so no autograd
    graph is recorded for anything computed in its body."""
    def wrapper(*args, **kwargs):
        with torch.no_grad():
            return func(*args, **kwargs)
    return wrapper
# convert a function into recursive style to handle nested dict/list/tuple variables
def make_recursive_func(func):
    """Lift ``func`` so it applies recursively through nested list / tuple /
    dict containers, preserving the container structure and applying ``func``
    only to the leaves."""
    def wrapper(vars):
        if isinstance(vars, dict):
            return {key: wrapper(value) for key, value in vars.items()}
        if isinstance(vars, tuple):
            return tuple(wrapper(item) for item in vars)
        if isinstance(vars, list):
            return [wrapper(item) for item in vars]
        return func(vars)
    return wrapper
@make_recursive_func
def tensor2float(vars):
    """Convert leaves of a (possibly nested) structure to Python floats:
    0-d tensors via ``.item()``; plain floats pass through unchanged."""
    if isinstance(vars, torch.Tensor):
        return vars.data.item()
    if isinstance(vars, float):
        return vars
    raise NotImplementedError("invalid input type {} for tensor2float".format(type(vars)))
@make_recursive_func
def tensor2numpy(vars):
    """Convert leaves of a (possibly nested) structure to NumPy arrays:
    tensors are detached and copied to CPU; ndarrays pass through unchanged."""
    if isinstance(vars, torch.Tensor):
        return vars.detach().cpu().numpy().copy()
    if isinstance(vars, np.ndarray):
        return vars
    raise NotImplementedError("invalid input type {} for tensor2numpy".format(type(vars)))
@make_recursive_func
def tocuda(vars):
    """Move every tensor leaf of a (possibly nested) structure onto the GPU;
    string leaves pass through untouched.

    Raises:
        NotImplementedError: for any other leaf type.
    """
    if isinstance(vars, torch.Tensor):
        return vars.cuda()
    elif isinstance(vars, str):
        return vars
    else:
        # Fixed copy-pasted error message (previously said "tensor2numpy").
        raise NotImplementedError("invalid input type {} for tocuda".format(type(vars)))
def save_scalars(logger, mode, scalar_dict, global_step):
    """Log a dict of scalar metrics to a TensorBoard writer under
    ``mode/key``; list/tuple values are logged element-wise as
    ``mode/key_idx``."""
    scalar_dict = tensor2float(scalar_dict)
    for key, value in scalar_dict.items():
        if isinstance(value, (list, tuple)):
            for idx in range(len(value)):
                tag = '{}/{}_{}'.format(mode, key, idx)
                logger.add_scalar(tag, value[idx], global_step)
        else:
            tag = '{}/{}'.format(mode, key)
            logger.add_scalar(tag, value, global_step)
def save_images(logger, mode, images_dict, global_step):
    """Log a dict of image batches to a TensorBoard writer under ``mode/key``.
    List/tuple values are logged element-wise as ``mode/key_idx``; only the
    first image of each batch is written.
    """
    images_dict = tensor2numpy(images_dict)
    def preprocess(name, img):
        # Accept [B, H, W] or [B, C, H, W]; anything else is an error.
        if not (len(img.shape) == 3 or len(img.shape) == 4):
            raise NotImplementedError("invalid img shape {}:{} in save_images".format(name, img.shape))
        if len(img.shape) == 3:
            img = img[:, np.newaxis, :, :]
        # Keep only the first sample of the batch to limit log size.
        img = torch.from_numpy(img[:1])
        return vutils.make_grid(img, padding=0, nrow=1, normalize=True, scale_each=True)
    for key, value in images_dict.items():
        if not isinstance(value, (list, tuple)):
            name = '{}/{}'.format(mode, key)
            logger.add_image(name, preprocess(name, value), global_step)
        else:
            for idx in range(len(value)):
                name = '{}/{}_{}'.format(mode, key, idx)
                logger.add_image(name, preprocess(name, value[idx]), global_step)
class DictAverageMeter(object):
    """Accumulate per-key float metrics across updates and report their mean.

    Every :meth:`update` call counts as one sample; :meth:`mean` divides each
    accumulated sum by the number of updates seen so far.
    """

    def __init__(self):
        # key -> running sum of all values seen for that key
        self.data = {}
        # number of update() calls
        self.count = 0

    def update(self, new_input):
        """Add one dict of float metrics.

        Raises:
            NotImplementedError: if any value is not a plain float.
        """
        self.count += 1
        # Single accumulation path replaces the original's duplicated
        # first-update / later-update branches; .get() also makes keys that
        # first appear in a later update safe (previously a KeyError).
        for k, v in new_input.items():
            if not isinstance(v, float):
                raise NotImplementedError("invalid data {}: {}".format(k, type(v)))
            self.data[k] = self.data.get(k, 0) + v

    def mean(self):
        """Return {key: accumulated_sum / update_count}."""
        return {k: v / self.count for k, v in self.data.items()}
# a wrapper to compute metrics for each image individually
def compute_metrics_for_each_image(metric_func):
    """Decorator: apply a per-image metric over a batch and return the mean
    of the per-image results as a 0-d tensor.

    The wrapped function receives (depth_est, depth_gt, mask, *args) with a
    leading batch dimension and dispatches one sample at a time."""
    def wrapper(depth_est, depth_gt, mask, *args):
        per_image = [
            metric_func(depth_est[idx], depth_gt[idx], mask[idx], *args)
            for idx in range(depth_gt.shape[0])
        ]
        return torch.stack(per_image).mean()
    return wrapper
@make_nograd_func
@compute_metrics_for_each_image
def Thres_metrics(depth_est, depth_gt, mask, thres):
    """Fraction of valid (masked) pixels whose absolute depth error exceeds
    ``thres``."""
    assert isinstance(thres, (int, float))
    valid_est = depth_est[mask]
    valid_gt = depth_gt[mask]
    abs_err = torch.abs(valid_est - valid_gt)
    return torch.mean((abs_err > thres).float())
# NOTE: please do not use this to build up training loss
@make_nograd_func
@compute_metrics_for_each_image
def AbsDepthError_metrics(depth_est, depth_gt, mask):
    """Mean absolute depth error over the valid (masked) pixels."""
    valid_est = depth_est[mask]
    valid_gt = depth_gt[mask]
    return (valid_est - valid_gt).abs().mean()
AACVP-MVSNet | AACVP-MVSNet-main/models/Module.py | # -*- coding: utf-8 -*-
# @Time : 2020/6/18 0018 20:57
# @Author : Anzhu Yu
# @Site :
# @File : module.py
# @Software: PyCharm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def homo_warping(src_feature, ref_in, src_in, ref_ex, src_ex, depth_hypos):
    """Warp a source-view feature map into the reference view for every
    depth hypothesis (differentiable plane-sweep warping).

    Args:
        src_feature: source features, [B, C, H, W].
        ref_in, src_in: 3x3 intrinsics, [B, 3, 3].
        ref_ex, src_ex: 4x4 extrinsics, [B, 4, 4].
        depth_hypos: depth hypotheses, [B, Ndepth].

    Returns:
        Warped features, [B, C, Ndepth, H, W].

    NOTE(review): the explicit .cuda() below assumes a GPU is available.
    """
    # Apply homography warpping on one src feature map from src to ref view.
    batch, channels = src_feature.shape[0], src_feature.shape[1]
    num_depth = depth_hypos.shape[1]
    height, width = src_feature.shape[2], src_feature.shape[3]
    # The sampling grid itself needs no gradients; only grid_sample is differentiable.
    with torch.no_grad():
        src_proj = torch.matmul(src_in, src_ex[:, 0:3, :])
        ref_proj = torch.matmul(ref_in, ref_ex[:, 0:3, :])
        # pad 3x4 projections to homogeneous 4x4 matrices
        last = torch.tensor([[[0, 0, 0, 1.0]]]).repeat(len(src_in), 1, 1).cuda()
        src_proj = torch.cat((src_proj, last), 1)
        ref_proj = torch.cat((ref_proj, last), 1)
        # relative transform: reference camera -> source camera
        proj = torch.matmul(src_proj, torch.inverse(ref_proj))
        rot = proj[:, :3, :3] # [B,3,3]
        trans = proj[:, :3, 3:4] # [B,3,1]
        y, x = torch.meshgrid([torch.arange(0, height, dtype=torch.float32, device=src_feature.device),
                               torch.arange(0, width, dtype=torch.float32, device=src_feature.device)])
        y, x = y.contiguous(), x.contiguous()
        y, x = y.view(height * width), x.view(height * width)
        xyz = torch.stack((x, y, torch.ones_like(x))) # [3, H*W]
        xyz = torch.unsqueeze(xyz, 0).repeat(batch, 1, 1) # [B, 3, H*W]
        rot_xyz = torch.matmul(rot, xyz) # [B, 3, H*W]
        rot_depth_xyz = rot_xyz.unsqueeze(2).repeat(1, 1, num_depth, 1) * depth_hypos.view(batch, 1, num_depth,
                                                                                           1) # [B, 3, Ndepth, H*W]
        proj_xyz = rot_depth_xyz + trans.view(batch, 3, 1, 1) # [B, 3, Ndepth, H*W]
        proj_xy = proj_xyz[:, :2, :, :] / proj_xyz[:, 2:3, :, :] # [B, 2, Ndepth, H*W]
        # normalize pixel coordinates to [-1, 1] for grid_sample
        proj_x_normalized = proj_xy[:, 0, :, :] / ((width - 1) / 2) - 1
        proj_y_normalized = proj_xy[:, 1, :, :] / ((height - 1) / 2) - 1
        proj_xy = torch.stack((proj_x_normalized, proj_y_normalized), dim=3) # [B, Ndepth, H*W, 2]
        grid = proj_xy
    warped_src_fea = F.grid_sample(src_feature, grid.view(batch, num_depth * height, width, 2), mode='bilinear',
                                   padding_mode='zeros')
    warped_src_fea = warped_src_fea.view(batch, channels, num_depth, height, width)
    return warped_src_fea
def depth_regression(p, depth_values):
    """Soft-argmin depth regression: expected depth under the probability
    volume.

    Args:
        p: probability volume, [B, D, H, W] (assumed normalized over D).
        depth_values: discrete depth hypotheses, [B, D].

    Returns:
        Expected depth map, [B, H, W].
    """
    hypotheses = depth_values.view(*depth_values.shape, 1, 1)  # [B, D, 1, 1]
    return torch.sum(p * hypotheses, 1)
# Self-attention layer
class AttentionConv(nn.Module):
    """Local self-attention layer over k x k neighborhoods, used in place of
    a standard convolution.

    Queries/keys/values come from 1x1 convolutions; attention is computed
    within each kernel_size window (extracted via unfold), split into
    ``groups`` heads, with learned relative positional terms rel_h / rel_w
    added to the keys. Output keeps the input spatial size (given the
    default stride=1, padding=1, kernel_size=3) and passes through
    LeakyReLU(0.1) to match the other conv layers.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=1, groups=1, bias=False):
        super(AttentionConv, self).__init__()
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.groups = groups
        # make sure that out_channels = 0 (mod groups)
        assert self.out_channels % self.groups == 0, "ERROR INPUT,CHECK AGAIN!"
        # relative positional embeddings: half the channels encode the row
        # offset, the other half the column offset within the window
        self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1, kernel_size, 1), requires_grad=True)
        self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1, kernel_size), requires_grad=True)
        self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.reset_parameters()
    def forward(self, x):
        batch, channels, height, width = x.size()
        padded_x = F.pad(x, [self.padding, self.padding, self.padding, self.padding])
        # Learned transformation.
        q_out = self.query_conv(x)
        k_out = self.key_conv(padded_x)
        v_out = self.value_conv(padded_x)
        # unfold extracts every kernel_size x kernel_size window
        k_out = k_out.unfold(2, self.kernel_size, self.stride).unfold(3, self.kernel_size, self.stride)
        v_out = v_out.unfold(2, self.kernel_size, self.stride).unfold(3, self.kernel_size, self.stride)
        # add relative positional terms to the keys (channel-split halves)
        k_out_h, k_out_w = k_out.split(self.out_channels // 2, dim=1)
        k_out = torch.cat((k_out_h + self.rel_h, k_out_w + self.rel_w), dim=1)
        # reshape to (batch, heads, channels-per-head, H, W, window)
        k_out = k_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        v_out = v_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        q_out = q_out.view(batch, self.groups, self.out_channels // self.groups, height, width, 1)
        # attention weights over the window positions, then weighted value sum
        out = q_out * k_out
        out = F.softmax(out, dim=-1)
        out = torch.einsum('bnchwk,bnchwk -> bnchw', out, v_out).view(batch, -1, height, width)
        # Activation here. The same with all the other conv layers.
        return nn.LeakyReLU(0.1)(out)
    def reset_parameters(self):
        # Kaiming init for the 1x1 projections, unit normal for positional terms.
        init.kaiming_normal_(self.key_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.value_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.query_conv.weight, mode='fan_out', nonlinearity='relu')
        init.normal_(self.rel_h, 0, 1)
        init.normal_(self.rel_w, 0, 1)
## General convolution
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Build a Conv2d (with bias) followed by LeakyReLU(0.1) as a Sequential."""
    conv_layer = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                           padding=padding, dilation=dilation, bias=True)
    activation = nn.LeakyReLU(0.1)
    return nn.Sequential(conv_layer, activation)
# Feature Extraction
class FeaturePyramid(nn.Module):
    """Shared-weight feature extractor applied at several image scales.

    The same 9-layer tower (conv stack + attention conv) is run on the input
    image and on successively half-resolution copies, producing one
    16-channel feature map per scale, finest resolution first.
    """

    def __init__(self, num_heads=1):
        super(FeaturePyramid, self).__init__()
        self.conv0aa = conv(3, 64, kernel_size=3, stride=1)
        self.conv0ba = conv(64, 64, kernel_size=3, stride=1)
        self.conv0bb = conv(64, 64, kernel_size=3, stride=1)
        self.conv0bc = conv(64, 32, kernel_size=3, stride=1)
        self.conv0bd = conv(32, 32, kernel_size=3, stride=1)
        self.conv0be = conv(32, 32, kernel_size=3, stride=1)
        self.conv0bf = conv(32, 16, kernel_size=3, stride=1)
        self.conv0bg = conv(16, 16, kernel_size=3, stride=1)
        self.conv0bh = AttentionConv(16, 16, kernel_size=3, stride=1, groups=num_heads)

    def _extract(self, img):
        # One pass through the full tower; the original repeated this exact
        # 9-call chain verbatim for every scale. Weights are shared.
        f = self.conv0aa(img)
        f = self.conv0ba(f)
        f = self.conv0bb(f)
        f = self.conv0bc(f)
        f = self.conv0bd(f)
        f = self.conv0be(f)
        f = self.conv0bf(f)
        f = self.conv0bg(f)
        return self.conv0bh(f)

    def forward(self, img, scales=5):
        """Return a list of ``scales`` feature maps, finest first."""
        fp = [self._extract(img)]
        for _ in range(scales - 1):
            # Halve the resolution; detach() keeps the resize itself out of
            # autograd, matching the original implementation.
            img = nn.functional.interpolate(img, scale_factor=0.5, mode='bilinear', align_corners=None).detach()
            fp.append(self._extract(img))
        return fp
def conditionIntrinsics(intrinsics, img_shape, fp_shapes):
    """Scale camera intrinsics to each level of a feature pyramid.

    Args:
        intrinsics: [B, 3, 3] intrinsics for the full-resolution image.
        img_shape: shape of the input image batch; only the height
            (``img_shape[2]``) is used to derive the downsample ratio.
        fp_shapes: iterable of per-level feature-map shapes.

    Returns:
        [B, nScale, 3, 3] tensor: at each level, the first two rows of the
        intrinsics (focal lengths and principal point) are divided by that
        level's downsample ratio.
    """
    scaled = []
    for fp_shape in fp_shapes:
        ratio = img_shape[2] / fp_shape[2]
        level_K = intrinsics.clone()
        # Only the first two rows depend on image resolution.
        level_K[:, :2, :] = level_K[:, :2, :] / ratio
        scaled.append(level_K)
    return torch.stack(scaled).permute(1, 0, 2, 3)  # [B, nScale, 3, 3]
def calInitDepthInterval(ref_in, src_in, ref_ex, src_ex, pixel_interval):
    # All camera arguments are currently ignored; this is a stub returning a
    # constant tuned for the DTU dataset's 4-1 interval setting.
    return 165  # The mean depth interval calculated on 4-1 interval setting...
def calSweepingDepthHypo(ref_in, src_in, ref_ex, src_ex, depth_min, depth_max, nhypothesis_init=48):
    """Build the initial plane-sweep depth hypotheses for the coarsest level.

    Depth values are sampled uniformly in [depth_min[0], depth_max[0]].
    As in the original implementation, the depth range is assumed to be
    identical for every sample in the batch (only index 0 is consulted),
    so one hypothesis row is computed and replicated batch-wise.

    Args:
        ref_in, src_in, ref_ex, src_ex: camera parameters; only ref_in is
            used here (for the batch size), the rest are kept for interface
            compatibility.
        depth_min, depth_max: per-sample depth bounds (index 0 used).
        nhypothesis_init: number of depth hypotheses; must be even.
    Returns:
        [B, nhypothesis_init] CUDA float tensor of depth hypotheses.
    """
    batchSize = ref_in.shape[0]
    # Make sure the number of depth hypothesis has a factor of 2
    assert nhypothesis_init % 2 == 0
    # torch.linspace replaces the deprecated torch.range(start, end, step):
    # with step = range / (n - 1) both produce n evenly spaced values
    # including both endpoints, but linspace guarantees exactly n values
    # regardless of floating-point rounding.
    hypos = torch.linspace(float(depth_min[0]), float(depth_max[0]), nhypothesis_init)
    # Replicate the single row for the whole batch (the original loop
    # recomputed the identical row per sample and discarded the per-sample
    # depth range it computed).
    depth_hypos = hypos.unsqueeze(0).repeat(batchSize, 1)
    return depth_hypos.cuda()
def calDepthHypo(netArgs, ref_depths, ref_intrinsics, src_intrinsics, ref_extrinsics, src_extrinsics, depth_min,
                 depth_max, level):
    """Compute per-pixel depth hypothesis maps for a refinement level.

    Around each pixel's current depth estimate, 2*d hypotheses are placed
    at multiples of a depth interval.  In training mode the interval is a
    hard-coded DTU constant; otherwise it is estimated geometrically as the
    depth change that moves the source-view reprojection by
    `pixel_interval` pixels along the epipolar line.

    Args:
        netArgs: settings object; only `.mode` is read here.
        ref_depths: [B, H, W] current depth estimates for the reference view.
        ref_intrinsics / src_intrinsics: reference / source intrinsics.
        ref_extrinsics / src_extrinsics: reference / source extrinsics.
        depth_min, depth_max, level: unused in this implementation; kept
            for interface compatibility.
    Returns:
        [B, 2*d, H, W] float tensor of depth hypotheses (CUDA).
    """
    ## Calculate depth hypothesis maps for refine steps
    # These two parameters determining the depth searching range and interval at finer level.
    # For experiments on other datasets, the pixel_interval could be modified accordingly to get better results.
    d = 4
    pixel_interval = 1
    nBatch = ref_depths.shape[0]
    height = ref_depths.shape[1]
    width = ref_depths.shape[2]
    # Hard code the interval for training on DTU with 1 level of refinement.
    # This depth interval is estimated by J.Yang for training boosting.
    # Uncomment this part if other dataset is used.
    if netArgs.mode == "train":
        depth_interval = torch.tensor(
            [6.8085] * nBatch).cuda()
        # Start from the current estimate and offset each hypothesis slice.
        depth_hypos = ref_depths.unsqueeze(1).repeat(1, d * 2, 1, 1)
        # print(depth_interval[0])
        for depth_level in range(-d, d):
            depth_hypos[:, depth_level + d, :, :] += (depth_level) * depth_interval[0]
        return depth_hypos
    # Test / validation path: derive the interval geometrically.
    # All math is done in float64 for numerical stability of the inversions.
    with torch.no_grad():
        ref_depths = ref_depths
        ref_intrinsics = ref_intrinsics.double()
        src_intrinsics = src_intrinsics.squeeze(1).double()
        ref_extrinsics = ref_extrinsics.double()
        src_extrinsics = src_extrinsics.squeeze(1).double()
        # NOTE(review): this empty list is immediately overwritten inside
        # the loop below; it is effectively dead.
        interval_maps = []
        depth_hypos = ref_depths.unsqueeze(1).repeat(1, d * 2, 1, 1)
        for batch in range(nBatch):
            # Homogeneous pixel grid of the reference view.
            xx, yy = torch.meshgrid([torch.arange(0, width).cuda(), torch.arange(0, height).cuda()])
            xxx = xx.reshape([-1]).double()
            yyy = yy.reshape([-1]).double()
            X = torch.stack([xxx, yyy, torch.ones_like(xxx)], dim=0)
            D1 = torch.transpose(ref_depths[batch, :, :], 0, 1).reshape(
                [-1])  # Transpose before reshape to produce identical results to numpy and matlab version.
            # Two depth samples per pixel: current depth and current depth + 1.
            D2 = D1 + 1
            X1 = X * D1
            X2 = X * D2
            # Back-project both samples to camera rays, then to world space,
            # then into the first source view.
            ray1 = torch.matmul(torch.inverse(ref_intrinsics[batch]), X1)
            ray2 = torch.matmul(torch.inverse(ref_intrinsics[batch]), X2)
            X1 = torch.cat([ray1, torch.ones_like(xxx).unsqueeze(0).double()], dim=0)
            X1 = torch.matmul(torch.inverse(ref_extrinsics[batch]), X1)
            X2 = torch.cat([ray2, torch.ones_like(xxx).unsqueeze(0).double()], dim=0)
            X2 = torch.matmul(torch.inverse(ref_extrinsics[batch]), X2)
            X1 = torch.matmul(src_extrinsics[batch][0], X1)
            X2 = torch.matmul(src_extrinsics[batch][0], X2)
            # Project to source-image pixel coordinates (divide by depth).
            X1 = X1[:3]
            X1 = torch.matmul(src_intrinsics[batch][0], X1)
            X1_d = X1[2].clone()
            X1 /= X1_d
            X2 = X2[:3]
            X2 = torch.matmul(src_intrinsics[batch][0], X2)
            X2_d = X2[2].clone()
            X2 /= X2_d
            # Direction of the epipolar line through the two projections.
            k = (X2[1] - X1[1]) / (X2[0] - X1[0])
            b = X1[1] - k * X1[0]
            theta = torch.atan(k)
            # X3: a point `pixel_interval` pixels along the epipolar line.
            X3 = X1 + torch.stack(
                [torch.cos(theta) * pixel_interval, torch.sin(theta) * pixel_interval, torch.zeros_like(X1[2, :])],
                dim=0)
            # Solve for the depth change delta_d that maps X1 onto X3.
            A = torch.matmul(ref_intrinsics[batch], ref_extrinsics[batch][:3, :3])
            tmp = torch.matmul(src_intrinsics[batch][0], src_extrinsics[batch][0, :3, :3])
            A = torch.matmul(A, torch.inverse(tmp))
            tmp1 = X1_d * torch.matmul(A, X1)
            tmp2 = torch.matmul(A, X3)
            M1 = torch.cat([X.t().unsqueeze(2), tmp2.t().unsqueeze(2)], axis=2)[:, 1:, :]
            M2 = tmp1.t()[:, 1:]
            ans = torch.matmul(torch.inverse(M1), M2.unsqueeze(2))
            delta_d = ans[:, 0, 0]
            # The mean absolute interval is broadcast to the whole image.
            interval_maps = torch.abs(delta_d).mean().repeat(ref_depths.shape[2], ref_depths.shape[1]).t()
            for depth_level in range(-d, d):
                depth_hypos[batch, depth_level + d, :, :] += depth_level * interval_maps
    return depth_hypos.float()  # Return the depth hypothesis map from statistical interval setting.
def depth_regression_refine(prob_volume, depth_hypothesis):
    """Soft-argmin depth regression.

    Returns the per-pixel expectation of the depth hypotheses under the
    probability volume; both inputs are [B, D, H, W] and the result is
    [B, H, W].
    """
    weighted = prob_volume * depth_hypothesis
    return weighted.sum(dim=1)
def proj_cost_AACVP(Group, settings, ref_feature, src_feature, level, ref_in, src_in, ref_ex, src_ex, depth_hypos):
    """Build the group-wise correlation cost volume at a refinement level.

    For every source view, the source feature map is homography-warped
    onto each per-pixel depth hypothesis of the reference view; the warped
    volumes are summed, correlated with the reference volume group-wise,
    and averaged over source views.

    Args:
        Group: number of channel groups for group-wise correlation.
        settings: options object; `.nsrc` and `.mode` are read.
        ref_feature: [B, C, H, W] reference feature map at this level.
        src_feature: list over views of per-level feature maps.
        level: pyramid level index used to pick the source feature map.
        ref_in, src_in, ref_ex, src_ex: camera in-/extrinsics.
        depth_hypos: [B, D, H, W] per-pixel depth hypotheses.
    Returns:
        [B, Group, D, H, W] cost volume.
    """
    ## Calculate the cost volume for refined depth hypothesis selection
    # AACVP Version.
    batch, channels = ref_feature.shape[0], ref_feature.shape[1]
    num_depth = depth_hypos.shape[1]
    height, width = ref_feature.shape[2], ref_feature.shape[3]
    B, C, H, W = ref_feature.shape
    # Reference volume replicated along the depth axis, split into groups.
    volume_sum = ref_feature.unsqueeze(2).repeat(1, 1, num_depth, 1, 1)
    ref_volume = volume_sum
    ref_volume = ref_volume.view(B, Group, C // Group, *ref_volume.shape[-3:])
    volume_sum = 0
    for src in range(settings.nsrc):
        # Warping grid computation needs no gradients.
        with torch.no_grad():
            # Relative projection from reference to this source view.
            src_proj = torch.matmul(src_in[:, src, :, :], src_ex[:, src, 0:3, :])
            ref_proj = torch.matmul(ref_in, ref_ex[:, 0:3, :])
            # NOTE(review): `.cuda()` hard-codes GPU placement here.
            last = torch.tensor([[[0, 0, 0, 1.0]]]).repeat(len(src_in), 1, 1).cuda()
            src_proj = torch.cat((src_proj, last), 1)
            ref_proj = torch.cat((ref_proj, last), 1)
            proj = torch.matmul(src_proj, torch.inverse(ref_proj))
            rot = proj[:, :3, :3]
            trans = proj[:, :3, 3:4]
            # Homogeneous pixel grid of the reference view.
            y, x = torch.meshgrid([torch.arange(0, height, dtype=torch.float32, device=ref_feature.device),
                                   torch.arange(0, width, dtype=torch.float32, device=ref_feature.device)])
            y, x = y.contiguous(), x.contiguous()
            y, x = y.view(height * width), x.view(height * width)
            xyz = torch.stack((x, y, torch.ones_like(x)))
            xyz = torch.unsqueeze(xyz, 0).repeat(batch, 1, 1)
            # Rotate, scale by each depth hypothesis, translate, project.
            rot_xyz = torch.matmul(rot, xyz)
            rot_depth_xyz = rot_xyz.unsqueeze(2).repeat(1, 1, num_depth, 1) * depth_hypos.view(batch, 1, num_depth,
                                                                                              height * width)  # [B, 3, Ndepth, H*W]
            proj_xyz = rot_depth_xyz + trans.view(batch, 3, 1, 1)
            proj_xy = proj_xyz[:, :2, :, :] / proj_xyz[:, 2:3, :, :]
            # Normalize to [-1, 1] for grid_sample.
            proj_x_normalized = proj_xy[:, 0, :, :] / ((width - 1) / 2) - 1
            proj_y_normalized = proj_xy[:, 1, :, :] / ((height - 1) / 2) - 1
            proj_xy = torch.stack((proj_x_normalized, proj_y_normalized), dim=3)
            grid = proj_xy
        warped_src_fea = F.grid_sample(src_feature[src][level], grid.view(batch, num_depth * height, width, 2),
                                       mode='bilinear',
                                       padding_mode='zeros')
        warped_src_fea = warped_src_fea.view(batch, channels, num_depth, height, width)
        warped_src_fea = warped_src_fea.to(ref_volume.dtype)
        warped_src_fea = warped_src_fea.view(*ref_volume.shape)
        # Out-of-place add keeps the autograd graph intact during training.
        if settings.mode == 'training':
            volume_sum = volume_sum + warped_src_fea  # (B, Group, C//Group, D, h, w)
        else:
            volume_sum += warped_src_fea
        del warped_src_fea
    # Group-wise correlation, averaged over channels-per-group and views.
    volume_variance = (volume_sum * ref_volume).mean(2).div_(settings.nsrc)  # (B, Group, D, h, w)
    del volume_sum, ref_volume
    return volume_variance
| 16,022 | 44.649573 | 133 | py |
AACVP-MVSNet | AACVP-MVSNet-main/models/AACVPMVSNet.py | # -*- coding: utf-8 -*-
# @Time : 2020/6/18 0018 20:57
# @Author : Anzhu Yu
# @Site :
# @File : AACVPMVSNet.py
# @Software: PyCharm
import torch
import torch.nn as nn
import torch.nn.functional as F
from .Module import *
class ConvBnReLU3D(nn.Module):
    """3-D convolution followed by batch normalization and an in-place ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
        super(ConvBnReLU3D, self).__init__()
        # Bias is disabled because BatchNorm3d provides the affine shift.
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm3d(out_channels)

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return F.relu(out, inplace=True)
class CostRegNetAACVP(nn.Module):
    """Cost-volume regularization network (small 3-D encoder/decoder).

    Encodes the grouped cost volume with 3-D convolutions (one stride-2
    downsampling step), decodes with transposed convolutions, and adds
    skip connections at each resolution before producing one score per
    depth hypothesis.
    """

    def __init__(self, in_channels):
        super(CostRegNetAACVP, self).__init__()
        # 16->in_channels; attribute names kept for checkpoint compatibility.
        # Encoder.
        self.conv0 = ConvBnReLU3D(in_channels, 16, kernel_size=3, padding=1)
        self.conv0a = ConvBnReLU3D(16, 16, kernel_size=3, padding=1)
        self.conv1 = ConvBnReLU3D(16, 32, stride=2, kernel_size=3, padding=1)
        self.conv2 = ConvBnReLU3D(32, 32, kernel_size=3, padding=1)
        self.conv2a = ConvBnReLU3D(32, 32, kernel_size=3, padding=1)
        self.conv3 = ConvBnReLU3D(32, 64, kernel_size=3, padding=1)
        self.conv4 = ConvBnReLU3D(64, 64, kernel_size=3, padding=1)
        self.conv4a = ConvBnReLU3D(64, 64, kernel_size=3, padding=1)
        # Decoder.
        self.conv5 = nn.Sequential(
            nn.ConvTranspose3d(64, 32, kernel_size=3, padding=1, output_padding=0, stride=1, bias=False),
            nn.BatchNorm3d(32),
            nn.ReLU(inplace=True))
        self.conv6 = nn.Sequential(
            nn.ConvTranspose3d(32, 16, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(16),
            nn.ReLU(inplace=True))
        self.prob0 = nn.Conv3d(16, 1, 3, stride=1, padding=1)

    def forward(self, x):
        skip_full = self.conv0a(self.conv0(x))                   # full res, 16 ch
        skip_half = self.conv2a(self.conv2(self.conv1(skip_full)))  # 1/2 res, 32 ch
        bottom = self.conv4a(self.conv4(self.conv3(skip_half)))  # 1/2 res, 64 ch
        up = skip_half + self.conv5(bottom)                      # skip at 1/2 res
        up = skip_full + self.conv6(up)                          # skip at full res
        return self.prob0(up).squeeze(1)
def sL1_loss(depth_est, depth_gt, mask):
    """Mean smooth-L1 (Huber) loss over the pixels selected by `mask`."""
    valid_est = depth_est[mask]
    valid_gt = depth_gt[mask]
    return F.smooth_l1_loss(valid_est, valid_gt, reduction='mean')
def MSE_loss(depth_est, depth_gt, mask):
    """Mean-squared-error loss over the pixels selected by `mask`.

    Args:
        depth_est: predicted depth tensor.
        depth_gt: ground-truth depth tensor of the same shape.
        mask: boolean tensor selecting valid pixels.
    Returns:
        Scalar tensor with the mean squared error over masked pixels.
    """
    # reduction='mean' replaces the deprecated size_average=True argument
    # (identical behaviour, no deprecation warning on current PyTorch).
    return F.mse_loss(depth_est[mask], depth_gt[mask], reduction='mean')
# Here is the network
class AACVPMVSNet(nn.Module):
    """AACVP-MVSNet: coarse-to-fine multi-view stereo network.

    A shared self-attention feature pyramid extracts per-view features; a
    group-wise correlation cost volume is regularized at the coarsest
    scale, and the resulting depth map is refined once per finer pyramid
    level.
    """
    def __init__(self, args, group=4, num_heads=1):
        # args: runtime settings (reads .nsrc, .nscale, .mode);
        # group: channel groups for group-wise correlation;
        # num_heads: attention heads in the feature pyramid.
        super(AACVPMVSNet, self).__init__()
        self.featurePyramid = FeaturePyramid(num_heads=num_heads)
        self.args = args
        self.Group = group
        self.cost_reg_refine = CostRegNetAACVP(in_channels=self.Group)
    def forward(self, ref_img, src_imgs, ref_in, src_in, ref_ex, src_ex, depth_min, depth_max):
        """Predict multi-scale depth maps for the reference view.

        Returns:
            dict with "depth_est_list" (depth maps, finest scale first)
            and "prob_confidence" (per-pixel confidence map).
        """
        # initialization
        depth_est_list = []
        output = {}
        # Step 1: Feature extraction. Self-attention is used here.
        ref_feature_pyramid = self.featurePyramid(ref_img, self.args.nscale)
        src_feature_pyramids = []
        for i in range(self.args.nsrc):
            src_feature_pyramids.append(self.featurePyramid(src_imgs[:, i, :, :, :], self.args.nscale))
        # in. and ex. matrices
        ref_in_multiscales = conditionIntrinsics(ref_in, ref_img.shape,
                                                 [feature.shape for feature in ref_feature_pyramid])
        src_in_multiscales = []
        for i in range(self.args.nsrc):
            src_in_multiscales.append(conditionIntrinsics(src_in[:, i], ref_img.shape,
                                                          [feature.shape for feature in src_feature_pyramids[i]]))
        src_in_multiscales = torch.stack(src_in_multiscales).permute(1, 0, 2, 3, 4)
        # Step 2: estimate the depth map at the coarsest level.
        # nhypothesis = 48 for DTU Dataset as default.
        depth_hypos = calSweepingDepthHypo(ref_in_multiscales[:, -1], src_in_multiscales[:, 0, -1], ref_ex, src_ex,
                                           depth_min, depth_max, nhypothesis_init=48)
        # Step 3: Cost Volume Pyramid calculated here.
        ref_volume = ref_feature_pyramid[-1].unsqueeze(2).repeat(1, 1, len(depth_hypos[0]), 1, 1)
        B, C, H, W = src_feature_pyramids[0][0].shape
        V = self.args.nsrc
        # Kwea3 implementation as reference
        ref_volume = ref_volume.view(B, self.Group, C // self.Group, *ref_volume.shape[-3:])
        volume_sum = 0
        warp_volumes = None
        for src_idx in range(self.args.nsrc):
            # warpped features
            warped_volume = homo_warping(src_feature_pyramids[src_idx][-1], ref_in_multiscales[:, -1],
                                         src_in_multiscales[:, src_idx, -1, :, :],
                                         ref_ex, src_ex[:, src_idx], depth_hypos)
            ## regular solution
            warped_volume = warped_volume.view(*ref_volume.shape)
            if self.args.mode == "train":
                # (B, Groups, C//Groups, D, h, w)
                # Out-of-place add keeps the autograd graph intact.
                volume_sum = volume_sum + warped_volume
            else:
                volume_sum += warped_volume
            del warped_volume
        ## Aggregate multiple feature volumes by Similarity
        ## The parameter V is a little different with that in implementation of Kwea123
        ## V = nsrc here, while V denotes the quantity of all the input images in the implementation of Kwea123.
        cost_volume = (volume_sum * ref_volume).mean(2).div_(V)
        # Step 4: Estimate the Prob.
        cost_reg = self.cost_reg_refine(cost_volume).squeeze(1)
        # Release the GPU burden.
        if self.args.mode == "test":
            del volume_sum
            del ref_volume
            del warp_volumes
        prob_volume = F.softmax(cost_reg, dim=1)
        depth = depth_regression(prob_volume, depth_values=depth_hypos)
        depth_est_list.append(depth)
        # Step 5: Estimate the residual at each level.
        for level in range(self.args.nscale - 2, -1, -1):
            # Upsample
            depth_up = nn.functional.interpolate(depth[None, :], size=None, scale_factor=2, mode='bicubic',
                                                 align_corners=None)
            depth_up = depth_up.squeeze(0)
            # Per-pixel depth hypotheses around the upsampled estimate.
            depth_hypos = calDepthHypo(self.args, depth_up, ref_in_multiscales[:, level, :, :],
                                       src_in_multiscales[:, :, level, :, :], ref_ex, src_ex, depth_min, depth_max,
                                       level)
            cost_volume = proj_cost_AACVP(Group=self.Group, settings=self.args, ref_feature=ref_feature_pyramid[level],
                                          src_feature=src_feature_pyramids,
                                          level=level, ref_in=ref_in_multiscales[:, level, :, :],
                                          src_in=src_in_multiscales[:, :, level, :, :], ref_ex=ref_ex,
                                          src_ex=src_ex[:, :], depth_hypos=depth_hypos)
            cost_reg2 = self.cost_reg_refine(cost_volume).squeeze(1)
            if self.args.mode == "test":
                del cost_volume
            prob_volume = F.softmax(cost_reg2, dim=1)
            if self.args.mode == "test":
                del cost_reg2
            # Depth regression
            depth = depth_regression_refine(prob_volume, depth_hypos)
            depth_est_list.append(depth)
        # Step 6: Get the final result.
        # Confidence: probability mass within a 4-hypothesis window around
        # the regressed depth index (no gradients needed).
        with torch.no_grad():
            num_depth = prob_volume.shape[1]
            prob_volume_sum4 = 4 * F.avg_pool3d(F.pad(prob_volume.unsqueeze(1), pad=(0, 0, 0, 0, 1, 2)), (4, 1, 1),
                                                stride=1, padding=0).squeeze(1)
            depth_index = depth_regression(prob_volume, depth_values=torch.arange(num_depth, device=prob_volume.device,
                                                                                 dtype=torch.float)).long()
            prob_confidence = torch.gather(prob_volume_sum4, 1, depth_index.unsqueeze(1)).squeeze(1)
        if self.args.mode == "test":
            del prob_volume
            del depth
        ## For T&T and BlendedMVS dataset, the masks are fused with each level at given conf. to avoid noise pixels.
        ## This part is not implemented here.
        ## Return
        depth_est_list.reverse()  # Reverse the list so that depth_est_list[0] is the largest scale.
        output["depth_est_list"] = depth_est_list
        output["prob_confidence"] = prob_confidence
        return output
| 8,747 | 45.042105 | 119 | py |
AACVP-MVSNet | AACVP-MVSNet-main/datasets/utils.py | # Data io utilities for the dataloader
# by: Jiayu Yang
# date: 2019-07-31
# Note: This file use part of the code from the following projects.
# Thanks for the authors for the great code.
# MVSNet: https://github.com/YoYo000/MVSNet
# MVSNet_pytorch: https://github.com/xy-guo/MVSNet_pytorch
import numpy as np
import re
import sys
from PIL import Image
import os, errno
# For debug:
# import matplotlib.pyplot as plt
# import pdb
def readScanList(scal_list_file, mode, logger):
    """Read the whitespace-separated list of scan names from a text file."""
    logger.info("Reading scan list...")
    with open(scal_list_file, "r") as scan_file:
        scan_list = scan_file.read().split()
    logger.info("Done, Using following scans for "+mode+":\n"+str(scan_list))
    return scan_list
def read_pfm(filename):
    """Read a PFM image file.

    Args:
        filename: path to the PFM file.
    Returns:
        (data, scale): `data` is a float ndarray of shape (H, W, 3) for a
        colour 'PF' file or (H, W) for a greyscale 'Pf' file, flipped to
        top-down row order; `scale` is the (positive) PFM scale factor.
    Raises:
        Exception: if the header is not a valid PFM header.
    """
    # The with-statement guarantees the file is closed even when one of
    # the header checks below raises (the original explicit close() was
    # skipped on every error path, leaking the handle).
    with open(filename, 'rb') as file:
        header = file.readline().decode('utf-8').rstrip()
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        scale = float(file.readline().rstrip())
        if scale < 0:  # a negative scale marks little-endian data
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian
        data = np.fromfile(file, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    # PFM stores rows bottom-up; flip to conventional top-down order.
    data = np.flipud(data)
    return data, scale
def save_pfm(filename, image, scale=1):
    """Write `image` to `filename` in PFM format.

    Args:
        filename: destination path; parent directories are created on demand.
        image: float32 ndarray of shape H x W x 3 (colour), H x W x 1 or
            H x W (greyscale).
        scale: PFM scale factor; written negative for little-endian data.
    Raises:
        Exception: if dtype is not float32 or the shape is unsupported.
    """
    dirname = os.path.dirname(filename)
    # Guarding on a non-empty dirname fixes the original crash for bare
    # filenames (os.makedirs('') raises); exist_ok=True replaces the
    # manual errno.EEXIST race guard.
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    # Validate BEFORE opening so a bad input no longer leaves behind an
    # empty/truncated file (the original opened first, then raised).
    image = np.flipud(image)  # PFM stores rows bottom-up
    if image.dtype.name != 'float32':
        raise Exception('Image dtype must be float32.')
    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
    endian = image.dtype.byteorder
    if endian == '<' or endian == '=' and sys.byteorder == 'little':
        scale = -scale
    # with-statement closes the file even if tofile() fails mid-write.
    with open(filename, "wb") as file:
        file.write('PF\n'.encode('utf-8') if color else 'Pf\n'.encode('utf-8'))
        file.write('{} {}\n'.format(image.shape[1], image.shape[0]).encode('utf-8'))
        file.write(('%f\n' % scale).encode('utf-8'))
        image.tofile(file)
def read_cam_file(filename):
    """Parse an MVSNet-style camera parameter file.

    File layout: lines 1-4 hold the 4x4 extrinsic matrix, lines 7-9 the
    3x3 intrinsic matrix, and line 11 holds "depth_min depth_interval".

    Returns:
        (intrinsics, extrinsics, depth_min, depth_max): float32 3x3 and
        4x4 arrays plus depth bounds, with depth_max derived as
        depth_min + 256 * depth_interval.
    """
    with open(filename) as f:
        lines = f.readlines()
        lines = [line.rstrip() for line in lines]
    # extrinsics: line [1,5), 4x4 matrix
    # str.split()+np.array replaces the deprecated text-mode np.fromstring.
    extrinsics = np.array(' '.join(lines[1:5]).split(), dtype=np.float32).reshape((4, 4))
    # intrinsics: line [7-10), 3x3 matrix
    intrinsics = np.array(' '.join(lines[7:10]).split(), dtype=np.float32).reshape((3, 3))
    # depth_min & depth_interval: line 11
    depth_min = float(lines[11].split()[0])
    depth_interval = float(lines[11].split()[1])
    depth_max = depth_min+(256*depth_interval)
    return intrinsics, extrinsics, depth_min, depth_max
def write_cam(filename, intrinsic, extrinsic, depth_min, depth_max):
    """Write camera parameters in the MVSNet text layout.

    The output is "extrinsic" followed by the 4x4 matrix, a blank line,
    "intrinsic" followed by the 3x3 matrix, then "depth_min depth_max".
    """
    with open(filename, 'w') as f:
        f.write('extrinsic\n')
        for row in range(4):
            # Each matrix row ends with a trailing space, matching the
            # established file format exactly.
            f.write(''.join(str(extrinsic[row, col]) + ' ' for col in range(4)) + '\n')
        f.write('\nintrinsic\n')
        for row in range(3):
            f.write(''.join(str(intrinsic[row, col]) + ' ' for col in range(3)) + '\n')
        f.write('\n%f %f\n' % (depth_min, depth_max))
def read_img(filename):
    """Load an RGB image as float32 scaled to [0, 1].

    1200-pixel-tall DTU frames are cropped to 1184 x 1600 so that every
    feature-pyramid level divides evenly.
    """
    loaded = Image.open(filename)
    # scale 0~255 to 0~1
    img = np.asarray(loaded, dtype=np.float32) / 255.
    # for CVP
    if img.shape[0] == 1200:
        ## normal & group with new costregnet
        img = img[:1184, :1600, :]
        # group
        # img = img[:1152,:1536,:]
    return img
def write_img(filename, image):
    """Save `image` (a PIL image) to `filename`, creating parent
    directories on demand.

    Args:
        filename: destination path.
        image: object with a `.save(path)` method (e.g. PIL.Image.Image).
    Returns:
        1 on success (kept for backward compatibility with callers).
    """
    dirname = os.path.dirname(filename)
    if dirname:
        # exist_ok=True replaces the manual errno.EEXIST race guard.
        os.makedirs(dirname, exist_ok=True)
    image.save(filename)
    return 1
def read_depth(filename):
    """Load a PFM depth map and return it as a float32 ndarray."""
    # read pfm depth file; read_pfm returns (data, scale).
    depth, _scale = read_pfm(filename)
    return np.array(depth, dtype=np.float32)
| 4,744 | 28.47205 | 96 | py |
AACVP-MVSNet | AACVP-MVSNet-main/datasets/dtu_loader.py | # Dataloader for the DTU dataset in Yaoyao's format.
# by: Jiayu Yang
# date: 2020-01-28
# Note: This file use part of the code from the following projects.
# Thanks for the authors for the great code.
# MVSNet: https://github.com/YoYo000/MVSNet
# MVSNet_pytorch: https://github.com/xy-guo/MVSNet_pytorch
from .utils import *
from .dataPaths import *
from torch.utils.data import Dataset
import numpy as np
from PIL import Image
class MVSDataset(Dataset):
    """Dataset for pre-processed DTU MVS data (Yaoyao's format).

    Each item bundles a reference image, its source views, camera
    parameters and (in training mode) multi-scale ground-truth depths.
    """
    def __init__(self, args, logger=None):
        # Initializing the dataloader
        super(MVSDataset, self).__init__()
        # Parse input
        self.args = args
        self.data_root = self.args.dataset_root
        self.scan_list_file = getScanListFile(self.data_root,self.args.mode)
        self.pair_list_file = getPairListFile(self.data_root,self.args.mode)
        self.logger = logger
        if logger is None:
            # Fall back to a stdout logger.  Fixes the original
            # "import logger" typo (which raised ImportError) and the
            # missing "sys" import in this branch.
            import logging
            import sys
            self.logger = logging.getLogger()
            self.logger.setLevel(logging.DEBUG)
            formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
            consoleHandler = logging.StreamHandler(sys.stdout)
            consoleHandler.setFormatter(formatter)
            self.logger.addHandler(consoleHandler)
            self.logger.info("File logger not configured, only writing logs to stdout.")
        self.logger.info("Initiating dataloader for our pre-processed DTU dataset.")
        self.logger.info("Using dataset:"+self.data_root+self.args.mode+"/")
        self.metas = self.build_list(self.args.mode)
        self.logger.info("Dataloader initialized.")
    def build_list(self,mode):
        """Build the (scan, ref_view, src_views, light_idx) meta list.

        Training expands every viewpoint over the 7 DTU light conditions;
        test mode uses light condition 3 only.
        """
        metas = []
        # Read scan list
        scan_list = readScanList(self.scan_list_file,self.args.mode, self.logger)
        # Read pairs list
        for scan in scan_list:
            with open(self.pair_list_file) as f:
                num_viewpoint = int(f.readline())
                # viewpoints (49)
                for view_idx in range(num_viewpoint):
                    ref_view = int(f.readline().rstrip())
                    # Pair line alternates "view_id score ..."; take the ids.
                    src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]
                    # light conditions 0-6
                    if mode == "train":
                        for light_idx in range(7):
                            metas.append((scan, ref_view, src_views, light_idx))
                    elif mode == "test":
                        metas.append((scan, ref_view, src_views, 3))
        self.logger.info("Done. metas:"+str(len(metas)))
        return metas
    def __len__(self):
        return len(self.metas)
    def __getitem__(self, idx):
        """Load one sample: images, cameras and (train only) GT depths."""
        meta = self.metas[idx]
        scan, ref_view, src_views, light_idx = meta
        assert self.args.nsrc <= len(src_views)
        self.logger.debug("Getting Item:\nscan:"+str(scan)+"\nref_view:"+str(ref_view)+"\nsrc_view:"+str(src_views)+"\nlight_idx"+str(light_idx))
        ref_img = []
        src_imgs = []
        ref_depths = []
        ref_depth_mask = []
        ref_intrinsics = []
        src_intrinsics = []
        ref_extrinsics = []
        src_extrinsics = []
        depth_min = []
        depth_max = []
        ## 1. Read images
        # ref image
        ref_img_file = getImageFile(self.data_root,self.args.mode,scan,ref_view,light_idx)
        ref_img = read_img(ref_img_file)
        # src image(s)
        for i in range(self.args.nsrc):
            src_img_file = getImageFile(self.data_root,self.args.mode,scan,src_views[i],light_idx)
            src_img = read_img(src_img_file)
            src_imgs.append(src_img)
        ## 2. Read camera parameters
        cam_file = getCameraFile(self.data_root,self.args.mode,ref_view)
        ref_intrinsics, ref_extrinsics, depth_min, depth_max = read_cam_file(cam_file)
        for i in range(self.args.nsrc):
            cam_file = getCameraFile(self.data_root,self.args.mode,src_views[i])
            intrinsics, extrinsics, depth_min_tmp, depth_max_tmp = read_cam_file(cam_file)
            src_intrinsics.append(intrinsics)
            src_extrinsics.append(extrinsics)
        ## 3. Read Depth Maps
        if self.args.mode == "train":
            imgsize = self.args.imgsize
            nscale = self.args.nscale
            # Read depth map of same size as input image first.
            depth_file = getDepthFile(self.data_root,self.args.mode,scan,ref_view)
            ref_depth = read_depth(depth_file)
            depth_frame_size = (ref_depth.shape[0],ref_depth.shape[1])
            frame = np.zeros(depth_frame_size)
            frame[:ref_depth.shape[0],:ref_depth.shape[1]] = ref_depth
            ref_depths.append(frame)
            # Downsample the depth for each scale; every level is padded
            # back into a full-resolution frame so they stack uniformly.
            ref_depth = Image.fromarray(ref_depth)
            original_size = np.array(ref_depth.size).astype(int)
            for scale in range(1,nscale):
                new_size = (original_size/(2**scale)).astype(int)
                down_depth = ref_depth.resize((new_size),Image.BICUBIC)
                frame = np.zeros(depth_frame_size)
                down_np_depth = np.array(down_depth)
                frame[:down_np_depth.shape[0],:down_np_depth.shape[1]] = down_np_depth
                ref_depths.append(frame)
        # Orgnize output and return
        sample = {}
        sample["ref_img"] = np.moveaxis(np.array(ref_img),2,0)
        sample["src_imgs"] = np.moveaxis(np.array(src_imgs),3,1)
        sample["ref_intrinsics"] = np.array(ref_intrinsics)
        sample["src_intrinsics"] = np.array(src_intrinsics)
        sample["ref_extrinsics"] = np.array(ref_extrinsics)
        sample["src_extrinsics"] = np.array(src_extrinsics)
        sample["depth_min"] = depth_min
        sample["depth_max"] = depth_max
        # print(sample)
        if self.args.mode == "train":
            sample["ref_depths"] = np.array(ref_depths,dtype=float)
            # NOTE(review): ref_depth_mask is never populated, so this is
            # always an empty array; kept for interface compatibility.
            sample["ref_depth_mask"] = np.array(ref_depth_mask)
        elif self.args.mode == "test":
            sample["filename"] = scan + '/{}/' + '{:0>8}'.format(ref_view) + "{}"
        return sample
| 6,228 | 38.176101 | 145 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/train_feature_extractor.py | import time
import sys
from data_loader.h36m_loader import Human36M
[sys.path.append(i) for i in ['.', '..']]
from torch import optim
import torch.nn.functional as F
import matplotlib
from model.embedding_net import EmbeddingNet
from train_eval.train_joint_embed import eval_embed
from utils.average_meter import AverageMeter
matplotlib.use('Agg') # we don't use interactive GUI
from config.parse_args import parse_args
from data_loader.lmdb_data_loader import *
import utils.train_utils
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def evaluate_testset(test_data_loader, generator):
    """Evaluate the embedding network on the test split.

    Returns:
        dict with the average reconstruction 'loss' over the split.
    """
    generator.train(False)  # switch to evaluation mode
    losses = AverageMeter('loss')
    start = time.time()
    with torch.no_grad():
        for data in test_data_loader:
            target_poses, target_vec = data
            n_samples = target_vec.size(0)
            loss, _ = eval_embed(None, None, None, target_vec.to(device), generator)
            losses.update(loss.item(), n_samples)
    generator.train(True)  # back to training mode
    # report
    ret_dict = {'loss': losses.avg}
    elapsed_time = time.time() - start
    logging.info('[VAL] loss: {:.3f} / {:.1f}s'.format(losses.avg, elapsed_time))
    return ret_dict
def train_iter(args, epoch, target_data, net, optim):
    """Run one optimisation step of the pose autoencoder.

    Returns:
        dict of loss scalars for logging ('loss', plus 'KLD' in VAE mode).
    """
    optim.zero_grad()
    variational_encoding = False  # AE or VAE

    # Forward pass; per-sample mean L1 reconstruction loss.
    context_feat, context_mu, context_logvar, poses_feat, pose_mu, pose_logvar, recon_data = \
        net(None, None, None, target_data, None, variational_encoding=variational_encoding)
    recon_loss = F.l1_loss(recon_data, target_data, reduction='none').mean(dim=(1, 2))

    if True:  # use pose diff
        # Also penalise frame-to-frame motion differences.
        target_diff = target_data[:, 1:] - target_data[:, :-1]
        recon_diff = recon_data[:, 1:] - recon_data[:, :-1]
        recon_loss = recon_loss + F.l1_loss(recon_diff, target_diff, reduction='none').mean(dim=(1, 2))
    recon_loss = recon_loss.sum()

    if variational_encoding:
        # KL divergence of the latent chosen by the network mode,
        # annealed in linearly after epoch 10.
        if net.mode == 'speech':
            KLD = -0.5 * torch.sum(1 + context_logvar - context_mu.pow(2) - context_logvar.exp())
        else:
            KLD = -0.5 * torch.sum(1 + pose_logvar - pose_mu.pow(2) - pose_logvar.exp())
        KLD_weight = 0 if epoch < 10 else min(1.0, (epoch - 10) * 0.05)
        recon_weight = 100
        loss = recon_weight * recon_loss + KLD_weight * KLD
    else:
        recon_weight = 1
        loss = recon_weight * recon_loss

    loss.backward()
    optim.step()

    ret_dict = {'loss': recon_weight * recon_loss.item()}
    if variational_encoding:
        ret_dict['KLD'] = KLD_weight * KLD.item()
    return ret_dict
def main(config):
    """Train the pose autoencoder (feature extractor) on Human3.6M.

    Args:
        config: dict with key 'args' holding the parsed CLI arguments.
    """
    args = config['args']
    # random seed
    if args.random_seed >= 0:
        utils.train_utils.set_random_seed(args.random_seed)
    # set logger
    utils.train_utils.set_logger(args.model_save_path, os.path.basename(__file__).replace('.py', '.log'))
    # dataset
    mean_dir_vec = np.squeeze(np.array(args.mean_dir_vec))
    path = 'data/h36m/data_3d_h36m.npz'  # from https://github.com/facebookresearch/VideoPose3D/blob/master/DATASETS.md
    train_dataset = Human36M(path, mean_dir_vec, is_train=True, augment=False)
    val_dataset = Human36M(path, mean_dir_vec, is_train=False, augment=False)
    train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(dataset=val_dataset, batch_size=args.batch_size, shuffle=False, drop_last=True)
    # train
    pose_dim = 27  # 9 x 3
    start = time.time()
    loss_meters = [AverageMeter('loss'), AverageMeter('var_loss')]
    best_val_loss = (1e+10, 0)  # value, epoch
    # interval params
    # max(1, ...) prevents a ZeroDivisionError below when the training
    # loader has fewer than 5 batches.
    print_interval = max(1, int(len(train_loader) / 5))
    save_sample_result_epoch_interval = 10
    save_model_epoch_interval = 20
    # init model and optimizer
    generator = EmbeddingNet(args, pose_dim, args.n_poses, None, None, None, mode='pose').to(device)
    gen_optimizer = optim.Adam(generator.parameters(), lr=args.learning_rate, betas=(0.5, 0.999))
    # training
    global_iter = 0
    best_values = {}  # best values for all loss metrics
    for epoch in range(args.epochs):
        # evaluate the test set
        val_metrics = evaluate_testset(test_loader, generator)
        # best?
        val_loss = val_metrics['loss']
        is_best = val_loss < best_val_loss[0]
        if is_best:
            logging.info(' *** BEST VALIDATION LOSS: {:.3f}'.format(val_loss))
            best_val_loss = (val_loss, epoch)
        else:
            logging.info(' best validation loss so far: {:.3f} at EPOCH {}'.format(best_val_loss[0], best_val_loss[1]))
        # save model
        if is_best or (epoch % save_model_epoch_interval == 0 and epoch > 0):
            gen_state_dict = generator.state_dict()
            if is_best:
                save_name = '{}/{}_checkpoint_best.bin'.format(args.model_save_path, args.name)
            else:
                # Fix: the original left save_name undefined (NameError) or
                # stale on periodic non-best saves, clobbering the best
                # checkpoint; periodic saves now get their own file.
                save_name = '{}/{}_checkpoint_{:03d}.bin'.format(args.model_save_path, args.name, epoch)
            utils.train_utils.save_checkpoint({
                'args': args, 'epoch': epoch, 'pose_dim': pose_dim, 'gen_dict': gen_state_dict,
            }, save_name)
        # save sample results
        if args.save_result_video and epoch % save_sample_result_epoch_interval == 0:
            evaluate_sample_and_save_video(epoch, args.name, test_loader, generator, args=args)
        # train iter
        iter_start_time = time.time()
        for iter_idx, (target_pose, target_vec) in enumerate(train_loader, 0):
            global_iter += 1
            batch_size = target_vec.size(0)
            target_vec = target_vec.to(device)
            loss = train_iter(args, epoch, target_vec, generator, gen_optimizer)
            # loss values
            for loss_meter in loss_meters:
                name = loss_meter.name
                if name in loss:
                    loss_meter.update(loss[name], batch_size)
            # print training status
            if (iter_idx + 1) % print_interval == 0:
                print_summary = 'EP {} ({:3d}) | {:>8s}, {:.0f} samples/s | '.format(
                    epoch, iter_idx + 1, utils.train_utils.time_since(start),
                    batch_size / (time.time() - iter_start_time))
                for loss_meter in loss_meters:
                    if loss_meter.count > 0:
                        print_summary += '{}: {:.3f}, '.format(loss_meter.name, loss_meter.avg)
                        loss_meter.reset()
                logging.info(print_summary)
                iter_start_time = time.time()
    # print best losses
    # NOTE(review): best_values is never populated anywhere, so this loop
    # is currently a no-op; kept for compatibility with sibling scripts.
    logging.info('--------- best loss values ---------')
    for key in best_values.keys():
        logging.info('{}: {:.3f} at EPOCH {}'.format(key, best_values[key][0], best_values[key][1]))
def evaluate_sample_and_save_video(epoch, prefix, test_data_loader, generator, args, n_save=None, save_path=None):
    """Reconstruct a few test samples and render them to video files.

    Args:
        epoch: current epoch, used in the output file names.
        prefix: name prefix for the saved videos.
        test_data_loader: loader yielding (poses, direction-vector) batches.
        generator: trained EmbeddingNet used for reconstruction.
        args: settings object (reads .model_save_path, .mean_dir_vec).
        n_save: number of samples to save; defaults to 1 for epoch <= 0,
            otherwise 5.
        save_path: output directory; defaults to args.model_save_path.
    Returns:
        True on completion.
    """
    generator.train(False)  # eval mode
    start = time.time()
    if not n_save:
        n_save = 1 if epoch <= 0 else 5
    with torch.no_grad():
        for iter_idx, data in enumerate(test_data_loader, 0):
            if iter_idx >= n_save:  # save N samples
                break
            _, target_dir_vec = data
            # prepare
            # NOTE(review): the fixed index assumes every test batch has
            # more than 20 samples — confirm against the loader batch size.
            select_index = 20
            target_dir_vec = target_dir_vec[select_index, :, :].unsqueeze(0).to(device)
            # generation
            _, _, _, _, _, _, out_dir_vec = generator(None, None, None, target_dir_vec, variational_encoding=False)
            # to video
            target_dir_vec = np.squeeze(target_dir_vec.cpu().numpy())
            out_dir_vec = np.squeeze(out_dir_vec.cpu().numpy())
            if save_path is None:
                save_path = args.model_save_path
            mean_data = np.array(args.mean_dir_vec).reshape(-1, 3)
            utils.train_utils.create_video_and_save(
                save_path, epoch, prefix, iter_idx,
                target_dir_vec, out_dir_vec, mean_data, '')
    generator.train(True)  # back to training mode
    logging.info('saved sample videos, took {:.1f}s'.format(time.time() - start))
    return True
if __name__ == '__main__':
    # Entry point: parse CLI arguments and launch training.
    _args = parse_args()
    main({'args': _args})
| 8,374 | 34.189076 | 120 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/synthesize.py | import datetime
import logging
import math
import os
import pickle
import random
import sys
import librosa
import soundfile as sf
import lmdb
import numpy as np
import time
import pyarrow
import torch
from torch.utils.data import DataLoader
import utils
from data_loader.lmdb_data_loader import SpeechMotionDataset, default_collate_fn, word_seq_collate_fn
from model.embedding_space_evaluator import EmbeddingSpaceEvaluator
from train import evaluate_testset
from utils.data_utils import extract_melspectrogram, remove_tags_marks, convert_dir_vec_to_pose
from utils.train_utils import create_video_and_save, set_logger
from utils.tts_helper import TTSHelper
sys.path.insert(0, '../../gentle')
import gentle
from data_loader.data_preprocessor import DataPreprocessor
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
gentle_resources = gentle.Resources()
def generate_gestures(args, pose_decoder, lang_model, audio, words, audio_sr=16000, vid=None,
                      seed_seq=None, fade_out=False):
    """Synthesize a gesture sequence for a whole audio clip.

    The clip is divided into overlapping windows of ``args.n_poses`` frames;
    each window is generated conditioned on the last ``args.n_pre_poses``
    frames of the previous window, and the windows are cross-faded together.

    Args:
        args: checkpoint args namespace (n_poses, n_pre_poses, model, ...).
        pose_decoder: trained generator network.
        lang_model: vocab mapping words to indices.
        audio: 1-D raw audio samples.
        words: list of [word, start_time, end_time] entries.
        audio_sr: sample rate of ``audio``.
        vid: speaker id for the style vector; drawn at random when falsy.
        seed_seq: optional seed poses for the first window.
        fade_out: fade the padded tail back to the mean pose.

    Returns:
        (total_frames, dim) numpy array of mean-subtracted direction vectors.
    """
    out_list = []
    n_frames = args.n_poses
    clip_length = len(audio) / audio_sr
    use_spectrogram = False
    if args.model == 'speech2gesture':
        use_spectrogram = True

    # pre seq; the extra last channel is an indicator bit marking constrained frames
    pre_seq = torch.zeros((1, n_frames, len(args.mean_dir_vec) + 1))
    if seed_seq is not None:
        pre_seq[0, 0:args.n_pre_poses, :-1] = torch.Tensor(seed_seq[0:args.n_pre_poses])
        pre_seq[0, 0:args.n_pre_poses, -1] = 1  # indicating bit for seed poses

    sr = 16000
    spectrogram = None
    if use_spectrogram:
        # audio to spectrogram
        spectrogram = extract_melspectrogram(audio, sr)

    # divide into synthesize units and do synthesize
    unit_time = args.n_poses / args.motion_resampling_framerate
    stride_time = (args.n_poses - args.n_pre_poses) / args.motion_resampling_framerate
    if clip_length < unit_time:
        num_subdivision = 1
    else:
        num_subdivision = math.ceil((clip_length - unit_time) / stride_time) + 1
    # 512 matches the melspectrogram hop size -- TODO confirm against extract_melspectrogram
    spectrogram_sample_length = int(round(unit_time * sr / 512))
    audio_sample_length = int(unit_time * audio_sr)
    end_padding_duration = 0

    # prepare speaker input
    if args.z_type == 'speaker':
        if not vid:
            vid = random.randrange(pose_decoder.z_obj.n_words)
        print('vid:', vid)
        vid = torch.LongTensor([vid]).to(device)
    else:
        vid = None

    print('{}, {}, {}, {}, {}'.format(num_subdivision, unit_time, clip_length, stride_time, audio_sample_length))
    out_dir_vec = None
    start = time.time()
    for i in range(0, num_subdivision):
        start_time = i * stride_time
        end_time = start_time + unit_time

        # prepare spectrogram input
        in_spec = None
        if use_spectrogram:
            # prepare spec input
            audio_start = math.floor(start_time / clip_length * spectrogram.shape[0])
            audio_end = audio_start + spectrogram_sample_length
            in_spec = spectrogram[:, audio_start:audio_end]
            in_spec = torch.from_numpy(in_spec).unsqueeze(0).to(device)

        # prepare audio input
        audio_start = math.floor(start_time / clip_length * len(audio))
        audio_end = audio_start + audio_sample_length
        in_audio = audio[audio_start:audio_end]
        if len(in_audio) < audio_sample_length:
            if i == num_subdivision - 1:
                end_padding_duration = audio_sample_length - len(in_audio)
            in_audio = np.pad(in_audio, (0, audio_sample_length - len(in_audio)), 'constant')
        in_audio = torch.from_numpy(in_audio).unsqueeze(0).to(device).float()

        # prepare text input
        word_seq = DataPreprocessor.get_words_in_time_range(word_list=words, start_time=start_time, end_time=end_time)
        extended_word_indices = np.zeros(n_frames)  # zero is the index of padding token
        word_indices = np.zeros(len(word_seq) + 2)
        word_indices[0] = lang_model.SOS_token
        word_indices[-1] = lang_model.EOS_token
        frame_duration = (end_time - start_time) / n_frames
        for w_i, word in enumerate(word_seq):
            print(word[0], end=', ')
            # place each word at the frame index matching its start time
            idx = max(0, int(np.floor((word[1] - start_time) / frame_duration)))
            extended_word_indices[idx] = lang_model.get_word_index(word[0])
            word_indices[w_i + 1] = lang_model.get_word_index(word[0])
        print(' ')
        in_text_padded = torch.LongTensor(extended_word_indices).unsqueeze(0).to(device)
        in_text = torch.LongTensor(word_indices).unsqueeze(0).to(device)

        # prepare pre seq: seed this window with the tail of the previous output
        if i > 0:
            pre_seq[0, 0:args.n_pre_poses, :-1] = out_dir_vec.squeeze(0)[-args.n_pre_poses:]
            pre_seq[0, 0:args.n_pre_poses, -1] = 1  # indicating bit for constraints
        pre_seq = pre_seq.float().to(device)
        pre_seq_partial = pre_seq[0, 0:args.n_pre_poses, :-1].unsqueeze(0)

        # synthesize
        print(in_text_padded)
        if args.model == 'multimodal_context':
            out_dir_vec, *_ = pose_decoder(pre_seq, in_text_padded, in_audio, vid)
        elif args.model == 'joint_embedding':
            _, _, _, _, _, _, out_dir_vec = pose_decoder(in_text_padded, in_audio, pre_seq_partial, None, 'speech')
        elif args.model == 'seq2seq':
            words_lengths = torch.LongTensor([in_text.shape[1]]).to(device)
            out_dir_vec = pose_decoder(in_text, words_lengths, pre_seq_partial, None)
        elif args.model == 'speech2gesture':
            out_dir_vec = pose_decoder(in_spec, pre_seq_partial)
        else:
            assert False

        out_seq = out_dir_vec[0, :, :].data.cpu().numpy()

        # smoothing motion transition
        if len(out_list) > 0:
            last_poses = out_list[-1][-args.n_pre_poses:]
            out_list[-1] = out_list[-1][:-args.n_pre_poses]  # delete last 4 frames

            for j in range(len(last_poses)):
                # linear cross-fade over the overlapping frames
                n = len(last_poses)
                prev_pose = last_poses[j]  # renamed from 'next'/'prev': avoid shadowing builtin next()
                next_pose = out_seq[j]
                out_seq[j] = prev_pose * (n - j) / (n + 1) + next_pose * (j + 1) / (n + 1)

        out_list.append(out_seq)

    print('generation took {:.2} s'.format((time.time() - start) / num_subdivision))

    # aggregate results
    out_dir_vec = np.vstack(out_list)

    # additional interpolation for seq2seq
    if args.model == 'seq2seq':
        n_smooth = args.n_pre_poses
        for i in range(num_subdivision):
            start_frame = args.n_pre_poses + i * (args.n_poses - args.n_pre_poses) - n_smooth
            if start_frame < 0:
                start_frame = 0
                end_frame = start_frame + n_smooth * 2
            else:
                end_frame = start_frame + n_smooth * 3

            # spline interp
            y = out_dir_vec[start_frame:end_frame]
            x = np.array(range(0, y.shape[0]))
            w = np.ones(len(y))
            w[0] = 5
            w[-1] = 5
            # BUGFIX: the endpoint weights were computed but never passed to
            # polyfit; pass them (as the fade-out block below already does) so
            # the fit is anchored at the boundary frames.
            coeffs = np.polyfit(x, y, 3, w=w)
            fit_functions = [np.poly1d(coeffs[:, k]) for k in range(0, y.shape[1])]
            interpolated_y = [fit_functions[k](x) for k in range(0, y.shape[1])]
            interpolated_y = np.transpose(np.asarray(interpolated_y))  # (num_frames x dims)
            out_dir_vec[start_frame:end_frame] = interpolated_y

    # fade out to the mean pose
    if fade_out:
        n_smooth = args.n_pre_poses
        start_frame = len(out_dir_vec) - int(end_padding_duration / audio_sr * args.motion_resampling_framerate)
        end_frame = start_frame + n_smooth * 2

        if len(out_dir_vec) < end_frame:
            out_dir_vec = np.pad(out_dir_vec, [(0, end_frame - len(out_dir_vec)), (0, 0)], mode='constant')
        out_dir_vec[end_frame-n_smooth:] = np.zeros((len(args.mean_dir_vec)))  # fade out to mean poses

        # interpolation
        y = out_dir_vec[start_frame:end_frame]
        x = np.array(range(0, y.shape[0]))
        w = np.ones(len(y))
        w[0] = 5
        w[-1] = 5
        coeffs = np.polyfit(x, y, 2, w=w)
        fit_functions = [np.poly1d(coeffs[:, k]) for k in range(0, y.shape[1])]
        interpolated_y = [fit_functions[k](x) for k in range(0, y.shape[1])]
        interpolated_y = np.transpose(np.asarray(interpolated_y))  # (num_frames x dims)
        out_dir_vec[start_frame:end_frame] = interpolated_y

    return out_dir_vec
def align_words(audio, text):
    """Force-align *text* against *audio* with gentle.

    Returns a list of [word, start_time, end_time] triples; words that gentle
    could not align get a span approximated from their neighbours.
    """
    # gentle operates on 8 kHz audio, so downsample and dump to a temp wav first
    audio_8k = librosa.resample(audio, 16000, 8000)
    wave_file = 'output/temp.wav'
    sf.write(wave_file, audio_8k, 8000, 'PCM_16')

    # run forced alignment
    aligner = gentle.ForcedAligner(gentle_resources, text, nthreads=2, disfluency=False,
                                   conservative=False)
    alignment = aligner.transcribe(wave_file, logging=logging)

    results = []
    aligned_words = alignment.words
    for idx, gw in enumerate(aligned_words):
        if gw.case == 'success':
            results.append([gw.word, gw.start, gw.end])
        elif 0 < idx < len(aligned_words) - 1:
            # alignment failed: approximate the span from adjacent words
            results.append([gw.word, aligned_words[idx - 1].end, aligned_words[idx + 1].start])
    return results
def main(mode, checkpoint_path, option):
    """Entry point for gesture synthesis from a trained checkpoint.

    Modes:
        'eval'         -- quantitative evaluation on the validation set.
        'from_text'    -- synthesize gestures from a (TTS-rendered) text prompt.
        'from_db_clip' -- synthesize gestures for random clips of the test DB.

    Args:
        mode: one of the modes above.
        checkpoint_path: path to the trained model checkpoint (.bin).
        option: mode-specific extra argument (voice name for 'from_text',
            number of generations for 'from_db_clip'); may be None.
    """
    args, generator, loss_fn, lang_model, speaker_model, out_dim = utils.train_utils.load_checkpoint_and_model(
        checkpoint_path, device)
    result_save_path = 'output/generation_results'

    # load mean vec (dataset means stored in the checkpoint's args)
    mean_pose = np.array(args.mean_pose).squeeze()
    mean_dir_vec = np.array(args.mean_dir_vec).squeeze()

    # load lang_model from the cached vocab (replaces the checkpoint copy)
    vocab_cache_path = os.path.join('data/ted_dataset', 'vocab_cache.pkl')
    with open(vocab_cache_path, 'rb') as f:
        lang_model = pickle.load(f)

    # seq2seq consumes variable-length word sequences, so it needs its own collate
    if args.model == 'seq2seq':
        collate_fn = word_seq_collate_fn
    else:
        collate_fn = default_collate_fn

    def load_dataset(path):
        # build a SpeechMotionDataset with the checkpoint's preprocessing params
        dataset = SpeechMotionDataset(path,
                                      n_poses=args.n_poses,
                                      subdivision_stride=args.subdivision_stride,
                                      pose_resampling_fps=args.motion_resampling_framerate,
                                      speaker_model=speaker_model,
                                      mean_pose=mean_pose,
                                      mean_dir_vec=mean_dir_vec
                                      )
        print(len(dataset))
        return dataset

    if mode == 'eval':
        val_data_path = 'data/ted_dataset/lmdb_val'
        eval_net_path = 'output/train_h36m_gesture_autoencoder/gesture_autoencoder_checkpoint_best.bin'
        embed_space_evaluator = EmbeddingSpaceEvaluator(args, eval_net_path, lang_model, device)
        val_dataset = load_dataset(val_data_path)
        data_loader = DataLoader(dataset=val_dataset, batch_size=32, collate_fn=collate_fn,
                                 shuffle=False, drop_last=True, num_workers=args.loader_workers)
        val_dataset.set_lang_model(lang_model)
        evaluate_testset(data_loader, generator, loss_fn, embed_space_evaluator, args)

    elif mode == 'from_text':
        random.seed()
        examples = [
            '<break time="0.5s"/><prosody>once handed me a very thick book. <break time="0.1s"/>it was his familys legacy</prosody>',
            '<break time="0.5s"/>we can help millions of teens with counseling',
            'what an amazing day that will be. what a big opportunity we have.',
            'just the way a surgeon operates on a patient you can literally interact with your table',
            '[Enter a new text]'
        ]
        if option:
            voice = option
        else:
            voice = 'en-female'
        # random speaker identity for the style vector
        vid = random.sample(range(0, speaker_model.n_words), 1)[0]
        tts = TTSHelper(cache_path='output/cached_wav')

        # text input: show the menu, then read the user's selection
        for i, example in enumerate(examples):
            print('(%d) %s' % (i, example))
        try:
            select = int(input("select: "))
        except ValueError:
            exit(0)
        if select == len(examples) - 1:
            input_text = input("text: ")
        elif select >= len(examples) or select < 0:
            print('Please input a valid number. Exiting...')
            exit(0)
        else:
            input_text = examples[select]

        # generation: strip SSML-style tags for alignment/captioning
        text_without_tags = remove_tags_marks(input_text)
        print(text_without_tags)

        tts_filename = tts.synthesis(input_text, voice_name=voice, verbose=False)
        sound_obj, duration = tts.get_sound_obj(tts_filename)
        print('TTS complete (audio length: {0:.1f}s)'.format(duration))

        audio, audio_sr = librosa.load(tts_filename, mono=True, sr=16000, res_type='kaiser_fast')
        words_with_timestamps = align_words(audio, text_without_tags)
        dir_vec = generate_gestures(args, generator, lang_model, audio, words_with_timestamps, vid=vid,
                                    fade_out=False)

        # make a video
        save_path = 'output/generation_results'
        os.makedirs(save_path, exist_ok=True)
        prefix = '{}_vid_{}_{}'.format(text_without_tags[:50], vid, voice)
        out_pos, _ = create_video_and_save(
            save_path, 0, prefix, 0, None, dir_vec, mean_dir_vec, text_without_tags, audio=audio,
            clipping_to_shortest_stream=True, delete_audio_file=False)

        # save pkl (un-normalized direction vectors plus recovered joint positions)
        save_dict = {
            'sentence': words_with_timestamps, 'audio': audio,
            'out_dir_vec': dir_vec + mean_dir_vec, 'out_poses': out_pos,
            'aux_info': ''
        }
        with open(os.path.join(result_save_path, '{}.pkl'.format(prefix)), 'wb') as f:
            pickle.dump(save_dict, f)

    elif mode == 'from_db_clip':
        test_data_path = 'data/ted_dataset/lmdb_test'
        save_path = 'output/generation_results'
        clip_duration_range = [5, 12]  # seconds; clips outside this range are skipped

        random.seed()
        if option:
            n_generations = int(option)
        else:
            n_generations = 5

        # load clips and make gestures
        n_saved = 0
        lmdb_env = lmdb.open(test_data_path, readonly=True, lock=False)
        with lmdb_env.begin(write=False) as txn:
            keys = [key for key, _ in txn.cursor()]
            while n_saved < n_generations:  # loop until we get the desired number of results
                # select video
                key = random.choice(keys)
                buf = txn.get(key)
                video = pyarrow.deserialize(buf)
                vid = video['vid']
                clips = video['clips']

                # select clip
                n_clips = len(clips)
                if n_clips == 0:
                    continue
                clip_idx = random.randrange(n_clips)

                clip_poses = clips[clip_idx]['skeletons_3d']
                clip_audio = clips[clip_idx]['audio_raw']
                clip_words = clips[clip_idx]['words']
                clip_time = [clips[clip_idx]['start_time'], clips[clip_idx]['end_time']]

                # resample to the model framerate and normalize to direction vectors
                clip_poses = utils.data_utils.resample_pose_seq(clip_poses, clip_time[1] - clip_time[0],
                                                                args.motion_resampling_framerate)
                target_dir_vec = utils.data_utils.convert_pose_seq_to_dir_vec(clip_poses)
                target_dir_vec = target_dir_vec.reshape(target_dir_vec.shape[0], -1)
                target_dir_vec -= mean_dir_vec

                # check duration
                clip_duration = clip_time[1] - clip_time[0]
                if clip_duration < clip_duration_range[0] or clip_duration > clip_duration_range[1]:
                    continue

                # synthesize
                for selected_vi in range(len(clip_words)):  # make start time of input text zero
                    clip_words[selected_vi][1] -= clip_time[0]  # start time
                    clip_words[selected_vi][2] -= clip_time[0]  # end time

                vid_idx = random.sample(range(0, speaker_model.n_words), 1)[0]
                out_dir_vec = generate_gestures(args, generator, lang_model, clip_audio, clip_words, vid=vid_idx,
                                                seed_seq=target_dir_vec[0:args.n_pre_poses], fade_out=False)

                # make a video
                sentence_words = []
                for word, _, _ in clip_words:
                    sentence_words.append(word)
                sentence = ' '.join(sentence_words)

                os.makedirs(save_path, exist_ok=True)
                filename_prefix = '{}_{}_{}'.format(vid, vid_idx, clip_idx)
                filename_prefix_for_video = filename_prefix
                aux_str = '({}, time: {}-{})'.format(vid, str(datetime.timedelta(seconds=clip_time[0])),
                                                     str(datetime.timedelta(seconds=clip_time[1])))
                create_video_and_save(
                    save_path, 0, filename_prefix_for_video, 0, target_dir_vec, out_dir_vec,
                    mean_dir_vec, sentence, audio=clip_audio, aux_str=aux_str,
                    clipping_to_shortest_stream=True, delete_audio_file=False)

                # save pkl (de-normalized vectors alongside the human reference)
                out_dir_vec = out_dir_vec + mean_dir_vec
                out_poses = convert_dir_vec_to_pose(out_dir_vec)
                save_dict = {
                    'sentence': sentence, 'audio': clip_audio.astype(np.float32),
                    'out_dir_vec': out_dir_vec, 'out_poses': out_poses,
                    'aux_info': '{}_{}_{}'.format(vid, vid_idx, clip_idx),
                    'human_dir_vec': target_dir_vec + mean_dir_vec,
                }
                with open(os.path.join(save_path, '{}.pkl'.format(filename_prefix)), 'wb') as f:
                    pickle.dump(save_dict, f)

                n_saved += 1
    else:
        assert False, 'wrong mode'
if __name__ == '__main__':
    # CLI: synthesize.py <mode> <checkpoint> [option]
    mode = sys.argv[1]  # {eval, from_db_clip, from_text}
    ckpt_path = sys.argv[2]
    option = sys.argv[3] if len(sys.argv) > 3 else None

    set_logger()
    main(mode, ckpt_path, option)
| 18,129 | 40.0181 | 133 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/train.py | import pprint
import time
from pathlib import Path
import sys
[sys.path.append(i) for i in ['.', '..']]
import matplotlib
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from model import speech2gesture, vocab
from model.embedding_net import EmbeddingNet
from model.seq2seq_net import Seq2SeqNet
from train_eval.train_gan import train_iter_gan
from train_eval.train_joint_embed import train_iter_embed, eval_embed
from train_eval.train_seq2seq import train_iter_seq2seq
from train_eval.train_speech2gesture import train_iter_speech2gesture
from utils.average_meter import AverageMeter
from utils.data_utils import convert_dir_vec_to_pose
from utils.vocab_utils import build_vocab
matplotlib.use('Agg') # we don't use interactive GUI
from config.parse_args import parse_args
from model.embedding_space_evaluator import EmbeddingSpaceEvaluator
from model.multimodal_context_net import PoseGenerator, ConvDiscriminator
from torch import optim
from data_loader.lmdb_data_loader import *
import utils.train_utils
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def init_model(args, lang_model, speaker_model, pose_dim, _device):
    """Instantiate the network(s) for the configured model type.

    Returns:
        (generator, discriminator, loss_fn); discriminator and loss_fn stay
        None for model types that do not use them.
    """
    n_frames = args.n_poses
    generator, discriminator, loss_fn = None, None, None
    model_name = args.model

    if model_name == 'multimodal_context':
        generator = PoseGenerator(args,
                                  n_words=lang_model.n_words,
                                  word_embed_size=args.wordembed_dim,
                                  word_embeddings=lang_model.word_embedding_weights,
                                  z_obj=speaker_model,
                                  pose_dim=pose_dim).to(_device)
        discriminator = ConvDiscriminator(pose_dim).to(_device)
    elif model_name in ('joint_embedding', 'gesture_autoencoder'):
        # same network; only the training mode differs
        embed_mode = 'random' if model_name == 'joint_embedding' else 'pose'
        generator = EmbeddingNet(args, pose_dim, n_frames, lang_model.n_words, args.wordembed_dim,
                                 lang_model.word_embedding_weights, mode=embed_mode).to(_device)
    elif model_name == 'seq2seq':
        generator = Seq2SeqNet(args, pose_dim, n_frames, lang_model.n_words, args.wordembed_dim,
                               lang_model.word_embedding_weights).to(_device)
        loss_fn = torch.nn.L1Loss()
    elif model_name == 'speech2gesture':
        generator = speech2gesture.Generator(n_frames, pose_dim, args.n_pre_poses).to(_device)
        discriminator = speech2gesture.Discriminator(pose_dim).to(_device)
        loss_fn = torch.nn.L1Loss()

    return generator, discriminator, loss_fn
def train_epochs(args, train_data_loader, test_data_loader, lang_model, pose_dim, speaker_model=None):
    """Main training loop.

    Evaluates on the test loader at the start of every epoch, checkpoints the
    best model (by FGD when available, otherwise validation loss), and logs
    running losses to TensorBoard.

    Args:
        args: parsed config namespace.
        train_data_loader / test_data_loader: SpeechMotionDataset loaders.
        lang_model: vocab with word embeddings.
        pose_dim: flattened pose dimensionality.
        speaker_model: speaker vocab for z_type == 'speaker'; overridden below
            for the other z types.
    """
    start = time.time()
    loss_meters = [AverageMeter('loss'), AverageMeter('var_loss'), AverageMeter('gen'), AverageMeter('dis'),
                   AverageMeter('KLD'), AverageMeter('DIV_REG')]
    best_val_loss = (1e+10, 0)  # value, epoch

    tb_path = args.name + '_' + str(datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
    tb_writer = SummaryWriter(log_dir=str(Path(args.model_save_path).parent / 'tensorboard_runs' / tb_path))

    # interval params
    print_interval = int(len(train_data_loader) / 5)
    save_sample_result_epoch_interval = 10
    save_model_epoch_interval = 20

    # z type: 'speaker' keeps the passed vocab, 'random' flags a random z,
    # anything else disables the style input
    if args.z_type == 'speaker':
        pass
    elif args.z_type == 'random':
        speaker_model = 1
    else:
        speaker_model = None

    # init model
    generator, discriminator, loss_fn = init_model(args, lang_model, speaker_model, pose_dim, device)

    # use multi GPUs
    if torch.cuda.device_count() > 1:
        generator = torch.nn.DataParallel(generator)
        if discriminator is not None:
            discriminator = torch.nn.DataParallel(discriminator)

    # prepare an evaluator for FGD
    embed_space_evaluator = None
    if args.eval_net_path and len(args.eval_net_path) > 0:
        embed_space_evaluator = EmbeddingSpaceEvaluator(args, args.eval_net_path, lang_model, device)

    # define optimizers
    gen_optimizer = optim.Adam(generator.parameters(), lr=args.learning_rate, betas=(0.5, 0.999))
    dis_optimizer = None
    if discriminator is not None:
        dis_optimizer = torch.optim.Adam(discriminator.parameters(),
                                         lr=args.learning_rate * args.discriminator_lr_weight,
                                         betas=(0.5, 0.999))

    # training
    global_iter = 0
    best_values = {}  # best values for all loss metrics
    for epoch in range(args.epochs):
        # evaluate the test set
        val_metrics = evaluate_testset(test_data_loader, generator, loss_fn, embed_space_evaluator, args)

        # write to tensorboard and save best values
        for key in val_metrics.keys():
            tb_writer.add_scalar(key + '/validation', val_metrics[key], global_iter)
            if key not in best_values.keys() or val_metrics[key] < best_values[key][0]:
                best_values[key] = (val_metrics[key], epoch)

        # best? (prefer FGD when the embedding-space evaluator produced one)
        if 'frechet' in val_metrics.keys():
            val_loss = val_metrics['frechet']
        else:
            val_loss = val_metrics['loss']
        is_best = val_loss < best_val_loss[0]
        if is_best:
            logging.info(' *** BEST VALIDATION LOSS: {:.3f}'.format(val_loss))
            best_val_loss = (val_loss, epoch)
        else:
            logging.info(' best validation loss so far: {:.3f} at EPOCH {}'.format(best_val_loss[0], best_val_loss[1]))

        # save model
        if is_best or (epoch % save_model_epoch_interval == 0 and epoch > 0):
            dis_state_dict = None
            try:  # multi gpu
                gen_state_dict = generator.module.state_dict()
                if discriminator is not None:
                    dis_state_dict = discriminator.module.state_dict()
            except AttributeError:  # single gpu
                gen_state_dict = generator.state_dict()
                if discriminator is not None:
                    dis_state_dict = discriminator.state_dict()

            if is_best:
                save_name = '{}/{}_checkpoint_best.bin'.format(args.model_save_path, args.name)
            else:
                save_name = '{}/{}_checkpoint_{:03d}.bin'.format(args.model_save_path, args.name, epoch)

            utils.train_utils.save_checkpoint({
                'args': args, 'epoch': epoch, 'lang_model': lang_model, 'speaker_model': speaker_model,
                'pose_dim': pose_dim, 'gen_dict': gen_state_dict,
                'dis_dict': dis_state_dict,
            }, save_name)

        # save sample results
        if args.save_result_video and epoch % save_sample_result_epoch_interval == 0:
            evaluate_sample_and_save_video(
                epoch, args.name, test_data_loader, generator,
                args=args, lang_model=lang_model)

        # train iter
        iter_start_time = time.time()
        for iter_idx, data in enumerate(train_data_loader, 0):
            global_iter += 1
            in_text, text_lengths, in_text_padded, _, target_vec, in_audio, in_spec, aux_info = data
            batch_size = target_vec.size(0)

            in_text = in_text.to(device)
            in_text_padded = in_text_padded.to(device)
            in_audio = in_audio.to(device)
            in_spec = in_spec.to(device)
            target_vec = target_vec.to(device)

            # speaker input (only when speaker_model is a real vocab)
            vid_indices = []
            if speaker_model and isinstance(speaker_model, vocab.Vocab):
                vids = aux_info['vid']
                vid_indices = [speaker_model.word2index[vid] for vid in vids]
                vid_indices = torch.LongTensor(vid_indices).to(device)

            # train: dispatch one optimization step to the model-specific routine
            loss = []
            if args.model == 'multimodal_context':
                loss = train_iter_gan(args, epoch, in_text_padded, in_audio, target_vec, vid_indices,
                                      generator, discriminator,
                                      gen_optimizer, dis_optimizer)
            elif args.model == 'joint_embedding':
                loss = train_iter_embed(args, epoch, in_text_padded, in_audio, target_vec,
                                        generator, gen_optimizer, mode='random')
            elif args.model == 'gesture_autoencoder':
                loss = train_iter_embed(args, epoch, in_text_padded, in_audio, target_vec,
                                        generator, gen_optimizer)
            elif args.model == 'seq2seq':
                loss = train_iter_seq2seq(args, epoch, in_text, text_lengths, target_vec, generator, gen_optimizer)
            elif args.model == 'speech2gesture':
                loss = train_iter_speech2gesture(args, in_spec, target_vec, generator, discriminator,
                                                 gen_optimizer, dis_optimizer, loss_fn)

            # loss values
            for loss_meter in loss_meters:
                name = loss_meter.name
                if name in loss:
                    loss_meter.update(loss[name], batch_size)

            # write to tensorboard
            for key in loss.keys():
                tb_writer.add_scalar(key + '/train', loss[key], global_iter)

            # print training status
            if (iter_idx + 1) % print_interval == 0:
                print_summary = 'EP {} ({:3d}) | {:>8s}, {:.0f} samples/s | '.format(
                    epoch, iter_idx + 1, utils.train_utils.time_since(start),
                    batch_size / (time.time() - iter_start_time))
                for loss_meter in loss_meters:
                    if loss_meter.count > 0:
                        print_summary += '{}: {:.3f}, '.format(loss_meter.name, loss_meter.avg)
                        loss_meter.reset()
                logging.info(print_summary)
                iter_start_time = time.time()

    tb_writer.close()

    # print best losses
    logging.info('--------- best loss values ---------')
    for key in best_values.keys():
        logging.info('{}: {:.3f} at EPOCH {}'.format(key, best_values[key][0], best_values[key][1]))
def evaluate_testset(test_data_loader, generator, loss_fn, embed_space_evaluator, args):
    """Evaluate the generator on a held-out set.

    Computes the model loss, joint-position MAE, acceleration difference and
    (when an embedding-space evaluator is given) FGD / feature distance.

    Returns:
        dict with keys 'loss', 'joint_mae' and, when FGD was computed,
        'frechet' and 'feat_dist'.
    """
    # to evaluation mode
    generator.train(False)

    if embed_space_evaluator:
        embed_space_evaluator.reset()
    losses = AverageMeter('loss')
    joint_mae = AverageMeter('mae_on_joint')
    accel = AverageMeter('accel')
    start = time.time()

    with torch.no_grad():
        for iter_idx, data in enumerate(test_data_loader, 0):
            in_text, text_lengths, in_text_padded, _, target_vec, in_audio, in_spec, aux_info = data
            batch_size = target_vec.size(0)

            in_text = in_text.to(device)
            in_text_padded = in_text_padded.to(device)
            in_audio = in_audio.to(device)
            in_spec = in_spec.to(device)
            target = target_vec.to(device)

            # speaker input: random speaker ids for evaluation
            speaker_model = utils.train_utils.get_speaker_model(generator)
            if speaker_model:
                vid_indices = [random.choice(list(speaker_model.word2index.values())) for _ in range(batch_size)]
                vid_indices = torch.LongTensor(vid_indices).to(device)
            else:
                vid_indices = None

            # seed sequence with indicator bit in the last channel
            pre_seq = target.new_zeros((target.shape[0], target.shape[1], target.shape[2] + 1))
            pre_seq[:, 0:args.n_pre_poses, :-1] = target[:, 0:args.n_pre_poses]
            pre_seq[:, 0:args.n_pre_poses, -1] = 1  # indicating bit for constraints
            pre_seq_partial = pre_seq[:, 0:args.n_pre_poses, :-1]

            if args.model == 'joint_embedding':
                loss, out_dir_vec = eval_embed(in_text_padded, in_audio, pre_seq_partial,
                                               target, generator, mode='speech')
            elif args.model == 'gesture_autoencoder':
                loss, _ = eval_embed(in_text_padded, in_audio, pre_seq_partial, target, generator)
            elif args.model == 'seq2seq':
                out_dir_vec = generator(in_text, text_lengths, target, None)
                loss = loss_fn(out_dir_vec, target)
            elif args.model == 'speech2gesture':
                out_dir_vec = generator(in_spec, pre_seq_partial)
                loss = loss_fn(out_dir_vec, target)
            elif args.model == 'multimodal_context':
                out_dir_vec, *_ = generator(pre_seq, in_text_padded, in_audio, vid_indices)
                loss = F.l1_loss(out_dir_vec, target)
            else:
                assert False

            losses.update(loss.item(), batch_size)

            # the autoencoder has no generated sequence to compare against
            if args.model != 'gesture_autoencoder':
                if embed_space_evaluator:
                    embed_space_evaluator.push_samples(in_text_padded, in_audio, out_dir_vec, target)

                # calculate MAE of joint coordinates (de-normalize first)
                # NOTE(review): on a CPU tensor .numpy() shares memory, so the
                # in-place += below also mutates the loader's batch tensor --
                # harmless since the batch is discarded, but confirm.
                out_dir_vec = out_dir_vec.cpu().numpy()
                out_dir_vec += np.array(args.mean_dir_vec).squeeze()
                out_joint_poses = convert_dir_vec_to_pose(out_dir_vec)
                target_vec = target_vec.cpu().numpy()
                target_vec += np.array(args.mean_dir_vec).squeeze()
                target_poses = convert_dir_vec_to_pose(target_vec)

                if out_joint_poses.shape[1] == args.n_poses:
                    diff = out_joint_poses[:, args.n_pre_poses:] - target_poses[:, args.n_pre_poses:]
                else:
                    diff = out_joint_poses - target_poses[:, args.n_pre_poses:]
                mae_val = np.mean(np.absolute(diff))
                joint_mae.update(mae_val, batch_size)

                # accel (second temporal difference of joint positions)
                target_acc = np.diff(target_poses, n=2, axis=1)
                out_acc = np.diff(out_joint_poses, n=2, axis=1)
                accel.update(np.mean(np.abs(target_acc - out_acc)), batch_size)

    # back to training mode
    generator.train(True)

    # print
    ret_dict = {'loss': losses.avg, 'joint_mae': joint_mae.avg}
    elapsed_time = time.time() - start
    if embed_space_evaluator and embed_space_evaluator.get_no_of_samples() > 0:
        frechet_dist, feat_dist = embed_space_evaluator.get_scores()
        logging.info(
            '[VAL] loss: {:.3f}, joint mae: {:.5f}, accel diff: {:.5f}, FGD: {:.3f}, feat_D: {:.3f} / {:.1f}s'.format(
                losses.avg, joint_mae.avg, accel.avg, frechet_dist, feat_dist, elapsed_time))
        ret_dict['frechet'] = frechet_dist
        ret_dict['feat_dist'] = feat_dist
    else:
        logging.info('[VAL] loss: {:.3f}, joint mae: {:.3f} / {:.1f}s'.format(
            losses.avg, joint_mae.avg, elapsed_time))
    return ret_dict
def evaluate_sample_and_save_video(epoch, prefix, test_data_loader, generator, args, lang_model,
                                   n_save=None, save_path=None):
    """Render a few test batches to comparison videos (GT vs. generated).

    Takes the first sample of each of the first ``n_save`` batches, runs the
    generator, writes a video per sample, and returns the raw results.

    Returns:
        list of dicts with sentence, audio, human/generated direction vectors
        and an aux-info string per rendered sample.
    """
    generator.train(False)  # eval mode
    start = time.time()

    if not n_save:
        n_save = 1 if epoch <= 0 else 5

    out_raw = []
    with torch.no_grad():
        for iter_idx, data in enumerate(test_data_loader, 0):
            if iter_idx >= n_save:  # save N samples
                break
            in_text, text_lengths, in_text_padded, _, target_dir_vec, in_audio, in_spec, aux_info = data

            # prepare: keep only one sample of the batch
            select_index = 0
            if args.model == 'seq2seq':
                in_text = in_text[select_index, :].unsqueeze(0).to(device)
                text_lengths = text_lengths[select_index].unsqueeze(0).to(device)
            in_text_padded = in_text_padded[select_index, :].unsqueeze(0).to(device)
            in_audio = in_audio[select_index, :].unsqueeze(0).to(device)
            in_spec = in_spec[select_index, :, :].unsqueeze(0).to(device)
            target_dir_vec = target_dir_vec[select_index, :, :].unsqueeze(0).to(device)

            # reconstruct the sentence from the padded word indices (0 = padding)
            input_words = []
            for i in range(in_text_padded.shape[1]):
                word_idx = int(in_text_padded.data[select_index, i])
                if word_idx > 0:
                    input_words.append(lang_model.index2word[word_idx])
            sentence = ' '.join(input_words)

            # speaker input (a random speaker id, not the clip's own)
            speaker_model = utils.train_utils.get_speaker_model(generator)
            if speaker_model:
                vid = aux_info['vid'][select_index]
                # vid_indices = [speaker_model.word2index[vid]]
                vid_indices = [random.choice(list(speaker_model.word2index.values()))]
                vid_indices = torch.LongTensor(vid_indices).to(device)
            else:
                vid_indices = None

            # aux info
            aux_str = '({}, time: {}-{})'.format(
                aux_info['vid'][select_index],
                str(datetime.timedelta(seconds=aux_info['start_time'][select_index].item())),
                str(datetime.timedelta(seconds=aux_info['end_time'][select_index].item())))

            # synthesize
            pre_seq = target_dir_vec.new_zeros((target_dir_vec.shape[0], target_dir_vec.shape[1],
                                                target_dir_vec.shape[2] + 1))
            pre_seq[:, 0:args.n_pre_poses, :-1] = target_dir_vec[:, 0:args.n_pre_poses]
            pre_seq[:, 0:args.n_pre_poses, -1] = 1  # indicating bit for constraints
            pre_seq_partial = pre_seq[:, 0:args.n_pre_poses, :-1]

            if args.model == 'multimodal_context':
                out_dir_vec, *_ = generator(pre_seq, in_text_padded, in_audio, vid_indices)
            elif args.model == 'joint_embedding':
                _, _, _, _, _, _, out_dir_vec = generator(in_text_padded, in_audio, pre_seq_partial, None, 'speech')
            elif args.model == 'gesture_autoencoder':
                _, _, _, _, _, _, out_dir_vec = generator(in_text_padded, in_audio, pre_seq_partial, target_dir_vec,
                                                          variational_encoding=False)
            elif args.model == 'seq2seq':
                out_dir_vec = generator(in_text, text_lengths, target_dir_vec, None)
                # out_poses = torch.cat((pre_poses, out_poses), dim=1)
            elif args.model == 'speech2gesture':
                out_dir_vec = generator(in_spec, pre_seq_partial)

            # to video
            audio_npy = np.squeeze(in_audio.cpu().numpy())
            target_dir_vec = np.squeeze(target_dir_vec.cpu().numpy())
            out_dir_vec = np.squeeze(out_dir_vec.cpu().numpy())

            if save_path is None:
                save_path = args.model_save_path

            mean_data = np.array(args.mean_dir_vec).reshape(-1, 3)
            utils.train_utils.create_video_and_save(
                save_path, epoch, prefix, iter_idx,
                target_dir_vec, out_dir_vec, mean_data,
                sentence, audio=audio_npy, aux_str=aux_str)

            # collect raw, de-normalized results (frames x 9 joints x 3)
            target_dir_vec = target_dir_vec.reshape((target_dir_vec.shape[0], 9, 3))
            out_dir_vec = out_dir_vec.reshape((out_dir_vec.shape[0], 9, 3))
            out_raw.append({
                'sentence': sentence,
                'audio': audio_npy,
                'human_dir_vec': target_dir_vec + mean_data,
                'out_dir_vec': out_dir_vec + mean_data,
                'aux_info': aux_str
            })

    generator.train(True)  # back to training mode
    logging.info('saved sample videos, took {:.1f}s'.format(time.time() - start))
    return out_raw
def main(config):
    """Set up datasets, vocab and logging, then run training.

    Args:
        config: dict with key 'args' holding the parsed config namespace.
    """
    args = config['args']

    # random seed
    if args.random_seed >= 0:
        utils.train_utils.set_random_seed(args.random_seed)

    # set logger
    utils.train_utils.set_logger(args.model_save_path, os.path.basename(__file__).replace('.py', '.log'))

    logging.info("PyTorch version: {}".format(torch.__version__))
    logging.info("CUDA version: {}".format(torch.version.cuda))
    logging.info("{} GPUs, default {}".format(torch.cuda.device_count(), device))
    logging.info(pprint.pformat(vars(args)))

    # dataset config: seq2seq needs variable-length word-sequence batches
    if args.model == 'seq2seq':
        collate_fn = word_seq_collate_fn
    else:
        collate_fn = default_collate_fn

    # dataset
    mean_dir_vec = np.array(args.mean_dir_vec).reshape(-1, 3)
    train_dataset = SpeechMotionDataset(args.train_data_path[0],
                                        n_poses=args.n_poses,
                                        subdivision_stride=args.subdivision_stride,
                                        pose_resampling_fps=args.motion_resampling_framerate,
                                        mean_dir_vec=mean_dir_vec,
                                        mean_pose=args.mean_pose,
                                        remove_word_timing=(args.input_context == 'text')
                                        )
    train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size,
                              shuffle=True, drop_last=True, num_workers=args.loader_workers, pin_memory=True,
                              collate_fn=collate_fn
                              )

    val_dataset = SpeechMotionDataset(args.val_data_path[0],
                                      n_poses=args.n_poses,
                                      subdivision_stride=args.subdivision_stride,
                                      pose_resampling_fps=args.motion_resampling_framerate,
                                      speaker_model=train_dataset.speaker_model,
                                      mean_dir_vec=mean_dir_vec,
                                      mean_pose=args.mean_pose,
                                      remove_word_timing=(args.input_context == 'text')
                                      )
    test_loader = DataLoader(dataset=val_dataset, batch_size=args.batch_size,
                             shuffle=False, drop_last=True, num_workers=args.loader_workers, pin_memory=True,
                             collate_fn=collate_fn
                             )

    # the test split is only used for vocab building below (no loader is made)
    test_dataset = SpeechMotionDataset(args.test_data_path[0],
                                       n_poses=args.n_poses,
                                       subdivision_stride=args.subdivision_stride,
                                       pose_resampling_fps=args.motion_resampling_framerate,
                                       speaker_model=train_dataset.speaker_model,
                                       mean_dir_vec=mean_dir_vec,
                                       mean_pose=args.mean_pose)

    # build vocab (cached next to the training data)
    vocab_cache_path = os.path.join(os.path.split(args.train_data_path[0])[0], 'vocab_cache.pkl')
    lang_model = build_vocab('words', [train_dataset, val_dataset, test_dataset], vocab_cache_path, args.wordembed_path,
                             args.wordembed_dim)
    train_dataset.set_lang_model(lang_model)
    val_dataset.set_lang_model(lang_model)

    # train
    pose_dim = 27  # 9 x 3
    train_epochs(args, train_loader, test_loader, lang_model,
                 pose_dim=pose_dim, speaker_model=train_dataset.speaker_model)
if __name__ == '__main__':
    # parse CLI arguments and launch with the config-dict interface main() expects
    _args = parse_args()
    main({'args': _args})
| 23,232 | 45.005941 | 120 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/data_loader/h36m_loader.py | import math
import random
import torch
import numpy as np
from torch.utils.data import Dataset
from utils.data_utils import convert_pose_seq_to_dir_vec, convert_dir_vec_to_pose
train_subject = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
test_subject = ['S11']
class Human36M(Dataset):
    """Human3.6M 3D-pose dataset yielding fixed-length (34-frame) pose clips.

    Poses are reduced to 12 target joints, root-centered, converted to the
    project's axis convention and frontalized; the first two (hip) joints are
    then dropped in normalize(), leaving 10 joints per frame. Each item is a
    (poses, dir_vec) pair: joint positions reconstructed from unit direction
    vectors and the flattened, mean-subtracted direction-vector representation.
    """

    def __init__(self, path, mean_data, is_train=True, augment=False):
        # path: .npz archive holding a pickled dict {'positions_3d': {subject: {action: (T, n_joints, 3)}}}
        # mean_data: flattened mean direction vector subtracted from each sample in __getitem__
        # augment: add small Gaussian jitter to joint coordinates (training-time augmentation)
        n_poses = 34  # clip length in frames (after temporal striding)
        target_joints = [1, 6, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27]  # see https://github.com/kenkra/3d-pose-baseline-vmd/wiki/body
        self.is_train = is_train
        self.augment = augment
        self.mean_data = mean_data
        self.data = []
        if is_train:
            subjects = train_subject
        else:
            subjects = test_subject

        # loading data and normalize
        frame_stride = 2  # sub-sample every 2nd frame; clip windows start every 10 frames
        data = np.load(path, allow_pickle=True)['positions_3d'].item()
        for subject, actions in data.items():
            if subject not in subjects:
                continue

            for action_name, positions in actions.items():
                positions = positions[:, target_joints]
                positions = self.normalize(positions)
                for f in range(0, len(positions), 10):
                    if f+n_poses*frame_stride > len(positions):
                        break
                    self.data.append(positions[f:f+n_poses*frame_stride:frame_stride])

    def __getitem__(self, index):
        poses = self.data[index]
        # round-trip through direction vectors so poses use the canonical bone
        # lengths defined in utils.data_utils.dir_vec_pairs
        dir_vec = convert_pose_seq_to_dir_vec(poses)
        poses = convert_dir_vec_to_pose(dir_vec)

        if self.augment:  # data augmentation by adding gaussian noises on joints coordinates
            rand_val = random.random()
            if rand_val < 0.2:
                # occasionally apply stronger jitter (var 0.002), otherwise mild (var 0.0001)
                poses = poses.copy()
                poses += np.random.normal(0, 0.002 ** 0.5, poses.shape)
            else:
                poses = poses.copy()
                poses += np.random.normal(0, 0.0001 ** 0.5, poses.shape)

            # recompute direction vectors from the jittered joints
            dir_vec = convert_pose_seq_to_dir_vec(poses)

        dir_vec = dir_vec.reshape(dir_vec.shape[0], -1)
        dir_vec = dir_vec - self.mean_data  # mean-centering

        poses = torch.from_numpy(poses).float()
        dir_vec = torch.from_numpy(dir_vec).float()
        return poses, dir_vec

    def __len__(self):
        return len(self.data)

    def normalize(self, data):
        """Root-center each frame, swap axes to the project convention, frontalize, drop hips."""
        # pose normalization
        for f in range(data.shape[0]):
            data[f, :] -= data[f, 2]  # root-center on joint index 2
            data[f, :, (0, 1, 2)] = data[f, :, (0, 2, 1)]  # xy exchange
            data[f, :, 1] = -data[f, :, 1]  # invert y

        # frontalize based on hip joints
        for f in range(data.shape[0]):
            hip_vec = data[f, 1] - data[f, 0]
            angle = np.pi - np.math.atan2(hip_vec[2], hip_vec[0])  # angles on XZ plane
            if 180 > np.rad2deg(angle) > 0:
                pass
            elif 180 < np.rad2deg(angle) < 360:
                angle = angle - np.deg2rad(360)

            rot = self.rotation_matrix([0, 1, 0], angle)  # rotate about the vertical (y) axis
            data[f] = np.matmul(data[f], rot)

        data = data[:, 2:]  # exclude hip joints
        return data

    @staticmethod
    def rotation_matrix(axis, theta):
        """
        Return the rotation matrix associated with counterclockwise rotation about
        the given axis by theta radians.
        """
        axis = np.asarray(axis)
        axis = axis / math.sqrt(np.dot(axis, axis))
        a = math.cos(theta / 2.0)
        b, c, d = -axis * math.sin(theta / 2.0)
        aa, bb, cc, dd = a * a, b * b, c * c, d * d
        bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
        return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                         [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                         [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
| 3,853 | 34.685185 | 134 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/data_loader/lmdb_data_loader.py | import datetime
import logging
import os
import pickle
import random
import numpy as np
import lmdb as lmdb
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
import utils.train_utils
import utils.data_utils
from model.vocab import Vocab
from data_loader.data_preprocessor import DataPreprocessor
import pyarrow
def word_seq_collate_fn(data):
    """Collate a batch whose word sequences have variable lengths.

    The batch is sorted longest-first in place (required later by
    pack_padded_sequence), word sequences are zero-padded, and every other
    field is stacked with the default collate.
    """
    # longest word sequence first
    data.sort(key=lambda sample: len(sample[0]), reverse=True)

    # split the per-sample tuples into per-field tuples
    word_seq, text_padded, poses_seq, vec_seq, audio, spectrogram, aux_info = zip(*data)

    words_lengths = torch.LongTensor([len(seq) for seq in word_seq])
    word_seq = pad_sequence(word_seq, batch_first=True).long()

    text_padded, poses_seq, vec_seq, audio, spectrogram = (
        default_collate(field) for field in (text_padded, poses_seq, vec_seq, audio, spectrogram))

    aux_info = {key: default_collate([sample[key] for sample in aux_info]) for key in aux_info[0]}

    return word_seq, words_lengths, text_padded, poses_seq, vec_seq, audio, spectrogram, aux_info
def default_collate_fn(data):
    """Collate function used when word sequences are not needed.

    Returns the same 8-tuple layout as word_seq_collate_fn, with dummy
    tensors in the word-sequence and word-length slots.
    """
    _, text_padded, pose_seq, vec_seq, audio, spectrogram, aux_info = zip(*data)

    stacked = tuple(default_collate(field)
                    for field in (text_padded, pose_seq, vec_seq, audio, spectrogram))
    aux = {key: default_collate([sample[key] for sample in aux_info]) for key in aux_info[0]}

    return (torch.tensor([0]), torch.tensor([0])) + stacked + (aux,)
class SpeechMotionDataset(Dataset):
    """LMDB-backed dataset of aligned (words, poses, direction vectors, audio, spectrogram) clips.

    On first use a preprocessed cache ('<lmdb_dir>_cache') is built with
    DataPreprocessor; later runs open that cache read-only. A per-dataset
    speaker (video id) vocabulary is built or loaded alongside it.
    """

    def __init__(self, lmdb_dir, n_poses, subdivision_stride, pose_resampling_fps, mean_pose, mean_dir_vec,
                 speaker_model=None, remove_word_timing=False):
        self.lmdb_dir = lmdb_dir
        self.n_poses = n_poses  # pose frames per sample
        self.subdivision_stride = subdivision_stride
        self.skeleton_resampling_fps = pose_resampling_fps
        self.mean_dir_vec = mean_dir_vec
        self.remove_word_timing = remove_word_timing  # spread words evenly instead of using timestamps

        # audio is raw 16 kHz samples, so expected length follows the clip duration
        self.expected_audio_length = int(round(n_poses / pose_resampling_fps * 16000))
        self.expected_spectrogram_length = utils.data_utils.calc_spectrogram_length_from_motion_length(
            n_poses, pose_resampling_fps)

        self.lang_model = None  # set later via set_lang_model(); needed by __getitem__

        logging.info("Reading data '{}'...".format(lmdb_dir))
        preloaded_dir = lmdb_dir + '_cache'
        if not os.path.exists(preloaded_dir):
            logging.info('Creating the dataset cache...')
            assert mean_dir_vec is not None
            if mean_dir_vec.shape[-1] != 3:
                mean_dir_vec = mean_dir_vec.reshape(mean_dir_vec.shape[:-1] + (-1, 3))
            n_poses_extended = int(round(n_poses * 1.25))  # some margin
            data_sampler = DataPreprocessor(lmdb_dir, preloaded_dir, n_poses_extended,
                                            subdivision_stride, pose_resampling_fps, mean_pose, mean_dir_vec)
            data_sampler.run()
        else:
            logging.info('Found the cache {}'.format(preloaded_dir))

        # init lmdb
        self.lmdb_env = lmdb.open(preloaded_dir, readonly=True, lock=False)
        with self.lmdb_env.begin() as txn:
            self.n_samples = txn.stat()['entries']

        # make a speaker model (vid -> index vocab); reuse a pickled one when available
        if speaker_model is None or speaker_model == 0:
            precomputed_model = lmdb_dir + '_speaker_model.pkl'
            if not os.path.exists(precomputed_model):
                self._make_speaker_model(lmdb_dir, precomputed_model)
            else:
                with open(precomputed_model, 'rb') as f:
                    self.speaker_model = pickle.load(f)
        else:
            self.speaker_model = speaker_model

    def __len__(self):
        return self.n_samples

    def __getitem__(self, idx):
        """Return (word indices, frame-aligned word indices, poses, dir vectors, audio, spectrogram, aux info)."""
        with self.lmdb_env.begin(write=False) as txn:
            key = '{:010}'.format(idx).encode('ascii')
            sample = txn.get(key)

            # NOTE(review): pyarrow.deserialize is deprecated/removed in recent
            # pyarrow releases -- the cache format pins this code to an old version
            sample = pyarrow.deserialize(sample)
            word_seq, pose_seq, vec_seq, audio, spectrogram, aux_info = sample

        def extend_word_seq(lang, words, end_time=None):
            # map each word onto the pose frame at its start time (or spread
            # the words evenly when word timing is to be ignored)
            n_frames = self.n_poses
            if end_time is None:
                end_time = aux_info['end_time']
            frame_duration = (end_time - aux_info['start_time']) / n_frames

            extended_word_indices = np.zeros(n_frames)  # zero is the index of padding token
            if self.remove_word_timing:
                n_words = 0
                for word in words:
                    idx = max(0, int(np.floor((word[1] - aux_info['start_time']) / frame_duration)))
                    if idx < n_frames:
                        n_words += 1
                space = int(n_frames / (n_words + 1))
                for i in range(n_words):
                    idx = (i+1) * space
                    extended_word_indices[idx] = lang.get_word_index(words[i][0])
            else:
                prev_idx = 0
                for word in words:
                    idx = max(0, int(np.floor((word[1] - aux_info['start_time']) / frame_duration)))
                    if idx < n_frames:
                        extended_word_indices[idx] = lang.get_word_index(word[0])
                        # extended_word_indices[prev_idx:idx+1] = lang.get_word_index(word[0])
                        prev_idx = idx
            return torch.Tensor(extended_word_indices).long()

        def words_to_tensor(lang, words, end_time=None):
            # SOS + word indices (only words starting before end_time) + EOS
            indexes = [lang.SOS_token]
            for word in words:
                if end_time is not None and word[1] > end_time:
                    break
                indexes.append(lang.get_word_index(word[0]))
            indexes.append(lang.EOS_token)
            return torch.Tensor(indexes).long()

        duration = aux_info['end_time'] - aux_info['start_time']
        do_clipping = True

        if do_clipping:
            # trim every modality to exactly n_poses worth of data
            sample_end_time = aux_info['start_time'] + duration * self.n_poses / vec_seq.shape[0]
            audio = utils.data_utils.make_audio_fixed_length(audio, self.expected_audio_length)
            spectrogram = spectrogram[:, 0:self.expected_spectrogram_length]
            vec_seq = vec_seq[0:self.n_poses]
            pose_seq = pose_seq[0:self.n_poses]
        else:
            sample_end_time = None

        # to tensors
        word_seq_tensor = words_to_tensor(self.lang_model, word_seq, sample_end_time)
        extended_word_seq = extend_word_seq(self.lang_model, word_seq, sample_end_time)
        vec_seq = torch.from_numpy(vec_seq).reshape((vec_seq.shape[0], -1)).float()
        pose_seq = torch.from_numpy(pose_seq).reshape((pose_seq.shape[0], -1)).float()
        audio = torch.from_numpy(audio).float()
        spectrogram = torch.from_numpy(spectrogram)

        return word_seq_tensor, extended_word_seq, pose_seq, vec_seq, audio, spectrogram, aux_info

    def set_lang_model(self, lang_model):
        # must be called before __getitem__ (word indexing needs the vocab)
        self.lang_model = lang_model

    def _make_speaker_model(self, lmdb_dir, cache_path):
        """Index all video ids in the source lmdb into a Vocab and pickle it to cache_path."""
        logging.info(' building a speaker model...')
        speaker_model = Vocab('vid', insert_default_tokens=False)

        lmdb_env = lmdb.open(lmdb_dir, readonly=True, lock=False)
        txn = lmdb_env.begin(write=False)
        cursor = txn.cursor()
        for key, value in cursor:
            video = pyarrow.deserialize(value)
            vid = video['vid']
            speaker_model.index_word(vid)

        lmdb_env.close()
        logging.info(' indexed %d videos' % speaker_model.n_words)
        self.speaker_model = speaker_model

        # cache
        with open(cache_path, 'wb') as f:
            pickle.dump(self.speaker_model, f)
| 8,022 | 39.933673 | 109 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/train_eval/train_speech2gesture.py | import torch
import torch.nn.functional as F
def train_iter_speech2gesture(args, in_spec, target_poses, pose_decoder, discriminator,
                              pose_dec_optim, dis_optim, loss_fn):
    """One Speech2Gesture training step: update D on real/fake motion, then update G.

    in_spec: audio spectrogram input; target_poses: ground-truth poses
    (batch x seq x dim). Returns weighted 'loss' (regression), 'gen', and 'dis'
    values for logging.
    """
    # generation
    pre_poses = target_poses[:, 0:args.n_pre_poses]  # seed frames given to the generator
    out_poses = pose_decoder(in_spec, pre_poses)

    # to motion: first-order differences, so D judges movement rather than absolute pose
    target_motion = target_poses[:, 1:] - target_poses[:, :-1]
    out_motion = out_poses[:, 1:] - out_poses[:, :-1]

    ###########################################################################################
    # train D
    dis_optim.zero_grad()
    dis_real = discriminator(target_motion)
    dis_fake = discriminator(out_motion.detach())  # detach: no gradient into the generator
    # LSGAN-style objective: real -> 1, fake -> 0
    dis_error = F.mse_loss(torch.ones_like(dis_real), dis_real) + F.mse_loss(torch.zeros_like(dis_fake), dis_fake)

    dis_error.backward()
    dis_optim.step()

    ###########################################################################################
    # train G
    pose_dec_optim.zero_grad()
    l1_loss = loss_fn(out_poses, target_poses)
    dis_output = discriminator(out_motion)
    gen_error = F.mse_loss(torch.ones_like(dis_output), dis_output)  # fool D: fake -> 1
    loss = args.loss_regression_weight * l1_loss + args.loss_gan_weight * gen_error
    loss.backward()
    pose_dec_optim.step()

    return {'loss': args.loss_regression_weight * l1_loss.item(), 'gen': args.loss_gan_weight * gen_error.item(),
            'dis': dis_error.item()}
| 1,430 | 36.657895 | 114 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/train_eval/train_joint_embed.py | import torch
import torch.nn.functional as F
def train_iter_embed(args, epoch, in_text, in_audio, target_data, net, optim, mode=None):
    """One training step of the joint embedding network (also the FGD feature extractor).

    Returns {'loss': summed reconstruction loss} plus 'KLD' when variational
    encoding is enabled.
    """
    pre_seq = target_data[:, 0:args.n_pre_poses]

    # zero gradients
    optim.zero_grad()

    # NOTE(review): both branches set variational_encoding=False, so the VAE/KLD
    # path below is currently dead code -- presumably kept as an experiment switch
    if mode == 'random':  # joint embed model
        variational_encoding = False  # AE
    else:  # feature extractor in FGD
        variational_encoding = False  # VAE or AE

    # reconstruction loss
    context_feat, context_mu, context_logvar, poses_feat, pose_mu, pose_logvar, recon_data = \
        net(in_text, in_audio, pre_seq, target_data, mode, variational_encoding=variational_encoding)

    recon_loss = F.l1_loss(recon_data, target_data, reduction='none')
    recon_loss = torch.mean(recon_loss, dim=(1, 2))

    if False:  # use pose diff
        target_diff = target_data[:, 1:] - target_data[:, :-1]
        recon_diff = recon_data[:, 1:] - recon_data[:, :-1]
        recon_loss += torch.mean(F.l1_loss(recon_diff, target_diff, reduction='none'), dim=(1, 2))

    recon_loss = torch.sum(recon_loss)

    # KLD (only when variational encoding is active; see note above)
    if variational_encoding:
        if net.mode == 'speech':
            KLD = -0.5 * torch.sum(1 + context_logvar - context_mu.pow(2) - context_logvar.exp())
        else:
            KLD = -0.5 * torch.sum(1 + pose_logvar - pose_mu.pow(2) - pose_logvar.exp())

        # KLD warm-up: disabled for the first 10 epochs, then ramped up
        if epoch < 10:
            KLD_weight = 0
        else:
            KLD_weight = min(1.0, (epoch - 10) * args.loss_kld_weight)
        loss = args.loss_regression_weight * recon_loss + KLD_weight * KLD
    else:
        loss = recon_loss

    loss.backward()
    optim.step()

    ret_dict = {'loss': recon_loss.item()}
    if variational_encoding:
        ret_dict['KLD'] = KLD.item()
    return ret_dict
def eval_embed(in_text, in_audio, pre_poses, target_poses, net, mode=None):
    """Run the embedding net in plain reconstruction mode.

    Returns (mean L1 reconstruction loss, reconstructed poses).
    """
    outputs = net(in_text, in_audio, pre_poses, target_poses, mode, variational_encoding=False)
    recon_poses = outputs[-1]

    per_sample = F.l1_loss(recon_poses, target_poses, reduction='none').mean(dim=(1, 2))
    return per_sample.mean(), recon_poses
| 2,191 | 33.793651 | 101 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/train_eval/train_gan.py | import random
import numpy as np
import torch
import torch.nn.functional as F
def add_noise(data):
    """Return `data` perturbed by zero-mean Gaussian noise with std 0.1."""
    jitter = torch.randn_like(data) * 0.1
    return data + jitter
def train_iter_gan(args, epoch, in_text, in_audio, target_poses, vid_indices,
                   pose_decoder, discriminator,
                   pose_dec_optim, dis_optim):
    """Run one GAN training iteration: one discriminator step, then one generator step.

    Adversarial training only starts after args.loss_warmup epochs. Returns a
    dict of weighted scalar losses for logging: always 'loss' (regression);
    'KLD'/'DIV_REG' when the speaker/random z-regularization path ran; 'gen' and
    'dis' once adversarial training is active.
    """
    warm_up_epochs = args.loss_warmup
    use_noisy_target = False

    # make pre seq input: the first n_pre_poses frames are given as constraints,
    # with an extra indicator channel (last dim) marking the constrained frames
    pre_seq = target_poses.new_zeros((target_poses.shape[0], target_poses.shape[1], target_poses.shape[2] + 1))
    pre_seq[:, 0:args.n_pre_poses, :-1] = target_poses[:, 0:args.n_pre_poses]
    pre_seq[:, 0:args.n_pre_poses, -1] = 1  # indicating bit for constraints

    ###########################################################################################
    # train D (only after warm-up and only if the GAN loss is enabled)
    dis_error = None
    if epoch > warm_up_epochs and args.loss_gan_weight > 0.0:
        dis_optim.zero_grad()

        out_dir_vec, *_ = pose_decoder(pre_seq, in_text, in_audio, vid_indices)  # out shape (batch x seq x dim)

        if use_noisy_target:
            # optional instance-noise variant to stabilize D
            noise_target = add_noise(target_poses)
            noise_out = add_noise(out_dir_vec.detach())
            dis_real = discriminator(noise_target, in_text)
            dis_fake = discriminator(noise_out, in_text)
        else:
            dis_real = discriminator(target_poses, in_text)
            dis_fake = discriminator(out_dir_vec.detach(), in_text)  # detach: no grads into G

        dis_error = torch.sum(-torch.mean(torch.log(dis_real + 1e-8) + torch.log(1 - dis_fake + 1e-8)))  # ns-gan
        dis_error.backward()
        dis_optim.step()

    ###########################################################################################
    # train G
    pose_dec_optim.zero_grad()

    # decoding
    out_dir_vec, z, z_mu, z_logvar = pose_decoder(pre_seq, in_text, in_audio, vid_indices)

    # loss: scaled smooth-L1 regression + non-saturating GAN generator loss
    beta = 0.1
    huber_loss = F.smooth_l1_loss(out_dir_vec / beta, target_poses / beta) * beta
    dis_output = discriminator(out_dir_vec, in_text)
    gen_error = -torch.mean(torch.log(dis_output + 1e-8))

    kld = div_reg = None
    if (args.z_type == 'speaker' or args.z_type == 'random') and args.loss_reg_weight > 0.0:
        if args.z_type == 'speaker':
            # enforcing divergent gestures btw original vid and other vid
            rand_idx = torch.randperm(vid_indices.shape[0])
            rand_vids = vid_indices[rand_idx]
        else:
            rand_vids = None

        out_dir_vec_rand_vid, z_rand_vid, _, _ = pose_decoder(pre_seq, in_text, in_audio, rand_vids)
        beta = 0.05
        pose_l1 = F.smooth_l1_loss(out_dir_vec / beta, out_dir_vec_rand_vid.detach() / beta, reduction='none') * beta
        pose_l1 = pose_l1.sum(dim=1).sum(dim=1)

        pose_l1 = pose_l1.view(pose_l1.shape[0], -1).mean(1)
        z_l1 = F.l1_loss(z.detach(), z_rand_vid.detach(), reduction='none')
        z_l1 = z_l1.view(z_l1.shape[0], -1).mean(1)
        # reward large pose differences per unit z difference (clamped for stability)
        div_reg = -(pose_l1 / (z_l1 + 1.0e-5))
        div_reg = torch.clamp(div_reg, min=-1000)
        div_reg = div_reg.mean()

        if args.z_type == 'speaker':
            # speaker embedding KLD
            kld = -0.5 * torch.mean(1 + z_logvar - z_mu.pow(2) - z_logvar.exp())
            loss = args.loss_regression_weight * huber_loss + args.loss_kld_weight * kld + args.loss_reg_weight * div_reg
        else:
            loss = args.loss_regression_weight * huber_loss + args.loss_reg_weight * div_reg
    else:
        loss = args.loss_regression_weight * huber_loss  # + var_loss

    if epoch > warm_up_epochs:
        loss += args.loss_gan_weight * gen_error

    loss.backward()
    pose_dec_optim.step()

    ret_dict = {'loss': args.loss_regression_weight * huber_loss.item()}
    # FIX: compare against None explicitly; the previous tensor-truthiness tests
    # (`if kld:` / `if div_reg:`) would silently drop a logging term whose value
    # happened to be exactly zero
    if kld is not None:
        ret_dict['KLD'] = args.loss_kld_weight * kld.item()
    if div_reg is not None:
        ret_dict['DIV_REG'] = args.loss_reg_weight * div_reg.item()

    if epoch > warm_up_epochs and args.loss_gan_weight > 0.0:
        ret_dict['gen'] = args.loss_gan_weight * gen_error.item()
        ret_dict['dis'] = dis_error.item()
    return ret_dict
| 4,074 | 37.809524 | 121 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/train_eval/train_seq2seq.py | import logging
import torch
import torch.nn.functional as F
loss_i = 0  # global call counter for periodic debug logging


def custom_loss(output, target, args, epoch):
    """Weighted sum of MSE regression, motion-continuity, and motion-variance terms.

    output/target: (batch, seq, dim) pose tensors. `epoch` is accepted for
    interface compatibility but unused.
    """
    global loss_i
    n_element = output.numel()

    # regression term (mae comment in original; implemented as MSE)
    mse_loss = F.mse_loss(output, target) * args.loss_regression_weight

    # continuity term: penalize large frame-to-frame jumps
    step_diffs = [abs(output[:, n, :] - output[:, n - 1, :]) for n in range(1, output.shape[1])]
    cont_loss = torch.sum(torch.stack(step_diffs)) / n_element * args.loss_kld_weight

    # variance term: reward larger overall motion magnitude (note the negative sign)
    var_loss = -torch.sum(torch.norm(output, 2, 1)) / n_element * args.loss_reg_weight  # output shape (batch, seq, dim)

    loss = mse_loss + cont_loss + var_loss

    # debugging code: log the individual terms every 1000 calls
    if loss_i == 1000:
        logging.debug('(custom loss) mse %.5f, cont %.5f, var %.5f'
                      % (mse_loss.item(), cont_loss.item(), var_loss.item()))
        loss_i = 0
    loss_i += 1

    return loss
def train_iter_seq2seq(args, epoch, in_text, in_lengths, target_poses, net, optim):
    """One optimization step of the seq2seq pose generator.

    Returns {'loss': float} with the weighted custom loss value.
    """
    optim.zero_grad()

    # forward pass
    outputs = net(in_text, in_lengths, target_poses, None)

    # weighted objective (regression + continuity + variance)
    loss = custom_loss(outputs, target_poses, args, epoch)
    loss.backward()

    # clip gradients to stabilize RNN training, then update
    torch.nn.utils.clip_grad_norm_(net.parameters(), 5)
    optim.step()

    return {'loss': loss.item()}
| 1,354 | 25.057692 | 88 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/utils/data_utils.py | import re
import librosa
import numpy as np
import torch
from scipy.interpolate import interp1d
from sklearn.preprocessing import normalize
# default compute device for this module's helpers
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# (start_joint, end_joint, matplotlib color) tuples used when drawing skeletons
skeleton_line_pairs = [(0, 1, 'b'), (1, 2, 'darkred'), (2, 3, 'r'), (3, 4, 'orange'), (1, 5, 'darkgreen'),
                       (5, 6, 'limegreen'), (6, 7, 'darkseagreen')]
# (parent_joint, child_joint, bone_length) tuples defining the 10-joint kinematic chain
dir_vec_pairs = [(0, 1, 0.26), (1, 2, 0.18), (2, 3, 0.14), (1, 4, 0.22), (4, 5, 0.36),
                 (5, 6, 0.33), (1, 7, 0.22), (7, 8, 0.36), (8, 9, 0.33)]  # adjacency and bone length
def normalize_string(s):
    """ lowercase, trim, and remove non-letter characters """
    text = s.lower().strip()
    text = re.sub(r"([,.!?])", r" \1 ", text)     # isolate some marks
    text = re.sub(r"(['])", r"", text)            # remove apostrophe
    text = re.sub(r"[^a-zA-Z,.!?]+", r" ", text)  # replace other characters with whitespace
    return re.sub(r"\s+", r" ", text).strip()     # collapse runs of whitespace
def remove_tags_marks(text):
    """Strip HTML/XML-like tags and sentence punctuation from `text`."""
    pattern = re.compile('<.*?>|[.,:;!?]+')
    return re.sub(pattern, '', text)
def extract_melspectrogram(y, sr=16000):
    """Compute a log-scaled mel spectrogram (mels x time) as float16."""
    power_spec = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=1024, hop_length=512, power=2)
    log_melspec = librosa.power_to_db(power_spec, ref=np.max)
    return log_melspec.astype('float16')
def calc_spectrogram_length_from_motion_length(n_frames, fps):
    """Number of STFT frames (sr 16000, n_fft 1024, hop 512) covering `n_frames` of motion."""
    n_samples = n_frames / fps * 16000
    return int(round((n_samples - 1024) / 512 + 1))
def resample_pose_seq(poses, duration_in_sec, fps):
    """Linearly resample a pose sequence (frames along axis 0) to `fps` frames per second."""
    n_frames = len(poses)
    sample_fn = interp1d(np.arange(0, n_frames), poses, axis=0, kind='linear', fill_value='extrapolate')

    target_n = duration_in_sec * fps
    resampled = sample_fn(np.arange(0, n_frames, n_frames / target_n))

    # keep the input dtype when the input is an ndarray
    if hasattr(poses, 'dtype'):
        resampled = resampled.astype(poses.dtype)
    return resampled
def time_stretch_for_words(words, start_time, speech_speed_rate):
    """Rescale timings of words starting after `start_time` by 1/speech_speed_rate (in place).

    words: mutable [token, start, end] triples; returns the same list.
    """
    for word in words:
        if word[1] > start_time:
            word[1] = start_time + (word[1] - start_time) / speech_speed_rate
            word[2] = start_time + (word[2] - start_time) / speech_speed_rate

    return words
def make_audio_fixed_length(audio, expected_audio_length):
    """Pad (symmetric reflection) or truncate 1-D audio to exactly `expected_audio_length` samples."""
    shortfall = expected_audio_length - len(audio)
    if shortfall > 0:
        return np.pad(audio, (0, shortfall), mode='symmetric')
    return audio[0:expected_audio_length]
def convert_dir_vec_to_pose(vec):
    """Integrate unit direction vectors (with fixed bone lengths from dir_vec_pairs)
    into 10-joint positions; joint 0 is the origin.

    Accepts (9*3,)-flattened or (..., 9, 3) input of rank 2, 3 (seq), or 4 (batch, seq).
    """
    vec = np.array(vec)
    if vec.shape[-1] != 3:
        vec = vec.reshape(vec.shape[:-1] + (-1, 3))

    rank = len(vec.shape)
    if rank == 2:  # single frame: (9, 3)
        joint_pos = np.zeros((10, 3))
        for j, (parent, child, length) in enumerate(dir_vec_pairs):
            joint_pos[child] = joint_pos[parent] + length * vec[j]
    elif rank == 3:  # sequence: (seq, 9, 3)
        joint_pos = np.zeros((vec.shape[0], 10, 3))
        for j, (parent, child, length) in enumerate(dir_vec_pairs):
            joint_pos[:, child] = joint_pos[:, parent] + length * vec[:, j]
    elif rank == 4:  # (batch, seq, 9, 3)
        joint_pos = np.zeros((vec.shape[0], vec.shape[1], 10, 3))
        for j, (parent, child, length) in enumerate(dir_vec_pairs):
            joint_pos[:, :, child] = joint_pos[:, :, parent] + length * vec[:, :, j]
    else:
        assert False

    return joint_pos
def convert_pose_seq_to_dir_vec(pose):
    """Convert joint positions into unit-length bone direction vectors (per dir_vec_pairs).

    Accepts flattened or (..., joints, 3) input of rank 3 (seq) or 4 (batch, seq).
    """
    if pose.shape[-1] != 3:
        pose = pose.reshape(pose.shape[:-1] + (-1, 3))

    rank = len(pose.shape)
    if rank == 3:  # (seq, joints, 3)
        dir_vec = np.zeros((pose.shape[0], len(dir_vec_pairs), 3))
        for i, (parent, child, _) in enumerate(dir_vec_pairs):
            dir_vec[:, i] = pose[:, child] - pose[:, parent]
            dir_vec[:, i, :] = normalize(dir_vec[:, i, :], axis=1)  # to unit length
    elif rank == 4:  # (batch, seq, joints, 3)
        dir_vec = np.zeros((pose.shape[0], pose.shape[1], len(dir_vec_pairs), 3))
        for i, (parent, child, _) in enumerate(dir_vec_pairs):
            dir_vec[:, :, i] = pose[:, :, child] - pose[:, :, parent]
        for b in range(dir_vec.shape[0]):  # batch
            for i in range(len(dir_vec_pairs)):
                dir_vec[b, :, i, :] = normalize(dir_vec[b, :, i, :], axis=1)  # to unit length
    else:
        assert False

    return dir_vec
| 4,295 | 34.504132 | 106 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/utils/train_utils.py | import logging
import os
import pickle
import random
import subprocess
from collections import defaultdict, namedtuple
from logging.handlers import RotatingFileHandler
from textwrap import wrap
import numpy as np
import re
import time
import math
import soundfile as sf
import librosa.display
import matplotlib
import matplotlib.pyplot as plt
import torch
import matplotlib.ticker as ticker
import matplotlib.animation as animation
from mpl_toolkits import mplot3d
import utils.data_utils
import train
import data_loader.lmdb_data_loader
# only for unicode characters, you may remove these two lines
from model import vocab
matplotlib.rcParams['axes.unicode_minus'] = False
def set_logger(log_path=None, log_filename='log'):
    """Configure root logging with a console handler and an optional rotating file handler."""
    # drop handlers left over from any previous configuration so basicConfig applies
    for handler in list(logging.root.handlers):
        logging.root.removeHandler(handler)

    handlers = [logging.StreamHandler()]
    if log_path is not None:
        os.makedirs(log_path, exist_ok=True)
        file_handler = RotatingFileHandler(os.path.join(log_path, log_filename),
                                           maxBytes=10 * 1024 * 1024, backupCount=5)
        handlers.append(file_handler)
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s: %(message)s', handlers=handlers)

    logging.getLogger("matplotlib").setLevel(logging.WARNING)  # silence matplotlib chatter
def as_minutes(s):
    """Format a duration in seconds as 'Xm Ys' (seconds truncated to an int)."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)
def time_since(since):
    """Human-readable elapsed time since `since` (a time.time() timestamp)."""
    elapsed = time.time() - since
    return '%s' % as_minutes(elapsed)
def create_video_and_save(save_path, epoch, prefix, iter_idx, target, output, mean_data, title,
                          audio=None, aux_str=None, clipping_to_shortest_stream=False, delete_audio_file=True):
    """Render side-by-side 3D skeleton animations (human vs. generated) to an mp4.

    target/output are mean-subtracted direction-vector sequences; audio, when
    given, is raw 1-channel 16 kHz samples that get muxed into the video with
    ffmpeg (which must be on PATH). Returns (output_poses, target_poses).
    """
    print('rendering a video...')
    start = time.time()

    fig = plt.figure(figsize=(8, 4))
    axes = [fig.add_subplot(1, 2, 1, projection='3d'), fig.add_subplot(1, 2, 2, projection='3d')]
    axes[0].view_init(elev=20, azim=-60)
    axes[1].view_init(elev=20, azim=-60)
    fig_title = title

    if aux_str:
        fig_title += ('\n' + aux_str)
    fig.suptitle('\n'.join(wrap(fig_title, 75)), fontsize='medium')

    # un-normalization and convert to poses
    mean_data = mean_data.flatten()
    output = output + mean_data
    output_poses = utils.data_utils.convert_dir_vec_to_pose(output)
    target_poses = None
    if target is not None:
        target = target + mean_data
        target_poses = utils.data_utils.convert_dir_vec_to_pose(target)

    def animate(i):
        # redraw both subplots (ground truth left, generated right) for frame i
        for k, name in enumerate(['human', 'generated']):
            if name == 'human' and target is not None and i < len(target):
                pose = target_poses[i]
            elif name == 'generated' and i < len(output):
                pose = output_poses[i]
            else:
                pose = None

            if pose is not None:
                axes[k].clear()
                for j, pair in enumerate(utils.data_utils.dir_vec_pairs):
                    # note axis order: data y is plotted on the z axis and vice versa
                    axes[k].plot([pose[pair[0], 0], pose[pair[1], 0]],
                                 [pose[pair[0], 2], pose[pair[1], 2]],
                                 [pose[pair[0], 1], pose[pair[1], 1]],
                                 zdir='z', linewidth=5)
                axes[k].set_xlim3d(-0.5, 0.5)
                axes[k].set_ylim3d(0.5, -0.5)
                axes[k].set_zlim3d(0.5, -0.5)
                axes[k].set_xlabel('x')
                axes[k].set_ylabel('z')
                axes[k].set_zlabel('y')
                axes[k].set_title('{} ({}/{})'.format(name, i + 1, len(output)))

    if target is not None:
        num_frames = max(len(target), len(output))
    else:
        num_frames = len(output)
    ani = animation.FuncAnimation(fig, animate, interval=30, frames=num_frames, repeat=False)

    # show audio
    audio_path = None
    if audio is not None:
        assert len(audio.shape) == 1  # 1-channel, raw signal
        audio = audio.astype(np.float32)
        sr = 16000
        audio_path = '{}/{}_{:03d}_{}.wav'.format(save_path, prefix, epoch, iter_idx)
        sf.write(audio_path, audio, sr)

    # save video
    try:
        video_path = '{}/temp_{}_{:03d}_{}.mp4'.format(save_path, prefix, epoch, iter_idx)
        ani.save(video_path, fps=15, dpi=80)  # dpi 150 for a higher resolution
        del ani
        plt.close(fig)
    except RuntimeError:
        assert False, 'RuntimeError'

    # merge audio and video
    if audio is not None:
        merged_video_path = '{}/{}_{:03d}_{}.mp4'.format(save_path, prefix, epoch, iter_idx)
        cmd = ['ffmpeg', '-loglevel', 'panic', '-y', '-i', video_path, '-i', audio_path, '-strict', '-2',
               merged_video_path]
        if clipping_to_shortest_stream:
            cmd.insert(len(cmd) - 1, '-shortest')
        subprocess.call(cmd)

        if delete_audio_file:
            os.remove(audio_path)
        os.remove(video_path)

    print('done, took {:.1f} seconds'.format(time.time() - start))

    return output_poses, target_poses
def save_checkpoint(state, filename):
    """Serialize a training-state dict to `filename` via torch.save and log it."""
    torch.save(state, filename)
    logging.info('Saved the checkpoint')
def get_speaker_model(net):
    """Return the net's speaker vocabulary (z_obj) if it exists and is a vocab.Vocab, else None."""
    target = net.module if hasattr(net, 'module') else net  # unwrap DataParallel
    try:
        candidate = target.z_obj
    except AttributeError:
        candidate = None

    return candidate if isinstance(candidate, vocab.Vocab) else None
def load_checkpoint_and_model(checkpoint_path, _device='cpu'):
    """Restore a training checkpoint and rebuild the generator in eval mode.

    Returns (args, generator, loss_fn, lang_model, speaker_model, pose_dim).
    """
    print('loading checkpoint {}'.format(checkpoint_path))
    checkpoint = torch.load(checkpoint_path, map_location=_device)
    args = checkpoint['args']
    epoch = checkpoint['epoch']
    lang_model = checkpoint['lang_model']
    speaker_model = checkpoint['speaker_model']
    pose_dim = checkpoint['pose_dim']
    print('epoch {}'.format(epoch))

    # rebuild the model graph, then load only the generator weights
    generator, discriminator, loss_fn = train.init_model(args, lang_model, speaker_model, pose_dim, _device)
    generator.load_state_dict(checkpoint['gen_dict'])

    # set to eval mode
    generator.train(False)

    return args, generator, loss_fn, lang_model, speaker_model, pose_dim
def set_random_seed(seed):
    """Seed every RNG the project touches (torch CPU/GPU, numpy, python, hashing)."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
| 6,259 | 31.604167 | 112 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/model/seq2seq_net.py | import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import math
'''
Based on the following Se2Seq implementations:
- https://github.com/AuCson/PyTorch-Batch-Attention-Seq2seq
- https://github.com/spro/practical-pytorch/blob/master/seq2seq-translation/seq2seq-translation-batched.ipynb
'''
class EncoderRNN(nn.Module):
    """Bidirectional GRU text encoder whose forward/backward outputs are summed."""

    def __init__(self, input_size, embed_size, hidden_size, n_layers=1, dropout=0.5, pre_trained_embedding=None):
        super(EncoderRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.embed_size = embed_size
        self.n_layers = n_layers
        self.dropout = dropout

        if pre_trained_embedding is None:
            self.embedding = nn.Embedding(input_size, embed_size)
        else:
            # initialize from pre-trained vectors (e.g., word2vec, glove), fine-tuned during training
            assert pre_trained_embedding.shape[0] == input_size
            assert pre_trained_embedding.shape[1] == embed_size
            self.embedding = nn.Embedding.from_pretrained(torch.FloatTensor(pre_trained_embedding), freeze=False)

        self.gru = nn.GRU(embed_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True)

        # multi-GPU (DataParallel) requires flattened weight buffers
        self.do_flatten_parameters = torch.cuda.device_count() > 1

    def forward(self, input_seqs, input_lengths, hidden=None):
        '''
        :param input_seqs:
            Variable of shape (num_step(T),batch_size(B)), sorted decreasingly by lengths(for packing)
        :param input_lengths:
            list of sequence length
        :param hidden:
            initial state of GRU
        :returns:
            GRU outputs in shape (T,B,hidden_size(H))
            last hidden stat of RNN(i.e. last output for GRU)
        '''
        if self.do_flatten_parameters:
            self.gru.flatten_parameters()

        embedded = self.embedding(input_seqs)
        packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
        outputs, hidden = self.gru(packed, hidden)
        outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(outputs)  # unpack (back to padded)

        # sum the forward and backward direction features
        outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]
        return outputs, hidden
class Attn(nn.Module):
    """Additive (Bahdanau-style) attention producing weights over encoder time steps."""

    def __init__(self, hidden_size):
        super(Attn, self).__init__()
        self.hidden_size = hidden_size
        self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
        self.v = nn.Parameter(torch.rand(hidden_size))
        # initialize v with std 1/sqrt(H)
        stdv = 1. / math.sqrt(self.v.size(0))
        self.v.data.normal_(mean=0, std=stdv)

    def forward(self, hidden, encoder_outputs):
        '''
        :param hidden:
            previous hidden state of the decoder, in shape (layers*directions,B,H)
        :param encoder_outputs:
            encoder outputs from Encoder, in shape (T,B,H)
        :return
            attention energies in shape (B,T)
        '''
        n_steps = encoder_outputs.size(0)
        H = hidden.repeat(n_steps, 1, 1).transpose(0, 1)   # [B*T*H]
        enc = encoder_outputs.transpose(0, 1)              # [B*T*H]
        attn_energies = self.score(H, enc)                 # compute attention score
        return F.softmax(attn_energies, dim=1).unsqueeze(1)  # normalize with softmax

    def score(self, hidden, encoder_outputs):
        energy = torch.tanh(self.attn(torch.cat([hidden, encoder_outputs], 2)))  # [B*T*2H]->[B*T*H]
        energy = energy.transpose(2, 1)                                          # [B*H*T]
        v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1)         # [B*1*H]
        return torch.bmm(v, energy).squeeze(1)                                   # [B*T]
class BahdanauAttnDecoderRNN(nn.Module):
    """Attention-based GRU decoder producing one pose (or token) per call.

    Supports an optional discrete input representation (embedding lookup) and an
    optional 8-dim speaker embedding concatenated to the GRU input.
    """

    def __init__(self, input_size, hidden_size, output_size, n_layers=1, dropout_p=0.1,
                 discrete_representation=False, speaker_model=None):
        super(BahdanauAttnDecoderRNN, self).__init__()

        # define parameters
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout_p = dropout_p
        self.discrete_representation = discrete_representation
        self.speaker_model = speaker_model

        # define embedding layer (only for the discrete-token variant)
        if self.discrete_representation:
            self.embedding = nn.Embedding(output_size, hidden_size)
            self.dropout = nn.Dropout(dropout_p)

        if self.speaker_model:
            self.speaker_embedding = nn.Embedding(speaker_model.n_words, 8)

        # calc input size: motion dim (or embedding dim) + attention context + optional speaker dims
        if self.discrete_representation:
            input_size = hidden_size  # embedding size
        linear_input_size = input_size + hidden_size
        if self.speaker_model:
            linear_input_size += 8

        # define layers
        self.pre_linear = nn.Sequential(
            nn.Linear(linear_input_size, hidden_size),
            nn.BatchNorm1d(hidden_size),
            nn.ReLU(inplace=True)
        )
        self.attn = Attn(hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout_p)

        # self.out = nn.Linear(hidden_size * 2, output_size)
        self.out = nn.Linear(hidden_size, output_size)

        # multi-GPU (DataParallel) requires flattened weight buffers
        self.do_flatten_parameters = False
        if torch.cuda.device_count() > 1:
            self.do_flatten_parameters = True

    def freeze_attn(self):
        # stop updating attention weights (e.g., when fine-tuning the rest)
        for param in self.attn.parameters():
            param.requires_grad = False

    def forward(self, motion_input, last_hidden, encoder_outputs, vid_indices=None):
        '''
        :param motion_input:
            motion input for current time step, in shape [batch x dim]
        :param last_hidden:
            last hidden state of the decoder, in shape [layers x batch x hidden_size]
        :param encoder_outputs:
            encoder outputs in shape [steps x batch x hidden_size]
        :param vid_indices:
        :return
            decoder output
        Note: we run this one step at a time i.e. you should use a outer loop
            to process the whole sequence
        '''
        if self.do_flatten_parameters:
            self.gru.flatten_parameters()

        if self.discrete_representation:
            word_embedded = self.embedding(motion_input).view(1, motion_input.size(0), -1)  # [1 x B x embedding_dim]
            motion_input = self.dropout(word_embedded)
        else:
            motion_input = motion_input.view(1, motion_input.size(0), -1)  # [1 x batch x dim]

        # attention over encoder outputs using the top decoder layer's state
        attn_weights = self.attn(last_hidden[-1], encoder_outputs)  # [batch x 1 x T]
        context = attn_weights.bmm(encoder_outputs.transpose(0, 1))  # [batch x 1 x attn_size]
        context = context.transpose(0, 1)  # [1 x batch x attn_size]

        # make input vec
        rnn_input = torch.cat((motion_input, context), 2)  # [1 x batch x (dim + attn_size)]

        if self.speaker_model:
            assert vid_indices is not None
            speaker_context = self.speaker_embedding(vid_indices).unsqueeze(0)
            rnn_input = torch.cat((rnn_input, speaker_context), 2)  # [1 x batch x (dim + attn_size + embed_size)]

        rnn_input = self.pre_linear(rnn_input.squeeze(0))
        rnn_input = rnn_input.unsqueeze(0)

        # rnn
        output, hidden = self.gru(rnn_input, last_hidden)

        # post-fc
        output = output.squeeze(0)  # [1 x batch x hidden_size] -> [batch x hidden_size]
        output = self.out(output)

        return output, hidden, attn_weights
class Generator(nn.Module):
    """Thin wrapper around BahdanauAttnDecoderRNN that optionally appends a noise vector z."""

    def __init__(self, args, motion_dim, discrete_representation=False, speaker_model=None):
        super(Generator, self).__init__()
        self.output_size = motion_dim
        self.n_layers = args.n_layers
        self.discrete_representation = discrete_representation
        self.decoder = BahdanauAttnDecoderRNN(
            input_size=motion_dim + args.GAN_noise_size,
            hidden_size=args.hidden_size,
            output_size=self.output_size,
            n_layers=self.n_layers,
            dropout_p=args.dropout_prob,
            discrete_representation=discrete_representation,
            speaker_model=speaker_model)

    def freeze_attn(self):
        # delegate to the inner decoder
        self.decoder.freeze_attn()

    def forward(self, z, motion_input, last_hidden, encoder_output, vid_indices=None):
        if z is not None:
            # noise is not meaningful for discrete (index-based) inputs
            assert not self.discrete_representation
            decoder_input = torch.cat([motion_input, z], dim=1)  # [bs x (dim + z_size)]
        else:
            decoder_input = motion_input
        return self.decoder(decoder_input, last_hidden, encoder_output, vid_indices)
class Seq2SeqNet(nn.Module):
    """Text-to-gesture sequence-to-sequence network.

    An RNN text encoder feeds an attentional pose decoder (Generator). The
    decoder is unrolled one frame at a time: the first `n_pre_poses` frames are
    teacher-forced with ground-truth poses, the rest are fed back autoregressively.
    """
    def __init__(self, args, pose_dim, n_frames, n_words, word_embed_size, word_embeddings, speaker_model=None):
        super().__init__()
        self.encoder = EncoderRNN(
            n_words, word_embed_size, args.hidden_size, args.n_layers,
            dropout=args.dropout_prob, pre_trained_embedding=word_embeddings)
        self.decoder = Generator(args, pose_dim, speaker_model=speaker_model)

        # variable for storing outputs
        self.n_frames = n_frames  # total number of generated pose frames
        self.n_pre_poses = args.n_pre_poses  # seed frames taken from ground truth

    def forward(self, in_text, in_lengths, poses, vid_indices):
        # reshape to (seq x batch x dim)
        in_text = in_text.transpose(0, 1)
        poses = poses.transpose(0, 1)

        outputs = torch.zeros(self.n_frames, poses.size(1), self.decoder.output_size).to(poses.device)

        # run words through encoder
        encoder_outputs, encoder_hidden = self.encoder(in_text, in_lengths, None)
        decoder_hidden = encoder_hidden[:self.decoder.n_layers]  # use last hidden state from encoder

        # run through decoder one time step at a time
        decoder_input = poses[0]  # initial pose from the dataset
        outputs[0] = decoder_input

        for t in range(1, self.n_frames):
            # z=None: no explicit noise vector in this path
            decoder_output, decoder_hidden, _ = self.decoder(None, decoder_input, decoder_hidden, encoder_outputs,
                                                             vid_indices)
            outputs[t] = decoder_output

            if t < self.n_pre_poses:
                decoder_input = poses[t]  # next input is current target
            else:
                decoder_input = decoder_output  # next input is current prediction

        return outputs.transpose(0, 1)
| 10,719 | 41.039216 | 117 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/model/embedding_space_evaluator.py | import time
import numpy as np
import torch
import torch.nn.functional as F
import umap
from scipy import linalg
from model.embedding_net import EmbeddingNet
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning) # ignore warnings
class EmbeddingSpaceEvaluator:
    """Quantitative evaluator in a learned gesture-embedding space.

    Encodes real and generated pose sequences with a pretrained EmbeddingNet
    autoencoder (pose mode), accumulates the latent features, and reports the
    Frechet distance between the two latent distributions plus the mean
    latent-space L1 distance.
    """

    def __init__(self, args, embed_net_path, lang_model, device):
        self.n_pre_poses = args.n_pre_poses

        # init embed net from checkpoint
        ckpt = torch.load(embed_net_path, map_location=device)
        n_frames = args.n_poses
        word_embeddings = lang_model.word_embedding_weights
        mode = 'pose'  # pose-autoencoder mode; the context encoder is disabled
        self.pose_dim = ckpt['pose_dim']
        self.net = EmbeddingNet(args, self.pose_dim, n_frames, lang_model.n_words, args.wordembed_dim,
                                word_embeddings, mode).to(device)
        self.net.load_state_dict(ckpt['gen_dict'])
        self.net.train(False)

        # storage for accumulated per-batch features
        self.context_feat_list = []
        self.real_feat_list = []
        self.generated_feat_list = []
        self.recon_err_diff = []

    def reset(self):
        """Clear all accumulated features and reconstruction errors."""
        self.context_feat_list = []
        self.real_feat_list = []
        self.generated_feat_list = []
        self.recon_err_diff = []

    def get_no_of_samples(self):
        """Number of pushed batches."""
        return len(self.real_feat_list)

    def push_samples(self, context_text, context_spec, generated_poses, real_poses):
        """Encode one batch of real and generated poses and store their latents."""
        # convert poses to latent features
        pre_poses = real_poses[:, 0:self.n_pre_poses]
        context_feat, _, _, real_feat, _, _, real_recon = self.net(context_text, context_spec, pre_poses, real_poses,
                                                                   'pose', variational_encoding=False)
        _, _, _, generated_feat, _, _, generated_recon = self.net(None, None, pre_poses, generated_poses,
                                                                  'pose', variational_encoding=False)

        # BUGFIX: `if context_feat:` raises "Boolean value of Tensor with more
        # than one element is ambiguous" whenever the context encoder returns a
        # real tensor; test for presence explicitly instead.
        if context_feat is not None:
            self.context_feat_list.append(context_feat.data.cpu().numpy())
        self.real_feat_list.append(real_feat.data.cpu().numpy())
        self.generated_feat_list.append(generated_feat.data.cpu().numpy())

        # reconstruction error (fake minus real; positive => fakes reconstruct worse)
        recon_err_real = F.l1_loss(real_poses, real_recon).item()
        recon_err_fake = F.l1_loss(generated_poses, generated_recon).item()
        self.recon_err_diff.append(recon_err_fake - recon_err_real)

    def get_features_for_viz(self):
        """Project accumulated real/generated latents to 2-D with UMAP for plotting."""
        generated_feats = np.vstack(self.generated_feat_list)
        real_feats = np.vstack(self.real_feat_list)

        transformed_feats = umap.UMAP().fit_transform(np.vstack((generated_feats, real_feats)))
        n = int(transformed_feats.shape[0] / 2)
        generated_feats = transformed_feats[0:n, :]
        real_feats = transformed_feats[n:, :]
        return real_feats, generated_feats

    def get_scores(self):
        """Return (frechet_dist, feat_dist) over all accumulated samples."""
        generated_feats = np.vstack(self.generated_feat_list)
        real_feats = np.vstack(self.real_feat_list)

        def frechet_distance(samples_A, samples_B):
            # fit a Gaussian to each sample set, then compare the Gaussians
            A_mu = np.mean(samples_A, axis=0)
            A_sigma = np.cov(samples_A, rowvar=False)
            B_mu = np.mean(samples_B, axis=0)
            B_sigma = np.cov(samples_B, rowvar=False)
            try:
                frechet_dist = self.calculate_frechet_distance(A_mu, A_sigma, B_mu, B_sigma)
            except ValueError:
                frechet_dist = 1e+10  # sentinel for numerically failed sqrtm
            return frechet_dist

        ####################################################################
        # frechet distance
        frechet_dist = frechet_distance(generated_feats, real_feats)

        ####################################################################
        # distance between real and generated samples on the latent feature space
        dists = []
        for i in range(real_feats.shape[0]):
            d = np.sum(np.absolute(real_feats[i] - generated_feats[i]))  # MAE
            dists.append(d)
        feat_dist = np.mean(dists)

        return frechet_dist, feat_dist

    @staticmethod
    def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
        """ from https://github.com/mseitzer/pytorch-fid/blob/master/fid_score.py """
        """Numpy implementation of the Frechet Distance.
        The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
        and X_2 ~ N(mu_2, C_2) is
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
        Stable version by Dougal J. Sutherland.
        Params:
        -- mu1   : Numpy array containing the activations of a layer of the
                   inception net (like returned by the function 'get_predictions')
                   for generated samples.
        -- mu2   : The sample mean over activations, precalculated on an
                   representative data set.
        -- sigma1: The covariance matrix over activations for generated samples.
        -- sigma2: The covariance matrix over activations, precalculated on an
                   representative data set.
        Returns:
        --   : The Frechet Distance.
        """
        mu1 = np.atleast_1d(mu1)
        mu2 = np.atleast_1d(mu2)

        sigma1 = np.atleast_2d(sigma1)
        sigma2 = np.atleast_2d(sigma2)

        assert mu1.shape == mu2.shape, \
            'Training and test mean vectors have different lengths'
        assert sigma1.shape == sigma2.shape, \
            'Training and test covariances have different dimensions'

        diff = mu1 - mu2

        # Product might be almost singular
        covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
        if not np.isfinite(covmean).all():
            msg = ('fid calculation produces singular product; '
                   'adding %s to diagonal of cov estimates') % eps
            print(msg)
            offset = np.eye(sigma1.shape[0]) * eps
            covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

        # Numerical error might give slight imaginary component
        if np.iscomplexobj(covmean):
            if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
                m = np.max(np.abs(covmean.imag))
                raise ValueError('Imaginary component {}'.format(m))
            covmean = covmean.real

        tr_covmean = np.trace(covmean)

        return (diff.dot(diff) + np.trace(sigma1) +
                np.trace(sigma2) - 2 * tr_covmean)
| 6,387 | 39.687898 | 117 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/model/embedding_net.py | import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.multimodal_context_net import WavEncoder, TextEncoderTCN
def reparameterize(mu, logvar):
    """Draw z ~ N(mu, sigma^2) via the reparameterization trick (sigma = exp(logvar/2))."""
    sigma = (0.5 * logvar).exp()
    noise = torch.randn_like(sigma)
    return noise * sigma + mu
def ConvNormRelu(in_channels, out_channels, downsample=False, padding=0, batchnorm=True):
    """Conv1d -> (optional BatchNorm1d) -> LeakyReLU(0.2).

    downsample=True uses kernel 4 / stride 2 (halves the length); otherwise
    kernel 3 / stride 1.
    """
    kernel, stride = (4, 2) if downsample else (3, 1)
    layers = [nn.Conv1d(in_channels, out_channels, kernel_size=kernel, stride=stride, padding=padding)]
    if batchnorm:
        layers.append(nn.BatchNorm1d(out_channels))
    layers.append(nn.LeakyReLU(0.2, True))
    return nn.Sequential(*layers)
class PoseEncoderConv(nn.Module):
    """Convolutional pose-sequence encoder producing a 32-d latent code (z, mu, logvar)."""

    def __init__(self, length, dim):
        super().__init__()
        # conv stack over (bs, dim, seq)
        self.net = nn.Sequential(
            ConvNormRelu(dim, 32, batchnorm=True),
            ConvNormRelu(32, 64, batchnorm=True),
            ConvNormRelu(64, 64, True, batchnorm=True),
            nn.Conv1d(64, 32, 3),
        )
        # MLP head; the first Linear is sized for the flattened conv output
        self.out_net = nn.Sequential(
            # nn.Linear(864, 256),  # for 64 frames
            nn.Linear(384, 256),  # for 34 frames
            nn.BatchNorm1d(256),
            nn.LeakyReLU(True),
            nn.Linear(256, 128),
            nn.BatchNorm1d(128),
            nn.LeakyReLU(True),
            nn.Linear(128, 32),
        )
        self.fc_mu = nn.Linear(32, 32)
        self.fc_logvar = nn.Linear(32, 32)

    def forward(self, poses, variational_encoding):
        # encode: (bs, seq, dim) -> (bs, dim, seq) for Conv1d
        conv_feat = self.net(poses.transpose(1, 2))
        head_feat = self.out_net(conv_feat.flatten(1))

        mu = self.fc_mu(head_feat)
        logvar = self.fc_logvar(head_feat)
        # sample only when variational encoding is requested; otherwise use the mean
        z = reparameterize(mu, logvar) if variational_encoding else mu
        return z, mu, logvar
class PoseDecoderFC(nn.Module):
    """Fully-connected decoder: 32-d latent (optionally + 4 seed poses) -> pose sequence."""

    def __init__(self, gen_length, pose_dim, use_pre_poses=False):
        super().__init__()
        self.gen_length = gen_length
        self.pose_dim = pose_dim
        self.use_pre_poses = use_pre_poses

        in_size = 32
        if use_pre_poses:
            # encode 4 seed frames into a 32-d feature appended to the latent
            self.pre_pose_net = nn.Sequential(
                nn.Linear(pose_dim * 4, 32),
                nn.BatchNorm1d(32),
                nn.ReLU(),
                nn.Linear(32, 32),
            )
            in_size += 32

        self.net = nn.Sequential(
            nn.Linear(in_size, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.Linear(128, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, gen_length * pose_dim),
        )

    def forward(self, latent_code, pre_poses=None):
        if not self.use_pre_poses:
            feat = latent_code
        else:
            flattened = pre_poses.reshape(pre_poses.shape[0], -1)
            feat = torch.cat((self.pre_pose_net(flattened), latent_code), dim=1)
        flat_out = self.net(feat)
        return flat_out.view(-1, self.gen_length, self.pose_dim)
class PoseDecoderGRU(nn.Module):
    """Bidirectional-GRU decoder: latent + 4 seed poses, repeated over time, -> pose sequence."""

    def __init__(self, gen_length, pose_dim):
        super().__init__()
        self.gen_length = gen_length
        self.pose_dim = pose_dim
        self.in_size = 32 + 32  # seed-pose feature + latent code
        self.hidden_size = 300

        self.pre_pose_net = nn.Sequential(
            nn.Linear(pose_dim * 4, 32),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            nn.Linear(32, 32),
        )
        self.gru = nn.GRU(self.in_size, hidden_size=self.hidden_size, num_layers=4, batch_first=True,
                          bidirectional=True, dropout=0.3)
        self.out = nn.Sequential(
            nn.Linear(self.hidden_size, self.hidden_size // 2),
            nn.LeakyReLU(True),
            nn.Linear(self.hidden_size // 2, pose_dim)
        )

    def forward(self, latent_code, pre_poses):
        seed_feat = self.pre_pose_net(pre_poses.reshape(pre_poses.shape[0], -1))
        combined = torch.cat((seed_feat, latent_code), dim=1)
        # the same feature vector is fed at every time step
        seq_in = combined.unsqueeze(1).repeat(1, self.gen_length, 1)

        gru_out, _ = self.gru(seq_in)
        # sum forward and backward directions
        merged = gru_out[:, :, :self.hidden_size] + gru_out[:, :, self.hidden_size:]

        flat = self.out(merged.reshape(-1, merged.shape[2]))
        return flat.view(pre_poses.shape[0], self.gen_length, -1)
class PoseDecoderConv(nn.Module):
    """Transposed-conv decoder turning a 32-d latent into a (batch, length, dim) pose sequence."""

    def __init__(self, length, dim, use_pre_poses=False):
        super().__init__()
        self.use_pre_poses = use_pre_poses

        feat_size = 32
        if use_pre_poses:
            self.pre_pose_net = nn.Sequential(
                nn.Linear(dim * 4, 32),
                nn.BatchNorm1d(32),
                nn.ReLU(),
                nn.Linear(32, 32),
            )
            feat_size += 32

        # expand the latent to 4 channels x `length` steps for the conv stack
        if length == 64:
            self.pre_net = nn.Sequential(
                nn.Linear(feat_size, 128),
                nn.BatchNorm1d(128),
                nn.LeakyReLU(True),
                nn.Linear(128, 256),
            )
        elif length == 34:
            self.pre_net = nn.Sequential(
                nn.Linear(feat_size, 64),
                nn.BatchNorm1d(64),
                nn.LeakyReLU(True),
                nn.Linear(64, 136),
            )
        else:
            assert False  # only 34- and 64-frame sequences are supported

        self.net = nn.Sequential(
            nn.ConvTranspose1d(4, 32, 3),
            nn.BatchNorm1d(32),
            nn.LeakyReLU(0.2, True),
            nn.ConvTranspose1d(32, 32, 3),
            nn.BatchNorm1d(32),
            nn.LeakyReLU(0.2, True),
            nn.Conv1d(32, 32, 3),
            nn.Conv1d(32, dim, 3),
        )

    def forward(self, feat, pre_poses=None):
        if self.use_pre_poses:
            seed_feat = self.pre_pose_net(pre_poses.reshape(pre_poses.shape[0], -1))
            feat = torch.cat((seed_feat, feat), dim=1)

        expanded = self.pre_net(feat).view(feat.shape[0], 4, -1)
        decoded = self.net(expanded)
        return decoded.transpose(1, 2)  # (batch, seq, dim)
class ContextEncoder(nn.Module):
    """Encode text + audio context into a 32-d variational latent (z, mu, logvar)."""

    def __init__(self, args, n_frames, n_words, word_embed_size, word_embeddings):
        super().__init__()
        # per-modality encoders (each yields 32-d per-step features)
        self.text_encoder = TextEncoderTCN(args, n_words, word_embed_size, pre_trained_embedding=word_embeddings)
        self.audio_encoder = WavEncoder()

        self.gru = nn.GRU(32 + 32, hidden_size=256, num_layers=2,
                          bidirectional=False, batch_first=True)
        self.out = nn.Sequential(
            nn.Linear(256, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 32)
        )
        self.fc_mu = nn.Linear(32, 32)
        self.fc_logvar = nn.Linear(32, 32)

        # flatten GRU weights once per forward when running under DataParallel
        self.do_flatten_parameters = False
        if torch.cuda.device_count() > 1:
            self.do_flatten_parameters = True

    def forward(self, in_text, in_spec):
        if self.do_flatten_parameters:
            self.gru.flatten_parameters()

        text_feats, _ = self.text_encoder(in_text)
        audio_feats = self.audio_encoder(in_spec)

        fused = torch.cat((audio_feats, text_feats), dim=2)
        seq_out, _ = self.gru(fused)

        # last GRU step summarizes the whole sequence
        summary = self.out(seq_out[:, -1])
        mu = self.fc_mu(summary)
        logvar = self.fc_logvar(summary)
        return reparameterize(mu, logvar), mu, logvar
class EmbeddingNet(nn.Module):
    """Joint embedding of speech context and pose sequences with a shared decoder.

    mode == 'pose': pose autoencoder only (conv encoder + conv decoder).
    any other mode: context encoder + pose encoder + GRU decoder.
    """

    def __init__(self, args, pose_dim, n_frames, n_words, word_embed_size, word_embeddings, mode):
        super().__init__()
        if mode == 'pose':
            self.context_encoder = None
            self.pose_encoder = PoseEncoderConv(n_frames, pose_dim)
            self.decoder = PoseDecoderConv(n_frames, pose_dim)
        else:
            self.context_encoder = ContextEncoder(args, n_frames, n_words, word_embed_size, word_embeddings)
            self.pose_encoder = PoseEncoderConv(n_frames, pose_dim)
            # self.decoder = PoseDecoderFC(n_frames, pose_dim, use_pre_poses=True)
            self.decoder = PoseDecoderGRU(n_frames, pose_dim)
        self.mode = mode

    def forward(self, in_text, in_audio, pre_poses, poses, input_mode=None, variational_encoding=False):
        if input_mode is None:
            assert self.mode is not None
            input_mode = self.mode

        # speech-context branch
        context_feat = context_mu = context_logvar = None
        if self.context_encoder is not None and in_text is not None and in_audio is not None:
            context_feat, context_mu, context_logvar = self.context_encoder(in_text, in_audio)
            # context_feat = F.normalize(context_feat, p=2, dim=1)

        # pose branch
        poses_feat = pose_mu = pose_logvar = None
        if poses is not None:
            poses_feat, pose_mu, pose_logvar = self.pose_encoder(poses, variational_encoding)
            # poses_feat = F.normalize(poses_feat, p=2, dim=1)

        # pick which latent feeds the decoder
        if input_mode == 'random':
            input_mode = 'speech' if random.random() > 0.5 else 'pose'
        if input_mode == 'speech':
            latent_feat = context_feat
        elif input_mode == 'pose':
            latent_feat = poses_feat
        else:
            assert False

        out_poses = self.decoder(latent_feat, pre_poses)
        return context_feat, context_mu, context_logvar, poses_feat, pose_mu, pose_logvar, out_poses

    def freeze_pose_nets(self):
        """Stop gradients into the pose encoder and the decoder."""
        for module in (self.pose_encoder, self.decoder):
            for param in module.parameters():
                param.requires_grad = False
if __name__ == '__main__':
    # for model debugging
    # Round-trip a random pose batch through the conv encoder/decoder and print
    # the tensor shapes at each stage.
    # NOTE(review): PoseEncoderConv's out_net first Linear is sized for 34-frame
    # inputs (Linear(384, ...)); this 64-frame run may shape-mismatch — verify.
    n_frames = 64
    pose_dim = 10
    encoder = PoseEncoderConv(n_frames, pose_dim)
    decoder = PoseDecoderConv(n_frames, pose_dim)

    poses = torch.randn((4, n_frames, pose_dim))
    feat, _, _ = encoder(poses, True)  # variational_encoding=True samples z
    recon_poses = decoder(feat)

    print('input', poses.shape)
    print('feat', feat.shape)
    print('output', recon_poses.shape)
| 10,527 | 30.806647 | 113 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/model/multimodal_context_net.py | import torch
import torch.nn as nn
from model import vocab
import model.embedding_net
from model.tcn import TemporalConvNet
class WavEncoder(nn.Module):
    """Strided 1-D conv stack turning a raw waveform (B, samples) into (B, frames, 32)."""

    def __init__(self):
        super().__init__()
        self.feat_extractor = nn.Sequential(
            nn.Conv1d(1, 16, 15, stride=5, padding=1600),
            nn.BatchNorm1d(16),
            nn.LeakyReLU(0.3, inplace=True),
            nn.Conv1d(16, 32, 15, stride=6),
            nn.BatchNorm1d(32),
            nn.LeakyReLU(0.3, inplace=True),
            nn.Conv1d(32, 64, 15, stride=6),
            nn.BatchNorm1d(64),
            nn.LeakyReLU(0.3, inplace=True),
            nn.Conv1d(64, 32, 15, stride=6),
        )

    def forward(self, wav_data):
        channelled = wav_data.unsqueeze(1)          # add channel dim -> (B, 1, samples)
        features = self.feat_extractor(channelled)  # (B, 32, frames)
        return features.transpose(1, 2)             # to (batch x seq x dim)
class TextEncoderTCN(nn.Module):
    """ based on https://github.com/locuslab/TCN/blob/master/TCN/word_cnn/model.py

    Temporal-convolutional text encoder: word embedding -> TCN -> linear to
    32-d per-step features. forward() returns (features, 0); the constant 0 is
    a placeholder so the interface matches encoders that also return a state.
    """
    def __init__(self, args, n_words, embed_size=300, pre_trained_embedding=None,
                 kernel_size=2, dropout=0.3, emb_dropout=0.1):
        super(TextEncoderTCN, self).__init__()

        if pre_trained_embedding is not None:  # use pre-trained embedding (fasttext)
            assert pre_trained_embedding.shape[0] == n_words
            assert pre_trained_embedding.shape[1] == embed_size
            self.embedding = nn.Embedding.from_pretrained(torch.FloatTensor(pre_trained_embedding),
                                                          freeze=args.freeze_wordembed)
        else:
            self.embedding = nn.Embedding(n_words, embed_size)

        # one TCN level per args.n_layers, all with args.hidden_size channels
        num_channels = [args.hidden_size] * args.n_layers
        self.tcn = TemporalConvNet(embed_size, num_channels, kernel_size, dropout=dropout)

        self.decoder = nn.Linear(num_channels[-1], 32)
        self.drop = nn.Dropout(emb_dropout)
        self.emb_dropout = emb_dropout
        self.init_weights()

    def init_weights(self):
        # small random init for the output projection
        self.decoder.bias.data.fill_(0)
        self.decoder.weight.data.normal_(0, 0.01)

    def forward(self, input):
        emb = self.drop(self.embedding(input))
        # TCN expects (batch, channels, steps); transpose in and out
        y = self.tcn(emb.transpose(1, 2)).transpose(1, 2)
        y = self.decoder(y)
        return y.contiguous(), 0
class PoseGenerator(nn.Module):
    """Trimodal gesture generator.

    Runs a bidirectional GRU over the concatenation of the seed pose sequence,
    per-frame audio/text features (selected by `args.input_context`: 'both',
    'audio', 'text', or 'none'), and an optional style vector z. When `z_obj`
    is a vocab.Vocab, z is a learned variational speaker embedding; any other
    truthy z_obj means plain Gaussian noise; falsy disables z.
    """
    def __init__(self, args, pose_dim, n_words, word_embed_size, word_embeddings, z_obj=None):
        super().__init__()
        self.pre_length = args.n_pre_poses
        self.gen_length = args.n_poses - args.n_pre_poses
        self.z_obj = z_obj
        self.input_context = args.input_context

        if self.input_context == 'both':
            self.in_size = 32 + 32 + pose_dim + 1  # audio_feat + text_feat + last pose + constraint bit
        elif self.input_context == 'none':
            self.in_size = pose_dim + 1
        else:
            self.in_size = 32 + pose_dim + 1  # audio or text only

        self.audio_encoder = WavEncoder()
        self.text_encoder = TextEncoderTCN(args, n_words, word_embed_size, pre_trained_embedding=word_embeddings,
                                           dropout=args.dropout_prob)

        self.speaker_embedding = None
        if self.z_obj:
            self.z_size = 16
            self.in_size += self.z_size
            if isinstance(self.z_obj, vocab.Vocab):
                # learned speaker embedding, sampled variationally in forward()
                self.speaker_embedding = nn.Sequential(
                    nn.Embedding(z_obj.n_words, self.z_size),
                    nn.Linear(self.z_size, self.z_size)
                )
                self.speaker_mu = nn.Linear(self.z_size, self.z_size)
                self.speaker_logvar = nn.Linear(self.z_size, self.z_size)
            else:
                pass  # random noise

        self.hidden_size = args.hidden_size
        self.gru = nn.GRU(self.in_size, hidden_size=self.hidden_size, num_layers=args.n_layers, batch_first=True,
                          bidirectional=True, dropout=args.dropout_prob)
        self.out = nn.Sequential(
            nn.Linear(self.hidden_size, self.hidden_size//2),
            nn.LeakyReLU(True),
            nn.Linear(self.hidden_size//2, pose_dim)
        )

        # flatten GRU weights once per forward when running under DataParallel
        self.do_flatten_parameters = False
        if torch.cuda.device_count() > 1:
            self.do_flatten_parameters = True

    def forward(self, pre_seq, in_text, in_audio, vid_indices=None):
        decoder_hidden = None
        if self.do_flatten_parameters:
            self.gru.flatten_parameters()

        text_feat_seq = audio_feat_seq = None
        if self.input_context != 'none':
            # audio
            audio_feat_seq = self.audio_encoder(in_audio)  # output (bs, n_frames, feat_size)

            # text
            text_feat_seq, _ = self.text_encoder(in_text)
            assert(audio_feat_seq.shape[1] == text_feat_seq.shape[1])

        # z vector; speaker embedding or random noise
        if self.z_obj:
            if self.speaker_embedding:
                assert vid_indices is not None
                z_context = self.speaker_embedding(vid_indices)
                z_mu = self.speaker_mu(z_context)
                z_logvar = self.speaker_logvar(z_context)
                z_context = model.embedding_net.reparameterize(z_mu, z_logvar)
            else:
                z_mu = z_logvar = None
                z_context = torch.randn(in_text.shape[0], self.z_size, device=in_text.device)
        else:
            z_mu = z_logvar = None
            z_context = None

        # assemble the per-frame GRU input according to the context mode
        if self.input_context == 'both':
            in_data = torch.cat((pre_seq, audio_feat_seq, text_feat_seq), dim=2)
        elif self.input_context == 'audio':
            in_data = torch.cat((pre_seq, audio_feat_seq), dim=2)
        elif self.input_context == 'text':
            in_data = torch.cat((pre_seq, text_feat_seq), dim=2)
        elif self.input_context == 'none':
            in_data = pre_seq
        else:
            assert False

        if z_context is not None:
            # broadcast the style vector over every time step
            repeated_z = z_context.unsqueeze(1)
            repeated_z = repeated_z.repeat(1, in_data.shape[1], 1)
            in_data = torch.cat((in_data, repeated_z), dim=2)

        output, decoder_hidden = self.gru(in_data, decoder_hidden)
        output = output[:, :, :self.hidden_size] + output[:, :, self.hidden_size:]  # sum bidirectional outputs
        output = self.out(output.reshape(-1, output.shape[2]))
        decoder_outputs = output.reshape(in_data.shape[0], in_data.shape[1], -1)

        return decoder_outputs, z_context, z_mu, z_logvar
class Discriminator(nn.Module):
    """Sequence discriminator over pose sequences, optionally conditioned on text.

    Scores a pose sequence [batch x n_poses x input_size] with a bidirectional
    GRU, a per-step linear score, and an aggregation over time steps, returning
    a sigmoid probability [batch x 1].
    """

    def __init__(self, args, input_size, n_words=None, word_embed_size=None, word_embeddings=None):
        super().__init__()
        self.input_size = input_size

        if n_words and word_embed_size:
            # BUGFIX: TextEncoderTCN's signature is (args, n_words, embed_size,
            # pre_trained_embedding, ...); the original call omitted `args`,
            # shifting every argument one position to the left.
            self.text_encoder = TextEncoderTCN(args, n_words, word_embed_size,
                                               pre_trained_embedding=word_embeddings)
            input_size += 32  # text features are concatenated to each pose frame
        else:
            self.text_encoder = None

        self.hidden_size = args.hidden_size
        self.gru = nn.GRU(input_size, hidden_size=self.hidden_size, num_layers=args.n_layers, bidirectional=True,
                          dropout=args.dropout_prob, batch_first=True)
        self.out = nn.Linear(self.hidden_size, 1)   # per-step score
        self.out2 = nn.Linear(args.n_poses, 1)      # aggregate over time steps

        # flatten GRU weights once per forward when running under DataParallel
        self.do_flatten_parameters = False
        if torch.cuda.device_count() > 1:
            self.do_flatten_parameters = True

    def forward(self, poses, in_text=None):
        decoder_hidden = None
        if self.do_flatten_parameters:
            self.gru.flatten_parameters()

        if self.text_encoder:
            text_feat_seq, _ = self.text_encoder(in_text)
            poses = torch.cat((poses, text_feat_seq), dim=2)

        output, decoder_hidden = self.gru(poses, decoder_hidden)
        output = output[:, :, :self.hidden_size] + output[:, :, self.hidden_size:]  # sum bidirectional outputs

        # score every time step, then aggregate the per-step scores
        batch_size = poses.shape[0]
        output = output.contiguous().view(-1, output.shape[2])
        output = self.out(output)  # apply linear to every output
        output = output.view(batch_size, -1)
        output = self.out2(output)
        output = torch.sigmoid(output)
        return output
class ConvDiscriminator(nn.Module):
    """Pose-sequence discriminator: small conv front-end, 4-layer bi-GRU, per-step scoring head."""

    def __init__(self, input_size):
        super().__init__()
        self.input_size = input_size

        self.hidden_size = 64
        self.pre_conv = nn.Sequential(
            nn.Conv1d(input_size, 16, 3),
            nn.BatchNorm1d(16),
            nn.LeakyReLU(True),
            nn.Conv1d(16, 8, 3),
            nn.BatchNorm1d(8),
            nn.LeakyReLU(True),
            nn.Conv1d(8, 8, 3),
        )

        self.gru = nn.GRU(8, hidden_size=self.hidden_size, num_layers=4, bidirectional=True,
                          dropout=0.3, batch_first=True)
        self.out = nn.Linear(self.hidden_size, 1)
        self.out2 = nn.Linear(28, 1)  # aggregates the 28 per-step scores (34-frame input minus 3x conv shrink)

        # flatten GRU weights once per forward when running under DataParallel
        self.do_flatten_parameters = False
        if torch.cuda.device_count() > 1:
            self.do_flatten_parameters = True

    def forward(self, poses, in_text=None):
        if self.do_flatten_parameters:
            self.gru.flatten_parameters()

        conv_feat = self.pre_conv(poses.transpose(1, 2)).transpose(1, 2)  # (bs, steps, 8)
        rnn_out, _ = self.gru(conv_feat, None)
        # sum forward and backward GRU directions
        rnn_out = rnn_out[:, :, :self.hidden_size] + rnn_out[:, :, self.hidden_size:]

        n_batch = poses.shape[0]
        per_step = self.out(rnn_out.contiguous().view(-1, rnn_out.shape[2]))  # score each step
        aggregated = self.out2(per_step.view(n_batch, -1))                    # combine over time
        return torch.sigmoid(aggregated)
| 9,831 | 37.86166 | 113 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/model/speech2gesture.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
""" reimplement speech2gesture model(https://github.com/amirbar/speech2gesture) with pytorch """
class Conv2d_tf(nn.Conv2d):
    """
    Conv2d with the padding behavior from TF
    from https://github.com/mlperf/inference/blob/482f6a3beb7af2fb0bd2d91d6185d5e71c22c55f/others/edge/object_detection/ssd_mobilenet/pytorch/utils.py
    """

    def __init__(self, *args, **kwargs):
        super(Conv2d_tf, self).__init__(*args, **kwargs)
        # TF-style padding mode string ("SAME" or "VALID"); this deliberately
        # overwrites nn.Conv2d's numeric `padding` attribute after the parent
        # constructor has run.
        # NOTE(review): `padding` is also forwarded to nn.Conv2d via **kwargs —
        # verify the installed torch version tolerates the string value there.
        self.padding = kwargs.get("padding", "SAME")

    def _compute_padding(self, input, dim):
        # TF "SAME": total padding such that out_size == ceil(in_size / stride)
        input_size = input.size(dim + 2)      # spatial size along this dim (skip N, C)
        filter_size = self.weight.size(dim + 2)
        effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
        out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
        total_padding = max(
            0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size
        )
        # odd totals need one extra pixel on the trailing side (TF pads asymmetrically)
        additional_padding = int(total_padding % 2 != 0)

        return additional_padding, total_padding

    def forward(self, input):
        if self.padding == "VALID":
            # no padding at all, like TF "VALID"
            return F.conv2d(
                input,
                self.weight,
                self.bias,
                self.stride,
                padding=0,
                dilation=self.dilation,
                groups=self.groups,
            )
        rows_odd, padding_rows = self._compute_padding(input, dim=0)
        cols_odd, padding_cols = self._compute_padding(input, dim=1)
        if rows_odd or cols_odd:
            # pre-pad the trailing edge by one so symmetric conv padding matches TF
            input = F.pad(input, [0, cols_odd, 0, rows_odd])

        return F.conv2d(
            input,
            self.weight,
            self.bias,
            self.stride,
            padding=(padding_rows // 2, padding_cols // 2),
            dilation=self.dilation,
            groups=self.groups,
        )
class Conv1d_tf(nn.Conv1d):
    """
    Conv1d with the padding behavior from TF
    modified from https://github.com/mlperf/inference/blob/482f6a3beb7af2fb0bd2d91d6185d5e71c22c55f/others/edge/object_detection/ssd_mobilenet/pytorch/utils.py
    """

    def __init__(self, *args, **kwargs):
        super(Conv1d_tf, self).__init__(*args, **kwargs)
        # TF-style padding mode string ("SAME" or "VALID"); overwrites
        # nn.Conv1d's numeric `padding` attribute after the parent constructor.
        # NOTE(review): `padding` is also forwarded to nn.Conv1d via **kwargs —
        # verify the installed torch version tolerates the string value there.
        self.padding = kwargs.get("padding", "SAME")

    def _compute_padding(self, input, dim):
        # TF "SAME": total padding such that out_size == ceil(in_size / stride)
        input_size = input.size(dim + 2)      # temporal size (skip N, C)
        filter_size = self.weight.size(dim + 2)
        effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
        out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
        total_padding = max(
            0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size
        )
        # odd totals need one extra step on the trailing side (TF pads asymmetrically)
        additional_padding = int(total_padding % 2 != 0)

        return additional_padding, total_padding

    def forward(self, input):
        if self.padding == "VALID":
            # no padding at all, like TF "VALID"
            return F.conv1d(
                input,
                self.weight,
                self.bias,
                self.stride,
                padding=0,
                dilation=self.dilation,
                groups=self.groups,
            )
        rows_odd, padding_rows = self._compute_padding(input, dim=0)
        if rows_odd:
            # pre-pad the trailing edge by one so symmetric conv padding matches TF
            input = F.pad(input, [0, rows_odd])

        return F.conv1d(
            input,
            self.weight,
            self.bias,
            self.stride,
            padding=(padding_rows // 2),
            dilation=self.dilation,
            groups=self.groups,
        )
def ConvNormRelu(in_channels, out_channels, type='1d', downsample=False, k=None, s=None, padding='SAME'):
    """Conv (TF-style padding) -> BatchNorm -> LeakyReLU(0.2), in 1-D or 2-D.

    When neither k nor s is given, downsample=True uses kernel 4 / stride 2,
    otherwise kernel 3 / stride 1.
    """
    if k is None and s is None:
        k, s = (4, 2) if downsample else (3, 1)

    if type == '1d':
        conv_block = Conv1d_tf(in_channels, out_channels, kernel_size=k, stride=s, padding=padding)
        norm_block = nn.BatchNorm1d(out_channels)
    elif type == '2d':
        conv_block = Conv2d_tf(in_channels, out_channels, kernel_size=k, stride=s, padding=padding)
        norm_block = nn.BatchNorm2d(out_channels)
    else:
        assert False  # unsupported conv type

    return nn.Sequential(conv_block, norm_block, nn.LeakyReLU(0.2, True))
class UnetUp(nn.Module):
    """U-Net upsampling step: repeat x2 along time, add the skip tensor, then conv."""

    def __init__(self, in_ch, out_ch):
        super(UnetUp, self).__init__()
        self.conv = ConvNormRelu(in_ch, out_ch)

    def forward(self, x1, x2):
        upsampled = torch.repeat_interleave(x1, 2, dim=2)[:, :, :x2.shape[2]]  # trim to skip length
        # speech2gesture ADDS the skip connection instead of concatenating (unlike classic U-Net)
        return self.conv(upsampled + x2)
class AudioEncoder(nn.Module):
    """Spectrogram encoder from speech2gesture.

    A 2-D conv stack processes the spectrogram, the result is resampled to
    `n_frames` time steps, and a 1-D U-Net (down1..down6 / up1..up5 with
    additive skips) refines the temporal features.
    """
    def __init__(self, n_frames):
        super().__init__()
        self.n_frames = n_frames
        self.first_net = nn.Sequential(
            ConvNormRelu(1, 64, '2d', False),
            ConvNormRelu(64, 64, '2d', True),
            ConvNormRelu(64, 128, '2d', False),
            ConvNormRelu(128, 128, '2d', True),
            ConvNormRelu(128, 256, '2d', False),
            ConvNormRelu(256, 256, '2d', True),
            ConvNormRelu(256, 256, '2d', False),
            ConvNormRelu(256, 256, '2d', False, padding='VALID')
        )
        # resample to (n_frames, 1) so the map can be squeezed to a 1-D sequence
        self.make_1d = torch.nn.Upsample((n_frames, 1), mode='bilinear', align_corners=False)
        self.down1 = nn.Sequential(
            ConvNormRelu(256, 256, '1d', False),
            ConvNormRelu(256, 256, '1d', False)
        )
        self.down2 = ConvNormRelu(256, 256, '1d', True)
        self.down3 = ConvNormRelu(256, 256, '1d', True)
        self.down4 = ConvNormRelu(256, 256, '1d', True)
        self.down5 = ConvNormRelu(256, 256, '1d', True)
        self.down6 = ConvNormRelu(256, 256, '1d', True)
        self.up1 = UnetUp(256, 256)
        self.up2 = UnetUp(256, 256)
        self.up3 = UnetUp(256, 256)
        self.up4 = UnetUp(256, 256)
        self.up5 = UnetUp(256, 256)

    def forward(self, spectrogram):
        spectrogram = spectrogram.unsqueeze(1)  # add channel dim
        # print(spectrogram.shape)
        spectrogram = spectrogram.float()
        out = self.first_net(spectrogram)
        out = self.make_1d(out)
        x1 = out.squeeze(3)  # drop the singleton trailing axis -> (bs, 256, n_frames)

        # downsampling path (keep intermediates for the skip connections)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        x6 = self.down5(x5)
        x7 = self.down6(x6)

        # upsampling path with additive skips
        x = self.up1(x7, x6)
        x = self.up2(x, x5)
        x = self.up3(x, x4)
        x = self.up4(x, x3)
        x = self.up5(x, x2)
        return x
class Generator(nn.Module):
    """speech2gesture generator: audio U-Net features + seed-pose code -> pose sequence."""

    def __init__(self, n_poses, pose_dim, n_pre_poses):
        super().__init__()
        self.gen_length = n_poses

        self.audio_encoder = AudioEncoder(n_poses)
        # encode the flattened seed poses into a small 16-d code
        self.pre_pose_encoder = nn.Sequential(
            nn.Linear(n_pre_poses * pose_dim, 32),
            nn.BatchNorm1d(32),
            nn.ReLU(inplace=True),
            nn.Linear(32, 16)
        )

        self.decoder = nn.Sequential(
            ConvNormRelu(256 + 16, 256),
            ConvNormRelu(256, 256),
            ConvNormRelu(256, 256),
            ConvNormRelu(256, 256)
        )
        self.final_out = nn.Conv1d(256, pose_dim, 1, 1)

    def forward(self, in_spec, pre_poses):
        audio_code = self.audio_encoder(in_spec)  # (bs, 256, n_frames)
        seed_code = self.pre_pose_encoder(pre_poses.reshape(pre_poses.shape[0], -1))  # (bs, 16)
        seed_code = seed_code.unsqueeze(2).repeat(1, 1, self.gen_length)  # broadcast over time

        fused = torch.cat((audio_code, seed_code), dim=1)
        decoded = self.final_out(self.decoder(fused))
        return decoded.transpose(1, 2)  # to (batch, seq, dim)
class Discriminator(nn.Module):
    """Convolutional discriminator over frame-to-frame pose differences (per-position scores)."""

    def __init__(self, pose_dim):
        super().__init__()
        self.net = nn.Sequential(
            Conv1d_tf(pose_dim, 64, kernel_size=4, stride=2, padding='SAME'),
            nn.LeakyReLU(0.2, True),
            ConvNormRelu(64, 128, '1d', True),
            ConvNormRelu(128, 256, '1d', k=4, s=1),
            Conv1d_tf(256, 1, kernel_size=4, stride=1, padding='SAME'),
        )

    def forward(self, x):
        diffs = x[:, 1:] - x[:, :-1]  # pose differences
        return self.net(diffs.transpose(1, 2))  # to (batch, dim, seq)
if __name__ == '__main__':
    # Quick shape-sanity check of the generator/discriminator pair.
    pose_dim = 16
    generator = Generator(64, pose_dim, 4)
    sample_spec = torch.randn((4, 128, 64))
    seed_poses = torch.randn((4, 4, pose_dim))
    fake_poses = generator(sample_spec, seed_poses)
    print('spectrogram', sample_spec.shape)
    print('output', fake_poses.shape)
    discriminator = Discriminator(pose_dim)
    scores = discriminator(fake_poses)
    print('discrimination output', scores.shape)
| 8,841 | 32.116105 | 159 | py |
Gesture-Generation-from-Trimodal-Context | Gesture-Generation-from-Trimodal-Context-master/scripts/model/tcn.py | """ from https://github.com/locuslab/TCN/blob/master/TCN/tcn.py """
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
class Chomp1d(nn.Module):
    """Remove trailing padding from a causal 1-D convolution output.

    The TCN convolutions pad both sides of the sequence; chomping the last
    ``chomp_size`` steps restores causality so no output sees future inputs.
    """
    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # Guard the zero case: x[:, :, :-0] would return an *empty* tensor,
        # silently destroying the signal when padding happens to be 0
        # (e.g. kernel_size == 1).
        if self.chomp_size == 0:
            return x.contiguous()
        return x[:, :, :-self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
    """Two weight-normalized causal conv layers with a residual connection."""
    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalBlock, self).__init__()
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)

        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)

        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
                                 self.conv2, self.chomp2, self.relu2, self.dropout2)
        # 1x1 conv matches channel counts on the residual path when needed.
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        """Initialise every conv weight from N(0, 0.01)."""
        convs = [self.conv1, self.conv2]
        if self.downsample is not None:
            convs.append(self.downsample)
        for conv in convs:
            conv.weight.data.normal_(0, 0.01)

    def forward(self, x):
        out = self.net(x)
        residual = x if self.downsample is None else self.downsample(x)
        return self.relu(out + residual)
class TemporalConvNet(nn.Module):
    """Stack of TemporalBlocks with exponentially growing dilation per level."""
    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        blocks = []
        for level, out_channels in enumerate(num_channels):
            dilation = 2 ** level
            in_channels = num_inputs if level == 0 else num_channels[level - 1]
            blocks.append(TemporalBlock(in_channels, out_channels, kernel_size,
                                        stride=1, dilation=dilation,
                                        padding=(kernel_size - 1) * dilation,
                                        dropout=dropout))
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        return self.network(x)
| 2,536 | 38.030769 | 110 | py |
neuralqa | neuralqa-master/setup.py | import os
from importlib.machinery import SourceFileLoader
from setuptools import setup, find_packages
# Load the package version without importing the package itself (avoids
# pulling in the package's heavy dependencies at install time).
version = SourceFileLoader('neuralqa.version', os.path.join(
    'neuralqa', 'version.py')).load_module().VERSION
def package_files(directory):
    """Collect every file under *directory* as setuptools package-data paths.

    Paths are prefixed with '..' because package_data entries are resolved
    relative to the package directory.
    """
    return [
        os.path.join('..', dirpath, filename)
        for dirpath, _, filenames in os.walk(directory)
        for filename in filenames
    ]
ui_files = package_files("neuralqa/server/ui/build")
yaml_file = ["config_default.yaml"]

# Read the long description with a context manager so the file handle is
# closed deterministically (the original bare open() leaked it), and with an
# explicit encoding so installation does not depend on the platform default.
with open('README.md', encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='neuralqa',
    packages=find_packages(exclude=['tests', 'tests.*']),
    package_data={"neuralqa": ui_files + yaml_file},
    version=version,
    license='MIT',
    description='NeuralQA: Question Answering on Large Datasets',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='Victor Dibia',
    url='https://github.com/victordibia/neuralqa',
    python_requires='>=3.5',
    # download_url='https://github.com/victordibia/neuralqa/archive/v0.0.2.tar.gz',
    keywords=['NLP', 'Question Answering', 'Machine Learning'],
    install_requires=[
        'fastapi',
        'aiofiles',
        'uvicorn',
        'numpy',
        'tensorflow>=2.1.0',
        'torch',
        'torchvision',
        'transformers',
        'elasticsearch>=7.7.1',
        'pyyaml>=3.13',
        'spacy'
    ],
    extras_require={
        'test': ['pytest']
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
    entry_points={
        "console_scripts": [
            "neuralqa=neuralqa.cli:cli",
        ]
    }
)
| 1,762 | 27.435484 | 83 | py |
DeepAligned-Clustering | DeepAligned-Clustering-main/pretrain.py | from util import *
from model import *
from dataloader import *
class PretrainModelManager:
    """Supervised pre-training stage.

    Fine-tunes BertForModel on the labeled known-class examples only, keeps
    the best checkpoint by eval accuracy, and can persist it to disk for the
    later clustering stage.
    """
    def __init__(self, args, data):
        set_seed(args.seed)
        self.model = BertForModel.from_pretrained(args.bert_model, cache_dir = "", num_labels = data.n_known_cls)
        if args.freeze_bert_parameters:
            self.freeze_parameters(self.model)
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)
        n_gpu = torch.cuda.device_count()
        if n_gpu > 1:
            self.model = torch.nn.DataParallel(self.model)
        # Total optimizer steps, needed by BertAdam's warmup schedule.
        self.num_train_optimization_steps = int(len(data.train_labeled_examples) / args.train_batch_size) * args.num_train_epochs
        self.optimizer = self.get_optimizer(args)
        self.best_eval_score = 0

    def eval(self, args, data):
        """Return classification accuracy (percent, 2 decimals) on the eval set."""
        self.model.eval()
        total_labels = torch.empty(0,dtype=torch.long).to(self.device)
        total_logits = torch.empty((0, data.n_known_cls)).to(self.device)
        for batch in tqdm(data.eval_dataloader, desc="Iteration"):
            batch = tuple(t.to(self.device) for t in batch)
            input_ids, input_mask, segment_ids, label_ids = batch
            with torch.set_grad_enabled(False):
                _, logits = self.model(input_ids, segment_ids, input_mask, mode = 'eval')
                total_labels = torch.cat((total_labels,label_ids))
                total_logits = torch.cat((total_logits, logits))
        total_probs, total_preds = F.softmax(total_logits.detach(), dim=1).max(dim = 1)
        y_pred = total_preds.cpu().numpy()
        y_true = total_labels.cpu().numpy()
        acc = round(accuracy_score(y_true, y_pred) * 100, 2)
        return acc

    def train(self, args, data):
        """Fine-tune with early stopping (patience = args.wait_patient) on
        eval accuracy; restores the best checkpoint at the end."""
        wait = 0
        best_model = None
        for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
            self.model.train()
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(data.train_labeled_dataloader, desc="Iteration")):
                batch = tuple(t.to(self.device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                with torch.set_grad_enabled(True):
                    loss = self.model(input_ids, segment_ids, input_mask, label_ids, mode = "train")
                    loss.backward()
                    tr_loss += loss.item()
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                    nb_tr_examples += input_ids.size(0)
                    nb_tr_steps += 1
            loss = tr_loss / nb_tr_steps
            print('train_loss',loss)
            eval_score = self.eval(args, data)
            print('eval_score',eval_score)
            # Early stopping on eval accuracy.
            if eval_score > self.best_eval_score:
                best_model = copy.deepcopy(self.model)
                wait = 0
                self.best_eval_score = eval_score
            else:
                wait += 1
                if wait >= args.wait_patient:
                    break
        self.model = best_model
        if args.save_model:
            self.save_model(args)

    def get_optimizer(self, args):
        """Build BertAdam with weight decay on all params except bias/LayerNorm."""
        param_optimizer = list(self.model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = BertAdam(optimizer_grouped_parameters,
                         lr = args.lr_pre,
                         warmup = args.warmup_proportion,
                         t_total = self.num_train_optimization_steps)
        return optimizer

    def save_model(self, args):
        """Persist the model's weights and config to args.pretrain_dir."""
        if not os.path.exists(args.pretrain_dir):
            os.makedirs(args.pretrain_dir)
        # NOTE(review): this assignment rebinds the *method name* on the
        # instance — after this line `self.save_model` is a model object, not
        # this method. It works for a single call but is fragile.
        self.save_model = self.model.module if hasattr(self.model, 'module') else self.model
        model_file = os.path.join(args.pretrain_dir, WEIGHTS_NAME)
        model_config_file = os.path.join(args.pretrain_dir, CONFIG_NAME)
        torch.save(self.save_model.state_dict(), model_file)
        with open(model_config_file, "w") as f:
            f.write(self.save_model.config.to_json_string())

    def freeze_parameters(self,model):
        """Freeze all BERT layers except the last encoder layer and the pooler."""
        for name, param in model.bert.named_parameters():
            param.requires_grad = False
            if "encoder.layer.11" in name or "pooler" in name:
                param.requires_grad = True
| 4,907 | 40.59322 | 129 | py |
DeepAligned-Clustering | DeepAligned-Clustering-main/DeepAligned.py | from model import *
from init_parameter import *
from dataloader import *
from pretrain import *
from util import *
class ModelManager:
    """Deep Aligned Clustering manager.

    Discovers new intents by iterating: (1) extract BERT features,
    (2) cluster them with K-Means, (3) align the new centroids with the
    previous epoch's centroids via the Hungarian algorithm so pseudo-label
    ids stay stable, and (4) fine-tune the encoder on those pseudo-labels.
    """
    def __init__(self, args, data, pretrained_model=None):
        if pretrained_model is None:
            pretrained_model = BertForModel.from_pretrained(args.bert_model, cache_dir = "", num_labels = data.n_known_cls)
            if os.path.exists(args.pretrain_dir):
                # Fix: restore_model takes (args, model); the original call
                # passed a single (nonexistent) args.pretrained_model
                # attribute, which raised at runtime.
                pretrained_model = self.restore_model(args, pretrained_model)
        self.pretrained_model = pretrained_model
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Optionally over-partition first, then estimate K by dropping tiny
        # clusters (see predict_k).
        if args.cluster_num_factor > 1:
            self.num_labels = self.predict_k(args, data)
        else:
            self.num_labels = data.num_labels
        self.model = BertForModel.from_pretrained(args.bert_model, cache_dir = "", num_labels = self.num_labels)
        if args.pretrain:
            self.load_pretrained_model(args)
        if args.freeze_bert_parameters:
            self.freeze_parameters(self.model)
        self.model.to(self.device)
        num_train_examples = len(data.train_labeled_examples) + len(data.train_unlabeled_examples)
        self.num_train_optimization_steps = int(num_train_examples / args.train_batch_size) * args.num_train_epochs
        self.optimizer = self.get_optimizer(args)
        self.best_eval_score = 0
        self.centroids = None
        self.test_results = None
        self.predictions = None
        self.true_labels = None

    def get_features_labels(self, dataloader, model, args):
        """Run *model* over *dataloader*; return (features, labels) tensors."""
        model.eval()
        total_features = torch.empty((0,args.feat_dim)).to(self.device)
        total_labels = torch.empty(0,dtype=torch.long).to(self.device)
        for batch in tqdm(dataloader, desc="Extracting representation"):
            batch = tuple(t.to(self.device) for t in batch)
            input_ids, input_mask, segment_ids, label_ids = batch
            with torch.no_grad():
                feature = model(input_ids, segment_ids, input_mask, feature_ext = True)
            total_features = torch.cat((total_features, feature))
            total_labels = torch.cat((total_labels, label_ids))
        return total_features, total_labels

    def predict_k(self, args, data):
        """Estimate the number of clusters K.

        Over-clusters with K-Means, then discards clusters smaller than the
        expected average cluster size.
        """
        feats, _ = self.get_features_labels(data.train_semi_dataloader, self.pretrained_model, args)
        feats = feats.cpu().numpy()
        km = KMeans(n_clusters = data.num_labels).fit(feats)
        y_pred = km.labels_
        pred_label_list = np.unique(y_pred)
        drop_out = len(feats) / data.num_labels  # expected average cluster size
        print('drop',drop_out)
        cnt = 0
        for label in pred_label_list:
            num = len(y_pred[y_pred == label])
            if num < drop_out:
                cnt += 1
        num_labels = len(pred_label_list) - cnt
        print('pred_num',num_labels)
        return num_labels

    def get_optimizer(self, args):
        """Build BertAdam with weight decay on all params except bias/LayerNorm."""
        param_optimizer = list(self.model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = BertAdam(optimizer_grouped_parameters,
                         lr = args.lr,
                         warmup = args.warmup_proportion,
                         t_total = self.num_train_optimization_steps)
        return optimizer

    def evaluation(self, args, data):
        """Cluster test-set features; report ACC/ARI/NMI and a confusion
        matrix under the optimal cluster-to-label mapping."""
        feats, labels = self.get_features_labels(data.test_dataloader, self.model, args)
        feats = feats.cpu().numpy()
        km = KMeans(n_clusters = self.num_labels).fit(feats)
        y_pred = km.labels_
        y_true = labels.cpu().numpy()
        results = clustering_score(y_true, y_pred)
        print('results',results)
        # Map cluster ids onto true labels (Hungarian) before the confusion matrix.
        ind, _ = hungray_aligment(y_true, y_pred)
        map_ = {i[0]:i[1] for i in ind}
        y_pred = np.array([map_[idx] for idx in y_pred])
        cm = confusion_matrix(y_true,y_pred)
        print('confusion matrix',cm)
        self.test_results = results
        # NOTE: __main__ also calls save_results, so results are written twice.
        self.save_results(args)

    def alignment(self, km, args):
        """Align this epoch's centroids with the previous epoch's.

        Uses the Hungarian algorithm on the centroid distance matrix so that
        pseudo-label ids stay consistent across epochs; returns the remapped
        pseudo-label tensor.
        """
        if self.centroids is not None:
            old_centroids = self.centroids.cpu().numpy()
            new_centroids = km.cluster_centers_
            DistanceMatrix = np.linalg.norm(old_centroids[:,np.newaxis,:]-new_centroids[np.newaxis,:,:],axis=2)
            row_ind, col_ind = linear_sum_assignment(DistanceMatrix)
            new_centroids = torch.tensor(new_centroids).to(self.device)
            self.centroids = torch.empty(self.num_labels ,args.feat_dim).to(self.device)
            alignment_labels = list(col_ind)
            for i in range(self.num_labels):
                label = alignment_labels[i]
                self.centroids[i] = new_centroids[label]
            pseudo2label = {label:i for i,label in enumerate(alignment_labels)}
            pseudo_labels = np.array([pseudo2label[label] for label in km.labels_])
        else:
            # First epoch: adopt the centroids and labels as-is.
            self.centroids = torch.tensor(km.cluster_centers_).to(self.device)
            pseudo_labels = km.labels_
        pseudo_labels = torch.tensor(pseudo_labels, dtype=torch.long).to(self.device)
        return pseudo_labels

    def update_pseudo_labels(self, pseudo_labels, args, data):
        """Rebuild the semi-supervised dataloader with the new pseudo-labels."""
        train_data = TensorDataset(data.semi_input_ids, data.semi_input_mask, data.semi_segment_ids, pseudo_labels)
        train_sampler = SequentialSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler = train_sampler, batch_size = args.train_batch_size)
        return train_dataloader

    def train(self, args, data):
        """Alternate K-Means pseudo-labeling and supervised fine-tuning, with
        early stopping on the silhouette score of the current features."""
        best_score = 0
        best_model = None
        wait = 0
        for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
            feats, _ = self.get_features_labels(data.train_semi_dataloader, self.model, args)
            feats = feats.cpu().numpy()
            km = KMeans(n_clusters = self.num_labels).fit(feats)
            score = metrics.silhouette_score(feats, km.labels_)
            print('score',score)
            if score > best_score:
                best_model = copy.deepcopy(self.model)
                wait = 0
                best_score = score
            else:
                wait += 1
                if wait >= args.wait_patient:
                    self.model = best_model
                    break
            pseudo_labels = self.alignment(km, args)
            train_dataloader = self.update_pseudo_labels(pseudo_labels, args, data)
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            self.model.train()
            for batch in tqdm(train_dataloader, desc="Pseudo-Training"):
                batch = tuple(t.to(self.device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss = self.model(input_ids, segment_ids, input_mask, label_ids, mode='train')
                loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                self.optimizer.step()
                self.optimizer.zero_grad()
            tr_loss = tr_loss / nb_tr_steps
            print('train_loss',tr_loss)

    def load_pretrained_model(self, args):
        """Copy pre-trained weights into the clustering model, skipping the
        classifier head (its size differs: n_known_cls vs num_labels)."""
        pretrained_dict = self.pretrained_model.state_dict()
        classifier_params = ['classifier.weight','classifier.bias']
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k not in classifier_params}
        self.model.load_state_dict(pretrained_dict, strict=False)

    def restore_model(self, args, model):
        """Load weights saved by PretrainModelManager into *model*."""
        output_model_file = os.path.join(args.pretrain_dir, WEIGHTS_NAME)
        model.load_state_dict(torch.load(output_model_file))
        return model

    def freeze_parameters(self,model):
        """Freeze all BERT layers except the last encoder layer and the pooler."""
        for name, param in model.bert.named_parameters():
            param.requires_grad = False
            if "encoder.layer.11" in name or "pooler" in name:
                param.requires_grad = True

    def save_results(self, args):
        """Append this run's metrics and hyper-parameters to results.csv."""
        if not os.path.exists(args.save_results_path):
            os.makedirs(args.save_results_path)
        var = [args.dataset, args.method, args.known_cls_ratio, args.labeled_ratio, args.cluster_num_factor, args.seed, self.num_labels]
        names = ['dataset', 'method', 'known_cls_ratio', 'labeled_ratio', 'cluster_num_factor','seed', 'K']
        vars_dict = {k:v for k,v in zip(names, var) }
        results = dict(self.test_results,**vars_dict)
        keys = list(results.keys())
        values = list(results.values())
        file_name = 'results.csv'
        results_path = os.path.join(args.save_results_path, file_name)
        if not os.path.exists(results_path):
            ori = []
            ori.append(values)
            df1 = pd.DataFrame(ori,columns = keys)
            df1.to_csv(results_path,index=False)
        else:
            df1 = pd.read_csv(results_path)
            new = pd.DataFrame(results,index=[1])
            # Fix: DataFrame.append was removed in pandas 2.0; pd.concat is
            # the drop-in replacement.
            df1 = pd.concat([df1, new], ignore_index=True)
            df1.to_csv(results_path,index=False)
        data_diagram = pd.read_csv(results_path)
        print('test_results', data_diagram)
if __name__ == '__main__':
    print('Data and Parameters Initialization...')
    parser = init_model()
    args = parser.parse_args()
    data = Data(args)
    # Optionally run supervised pre-training on the labeled subset first and
    # hand its encoder to the clustering stage.
    if args.pretrain:
        print('Pre-training begin...')
        manager_p = PretrainModelManager(args, data)
        manager_p.train(args, data)
        print('Pre-training finished!')
        manager = ModelManager(args, data, manager_p.model)
    else:
        manager = ModelManager(args, data)
    print('Training begin...')
    manager.train(args,data)
    print('Training finished!')
    print('Evaluation begin...')
    manager.evaluation(args, data)
    print('Evaluation finished!')
    # NOTE(review): evaluation() already calls save_results, so this second
    # call appends a duplicate row to results.csv.
    manager.save_results(args)
| 10,443 | 36.3 | 136 | py |
DeepAligned-Clustering | DeepAligned-Clustering-main/dataloader.py | from util import *
def set_seed(seed):
    """Seed the Python, NumPy and PyTorch RNGs for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
class Data:
    """Builds the labeled/unlabeled splits and all dataloaders.

    A fraction of classes (``known_cls_ratio``) is treated as known; of the
    known-class training examples, only ``labeled_ratio`` keep their labels.
    """
    def __init__(self, args):
        set_seed(args.seed)
        max_seq_lengths = {'clinc':30, 'stackoverflow':45,'banking':55}
        args.max_seq_length = max_seq_lengths[args.dataset]
        processor = DatasetProcessor()
        self.data_dir = os.path.join(args.data_dir, args.dataset)
        self.all_label_list = processor.get_labels(self.data_dir)
        # A random subset of the classes is treated as "known".
        self.n_known_cls = round(len(self.all_label_list) * args.known_cls_ratio)
        self.known_label_list = list(np.random.choice(np.array(self.all_label_list), self.n_known_cls, replace=False))
        self.num_labels = int(len(self.all_label_list) * args.cluster_num_factor)
        self.train_labeled_examples, self.train_unlabeled_examples = self.get_examples(processor, args, 'train')
        print('num_labeled_samples',len(self.train_labeled_examples))
        print('num_unlabeled_samples',len(self.train_unlabeled_examples))
        self.eval_examples = self.get_examples(processor, args, 'eval')
        self.test_examples = self.get_examples(processor, args, 'test')
        self.train_labeled_dataloader = self.get_loader(self.train_labeled_examples, args, 'train')
        self.semi_input_ids, self.semi_input_mask, self.semi_segment_ids, self.semi_label_ids = self.get_semi(self.train_labeled_examples, self.train_unlabeled_examples, args)
        self.train_semi_dataloader = self.get_semi_loader(self.semi_input_ids, self.semi_input_mask, self.semi_segment_ids, self.semi_label_ids, args)
        self.eval_dataloader = self.get_loader(self.eval_examples, args, 'eval')
        self.test_dataloader = self.get_loader(self.test_examples, args, 'test')

    def get_examples(self, processor, args, mode = 'train'):
        """Load examples for a split; for 'train', additionally partition them
        into (labeled, unlabeled) by known classes and labeled_ratio."""
        ori_examples = processor.get_examples(self.data_dir, mode)
        if mode == 'train':
            train_labels = np.array([example.label for example in ori_examples])
            train_labeled_ids = []
            # Sample labeled_ratio of each known class to keep its label.
            for label in self.known_label_list:
                num = round(len(train_labels[train_labels == label]) * args.labeled_ratio)
                pos = list(np.where(train_labels == label)[0])
                train_labeled_ids.extend(random.sample(pos, num))
            train_labeled_examples, train_unlabeled_examples = [], []
            for idx, example in enumerate(ori_examples):
                if idx in train_labeled_ids:
                    train_labeled_examples.append(example)
                else:
                    train_unlabeled_examples.append(example)
            return train_labeled_examples, train_unlabeled_examples
        elif mode == 'eval':
            # Evaluation uses only known-class examples.
            eval_examples = []
            for example in ori_examples:
                if example.label in self.known_label_list:
                    eval_examples.append(example)
            return eval_examples
        elif mode == 'test':
            return ori_examples
        # NOTE(review): only reached for an unrecognized mode; `examples` is
        # never defined in this function, so this line would raise NameError.
        return examples

    def get_semi(self, labeled_examples, unlabeled_examples, args):
        """Featurize labeled + unlabeled examples and concatenate them.

        Unlabeled examples get the placeholder label -1.
        """
        tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=True)
        labeled_features = convert_examples_to_features(labeled_examples, self.known_label_list, args.max_seq_length, tokenizer)
        unlabeled_features = convert_examples_to_features(unlabeled_examples, self.all_label_list, args.max_seq_length, tokenizer)
        labeled_input_ids = torch.tensor([f.input_ids for f in labeled_features], dtype=torch.long)
        labeled_input_mask = torch.tensor([f.input_mask for f in labeled_features], dtype=torch.long)
        labeled_segment_ids = torch.tensor([f.segment_ids for f in labeled_features], dtype=torch.long)
        labeled_label_ids = torch.tensor([f.label_id for f in labeled_features], dtype=torch.long)
        unlabeled_input_ids = torch.tensor([f.input_ids for f in unlabeled_features], dtype=torch.long)
        unlabeled_input_mask = torch.tensor([f.input_mask for f in unlabeled_features], dtype=torch.long)
        unlabeled_segment_ids = torch.tensor([f.segment_ids for f in unlabeled_features], dtype=torch.long)
        unlabeled_label_ids = torch.tensor([-1 for f in unlabeled_features], dtype=torch.long)
        semi_input_ids = torch.cat([labeled_input_ids, unlabeled_input_ids])
        semi_input_mask = torch.cat([labeled_input_mask, unlabeled_input_mask])
        semi_segment_ids = torch.cat([labeled_segment_ids, unlabeled_segment_ids])
        semi_label_ids = torch.cat([labeled_label_ids, unlabeled_label_ids])
        return semi_input_ids, semi_input_mask, semi_segment_ids, semi_label_ids

    def get_semi_loader(self, semi_input_ids, semi_input_mask, semi_segment_ids, semi_label_ids, args):
        """Build a sequential dataloader over the combined semi-supervised set."""
        semi_data = TensorDataset(semi_input_ids, semi_input_mask, semi_segment_ids, semi_label_ids)
        semi_sampler = SequentialSampler(semi_data)
        semi_dataloader = DataLoader(semi_data, sampler=semi_sampler, batch_size = args.train_batch_size)
        return semi_dataloader

    def get_loader(self, examples, args, mode = 'train'):
        """Featurize *examples* and wrap them in a dataloader.

        Train/eval use the known-class label list; test uses all labels.
        Train is shuffled; eval/test are sequential.
        """
        tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=True)
        if mode == 'train' or mode == 'eval':
            features = convert_examples_to_features(examples, self.known_label_list, args.max_seq_length, tokenizer)
        elif mode == 'test':
            features = convert_examples_to_features(examples, self.all_label_list, args.max_seq_length, tokenizer)
        input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
        segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
        label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
        data = TensorDataset(input_ids, input_mask, segment_ids, label_ids)
        if mode == 'train':
            sampler = RandomSampler(data)
            dataloader = DataLoader(data, sampler=sampler, batch_size = args.train_batch_size)
        elif mode == 'eval' or mode == 'test':
            sampler = SequentialSampler(data)
            dataloader = DataLoader(data, sampler=sampler, batch_size = args.eval_batch_size)
        return dataloader
class InputExample(object):
    """Container for one sequence-classification example."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store the raw fields of a single example.

        Args:
            guid: unique identifier for the example.
            text_a: untokenized text of the first sequence.
            text_b: optional untokenized second sequence (pair tasks only).
            label: optional gold label (set for train/dev, not for test).
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
class InputFeatures(object):
    """Numeric features for one example: token ids, attention mask,
    segment ids and the integer label id."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated file and return its rows as lists of cells.

        The original Python-2 ``unicode`` re-decoding branch was dead code on
        Python 3 and has been removed.
        """
        with open(input_file, "r") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            return list(reader)
class DatasetProcessor(DataProcessor):
    """Reads the train/dev/test TSV splits of an intent dataset."""

    def get_examples(self, data_dir, mode):
        """Return InputExamples for the requested split ('train'/'eval'/'test')."""
        if mode == 'train':
            filename, set_type = "train.tsv", "train"
        elif mode == 'eval':
            filename, set_type = "dev.tsv", "train"
        elif mode == 'test':
            filename, set_type = "test.tsv", "test"
        else:
            return None  # matches the original implicit None for unknown modes
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, filename)), set_type)

    def get_labels(self, data_dir):
        """Return the unique labels found in train.tsv (sorted by np.unique)."""
        import pandas as pd
        train_df = pd.read_csv(os.path.join(data_dir, "train.tsv"), sep="\t")
        return np.unique(np.array(train_df['label']))

    def _create_examples(self, lines, set_type):
        """Convert raw TSV rows into InputExamples, skipping the header row
        and silently dropping malformed (non 2-column) rows."""
        examples = []
        for row_idx, row in enumerate(lines):
            if row_idx == 0:
                continue
            if len(row) != 2:
                continue
            examples.append(
                InputExample(guid="%s-%s" % (set_type, row_idx),
                             text_a=row[0], text_b=None, label=row[1]))
        return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s.

    Each example is tokenized, truncated and zero-padded to ``max_seq_length``
    and converted into an InputFeatures holding (input_ids, input_mask,
    segment_ids, label_id). Label ids come from the position of the label in
    ``label_list``.
    """
    label_map = {}
    for i, label in enumerate(label_list):
        label_map[label] = i
    features = []
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]
        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0   0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambigiously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)
        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            segment_ids += [1] * (len(tokens_b) + 1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        label_id = label_map[example.label]
        # if ex_index < 5:
        #     logger.info("*** Example ***")
        #     logger.info("guid: %s" % (example.guid))
        #     logger.info("tokens: %s" % " ".join(
        #         [str(x) for x in tokens]))
        #     logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        #     logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        #     logger.info(
        #         "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        #     logger.info("label: %s (id = %d)" % (example.label, label_id))
        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_id))
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop(0) # For dialogue context
else:
tokens_b.pop()
| 13,724 | 44.598007 | 175 | py |
DeepAligned-Clustering | DeepAligned-Clustering-main/util.py | import itertools
import subprocess
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import copy
import torch.nn.functional as F
import random
import csv
import sys
from torch import nn
from tqdm import tqdm_notebook, trange, tqdm
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.modeling import WEIGHTS_NAME,CONFIG_NAME,BertPreTrainedModel,BertModel
from pytorch_pretrained_bert.tokenization import BertTokenizer
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from datetime import datetime
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix,normalized_mutual_info_score, adjusted_rand_score, accuracy_score
from scipy.optimize import linear_sum_assignment
from sklearn import metrics
def hungray_aligment(y_true, y_pred):
    """Optimal one-to-one mapping between cluster ids and true labels.

    Builds the co-occurrence matrix w[pred, true] and solves a maximum
    assignment via the Hungarian algorithm (as a min-cost problem on
    w.max() - w). Returns (index pairs of shape (D, 2), co-occurrence matrix).
    """
    n_ids = max(y_pred.max(), y_true.max()) + 1
    w = np.zeros((n_ids, n_ids))
    for pred, true in zip(y_pred, y_true):
        w[pred, true] += 1
    ind = np.transpose(np.asarray(linear_sum_assignment(w.max() - w)))
    return ind, w
def clustering_accuracy_score(y_true, y_pred):
    """Clustering accuracy: fraction of samples assigned correctly under the
    optimal one-to-one cluster-to-label mapping."""
    ind, w = hungray_aligment(y_true, y_pred)
    matched = sum(w[i, j] for i, j in ind)
    return matched / y_pred.size
def clustering_score(y_true, y_pred):
    """Bundle ACC/ARI/NMI into one dict, as percentages with two decimals."""
    raw = {
        'ACC': clustering_accuracy_score(y_true, y_pred),
        'ARI': adjusted_rand_score(y_true, y_pred),
        'NMI': normalized_mutual_info_score(y_true, y_pred),
    }
    return {name: round(value * 100, 2) for name, value in raw.items()}
| 1,544 | 31.87234 | 110 | py |
lm-intervention | lm-intervention-master/experiment_num_agreement.py |
import torch
# import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# import random
from functools import partial
from tqdm import tqdm
# from tqdm import tqdm_notebook
import math
import statistics
from utils_num_agreement import batch, convert_results_to_pd
from transformers import (
GPT2LMHeadModel, GPT2Tokenizer,
TransfoXLTokenizer,
XLNetTokenizer,
BertForMaskedLM, BertTokenizer
)
from transformers_modified.modeling_transfo_xl import TransfoXLLMHeadModel
from transformers_modified.modeling_xlnet import XLNetLMHeadModel
from attention_intervention_model import (
AttentionOverride, TXLAttentionOverride, XLNetAttentionOverride, BertAttentionOverride
)
# sns.set(style="ticks", color_codes=True)
# Fix NumPy and PyTorch RNG seeds so experiment runs are reproducible.
np.random.seed(1)
torch.manual_seed(1)
# Padding text for XLNet (from examples/text-generation/run_generation.py).
# Prepended to the stimulus in Intervention.__init__ when an XLNetTokenizer
# is used; do not edit — token counts elsewhere depend on this exact text.
PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
class Intervention():
    '''
    Wrapper for all the possible interventions.

    Holds the template sentence, its tokenized variants (one per substitute
    word), the token position at which to intervene, and the candidate
    continuation token ids to compare.
    '''
    def __init__(self,
                 tokenizer,
                 base_string: str,
                 substitutes: list,
                 candidates: list,
                 device='cpu'):
        # Bare super() here creates the proxy but calls nothing — a no-op.
        super()
        self.device = device
        self.enc = tokenizer
        # XLNet needs a long dummy context prepended to condition properly.
        if isinstance(tokenizer, XLNetTokenizer):
            base_string = PADDING_TEXT + ' ' + base_string
        # All the initial strings
        # First item should be neutral, others tainted
        self.base_strings = [base_string.format(s)
                             for s in substitutes]
        # Tokenized bases
        #self.base_strings_tok = [self.enc.encode(s)
        #                         for s in self.base_strings]
        # print(self.base_strings_tok)
        #self.base_strings_tok = torch.LongTensor(self.base_strings_tok)\
        #        .to(device)
        self.base_strings_tok = [
            self.enc.encode(s, add_special_tokens=False,
                            add_space_before_punct_symbol=True)
            for s in self.base_strings
        ]
        # Stacking into one LongTensor requires every variant to tokenize to
        # the same length (enforced for XLNet by the assert below).
        self.base_strings_tok = torch.LongTensor(self.base_strings_tok).to(device)
        # Where to intervene
        #self.position = base_string.split().index('{}')
        if isinstance(tokenizer, XLNetTokenizer):
            # Count the '{}' position from the *end* of the string so the
            # padding prefix does not shift it.
            diff = len(base_string.split()) - base_string.split().index('{}')
            self.position = len(self.base_strings_tok[0]) - diff
            assert len(self.base_strings_tok[0]) == len(self.base_strings_tok[1])
        else:
            self.position = base_string.split().index('{}')
        self.candidates = []
        for c in candidates:
            # 'a ' added to input so that tokenizer understand that first word
            # follows a space.
            # tokens = self.enc.tokenize('. ' + c)[1:]
            tokens = self.enc.tokenize('a ' + c,
                                       add_space_before_punct_symbol=True)[1:]
            assert(len(tokens) == 1)
            self.candidates.append(tokens)
        # Validation only: each substitute must also be a single token; the
        # tokenized result is asserted on and then discarded.
        for s in substitutes:
            # 'a ' added to input so that tokenizer understand that first word
            # follows a space.
            tokens = self.enc.tokenize('a ' + s,
                                       add_space_before_punct_symbol=True)[1:]
            assert(len(tokens) == 1)
        self.candidates_tok = [self.enc.convert_tokens_to_ids(tokens)
                               for tokens in self.candidates]
class Model():
'''
Wrapper for all model logic
'''
    def __init__(self,
                 device='cpu',
                 output_attentions=False,
                 random_weights=False,
                 gpt2_version='gpt2'):
        """Load the requested language model and wire up the
        architecture-specific accessors used by the intervention code.

        Despite its name, `gpt2_version` may be any supported checkpoint:
        gpt2* / distilgpt2, transfo-xl*, xlnet*, or bert*.
        """
        super()  # NOTE: bare super() without .__init__() is a no-op
        # check what model architecture we're using
        self.is_gpt2 = (gpt2_version.startswith('gpt2') or
                        gpt2_version.startswith('distilgpt2'))
        self.is_txl = gpt2_version.startswith('transfo-xl')
        self.is_xlnet = gpt2_version.startswith('xlnet')
        self.is_bert = gpt2_version.startswith('bert')
        assert (self.is_gpt2 or self.is_txl or self.is_xlnet or self.is_bert)
        self.device = device
        #self.model = GPT2LMHeadModel.from_pretrained(
        #    gpt2_version,
        #    output_attentions=output_attentions)
        self.model = (GPT2LMHeadModel if self.is_gpt2 else
                      XLNetLMHeadModel if self.is_xlnet else
                      TransfoXLLMHeadModel if self.is_txl else
                      BertForMaskedLM).from_pretrained(
            gpt2_version,
            output_attentions=output_attentions
        )
        self.model.eval()
        self.model.to(device)
        if random_weights:
            # Keep the architecture but discard the pretrained weights.
            print('Randomizing weights')
            self.model.init_weights()
        # Options
        self.top_k = 5
        # 12 for GPT-2
        # self.num_layers = len(self.model.transformer.h)
        self.num_layers = self.model.config.num_hidden_layers
        # 768 for GPT-2
        # self.num_neurons = self.model.transformer.wte.weight.shape[1]
        self.num_neurons = self.model.config.hidden_size
        # 12 for GPT-2
        # self.num_heads = self.model.transformer.h[0].attn.n_head
        self.num_heads = self.model.config.num_attention_heads
        # Masking strategy (values 1-6) consumed by mlm_inputs; see there.
        self.masking_approach = 1
        # NOTE(review): `self.is_distilbert` is never assigned anywhere. The
        # branch below is unreachable in practice because the assert above
        # guarantees one of the first four flags is True (the conditional
        # chain short-circuits earlier), but it would raise AttributeError
        # if it were ever evaluated — confirm and clean up.
        tokenizer = (GPT2Tokenizer if self.is_gpt2 else
                     TransfoXLTokenizer if self.is_txl else
                     XLNetTokenizer if self.is_xlnet else
                     BertTokenizer if self.is_bert else
                     DistilBertTokenizer if self.is_distilbert else
                     RobertaTokenizer).from_pretrained(gpt2_version)
        # Special token id's: (mask, cls, sep)
        self.st_ids = (tokenizer.mask_token_id,
                       tokenizer.cls_token_id,
                       tokenizer.sep_token_id)
        # To account for switched dimensions in model internals:
        # Default: [batch_size, seq_len, hidden_dim],
        # txl and xlnet: [seq_len, batch_size, hidden_dim]
        self.order_dims = lambda a: a
        # Per-architecture accessors used by the hooks:
        #   attention_layer(i) -> attention module of layer i
        #   word_emb_layer     -> input word-embedding module
        #   neuron_layer(i)    -> MLP / feed-forward module of layer i
        if self.is_gpt2:
            self.attention_layer = lambda layer: self.model.transformer.h[layer].attn
            self.word_emb_layer = self.model.transformer.wte
            self.neuron_layer = lambda layer: self.model.transformer.h[layer].mlp
        elif self.is_txl:
            self.attention_layer = lambda layer: self.model.transformer.layers[layer].dec_attn
            self.word_emb_layer = self.model.transformer.word_emb
            self.neuron_layer = lambda layer: self.model.transformer.layers[layer].pos_ff
            self.order_dims = lambda a: (a[1], a[0], *a[2:])
        elif self.is_xlnet:
            self.attention_layer = lambda layer: self.model.transformer.layer[layer].rel_attn
            self.word_emb_layer = self.model.transformer.word_embedding
            self.neuron_layer = lambda layer: self.model.transformer.layer[layer].ff
            self.order_dims = lambda a: (a[1], a[0], *a[2:])
        elif self.is_bert:
            self.attention_layer = lambda layer: self.model.bert.encoder.layer[layer].attention.self
            self.word_emb_layer = self.model.bert.embeddings.word_embeddings
            self.neuron_layer = lambda layer: self.model.bert.encoder.layer[layer].output
def mlm_inputs(self, context, candidate):
input_tokens = []
for i in range(len(candidate)):
combined = context + candidate[:i] + [self.st_ids[0]]
if self.masking_approach in [2, 5]:
combined = combined + candidate[i+1:]
elif self.masking_approach in [3, 6]:
combined = combined + [self.st_ids[0]] * len(candidate[i+1:])
if self.masking_approach > 3:
combined = [self.st_ids[1]] + combined + [self.st_ids[2]]
pred_idx = combined.index(self.st_ids[0])
input_tokens.append((combined, pred_idx))
return input_tokens
def xlnet_forward(self, batch, clen):
""" Return the outputs of XLNet's forward pass;
clen = length of the candidate """
bsz, seqlen = batch.shape
perm_mask = torch.triu(
torch.ones((bsz, seqlen, seqlen), device=self.device), diagonal=0)
perm_mask[:, :, :-clen] = 0
#if self.masking_approach == 2:
# perm_mask[:, -clen:, -clen:] = torch.eye(clen)
target_mapping = torch.zeros(
(bsz, clen, seqlen), dtype=torch.float, device=self.device)
target_mapping[:, :, -clen:] = torch.eye(clen)
return self.model(batch,
perm_mask=perm_mask,
target_mapping=target_mapping)
    def get_representations(self, context, position):
        """Return {layer_index: hidden_vector} captured at `position` during a
        single forward pass over `context` (1-D tensor of token ids).
        Layer -1 holds the word-embedding output; layers 0..num_layers-1 hold
        the MLP/feed-forward outputs.
        """
        # Hook for saving the representation
        def extract_representation_hook(module,
                                        input,
                                        output,
                                        position,
                                        representations,
                                        layer):
            # representations[layer] = output[0][position]
            # XLNet runs an extra pass whose output has a leading dim of 1
            # (presumably the query stream); skip it — TODO confirm.
            if self.is_xlnet and output.shape[0] == 1: return output
            # order_dims swaps (batch, seq) indices for txl/xlnet layouts.
            representations[layer] = output[self.order_dims((0, position))]
        handles = []
        representation = {}
        with torch.no_grad():
            # construct all the hooks
            # word embeddings will be layer -1
            # handles.append(self.model.transformer.wte.register_forward_hook(
            handles.append(self.word_emb_layer.register_forward_hook(
                partial(extract_representation_hook,
                        position=position,
                        representations=representation,
                        layer=-1)))
            # hidden layers
            for layer in range(self.num_layers):
                #handles.append(self.model.transformer.h[layer]\
                #                   .mlp.register_forward_hook(
                handles.append(self.neuron_layer(layer).register_forward_hook(
                    partial(extract_representation_hook,
                            position=position,
                            representations=representation,
                            layer=layer)))
            # logits, past = self.model(context)
            # The hooks fire during this forward pass and fill `representation`.
            if self.is_xlnet:
                self.xlnet_forward(context.unsqueeze(0), clen=1)
            else:
                self.model(context.unsqueeze(0))
            for h in handles:
                h.remove()
        # print(representation[0][:5])
        return representation
def get_probabilities_for_examples(self, context, candidates):
"""Return probabilities of single-token candidates given context"""
for c in candidates:
if len(c) > 1:
raise ValueError(f"Multiple tokens not allowed: {c}")
outputs = [c[0] for c in candidates]
# logits, past = self.model(context)[:2]
if self.is_xlnet:
logits = self.xlnet_forward(context, clen=1)[0]
else:
logits = self.model(context)[0]
logits = logits[:, -1, :]
probs = F.softmax(logits, dim=-1)
return probs[:, outputs].tolist()
def get_probabilities_for_examples_multitoken(self, context, candidates):
"""
Return probability of multi-token candidates given context.
Prob of each candidate is normalized by number of tokens.
Args:
context: Tensor of token ids in context
candidates: list of list of token ids in each candidate
Returns: list containing probability for each candidate
"""
# TODO: Combine into single batch
token_log_probs = []
mean_probs = []
context = context.tolist()
for candidate in candidates:
if self.is_bert:
mlm_inputs = self.mlm_inputs(context, candidate)
for i, c in enumerate(candidate):
combined, pred_idx = mlm_inputs[i]
batch = torch.tensor(combined).unsqueeze(dim=0).to(self.device)
logits = self.model(batch)[0]
log_probs = F.log_softmax(logits[-1, :, :], dim=-1)
token_log_probs.append(log_probs[pred_idx][c].item())
elif self.is_xlnet:
combined = context + candidate
batch = torch.tensor(combined).unsqueeze(dim=0).to(self.device)
logits = self.xlnet_forward(batch, clen=len(candidate))[0]
log_probs = F.log_softmax(logits[-1, :, :], dim=-1)
for i, next_token_id in enumerate(candidate):
token_log_probs.append(log_probs[i][next_token_id].item())
else:
combined = context + candidate
# Exclude last token position when predicting next token
batch = torch.tensor(combined[:-1]).unsqueeze(dim=0).to(self.device)
# Shape (batch_size, seq_len, vocab_size)
logits = self.model(batch)[0]
# Shape (seq_len, vocab_size)
log_probs = F.log_softmax(logits[-1, :, :], dim=-1)
context_end_pos = len(context) - 1
continuation_end_pos = context_end_pos + len(candidate)
# TODO: Vectorize this
# Up to but not including last token position
for i in range(context_end_pos, continuation_end_pos):
next_token_id = combined[i+1]
next_token_log_prob = log_probs[i][next_token_id].item()
token_log_probs.append(next_token_log_prob)
mean_token_log_prob = statistics.mean(token_log_probs)
mean_token_prob = math.exp(mean_token_log_prob)
mean_probs.append(mean_token_prob)
return mean_probs
    def neuron_intervention(self,
                            context,
                            outputs,
                            rep,
                            layers,
                            neurons,
                            position,
                            intervention_type='diff',
                            alpha=1.):
        """Run the model over `context` while overwriting (or shifting) the
        activations of selected neurons at `position`, and return candidate
        probabilities under that intervention.

        Args:
            context: 1-D tensor of token ids; replicated into a batch with
                one row per neuron group in `neurons`
            outputs: candidate token lists (see get_probabilities_for_examples)
            rep: {layer: replacement values} as built by get_representations
            layers: layer index for each intervened neuron (-1 = embeddings)
            neurons: per-batch-row lists of neuron indices to intervene on
            position: token position at which to intervene
            intervention_type: 'replace' overwrites, 'diff' adds
            alpha: scaling applied to the replacement values
        """
        # Hook for changing representation during forward pass
        def intervention_hook(module,
                              input,
                              output,
                              position,
                              neurons,
                              intervention,
                              intervention_type):
            # XLNet: ignore query stream
            if self.is_xlnet and output.shape[0] == 1: return output
            # Get the neurons to intervene on
            neurons = torch.LongTensor(neurons).to(self.device)
            # First grab the position across batch
            # Then, for each element, get correct index w/ gather
            #base = output[:, position, :].gather(
            #    1, neurons)
            base_slice = self.order_dims((slice(None), position, slice(None)))
            base = output[base_slice].gather(1, neurons)
            intervention_view = intervention.view_as(base)
            if intervention_type == 'replace':
                base = intervention_view
            elif intervention_type == 'diff':
                base += intervention_view
            else:
                raise ValueError(f"Invalid intervention_type: {intervention_type}")
            # Overwrite values in the output
            # First define mask where to overwrite
            # scatter_mask = torch.zeros_like(output).byte()
            scatter_mask = torch.zeros_like(output, dtype=torch.bool)
            for i, v in enumerate(neurons):
                # scatter_mask[i, position, v] = 1
                scatter_mask[self.order_dims((i, position, v))] = 1
            # Then take values from base and scatter
            # (mutates `output` in place; the hook returns None)
            output.masked_scatter_(scatter_mask, base.flatten())
        # Set up the context as batch
        batch_size = len(neurons)
        context = context.unsqueeze(0).repeat(batch_size, 1)
        handle_list = []
        for layer in set(layers):
            # Collect, for each batch row, the neuron indices that belong to
            # this layer (sorted, as gather/scatter below assume order).
            neuron_loc = np.where(np.array(layers) == layer)[0]
            n_list = []
            for n in neurons:
                unsorted_n_list = [n[i] for i in neuron_loc]
                n_list.append(list(np.sort(unsorted_n_list)))
            # TransfoXL indexing takes a flat list here — TODO confirm why.
            if self.is_txl: m_list = list(np.array(n_list).squeeze())
            else: m_list = n_list
            intervention_rep = alpha * rep[layer][m_list]
            if layer == -1:
                # Layer -1 targets the word-embedding output.
                handle_list.append(self.word_emb_layer.register_forward_hook(
                    partial(intervention_hook,
                            position=position,
                            neurons=n_list,
                            intervention=intervention_rep,
                            intervention_type=intervention_type)))
            else:
                handle_list.append(self.neuron_layer(layer).register_forward_hook(
                    partial(intervention_hook,
                            position=position,
                            neurons=n_list,
                            intervention=intervention_rep,
                            intervention_type=intervention_type)))
        new_probabilities = self.get_probabilities_for_examples(
            context,
            outputs)
        for hndle in handle_list:
            hndle.remove()
        return new_probabilities
def head_pruning_intervention(self,
context,
outputs,
layer,
head):
# Recreate model and prune head
save_model = self.model
# TODO Make this more efficient
self.model = GPT2LMHeadModel.from_pretrained('gpt2')
self.model.prune_heads({layer: [head]})
self.model.eval()
# Compute probabilities without head
new_probabilities = self.get_probabilities_for_examples(
context,
outputs)
# Reinstate original model
# TODO Handle this in cleaner way
self.model = save_model
return new_probabilities
    def attention_intervention(self,
                               context,
                               outputs,
                               attn_override_data):
        """ Override attention values in specified layer
        Args:
            context: context text
            outputs: candidate outputs
            attn_override_data: list of dicts of form:
                {
                    'layer': <index of layer on which to intervene>,
                    'attention_override': <values to override the computed attention weights.
                        Shape is [batch_size, num_heads, seq_len, seq_len]>,
                    'attention_override_mask': <indicates which attention weights to override.
                        Shape is [batch_size, num_heads, seq_len, seq_len]>
                }
        """
        def intervention_hook(module, input, outputs, attn_override, attn_override_mask):
            # Wrap the attention module in an architecture-specific override
            # that substitutes the masked attention weights.
            #attention_override_module = AttentionOverride(
            #    module, attn_override, attn_override_mask)
            attention_override_module = (AttentionOverride if self.is_gpt2 else
                                         TXLAttentionOverride if self.is_txl else
                                         XLNetAttentionOverride)(
                module, attn_override, attn_override_mask
            )
            # outputs[:] = attention_override_module(*input)
            # Returning a value from a forward hook replaces the module output.
            return attention_override_module(*input)
        with torch.no_grad():
            hooks = []
            for d in attn_override_data:
                # use the statement in the line below for the `swap_number` intervention.
                attn_override = d['attention_override']
                # uncomment the line below to use the `zero` intervention.
                # attn_override = torch.zeros_like(d['attention_override'])
                attn_override_mask = d['attention_override_mask']
                layer = d['layer']
                hooks.append(self.attention_layer(layer).register_forward_hook(
                    partial(intervention_hook,
                            attn_override=attn_override,
                            attn_override_mask=attn_override_mask)))
            new_probabilities = self.get_probabilities_for_examples_multitoken(
                context,
                outputs)
            for hook in hooks:
                hook.remove()
            return new_probabilities
def neuron_intervention_experiment(self,
word2intervention,
intervention_type, layers_to_adj=[], neurons_to_adj=[],
alpha=1, intervention_loc='all'):
"""
run multiple intervention experiments
"""
# if you run into memory issues, use the `bsize` argument
# bsize=100 works for XLNet, bsize=1 for TransformerXL
word2intervention_results = {}
for word in tqdm(word2intervention, desc='words'):
word2intervention_results[word] = self.neuron_intervention_single_experiment(
word2intervention[word], intervention_type, layers_to_adj, neurons_to_adj,
alpha, intervention_loc=intervention_loc)
return word2intervention_results
def neuron_intervention_single_experiment(self,
intervention,
intervention_type, layers_to_adj=[], neurons_to_adj=[],
alpha=100,
bsize=800, intervention_loc='all'):
"""
run one full neuron intervention experiment
"""
if self.is_txl: bsize = 100
if self.is_xlnet or self.is_txl: 32
with torch.no_grad():
'''
Compute representations for base terms (one for each side of bias)
'''
if self.is_xlnet:
num_alts = intervention.base_strings_tok.shape[0]
masks = torch.tensor([self.st_ids[0]]).repeat(num_alts, 1).to(self.device)
intervention.base_strings_tok = torch.cat(
(intervention.base_strings_tok, masks), dim=1)
base_representations = self.get_representations(
intervention.base_strings_tok[0],
intervention.position)
complement_representations = self.get_representations(
intervention.base_strings_tok[1],
intervention.position)
if intervention_type == 'indirect':
context = intervention.base_strings_tok[0]
rep = complement_representations
replace_or_diff = 'replace'
elif intervention_type == 'direct':
context = intervention.base_strings_tok[1]
rep = base_representations
replace_or_diff = 'replace'
else:
raise ValueError(f"Invalid intervention_type: {intervention_type}")
# Probabilities without intervention (Base case)
candidate1_base_prob, candidate2_base_prob = self.get_probabilities_for_examples(
intervention.base_strings_tok[0].unsqueeze(0),
intervention.candidates_tok)[0]
candidate1_alt_prob, candidate2_alt_prob = self.get_probabilities_for_examples(
intervention.base_strings_tok[1].unsqueeze(0),
intervention.candidates_tok)[0]
# Now intervening on potentially biased example
if intervention_loc == 'all':
candidate1_probs = torch.zeros((self.num_layers + 1, self.num_neurons))
candidate2_probs = torch.zeros((self.num_layers + 1, self.num_neurons))
for layer in range(-1, self.num_layers):
for neurons in batch(range(self.num_neurons), bsize):
neurons_to_search = [[i] + neurons_to_adj for i in neurons]
layers_to_search = [layer] + layers_to_adj
probs = self.neuron_intervention(
context=context,
outputs=intervention.candidates_tok,
rep=rep,
layers=layers_to_search,
neurons=neurons_to_search,
position=intervention.position,
intervention_type=replace_or_diff,
alpha=alpha)
for neuron, (p1, p2) in zip(neurons, probs):
candidate1_probs[layer + 1][neuron] = p1
candidate2_probs[layer + 1][neuron] = p2
# Now intervening on potentially biased example
elif intervention_loc == 'layer':
layers_to_search = (len(neurons_to_adj) + 1)*[layers_to_adj]
candidate1_probs = torch.zeros((1, self.num_neurons))
candidate2_probs = torch.zeros((1, self.num_neurons))
for neurons in batch(range(self.num_neurons), bsize):
neurons_to_search = [[i] + neurons_to_adj for i in neurons]
probs = self.neuron_intervention(
context=context,
outputs=intervention.candidates_tok,
rep=rep,
layers=layers_to_search,
neurons=neurons_to_search,
position=intervention.position,
intervention_type=replace_or_diff,
alpha=alpha)
for neuron, (p1, p2) in zip(neurons, probs):
candidate1_probs[0][neuron] = p1
candidate2_probs[0][neuron] = p2
else:
probs = self.neuron_intervention(
context=context,
outputs=intervention.candidates_tok,
rep=rep,
layers=layers_to_adj,
neurons=neurons_to_adj,
position=intervention.position,
intervention_type=replace_or_diff,
alpha=alpha)
for neuron, (p1, p2) in zip(neurons_to_adj, probs):
candidate1_probs = p1
candidate2_probs = p2
return (candidate1_base_prob, candidate2_base_prob,
candidate1_alt_prob, candidate2_alt_prob,
candidate1_probs, candidate2_probs)
    def attention_intervention_experiment(self, intervention, effect):
        """
        Run one full attention intervention experiment
        measuring indirect or direct effect.

        Returns per-head, per-layer and whole-model candidate probabilities
        obtained while overlaying the attention induced by the other input.
        """
        # E.g. The doctor asked the nurse a question. She
        x = intervention.base_strings_tok[0]
        # E.g. The doctor asked the nurse a question. He
        x_alt = intervention.base_strings_tok[1]
        if effect == 'indirect':
            input = x_alt  # Get attention for x_alt
        elif effect == 'direct':
            input = x  # Get attention for x
        else:
            raise ValueError(f"Invalid effect: {effect}")
        # batch = torch.tensor(input).unsqueeze(0).to(self.device)
        # attention_override = self.model(batch)[-1]
        if self.is_xlnet:
            # XLNet needs an explicit target mapping on this forward pass.
            batch = input.clone().detach().unsqueeze(0).to(self.device)
            target_mapping = torch.zeros(
                (1, 1, len(input)), dtype=torch.float, device=self.device)
            attention_override = self.model(
                batch, target_mapping=target_mapping)[-1]
        else:
            batch = input.clone().detach().unsqueeze(0).to(self.device)
            attention_override = self.model(batch)[-1]
        batch_size = 1
        seq_len = len(x)
        seq_len_alt = len(x_alt)
        assert seq_len == seq_len_alt
        # assert len(attention_override) == self.num_layers
        # assert attention_override[0].shape == (batch_size, self.num_heads, seq_len, seq_len)
        with torch.no_grad():
            candidate1_probs_head = torch.zeros((self.num_layers, self.num_heads))
            candidate2_probs_head = torch.zeros((self.num_layers, self.num_heads))
            candidate1_probs_layer = torch.zeros(self.num_layers)
            candidate2_probs_layer = torch.zeros(self.num_layers)
            # Intervene on the opposite input to the one the attention came from.
            if effect == 'indirect':
                context = x
            else:
                context = x_alt
            # Intervene at every layer and head by overlaying attention induced by x_alt
            model_attn_override_data = []  # Save layer interventions for model-level intervention later
            for layer in range(self.num_layers):
                layer_attention_override = attention_override[layer]
                # XLNet attentions carry an extra leading component; index [0]
                # to build a mask of matching shape — TODO confirm layout.
                if self.is_xlnet:
                    attention_override_mask = torch.ones_like(layer_attention_override[0], dtype=torch.uint8)
                else:
                    attention_override_mask = torch.ones_like(layer_attention_override, dtype=torch.uint8)
                layer_attn_override_data = [{
                    'layer': layer,
                    'attention_override': layer_attention_override,
                    'attention_override_mask': attention_override_mask
                }]
                # Override the whole layer (all heads at once).
                candidate1_probs_layer[layer], candidate2_probs_layer[layer] = self.attention_intervention(
                    context=context,
                    outputs=intervention.candidates_tok,
                    attn_override_data = layer_attn_override_data)
                model_attn_override_data.extend(layer_attn_override_data)
                # Then override one head at a time.
                for head in range(self.num_heads):
                    if self.is_xlnet:
                        attention_override_mask = torch.zeros_like(layer_attention_override[0], dtype=torch.uint8)
                    else:
                        attention_override_mask = torch.zeros_like(layer_attention_override, dtype=torch.uint8)
                    attention_override_mask[0][head] = 1  # Set mask to 1 for single head only
                    head_attn_override_data = [{
                        'layer': layer,
                        'attention_override': layer_attention_override,
                        'attention_override_mask': attention_override_mask
                    }]
                    candidate1_probs_head[layer][head], candidate2_probs_head[layer][head] = self.attention_intervention(
                        context=context,
                        outputs=intervention.candidates_tok,
                        attn_override_data=head_attn_override_data)
            # Intervene on entire model by overlaying attention induced by x_alt
            candidate1_probs_model, candidate2_probs_model = self.attention_intervention(
                context=context,
                outputs=intervention.candidates_tok,
                attn_override_data=model_attn_override_data)
        return candidate1_probs_head, candidate2_probs_head, candidate1_probs_layer, candidate2_probs_layer,\
               candidate1_probs_model, candidate2_probs_model
def attention_intervention_single_experiment(self, intervention, effect, layers_to_adj, heads_to_adj, search):
"""
Run one full attention intervention experiment
measuring indirect or direct effect.
"""
# E.g. The doctor asked the nurse a question. He
x = intervention.base_strings_tok[0]
# E.g. The doctor asked the nurse a question. She
x_alt = intervention.base_strings_tok[1]
if effect == 'indirect':
input = x_alt # Get attention for x_alt
elif effect == 'direct':
input = x # Get attention for x
else:
raise ValueError(f"Invalid effect: {effect}")
batch = torch.tensor(input).unsqueeze(0).to(self.device)
attention_override = self.model(batch)[-1]
batch_size = 1
seq_len = len(x)
seq_len_alt = len(x_alt)
assert seq_len == seq_len_alt
assert len(attention_override) == self.num_layers
assert attention_override[0].shape == (batch_size, self.num_heads, seq_len, seq_len)
with torch.no_grad():
if search:
candidate1_probs_head = torch.zeros((self.num_layers, self.num_heads))
candidate2_probs_head = torch.zeros((self.num_layers, self.num_heads))
if effect == 'indirect':
context = x
else:
context = x_alt
model_attn_override_data = []
for layer in range(self.num_layers):
if layer in layers_to_adj:
layer_attention_override = attention_override[layer]
layer_ind = np.where(layers_to_adj == layer)[0]
heads_in_layer = heads_to_adj[layer_ind]
attention_override_mask = torch.zeros_like(layer_attention_override, dtype=torch.uint8)
# set multiple heads in layer to 1
for head in heads_in_layer:
attention_override_mask[0][head] = 1 # Set mask to 1 for single head only
# get head mask
head_attn_override_data = [{
'layer': layer,
'attention_override': layer_attention_override,
'attention_override_mask': attention_override_mask
}]
# should be the same length as the number of unique layers to adj
model_attn_override_data.extend(head_attn_override_data)
# basically generate the mask for the layers_to_adj and heads_to_adj
if search:
for layer in range(self.num_layers):
layer_attention_override = attention_override[layer]
layer_ind = np.where(layers_to_adj == layer)[0]
heads_in_layer = heads_to_adj[layer_ind]
for head in range(self.num_heads):
if head not in heads_in_layer:
model_attn_override_data_search = []
attention_override_mask = torch.zeros_like(layer_attention_override, dtype=torch.uint8)
heads_list = [head]
if len(heads_in_layer) > 0:
heads_list.extend(heads_in_layer)
for h in (heads_list):
attention_override_mask[0][h] = 1 # Set mask to 1 for single head only
head_attn_override_data = [{
'layer': layer,
'attention_override': layer_attention_override,
'attention_override_mask': attention_override_mask
}]
model_attn_override_data_search.extend(head_attn_override_data)
for override in model_attn_override_data:
if override['layer'] != layer:
model_attn_override_data_search.append(override)
candidate1_probs_head[layer][head], candidate2_probs_head[layer][head] = self.attention_intervention(
context=context,
outputs=intervention.candidates_tok,
attn_override_data=model_attn_override_data_search)
else:
candidate1_probs_head[layer][head] = -1
candidate2_probs_head[layer][head] = -1
else:
candidate1_probs_head, candidate2_probs_head = self.attention_intervention(
context=context,
outputs=intervention.candidates_tok,
attn_override_data=model_attn_override_data)
return candidate1_probs_head, candidate2_probs_head
def main():
    """Small demo: one direct-effect neuron intervention on GPT-2."""
    device = 'cpu'
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = Model(device=device)
    # Template with one slot; the substitutes toggle grammatical number.
    template = "The {}"
    subject = 'key'
    intervention = Intervention(
        tokenizer,
        template,
        [subject, 'keys'],
        ["is", "are"],
        device=device)
    interventions = {subject: intervention}
    for intervention_type in ['direct']:
        intervention_results = model.neuron_intervention_experiment(
            interventions, intervention_type)
        df = convert_results_to_pd(
            interventions, intervention_results)
        print('more probable candidate per layer, across all neurons in the layer')
        print(df[0:5])
        df.to_csv(f'results/intervention_examples/results_{intervention_type}.csv')
| 37,950 | 44.724096 | 129 | py |
lm-intervention | lm-intervention-master/attention_utils.py | import torch
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
from tqdm import tqdm
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind
def perform_intervention(intervention, model, effect_types=('indirect', 'direct')):
    """Perform intervention and return results for specified effects"""
    x = intervention.base_strings_tok[0]  # E.g. The doctor asked the nurse a question. She
    x_alt = intervention.base_strings_tok[1]  # E.g. The doctor asked the nurse a question. He
    with torch.no_grad():
        # Candidate probabilities with no intervention, under both inputs.
        candidate1_base_prob, candidate2_base_prob = model.get_probabilities_for_examples_multitoken(
            x,
            intervention.candidates_tok)
        candidate1_alt_prob, candidate2_alt_prob = model.get_probabilities_for_examples_multitoken(
            x_alt,
            intervention.candidates_tok)
    # 'Ġ' is the GPT-2 BPE marker for a leading space; strip it for display.
    candidate1 = ' '.join(intervention.candidates[0]).replace('Ġ', '')
    candidate2 = ' '.join(intervention.candidates[1]).replace('Ġ', '')
    # Odds of candidate2 vs candidate1; the total effect is the relative
    # change in those odds when the input is swapped.
    odds_base = candidate2_base_prob / candidate1_base_prob
    odds_alt = candidate2_alt_prob / candidate1_alt_prob
    total_effect = (odds_alt - odds_base) / odds_base
    results = {
        'base_string1': intervention.base_strings[0],
        'base_string2': intervention.base_strings[1],
        'candidate1': candidate1,
        'candidate2': candidate2,
        'candidate1_base_prob': candidate1_base_prob,
        'candidate2_base_prob': candidate2_base_prob,
        'odds_base': odds_base,
        'candidate1_alt_prob': candidate1_alt_prob,
        'candidate2_alt_prob': candidate2_alt_prob,
        'odds_alt': odds_alt,
        'total_effect': total_effect,
    }
    for effect_type in effect_types:
        # Per-head / per-layer / whole-model probabilities under the
        # attention intervention, converted to effects vs. the base odds.
        candidate1_probs_head, candidate2_probs_head, candidate1_probs_layer, candidate2_probs_layer,\
            candidate1_probs_model, candidate2_probs_model = model.attention_intervention_experiment(
                intervention, effect_type)
        odds_intervention_head = candidate2_probs_head / candidate1_probs_head
        odds_intervention_layer = candidate2_probs_layer / candidate1_probs_layer
        odds_intervention_model = candidate2_probs_model / candidate1_probs_model
        effect_head = (odds_intervention_head - odds_base) / odds_base
        effect_layer = (odds_intervention_layer - odds_base) / odds_base
        effect_model = (odds_intervention_model - odds_base) / odds_base
        results[effect_type + "_odds_head"] = odds_intervention_head.tolist()
        results[effect_type + "_effect_head"] = effect_head.tolist()
        results[effect_type + "_effect_layer"] = effect_layer.tolist()
        results[effect_type + "_effect_model"] = effect_model
    return results
def report_intervention(results, effect_types=('indirect', 'direct'), verbose=False):
    """Report results for single intervention"""
    # Echo the two inputs and the two candidates.
    for label, key in (('x ', 'base_string1'), ("x'", 'base_string2'),
                       ('c1', 'candidate1'), ('c2', 'candidate2')):
        print(f"{label}: {results[key]}")
    print(f"\np(c2|x) / p(c1|x) = {results['odds_base']:.5f}")
    print(f"p(c2|x') / p(c1|x') = {results['odds_alt']:.5f}")
    print(f"\nTOTAL Effect: (p(c2|x') / p(c1|x')) / (p(c2|x) / p(c1|x)) - 1 = {results['total_effect']:.3f}")
    for effect_type in effect_types:
        if verbose:
            print(f'\n{effect_type.upper()} Effect')
            if effect_type == 'indirect':
                print(" Intervention: replace Attn(x) with Attn(x') in a specific layer/head")
                print(" Effect = (p(c2|x, Attn(x')) / p(c1|x, Attn(x')) / (p(c2|x) / p(c1|x)) - 1")
            elif effect_type == 'direct':
                print(" Intervention: replace x with x' while preserving Attn(x) in a specific layer/head")
                print(" Effect = (p(c2|x', Attn(x)) / p(c1|x', Attn(x)) / (p(c2|x) / p(c1|x)) - 1")
        # Heat map of the per-(layer, head) effect.
        plt.figure(figsize=(9, 7))
        heat = sns.heatmap(results[effect_type + '_effect_head'],
                           annot=True, annot_kws={"size": 12}, fmt=".2f")
        heat.set(xlabel='Head', ylabel='Layer',
                 title=f'{effect_type.capitalize()} Effect')
def perform_interventions(interventions, model, effect_types=('indirect', 'direct')):
    """Perform multiple interventions"""
    return [perform_intervention(intervention, model, effect_types)
            for intervention in tqdm(interventions)]
def report_interventions_summary_by_head(results, effect_types=('indirect', 'direct'), verbose=False, k=10,
                                         show_head_examples=False):
    """Report summary results for multiple interventions by head.

    Prints the top-k heads by mean effect (with one-tailed t-test p-values
    against all head values) and draws one heat map per effect type.
    """
    df = pd.DataFrame(results)
    print('*** SUMMARY BY HEAD ***')
    print(f"Num interventions: {len(df)}")
    print(f"Mean total effect: {df.total_effect.mean():.3f}")
    for effect_type in effect_types:
        effect = np.stack(df[effect_type + '_effect_head'].to_numpy())  # Convert column to 3d ndarray (num_examples x num_layers x num_heads)
        mean_effect = effect.mean(axis=0)
        if effect_type == 'indirect':
            ranking_metric = mean_effect
        else:
            ranking_metric = -mean_effect
        # BUG FIX: this local used to be named `topk_indices`, shadowing the
        # module-level helper of the same name; Python then treats the name
        # as local throughout the function, so the call raised
        # UnboundLocalError before ever reaching the helper.
        top_indices = topk_indices(ranking_metric, k)
        # Compute significance levels
        all_values = effect.flatten()
        print(f'\n{effect_type.upper()} Effect (mean = {all_values.mean()})')
        print(f"Top {k} heads:")
        for ind in top_indices:
            layer, head = np.unravel_index(ind, mean_effect.shape)
            head_values = effect[:, layer, head].flatten()
            tstatistic, pvalue = ttest_ind(head_values, all_values)
            # The ranking direction fixes the sign of the statistic.
            if effect_type == 'indirect':
                assert tstatistic > 0
            else:
                assert tstatistic < 0
            one_tailed_pvalue = pvalue / 2
            print(f' {layer} {head}: {mean_effect[layer, head]:.3f} (p={one_tailed_pvalue:.4f})')
            if effect_type == 'indirect' and show_head_examples:
                # Show the three interventions with the strongest effect at
                # this head.
                top_results_for_head = sorted(results,
                                              key=lambda result: result['indirect_effect_head'][layer][head],
                                              reverse=True)
                for result in top_results_for_head[:3]:
                    print(f' {result["indirect_effect_head"][layer][head]:.3f} '
                          f'{result["base_string1"]} | {result["candidate1"]} | {result["candidate2"]}')
        if verbose:
            if effect_type == 'indirect':
                print(" Intervention: replace Attn(x) with Attn(x') in a specific layer/head")
                print(f" Effect = (p(c2|x, Attn(x')) / p(c1|x, Attn(x')) / (p(c2|x) / p(c1|x)) - 1")
            elif effect_type == 'direct':
                print(" Intervention: replace x with x' while preserving Attn(x) in a specific layer/head")
                print(f" Effect = (p(c2|x', Attn(x)) / p(c1|x', Attn(x)) / (p(c2|x) / p(c1|x)) - 1")
        plt.figure(figsize=(14, 10))
        ax = sns.heatmap(mean_effect, annot=True, annot_kws={"size": 12}, fmt=".2f")
        ax.set(xlabel='Head', ylabel='Layer', title=f'Mean {effect_type.capitalize()} Effect')
def report_interventions_summary_by_layer(results, effect_types=('indirect', 'direct')):
    """Report summary results for multiple interventions by layer"""
    df = pd.DataFrame(results)
    print('*** SUMMARY BY LAYER ***')
    print(f"Num interventions: {len(df)}")
    print(f"Mean total effect: {df.total_effect.mean():.3f}")
    for effect_type in effect_types:
        # Column -> 2d ndarray (num_examples x num_layers), then mean over
        # examples to get one value per layer.
        per_example = np.stack(df[effect_type + '_effect_layer'].to_numpy())
        layer_means = per_example.mean(axis=0)
        n_layers = layer_means.shape[0]
        # Horizontal bar chart, one bar per layer.
        plt.figure(figsize=(9, 7))
        bars = sns.barplot(x=layer_means, y=list(range(n_layers)),
                           color="blue", saturation=.3, orient="h")
        bars.set(ylabel='Layer', title=f'Mean {effect_type.capitalize()} Effect')
def get_odds_ratio(intervention, model):
    """Odds ratio of candidate2 vs candidate1 under the alternate input,
    relative to the same odds under the base input."""
    base_input = intervention.base_strings_tok[0]
    alt_input = intervention.base_strings_tok[1]
    with torch.no_grad():
        p1_base, p2_base = model.get_probabilities_for_examples_multitoken(
            base_input,
            intervention.candidates_tok)
        p1_alt, p2_alt = model.get_probabilities_for_examples_multitoken(
            alt_input,
            intervention.candidates_tok)
        base_odds = p2_base / p1_base
        alt_odds = p2_alt / p1_alt
        return alt_odds / base_odds
def topk_indices(arr, k):
    """Return the flattened indices of the k largest values of arr, descending."""
    return np.argsort(-arr, axis=None)[:k]
if __name__ == "__main__":
    # Smoke-test driver: run two hand-built interventions through the model
    # and plot the per-layer summary. Requires the project-local
    # `experiment_num_agreement` module and a downloadable GPT-2 checkpoint.
    from transformers import GPT2Tokenizer
    from experiment_num_agreement import Intervention, Model
    from pandas import DataFrame
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = Model(output_attentions=True)
    # Test experiment
    # Each Intervention pairs a template with two substitutions ("He"/"She")
    # and two candidate continuations to compare.
    interventions = [
        Intervention(
            tokenizer,
            "The doctor asked the nurse a question. {}",
            ["He", "She"],
            ["asked", "answered"]),
        Intervention(
            tokenizer,
            "The doctor asked the nurse a question. {}",
            ["He", "She"],
            ["requested", "responded"])
    ]
    # `perform_interventions` is defined earlier in this file (outside this view).
    results = perform_interventions(interventions, model)
    report_interventions_summary_by_layer(results)
| 9,550 | 44.265403 | 142 | py |
lm-intervention | lm-intervention-master/attention_figures3.py | """Creates figures showing attention for specific examples, based on JSON files"""
import json
import math
from operator import itemgetter
import numpy as np
import seaborn as sns
import torch
from matplotlib import pyplot as plt
from transformers import GPT2Model, GPT2Tokenizer
BLACK = '#000000'
GRAY = '#303030'
def save_fig(prompts, heads, model, tokenizer, fname, device, highlight_indices=None):
    """Render a two-panel horizontal bar chart of last-token attention and save it as PDF.

    For each of the two prompts, the attention paid by the final token to every
    token in the sequence is drawn as stacked bars, one color per (layer, head)
    pair in `heads`. Tick labels are LaTeX-formatted; tokens listed in
    `highlight_indices` are bolded (own panel) or underlined (other panel).

    Args:
        prompts: pair of input strings, one per panel.
        heads: iterable of (layer, head) index pairs to plot.
        model: GPT-2 style model returning attentions as the last output.
        tokenizer: matching tokenizer.
        fname: output PDF path.
        device: torch device string for the input batch.
        highlight_indices: optional pair of index lists, one per panel;
            None disables highlighting.
    """
    palette = sns.color_palette('muted')
    plt.rc('text', usetex=True)
    fig, axs = plt.subplots(1, 2, sharey=False, figsize=(4.0, 3.5))
    axs[0].yaxis.set_ticks_position('none')
    plt.rcParams.update({'axes.titlesize': 'xx-large'})
    attentions = []
    max_attn = 0
    seqs = []
    # First pass: run the model on both prompts, collect attentions, and find
    # the largest stacked attention value so both panels share an x-limit.
    for g_index in range(2):
        prompt = prompts[g_index]
        print(prompt)
        input_ = tokenizer.encode(prompt)
        print(input_)
        batch = torch.tensor(input_).unsqueeze(0).to(device)
        attention = model(batch)[-1]
        seq = tokenizer.convert_ids_to_tokens(input_)
        print(seq)
        # Strip the GPT-2 BPE leading-space marker from token display strings.
        seq = [t.replace('Ġ', '') for t in seq]
        seqs.append(seq)
        seq_len = len(input_)
        # Stack per-layer tuples into [n_layers, n_heads, seq, seq] (batch dim squeezed).
        attention = torch.stack(attention)
        attention = attention.squeeze(1)
        assert torch.allclose(attention.sum(-1), torch.tensor([1.0]))
        attentions.append(attention)
        # Sum the last-token attention rows over all requested heads.
        attn_sum = torch.Tensor([0])
        for layer, head in heads:
            attn_sum = attention[layer][head][-1] + attn_sum
        if max(attn_sum) > max_attn:
            max_attn = max(attn_sum)
    # Round the shared x-axis limit up to the next tenth.
    xlim_upper = math.ceil(max_attn * 10) / 10
    # Second pass: draw the stacked bars for each panel.
    for g_index in range(2):
        attention = attentions[g_index]
        head_names = []
        ax = axs[g_index]
        seq = seqs[g_index]
        formatted_seq = []
        if highlight_indices:
            # Bold tokens highlighted for this panel; underline tokens
            # highlighted for the other panel.
            for i, t in enumerate(seq):
                formatted_t = t
                for j in range(2):
                    if i in highlight_indices[j]:
                        if j == g_index:
                            formatted_t = f"\\textbf{{{t}}}"
                        else:
                            formatted_t = f"\\setul{{.15ex}}{{.2ex}}\\ul{{{t}}}"
                        break
                formatted_seq.append(formatted_t)
            # The final (attending) token is always bold.
            formatted_seq[-1] = f"\\textbf{{{formatted_seq[-1]}}}"
        else:
            formatted_seq = seq
        print('formatted', formatted_seq)
        plts = []
        # `left` accumulates bar lengths so each head's bars stack on the previous.
        left = None
        for i, (layer, head) in enumerate(heads):
            attn_last_word = attention[layer][head][-1].numpy()
            # Placeholder y-labels avoid collisions between duplicate tokens;
            # real labels are set via set_yticklabels below.
            seq_placeholders = [f'a{i}' for i in range(len(formatted_seq))]
            if left is None:
                print(attn_last_word)
                p = ax.barh(seq_placeholders, attn_last_word, color=palette[i], linewidth=0)
            else:
                p = ax.barh(seq_placeholders, attn_last_word, left=left, color=palette[i], linewidth=0)
            print(ax.get_yticklabels())
            ax.set_yticklabels(formatted_seq)
            if left is None:
                left = np.zeros_like(attn_last_word)
            left += attn_last_word
            if highlight_indices:
                # NOTE(review): this inner loop reuses `i` and shadows the
                # enumerate index from the heads loop; harmless today because
                # `i` is not read again afterwards, but fragile if edited.
                for i in range(seq_len):
                    if i in highlight_indices[g_index]:
                        color = BLACK
                    else:
                        color = GRAY
                    ax.get_yticklabels()[i].set_color(color)
            ax.get_yticklabels()[-1].set_color(BLACK)
            plts.append(p)
            head_names.append(f"{layer}-{head}")
        ax.set_xlim([0, xlim_upper])
        # ax.set_xlim([0, 0.5])
        ax.set_xticks([0, xlim_upper])
        ax.invert_yaxis()
        plt.setp(ax.get_yticklabels(), fontsize=8, ha='right')
        #ax.set_xticks([0, 0.5])
        plt.setp(ax.get_xticklabels(), fontsize=7)
        sns.despine(left=True, bottom=True)
        ax.tick_params(axis='x', pad=0, length=0)
        ax.tick_params(axis='y', pad=0)
        ax.yaxis.labelpad = 0
        ax.xaxis.labelpad = 0
    # Shared legend maps bar colors to "layer-head" labels.
    lgd = plt.figlegend(plts, head_names,'lower center', fontsize=7, borderpad=0.3, handlelength=.6,
                        handletextpad=.2, labelspacing = 0.1, bbox_to_anchor=(0.86, 0.11))
    plt.savefig(fname, format='pdf', bbox_extra_artists = (lgd,), bbox_inches = 'tight')
    plt.close()
def main():
    """Generate qualitative attention figures for the top heads of each model.

    Reads precomputed intervention results from
    attention_results/<structure>/..., sorts examples by total effect, and
    saves up to six stacked-attention PDFs per (model, structure) combination
    under attention_figures/qualitative/.
    """
    sns.set_context("paper")
    sns.set_style("white")
    device = 'cpu'
    # LaTeX rendering is required for the bold/underline tick labels.
    plt.rc('text', usetex=True)
    plt.rcParams.update({
        "text.latex.preamble": [
            "\\usepackage{color}",
            "\\usepackage{soul}",
            "\\setulcolor{blue}"
        ]
    })
    # Hand-picked (layer, head) pairs with the strongest effects per model.
    top_heads = {
        'gpt2':[(10, 9), (11, 11)],
        'gpt2-medium': [(10, 9), (6, 15), (10,12)],
        'gpt2-xl':[(16,15), (16, 24), (17,10)],
        'gpt2-large':[(16,19), (16,5), (15,6)],
        'distilgpt2': [(3,1), (2,6), (3,6)]
    }
    models = ['gpt2']#, 'gpt2-medium', 'gpt2-xl', 'gpt2-large', 'distilgpt2']
    # structures = ['simple', 'distractor_1', 'distractor', 'within_rc_singular', 'singular', 'rc_singular']
    structures = ['simple']
    '''
    examples_to_highlight = {
        "The guard appreciated getting treatment from the nurse": [[7], [1]],
        "The driver transported the housekeeper to the job because": [[4,5], [1]],
        "The manager promised to give a raise to the teacher": [[9], [1]],
        "The driver never drove the librarian anywhere": [[5,6], [1]],
        "The nurse examined the farmer for injuries because": [[1], [4]],
        "The CEO ordered the cleaner out of the room": [[4],[1]],
        "The hairdresser shouted at the driver because": [[1,2,3,4],[8]],
        "The chief demanded an apology from the teacher because": [[7], [1]],
        "The physician examined the auditor for sores because": [[4],[1]],
        "The laborer dug a hole for the assistant because": [[8],[1,2]]
    }
    '''
    # split = 'dev'
    testing = False
    for model_version in models:
        for structure in structures:
            # Token positions of the two entities differ per sentence structure.
            if structure.startswith('within_rc'):
                highlight_indices = [[4], [1]]
            elif structure.startswith('rc'):
                highlight_indices = [[1], [4]]
            elif structure == 'singular' or structure == 'plural':
                highlight_indices = [[1], [4]]
            else:
                highlight_indices = None
            heads = top_heads[model_version]
            if model_version == 'distilgpt2':
                filter = 'unfiltered' # In order to get canonical example
            else:
                filter = 'filtered'
            fname = f"attention_results/{structure}/attention_intervention_{model_version}_{filter}.json"
            with open(fname) as f:
                data = json.load(f)
            prompts = None
            results = data['results']
            # Most-affected examples first.
            results_by_ratio = sorted(results, key=itemgetter('total_effect'), reverse=True)
            with torch.no_grad():
                # Get attention and validate
                model = GPT2Model.from_pretrained(model_version, output_attentions=True)
                tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
                model.eval()
                for result_index, result in enumerate(results_by_ratio):
                    # Append the candidate continuation so attention from it can be plotted.
                    prompts = (result['base_string1']+' '+result['candidate1'], result['base_string2']+' '+result['candidate2'])
                    #highlight_indices = None
                    #for example, indices in examples_to_highlight.items():
                    #    if example in prompts[0]:
                    #        highlight_indices = indices
                    #        break
                    fname = f'attention_figures/qualitative/{structure}_{model_version}_{filter}_{result_index}.pdf'
                    save_fig(prompts, heads, model, tokenizer, fname, device, highlight_indices)
                    # Only the top six examples are rendered.
                    if result_index >= 5:
                        break
            # For testing only:
            if testing:
                break
        if testing:
            break
# Script entry point: build all qualitative attention figures.
if __name__ == '__main__':
    main()
lm-intervention | lm-intervention-master/attention_intervention_model.py |
"""
Changes the huggingface transformer attention module to allow interventions
in the attention distribution.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class AttentionOverride(nn.Module):
    """A copy of `modeling_gpt2.Attention` class, but with overridden attention values"""

    def __init__(self, attention, attn_override, attn_override_mask):
        """
        Args:
            attention: instance of modeling_gpt2.Attention from which variables will be
                copied.
            attn_override: values to override the computed attention weights.
                Shape is [num_heads, seq_len, seq_len]
            attn_override_mask: indicates which attention weights to override.
                Shape is [num_heads, seq_len, seq_len]
        """
        super(AttentionOverride, self).__init__()
        # Copy values from attention
        # All submodules (c_attn, c_proj, dropouts) are shared by reference, so
        # this wrapper reuses the original layer's weights without copying them.
        self.output_attentions = attention.output_attentions
        self.register_buffer("bias", attention._buffers["bias"])
        self.n_head = attention.n_head
        self.split_size = attention.split_size
        self.scale = attention.scale
        self.c_attn = attention.c_attn
        self.c_proj = attention.c_proj
        self.attn_dropout = attention.attn_dropout
        self.resid_dropout = attention.resid_dropout
        # Set attention override values
        self.attn_override = attn_override
        self.attn_override_mask = attn_override_mask

    def _attn(self, q, k, v, attention_mask=None, head_mask=None):
        """Compute softmax attention, splice in the override values, and apply to v."""
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        nd, ns = w.size(-2), w.size(-1)
        # Causal mask: positions a query may not attend to get a large negative score.
        b = self.bias[:, :, ns - nd : ns, :ns]
        w = w * b - 1e4 * (1 - b)
        if attention_mask is not None:
            # Apply the attention mask
            w = w + attention_mask
        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask
        # attn_override and attn_override_mask are of shape
        # (batch_size, num_heads, override_seq_len, override_seq_len)
        # where override_seq_len is the length of subsequence for which attention is
        # being overridden.
        override_seq_len = self.attn_override_mask.shape[-1]
        w[:, :, :override_seq_len, :override_seq_len] = torch.where(
            self.attn_override_mask,
            self.attn_override,
            w[:, :, :override_seq_len, :override_seq_len],
        )
        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        """Inverse of split_heads: fold the head dimension back into the feature dim."""
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        """Reshape [batch, seq, features] into per-head tensors (keys transposed)."""
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        """Standard GPT-2 attention forward pass; the override happens inside _attn."""
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            past_key, past_value = (
                layer_past[0].transpose(-2, -1),
                layer_past[1],
            )  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        present = torch.stack(
            (key.transpose(-2, -1), value)
        )  # transpose to have same shapes for stacking
        attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
        a = attn_outputs[0]
        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)
        outputs = [a, present] + attn_outputs[1:]
        return outputs  # a, present, (attentions)
class TXLAttentionOverride(nn.Module):
    """ A copy of `modeling_transfo_xl.RelPartialLearnableMultiHeadAttn` class,
    but with overridden attention values """

    def __init__(self, module, attn_override, attn_override_mask):
        """
        Args:
            module: instance of modeling_transfo_xl.RelPartialLearnableMultiHeadAttn
                from which variables will be copied
            attn_override: values to override the computed attention weights.
                Shape is [bsz, num_heads, seq_len, seq_len]
            attn_override_mask: indicates which attention weights to override.
                Shape is [bsz, num_heads, seq_len, seq_len]
        """
        super(TXLAttentionOverride, self).__init__()
        # Copy values from module
        # Submodules and parameters are shared by reference with the wrapped layer.
        self.output_attentions = module.output_attentions
        self.n_head = module.n_head
        self.d_model = module.d_model
        self.d_head = module.d_head
        self.dropout = module.dropout
        self.qkv_net = module.qkv_net
        self.drop = module.drop
        self.dropatt = module.dropatt
        self.o_net = module.o_net
        self.layer_norm = module.layer_norm
        self.scale = module.scale
        self.pre_lnorm = module.pre_lnorm
        self.r_r_bias = module.r_r_bias
        self.r_w_bias = module.r_w_bias
        self.r_net = module.r_net
        # Set attention override values
        self.attn_override = attn_override
        self.attn_override_mask = attn_override_mask

    def _rel_shift(self, x):
        """Relative-position shift trick: realigns scores so each row is offset by one."""
        zero_pad_shape = (x.size(0), 1) + x.size()[2:]
        zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=1)
        x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
        x_padded = x_padded.view(*x_padded_shape)
        x = x_padded[1:].view_as(x)
        return x

    def forward(self, w, r, attn_mask=None, mems=None, head_mask=None):
        """Relative multi-head attention forward pass with the probability override.

        Args:
            w: input hidden states, [qlen, bsz, d_model]
            r: relative positional embeddings
            attn_mask: optional attention mask (2-d or 3-d)
            mems: optional cached hidden states from previous segments
            head_mask: optional per-head multiplicative mask
        """
        qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
        if mems is not None:
            # Prepend memory so keys/values cover the full (mem + current) context.
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            # Queries come only from the current segment, not the memory.
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        klen = w_head_k.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)  # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)  # qlen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)  # qlen x bsz x n_head x d_head
        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)  # qlen x n_head x d_head
        # compute attention score
        rw_head_q = w_head_q + self.r_w_bias  # qlen x bsz x n_head x d_head
        AC = torch.einsum("ibnd,jbnd->ijbn", (rw_head_q, w_head_k))  # qlen x klen x bsz x n_head
        rr_head_q = w_head_q + self.r_r_bias
        BD = torch.einsum("ibnd,jnd->ijbn", (rr_head_q, r_head_k))  # qlen x klen x bsz x n_head
        BD = self._rel_shift(BD)
        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)
        # compute attention probability
        if attn_mask is not None and torch.sum(attn_mask).item():
            attn_mask = attn_mask == 1  # Switch to bool
            if attn_mask.dim() == 2:
                if next(self.parameters()).dtype == torch.float16:
                    attn_score = (
                        attn_score.float().masked_fill(attn_mask[None, :, :, None], -65000).type_as(attn_score)
                    )
                else:
                    attn_score = attn_score.float().masked_fill(attn_mask[None, :, :, None], -1e30).type_as(attn_score)
            elif attn_mask.dim() == 3:
                if next(self.parameters()).dtype == torch.float16:
                    attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -65000).type_as(attn_score)
                else:
                    attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -1e30).type_as(attn_score)
        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)
        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * head_mask
        # Intervention:
        # attn_override and attn_override_mask are of shape (bsz, n_heads, query_seq_len, key_seq_len)
        # attn_prob is of shape (query_seq_len, key_seq_len, bsz, n_heads)
        # so the override tensors are permuted to TXL's axis ordering first.
        _, _, override_q_len, override_k_len = self.attn_override_mask.shape
        attn_prob[:override_q_len, :override_k_len, :, :] = torch.where(
            self.attn_override_mask.permute(2, 3, 0, 1),
            self.attn_override.permute(2, 3, 0, 1),
            attn_prob[:override_q_len, :override_k_len, :, :])
        # compute attention vector
        attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
        # linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            # residual connection
            outputs = [w + attn_out]
        else:
            # residual connection + layer normalization
            outputs = [self.layer_norm(w + attn_out)]
        if self.output_attentions:
            outputs.append(attn_prob)
        return outputs
class XLNetAttentionOverride(nn.Module):
    """ A copy of `modeling_xlnet.XLNetRelativeAttention` class,
    but with overridden attention values """

    def __init__(self, module, attn_override, attn_override_mask):
        """
        Args:
            module: instance of modeling_xlnet.XLNetRelativeAttention
                from which variables will be copied
            attn_override: values to override the computed attention weights.
                Tuple of content and query attentions (2-stream self-attention),
                each of shape [bsz, num_heads, seq_len, seq_len]
            attn_override_mask: indicates which attention weights to override.
                Shape is [bsz, num_heads, seq_len, seq_len]
        """
        super().__init__()
        self.output_attentions = module.output_attentions
        # if config.d_model % config.n_head != 0:
        #     raise ValueError(
        #         "The hidden size (%d) is not a multiple of the number of attention "
        #         "heads (%d)" % (config.d_model, config.n_head)
        #     )
        # Parameters are shared by reference with the wrapped layer.
        self.n_head = module.n_head
        self.d_head = module.d_head
        self.d_model = module.d_model
        self.scale = module.scale
        self.q = module.q
        self.k = module.k
        self.v = module.v
        self.o = module.o
        self.r = module.r
        self.r_r_bias = module.r_r_bias
        self.r_s_bias = module.r_s_bias
        self.r_w_bias = module.r_w_bias
        self.seg_embed = module.seg_embed
        self.layer_norm = module.layer_norm
        self.dropout = module.dropout
        # Set attention override values
        # Separate overrides for the content (h) and query (g) streams.
        self.content_attn_override, self.query_attn_override = attn_override
        self.attn_override_mask = attn_override_mask

    def prune_heads(self, heads):
        # Head pruning is not supported by this override wrapper.
        raise NotImplementedError

    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        x_size = x.shape
        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
        # x = x[:, 0:klen, :, :]
        x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
        return x

    @staticmethod
    def rel_shift_bnij(x, klen=-1):
        """Relative shift for scores laid out as [bsz, n_head, qlen, klen]."""
        x_size = x.shape
        x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
        x = x[:, :, 1:, :]
        x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
        # Note: the tensor-slice form was faster in my testing than torch.index_select
        #   However, tracing doesn't like the nature of the slice, and if klen changes
        #   during the run then it'll fail, whereas index_select will be fine.
        x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
        # x = x[:, :, :, :klen]
        return x

    def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r, attn_override, seg_mat=None, attn_mask=None, head_mask=None):
        """Core relative positional attention operations."""
        # content based attention score
        ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)
        # position based attention score
        bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
        bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
            ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef)
        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            if attn_mask.dtype == torch.float16:
                attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask)
            else:
                attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask)
        # attention probability
        attn_prob = F.softmax(attn_score, dim=3)
        attn_prob = self.dropout(attn_prob)
        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask)
        # Intervention:
        # attn_override and attn_override_mask are of shape (batch_size, num_heads, override_seq_len, override_seq_len)
        # where override_seq_len is the length of subsequence for which attention is being overridden
        override_seq_len = self.attn_override_mask.shape[-1]
        attn_prob[:, :, :override_seq_len, :override_seq_len] = torch.where(
            self.attn_override_mask,
            attn_override,
            attn_prob[:, :, :override_seq_len, :override_seq_len])
        # attention output
        attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)
        if self.output_attentions:
            return attn_vec, torch.einsum("bnij->ijbn", attn_prob)
        return attn_vec

    def post_attention(self, h, attn_vec, residual=True):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o)
        attn_out = self.dropout(attn_out)
        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)
        return output

    def forward(self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None):
        """Two-stream forward pass applying the stream-specific overrides.

        The `assert False ### NEW` guards mark code paths this wrapper does not
        support: it requires the query stream `g`, `target_mapping`, and
        `output_attentions` to be active.
        """
        if g is not None:
            # Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h
            # content-based key head
            k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
            # content-based value head
            v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
            # position-based key head
            k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
            # h-stream
            # content-stream query head
            q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r,
                attn_override=self.content_attn_override,
                seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask
            )
            if self.output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h
            # post processing
            output_h = self.post_attention(h, attn_vec_h)
            # g-stream
            # query-stream query head
            q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q)
            # core attention ops
            if target_mapping is not None:
                q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r,
                    attn_override=self.query_attn_override,
                    seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask
                )
                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g
                attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
            else:
                assert False ### NEW
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask
                )
                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g
            # post processing
            output_g = self.post_attention(g, attn_vec_g)
            if self.output_attentions:
                attn_prob = attn_prob_h, attn_prob_g
        else:
            assert False ### NEW
            # Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h
            # content heads
            q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
            k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
            v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
            # positional heads
            k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask
            )
            if self.output_attentions:
                attn_vec, attn_prob = attn_vec
            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None
        outputs = (output_h, output_g)
        if self.output_attentions:
            outputs = outputs + (attn_prob,)
        return outputs
class BertAttentionOverride(nn.Module):
    """A copy of `modeling_bert.BertSelfAttention` class, but with overridden attention values"""

    def __init__(self, module, attn_override, attn_override_mask):
        """
        Args:
            module: instance of modeling_bert.BertSelfAttentionOverride
                from which variables will be copied
            attn_override: values to override the computed attention weights.
                Shape is [bsz, num_heads, seq_len, seq_len]
            attn_override_mask: indicates which attention weights to override.
                Shape is [bsz, num_heads, seq_len, seq_len]
        """
        super().__init__()
        # if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
        #     raise ValueError(
        #         "The hidden size (%d) is not a multiple of the number of attention "
        #         "heads (%d)" % (config.hidden_size, config.num_attention_heads)
        #     )
        # Projections and dropout are shared by reference with the wrapped layer.
        self.output_attentions = module.output_attentions
        self.num_attention_heads = module.num_attention_heads
        self.attention_head_size = module.attention_head_size
        self.all_head_size = module.all_head_size
        self.query = module.query
        self.key = module.key
        self.value = module.value
        self.dropout = module.dropout
        # Set attention override values
        self.attn_override = attn_override
        self.attn_override_mask = attn_override_mask

    def transpose_for_scores(self, x):
        """Reshape [bsz, seq, hidden] to [bsz, heads, seq, head_size]."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        """BERT self-attention forward pass with the probability override applied."""
        mixed_query_layer = self.query(hidden_states)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        if encoder_hidden_states is not None:
            mixed_key_layer = self.key(encoder_hidden_states)
            mixed_value_layer = self.value(encoder_hidden_states)
            attention_mask = encoder_attention_mask
        else:
            mixed_key_layer = self.key(hidden_states)
            mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        # Intervention:
        # attn_override and attn_override_mask are of shape (batch_size, num_heads, override_seq_len, override_seq_len)
        # where override_seq_len is the length of subsequence for which attention is being overridden
        override_seq_len = self.attn_override_mask.shape[-1]
        attention_probs[:, :, :override_seq_len, :override_seq_len] = torch.where(
            self.attn_override_mask,
            self.attn_override,
            attention_probs[:, :, :override_seq_len, :override_seq_len])
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
        return outputs
class DistilBertAttentionOverride(nn.Module):
    """A copy of `modeling_distilbert.MultiHeadSelfAttention` class, but with overridden attention values"""

    def __init__(self, module, attn_override, attn_override_mask):
        """
        Args:
            module: instance of modeling_distilbert.MultiHeadSelfAttention
                from which variables will be copied
            attn_override: values to override the computed attention weights.
                Shape is [bsz, num_heads, seq_len, seq_len]
            attn_override_mask: indicates which attention weights to override.
                Shape is [bsz, num_heads, seq_len, seq_len]
        """
        super().__init__()
        # Linear layers are shared by reference with the wrapped module.
        self.n_heads = module.n_heads
        self.dim = module.dim
        self.dropout = module.dropout
        self.output_attentions = module.output_attentions
        assert self.dim % self.n_heads == 0
        self.q_lin = module.q_lin
        self.k_lin = module.k_lin
        self.v_lin = module.v_lin
        self.out_lin = module.out_lin
        self.pruned_heads = module.pruned_heads
        # Set attention override values
        self.attn_override = attn_override
        self.attn_override_mask = attn_override_mask

    def prune_heads(self, heads):
        """Prune the given attention heads from the projections.

        NOTE(review): `prune_linear_layer` is not imported anywhere in this
        file, so calling this method would raise NameError — confirm whether
        pruning is ever exercised on this override wrapper.
        """
        attention_head_size = self.dim // self.n_heads
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_heads, attention_head_size)
        heads = set(heads) - self.pruned_heads
        for head in heads:
            head -= sum(1 if h < head else 0 for h in self.pruned_heads)
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # Prune linear layers
        self.q_lin = prune_linear_layer(self.q_lin, index)
        self.k_lin = prune_linear_layer(self.k_lin, index)
        self.v_lin = prune_linear_layer(self.v_lin, index)
        self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.dim = attention_head_size * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, query, key, value, mask, head_mask=None):
        """
        Parameters
        ----------
        query: torch.tensor(bs, seq_length, dim)
        key: torch.tensor(bs, seq_length, dim)
        value: torch.tensor(bs, seq_length, dim)
        mask: torch.tensor(bs, seq_length)

        Outputs
        -------
        weights: torch.tensor(bs, n_heads, seq_length, seq_length)
            Attention weights
        context: torch.tensor(bs, seq_length, dim)
            Contextualized layer. Optional: only if `output_attentions=True`
        """
        bs, q_length, dim = query.size()
        k_length = key.size(1)
        # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
        # assert key.size() == value.size()
        dim_per_head = self.dim // self.n_heads
        mask_reshp = (bs, 1, 1, k_length)

        def shape(x):
            """ separate heads """
            return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)

        def unshape(x):
            """ group heads """
            return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)

        q = shape(self.q_lin(query))  # (bs, n_heads, q_length, dim_per_head)
        k = shape(self.k_lin(key))  # (bs, n_heads, k_length, dim_per_head)
        v = shape(self.v_lin(value))  # (bs, n_heads, k_length, dim_per_head)
        q = q / math.sqrt(dim_per_head)  # (bs, n_heads, q_length, dim_per_head)
        scores = torch.matmul(q, k.transpose(2, 3))  # (bs, n_heads, q_length, k_length)
        mask = (mask == 0).view(mask_reshp).expand_as(scores)  # (bs, n_heads, q_length, k_length)
        scores.masked_fill_(mask, -float("inf"))  # (bs, n_heads, q_length, k_length)
        weights = nn.Softmax(dim=-1)(scores)  # (bs, n_heads, q_length, k_length)
        weights = self.dropout(weights)  # (bs, n_heads, q_length, k_length)
        # Mask heads if we want to
        if head_mask is not None:
            weights = weights * head_mask
        # Intervention:
        # attn_override and attn_override_mask are of shape (batch_size, num_heads, override_seq_len, override_seq_len)
        # where override_seq_len is the length of subsequence for which attention is being overridden
        override_seq_len = self.attn_override_mask.shape[-1]
        weights[:, :, :override_seq_len, :override_seq_len] = torch.where(
            self.attn_override_mask,
            self.attn_override,
            weights[:, :, :override_seq_len, :override_seq_len])
        context = torch.matmul(weights, v)  # (bs, n_heads, q_length, dim_per_head)
        context = unshape(context)  # (bs, q_length, dim)
        context = self.out_lin(context)  # (bs, q_length, dim)
        if self.output_attentions:
            return (context, weights)
        else:
            return (context,)
lm-intervention | lm-intervention-master/transformers_modified/modeling_transfo_xl.py | """ A copy of transformers/modeling_transfo_xl.py from the Huggingface
transformers library modified so that the attention module is called with
non-keyword arguments (to make those arguments accessible to the hook).
"""
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.configuration_transfo_xl import TransfoXLConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable
from transformers.modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax
from transformers.modeling_utils import PreTrainedModel
logger = logging.getLogger(__name__)
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP = {
"transfo-xl-wt103": "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-pytorch_model.bin",
}
def build_tf_to_pytorch_map(model, config):
    """ A map of modules from TF to PyTorch.
        This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.

        Returns a dict mapping TF checkpoint variable names (strings) to the
        PyTorch parameters (or, for the relative-position biases, lists of
        parameters) they should be loaded into.
    """
    tf_to_pt_map = {}
    if hasattr(model, "transformer"):
        # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
        tf_to_pt_map.update(
            {
                "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
                "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias,
            }
        )
        # One output layer / projection per adaptive-softmax cutoff bucket.
        for i, (out_l, proj_l, tie_proj) in enumerate(
            zip(model.crit.out_layers, model.crit.out_projs, config.tie_projs)
        ):
            layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
            if config.tie_weight:
                # Weights are tied to the input embeddings; only the bias is
                # stored separately in the TF checkpoint.
                tf_to_pt_map.update({layer_str + "b": out_l.bias})
            else:
                # Untied output weights: unreachable dead code kept from upstream.
                raise NotImplementedError
                # I don't think this is implemented in the TF code
                tf_to_pt_map.update({layer_str + "lookup_table": out_l.weight, layer_str + "b": out_l.bias})
            if not tie_proj:
                tf_to_pt_map.update({layer_str + "proj": proj_l})
        # Now load the rest of the transformer
        model = model.transformer
    # Embeddings
    for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
        layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
        tf_to_pt_map.update({layer_str + "lookup_table": embed_l.weight, layer_str + "proj_W": proj_l})
    # Transformer blocks
    for i, b in enumerate(model.layers):
        layer_str = "transformer/layer_%d/" % i
        tf_to_pt_map.update(
            {
                layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
                layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
                layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
                layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
                layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
                layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
                layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
                layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
                layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
                layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
                layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
            }
        )
    # Relative positioning biases
    if config.untie_r:
        # One (r_r, r_w) bias pair per layer; collect them into lists so a
        # single TF variable can be split across layers at load time.
        r_r_list = []
        r_w_list = []
        for b in model.layers:
            r_r_list.append(b.dec_attn.r_r_bias)
            r_w_list.append(b.dec_attn.r_w_bias)
    else:
        # Biases shared across all layers.
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
    tf_to_pt_map.update({"transformer/r_r_bias": r_r_list, "transformer/r_w_bias": r_w_list})
    return tf_to_pt_map
def load_tf_weights_in_transfo_xl(model, config, tf_path):
    """ Load tf checkpoints in a pytorch model

        Reads every variable from the TF checkpoint at ``tf_path`` and copies it
        into the matching PyTorch parameter using the name map produced by
        ``build_tf_to_pytorch_map``. Returns the (mutated) ``model``.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_to_pytorch_map(model, config)
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array
    for name, pointer in tf_to_pt_map.items():
        assert name in tf_weights
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if "kernel" in name or "proj" in name:
            # TF stores dense kernels transposed relative to torch Linear.
            array = np.transpose(array)
        if ("r_r_bias" in name or "r_w_bias" in name) and len(pointer) > 1:
            # Here we will split the TF weights
            # (untied biases: one slice of the stacked TF variable per layer)
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    # Attach both shapes to the error for easier debugging.
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            logger.info("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        # Drop the consumed weight and its optimizer slots so the final log
        # lists only variables that were never copied.
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/Adam", None)
        tf_weights.pop(name + "/Adam_1", None)
    logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
    return model
class PositionalEmbedding(nn.Module):
    """Sinusoidal positional embedding used by Transformer-XL.

    For each position in ``pos_seq`` produces a ``demb``-dimensional vector
    whose first half holds sines and second half cosines of the position
    scaled by a geometric progression of inverse frequencies.
    """

    def __init__(self, demb):
        super().__init__()
        self.demb = demb
        # One inverse frequency per pair of output channels.
        freqs = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
        self.register_buffer("inv_freq", freqs)

    def forward(self, pos_seq, bsz=None):
        # Outer product: (seq_len,) x (demb/2,) -> (seq_len, demb/2) angles.
        angles = pos_seq[:, None] * self.inv_freq[None, :]
        emb = torch.cat([angles.sin(), angles.cos()], dim=-1)
        # Insert a batch axis; broadcast across the batch when bsz is given.
        emb = emb[:, None, :]
        return emb.expand(-1, bsz, -1) if bsz is not None else emb
class PositionwiseFF(nn.Module):
    """Position-wise two-layer feed-forward sublayer with residual connection.

    Layer normalization is applied either before the MLP (``pre_lnorm=True``)
    or after adding the residual (``pre_lnorm=False``).
    """

    def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5):
        super().__init__()
        self.d_model = d_model
        self.d_inner = d_inner
        self.dropout = dropout
        # d_model -> d_inner -> d_model MLP with ReLU and dropout.
        self.CoreNet = nn.Sequential(
            nn.Linear(d_model, d_inner),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(d_inner, d_model),
            nn.Dropout(dropout),
        )
        self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
        self.pre_lnorm = pre_lnorm

    def forward(self, inp):
        if self.pre_lnorm:
            # Pre-norm: normalize first, run the MLP, add the raw input back.
            return self.CoreNet(self.layer_norm(inp)) + inp
        # Post-norm: run the MLP on the raw input, normalize the residual sum.
        return self.layer_norm(inp + self.CoreNet(inp))
class RelPartialLearnableMultiHeadAttn(nn.Module):
    """Multi-head self-attention with Transformer-XL relative positional
    encodings.

    Queries/keys/values come from a single fused linear layer (``qkv_net``);
    relative position embeddings ``r`` get their own projection (``r_net``).
    The learned biases ``r_w_bias`` and ``r_r_bias`` are added to the queries
    for the content-based and position-based score terms respectively; they
    may be shared across layers (passed in) or owned by this module.
    """
    def __init__(
        self,
        n_head,
        d_model,
        d_head,
        dropout,
        dropatt=0,
        tgt_len=None,
        ext_len=None,
        mem_len=None,
        pre_lnorm=False,
        r_r_bias=None,
        r_w_bias=None,
        output_attentions=False,
        layer_norm_epsilon=1e-5,
    ):
        super().__init__()
        self.output_attentions = output_attentions
        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout
        # Fused projection producing queries, keys and values in one matmul.
        self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
        self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
        # 1/sqrt(d_head) scaling applied to the attention scores.
        self.scale = 1 / (d_head ** 0.5)
        self.pre_lnorm = pre_lnorm
        if r_r_bias is None or r_w_bias is None:  # Biases are not shared
            self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
            self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        else:
            self.r_r_bias = r_r_bias
            self.r_w_bias = r_w_bias
        # Projection of the relative positional embeddings.
        self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
    def _rel_shift(self, x):
        # Shift scores along dim 1 so each query row is aligned with its own
        # relative offsets: zero-pad one column, reshape with dims 0/1 swapped,
        # drop the first row, and view back to the original shape.
        zero_pad_shape = (x.size(0), 1) + x.size()[2:]
        zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=1)
        x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
        x_padded = x_padded.view(*x_padded_shape)
        x = x_padded[1:].view_as(x)
        return x
    def forward(self, w, r, attn_mask=None, mems=None, head_mask=None):
        # w: input segment [qlen, bsz, d_model]; r: relative position
        # embeddings [rlen, 1, d_model]; mems: cached hidden states prepended
        # to the keys/values (queries cover only the current segment).
        qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
        if mems is not None:
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            # Only the current segment produces queries; keys/values also
            # cover the memory.
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        klen = w_head_k.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)  # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)  # qlen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)  # qlen x bsz x n_head x d_head
        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)  # qlen x n_head x d_head
        # compute attention score
        rw_head_q = w_head_q + self.r_w_bias  # qlen x bsz x n_head x d_head
        # Content-based term: biased queries against keys.
        AC = torch.einsum("ibnd,jbnd->ijbn", (rw_head_q, w_head_k))  # qlen x klen x bsz x n_head
        rr_head_q = w_head_q + self.r_r_bias
        # Position-based term: biased queries against relative embeddings,
        # then shifted into per-query alignment.
        BD = torch.einsum("ibnd,jnd->ijbn", (rr_head_q, r_head_k))  # qlen x klen x bsz x n_head
        BD = self._rel_shift(BD)
        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)
        # compute attention probability
        if attn_mask is not None and torch.sum(attn_mask).item():
            attn_mask = attn_mask == 1  # Switch to bool
            # Masked positions get a large negative score before the softmax;
            # -65000 keeps fp16 finite, -1e30 is used for fp32.
            if attn_mask.dim() == 2:
                if next(self.parameters()).dtype == torch.float16:
                    attn_score = (
                        attn_score.float().masked_fill(attn_mask[None, :, :, None], -65000).type_as(attn_score)
                    )
                else:
                    attn_score = attn_score.float().masked_fill(attn_mask[None, :, :, None], -1e30).type_as(attn_score)
            elif attn_mask.dim() == 3:
                if next(self.parameters()).dtype == torch.float16:
                    attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -65000).type_as(attn_score)
                else:
                    attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -1e30).type_as(attn_score)
        # [qlen x klen x bsz x n_head]
        # Softmax over the key dimension (dim 1 of the score layout).
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)
        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * head_mask
        # compute attention vector
        attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
        # linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            # residual connection
            outputs = [w + attn_out]
        else:
            # residual connection + layer normalization
            outputs = [self.layer_norm(w + attn_out)]
        if self.output_attentions:
            outputs.append(attn_prob)
        return outputs
class RelPartialLearnableDecoderLayer(nn.Module):
    """One Transformer-XL decoder block: relative-position multi-head
    self-attention followed by a position-wise feed-forward sublayer.
    """

    def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5, **kwargs):
        super().__init__()
        # Remaining keyword args (dropatt, mem_len, shared biases, ...) are
        # forwarded to the attention module; the FF only needs pre_lnorm.
        self.dec_attn = RelPartialLearnableMultiHeadAttn(
            n_head, d_model, d_head, dropout, layer_norm_epsilon=layer_norm_epsilon, **kwargs
        )
        self.pos_ff = PositionwiseFF(
            d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm"), layer_norm_epsilon=layer_norm_epsilon
        )

    def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None):
        ### MODIFIED ###
        # Upstream passed keyword arguments here; positional arguments are
        # used instead so that forward hooks can observe them:
        # attn_outputs = self.dec_attn(dec_inp, r, attn_mask=dec_attn_mask, mems=mems, head_mask=head_mask)
        attn_outs = self.dec_attn(dec_inp, r, dec_attn_mask, mems, head_mask)
        ### MODIFIED ###
        # First element is the attention output; the optional tail carries
        # attention probabilities when output_attentions is enabled.
        return [self.pos_ff(attn_outs[0])] + attn_outs[1:]
class AdaptiveEmbedding(nn.Module):
    """Adaptive input embedding: vocabulary buckets past each cutoff use
    progressively smaller embedding tables (shrinking by ``div_val`` per
    bucket), each projected up to ``d_proj``. Output is scaled by
    ``sqrt(d_proj)``.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.cutoffs = cutoffs + [n_token]
        self.div_val = div_val
        self.d_proj = d_proj
        self.emb_scale = d_proj ** 0.5
        self.cutoff_ends = [0] + self.cutoffs
        self.emb_layers = nn.ModuleList()
        self.emb_projs = nn.ParameterList()
        if div_val == 1:
            # Single table; a projection is only needed when sizes differ.
            self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
            if d_proj != d_embed:
                self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
        else:
            # One (table, projection) pair per cutoff bucket.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val ** i)
                self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
                self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))

    def forward(self, inp):
        if self.div_val == 1:
            # Fast path: one lookup, optional up-projection.
            embed = self.emb_layers[0](inp)
            if self.d_proj != self.d_embed:
                embed = F.linear(embed, self.emb_projs[0])
        else:
            ref = next(self.parameters())
            flat_ids = inp.reshape(-1)
            emb_flat = torch.zeros([flat_ids.size(0), self.d_proj], dtype=ref.dtype, device=ref.device)
            # Embed each cutoff bucket separately and scatter results back
            # into their original positions.
            for i, (lo, hi) in enumerate(zip(self.cutoff_ends[:-1], self.cutoff_ends[1:])):
                in_bucket = (flat_ids >= lo) & (flat_ids < hi)
                positions = in_bucket.nonzero().squeeze()
                if positions.numel() == 0:
                    continue
                bucket_ids = flat_ids.index_select(0, positions) - lo
                projected = F.linear(self.emb_layers[i](bucket_ids), self.emb_projs[i])
                emb_flat.index_copy_(0, positions, projected)
            embed = emb_flat.view(inp.size() + (self.d_proj,))
        embed.mul_(self.emb_scale)
        return embed
class TransfoXLPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    config_class = TransfoXLConfig
    pretrained_model_archive_map = TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_transfo_xl
    base_model_prefix = "transformer"

    def _init_weight(self, weight):
        # Uniform or normal initialization depending on config.init.
        if self.config.init == "uniform":
            nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
        elif self.config.init == "normal":
            nn.init.normal_(weight, 0.0, self.config.init_std)

    def _init_bias(self, bias):
        nn.init.constant_(bias, 0.0)

    def _init_weights(self, m):
        """ Initialize the weights of module ``m`` based on its class name. """
        classname = m.__class__.__name__
        # NOTE: branch order matters -- "AdaptiveEmbedding" must match before
        # the generic "Embedding" branch.
        if "Linear" in classname:
            if getattr(m, "weight", None) is not None:
                self._init_weight(m.weight)
            if getattr(m, "bias", None) is not None:
                self._init_bias(m.bias)
        elif "AdaptiveEmbedding" in classname:
            if hasattr(m, "emb_projs"):
                for proj in m.emb_projs:
                    if proj is not None:
                        nn.init.normal_(proj, 0.0, self.config.proj_init_std)
        elif "Embedding" in classname:
            if hasattr(m, "weight"):
                self._init_weight(m.weight)
        elif "ProjectedAdaptiveLogSoftmax" in classname:
            if getattr(m, "cluster_weight", None) is not None:
                self._init_weight(m.cluster_weight)
            if getattr(m, "cluster_bias", None) is not None:
                self._init_bias(m.cluster_bias)
            if hasattr(m, "out_projs"):
                for proj in m.out_projs:
                    if proj is not None:
                        nn.init.normal_(proj, 0.0, self.config.proj_init_std)
        elif "LayerNorm" in classname:
            if hasattr(m, "weight"):
                nn.init.normal_(m.weight, 1.0, self.config.init_std)
            if getattr(m, "bias", None) is not None:
                self._init_bias(m.bias)
        else:
            # Fallback for Transformer-XL specific relative-attention params.
            if hasattr(m, "r_emb"):
                self._init_weight(m.r_emb)
            if hasattr(m, "r_w_bias"):
                self._init_weight(m.r_w_bias)
            if hasattr(m, "r_r_bias"):
                self._init_weight(m.r_r_bias)
            if hasattr(m, "r_bias"):
                self._init_bias(m.r_bias)
TRANSFO_XL_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.TransfoXLConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
TRANSFO_XL_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.TransfoXLTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
given to this model should not be passed as input ids as they have already been computed.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    TRANSFO_XL_START_DOCSTRING,
)
class TransfoXLModel(TransfoXLPreTrainedModel):
    # Bare Transformer-XL encoder stack (no task head): adaptive input
    # embeddings + n_layer relative-attention decoder blocks, with a segment
    # memory (`mems`) carried between calls.
    def __init__(self, config):
        super().__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.n_token = config.vocab_size
        self.d_embed = config.d_embed
        self.d_model = config.d_model
        self.n_head = config.n_head
        self.d_head = config.d_head
        self.word_emb = AdaptiveEmbedding(
            config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
        )
        self.drop = nn.Dropout(config.dropout)
        self.n_layer = config.n_layer
        self.tgt_len = config.tgt_len
        self.mem_len = config.mem_len
        self.ext_len = config.ext_len
        self.max_klen = config.tgt_len + config.ext_len + config.mem_len
        self.attn_type = config.attn_type
        if not config.untie_r:
            # Shared relative-position biases, passed into every layer below.
            self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
            self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.layers = nn.ModuleList()
        if config.attn_type == 0:  # the default attention
            for i in range(config.n_layer):
                self.layers.append(
                    RelPartialLearnableDecoderLayer(
                        config.n_head,
                        config.d_model,
                        config.d_head,
                        config.d_inner,
                        config.dropout,
                        tgt_len=config.tgt_len,
                        ext_len=config.ext_len,
                        mem_len=config.mem_len,
                        dropatt=config.dropatt,
                        pre_lnorm=config.pre_lnorm,
                        r_w_bias=None if config.untie_r else self.r_w_bias,
                        r_r_bias=None if config.untie_r else self.r_r_bias,
                        output_attentions=self.output_attentions,
                        layer_norm_epsilon=config.layer_norm_epsilon,
                    )
                )
        else:  # learnable embeddings and absolute embeddings are not used in our pretrained checkpoints
            raise NotImplementedError  # Removed them to avoid maintaining dead code
        self.same_length = config.same_length
        self.clamp_len = config.clamp_len
        if self.attn_type == 0:  # default attention
            self.pos_emb = PositionalEmbedding(self.d_model)
        else:  # learnable embeddings and absolute embeddings
            raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
        self.init_weights()
    def get_input_embeddings(self):
        return self.word_emb
    def set_input_embeddings(self, new_embeddings):
        self.word_emb = new_embeddings
    def backward_compatible(self):
        self.sample_softmax = -1
    def reset_length(self, tgt_len, ext_len, mem_len):
        # Adjust segment/extended-context/memory lengths after construction.
        self.tgt_len = tgt_len
        self.mem_len = mem_len
        self.ext_len = ext_len
    def _prune_heads(self, heads):
        logger.info("Head pruning is not implemented for Transformer-XL model")
        pass
    def init_mems(self, bsz):
        # Allocate an all-zero memory tensor per layer (or None if disabled).
        if self.mem_len > 0:
            mems = []
            param = next(self.parameters())
            for i in range(self.n_layer):
                empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device)
                mems.append(empty)
            return mems
        else:
            return None
    def _update_mems(self, hids, mems, mlen, qlen):
        # does not deal with None
        if mems is None:
            return None
        # mems is not None
        assert len(hids) == len(mems), "len(hids) != len(mems)"
        # There are `mlen + qlen` steps that can be cached into mems
        # For the next step, the last `ext_len` of the `qlen` tokens
        # will be used as the extended context. Hence, we only cache
        # the tokens from `mlen + qlen - self.ext_len - self.mem_len`
        # to `mlen + qlen - self.ext_len`.
        with torch.no_grad():
            new_mems = []
            end_idx = mlen + max(0, qlen - 0 - self.ext_len)
            beg_idx = max(0, end_idx - self.mem_len)
            for i in range(len(hids)):
                # Concatenate old memory with the new hidden states, then keep
                # the most recent mem_len steps, detached from the graph.
                cat = torch.cat([mems[i], hids[i]], dim=0)
                new_mems.append(cat[beg_idx:end_idx].detach())
        return new_mems
    @add_start_docstrings_to_callable(TRANSFO_XL_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, mems=None, head_mask=None, inputs_embeds=None):
        r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.TransfoXLConfig`) and inputs:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the last layer of the model.
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import TransfoXLTokenizer, TransfoXLModel
        import torch
        tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
        model = TransfoXLModel.from_pretrained('transfo-xl-wt103')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states, mems = outputs[:2]
        """
        # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
        # so we transpose here from shape [bsz, len] to shape [len, bsz]
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_ids = input_ids.transpose(0, 1).contiguous()
            qlen, bsz = input_ids.size()
        elif inputs_embeds is not None:
            inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
            qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if mems is None:
            mems = self.init_mems(bsz)
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
                head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype
            )  # switch to fload if need + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer
        if inputs_embeds is not None:
            word_emb = inputs_embeds
        else:
            word_emb = self.word_emb(input_ids)
        mlen = mems[0].size(0) if mems is not None else 0
        klen = mlen + qlen
        # Build the causal attention mask over [qlen, klen]; nonzero entries
        # are masked-out positions.
        if self.same_length:
            # Every query attends to the same number of positions: mask both
            # the future (upper triangle) and too-distant past (lower triangle).
            all_ones = word_emb.new_ones((qlen, klen), dtype=torch.uint8)
            mask_len = klen - self.mem_len
            if mask_len > 0:
                mask_shift_len = qlen - mask_len
            else:
                mask_shift_len = qlen
            dec_attn_mask = (torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len))[:, :, None]  # -1
        else:
            dec_attn_mask = torch.triu(word_emb.new_ones((qlen, klen), dtype=torch.uint8), diagonal=1 + mlen)[
                :, :, None
            ]
        hids = []
        attentions = []
        if self.attn_type == 0:  # default
            # Relative positions run from klen-1 down to 0 (most distant first).
            pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype)
            if self.clamp_len > 0:
                pos_seq.clamp_(max=self.clamp_len)
            pos_emb = self.pos_emb(pos_seq)
            core_out = self.drop(word_emb)
            pos_emb = self.drop(pos_emb)
            for i, layer in enumerate(self.layers):
                # Record the pre-layer hidden state; these feed _update_mems.
                hids.append(core_out)
                mems_i = None if mems is None else mems[i]
                layer_outputs = layer(
                    core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i, head_mask=head_mask[i]
                )
                core_out = layer_outputs[0]
                if self.output_attentions:
                    attentions.append(layer_outputs[1])
        else:  # learnable embeddings and absolute embeddings
            raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
        core_out = self.drop(core_out)
        new_mems = self._update_mems(hids, mems, mlen, qlen)
        # We transpose back here to shape [bsz, len, hidden_dim]
        outputs = [core_out.transpose(0, 1).contiguous(), new_mems]
        if self.output_hidden_states:
            # Add last layer and transpose to library standard shape [bsz, len, hidden_dim]
            hids.append(core_out)
            hids = list(t.transpose(0, 1).contiguous() for t in hids)
            outputs.append(hids)
        if self.output_attentions:
            # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
            attentions = list(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
            outputs.append(attentions)
        return outputs  # last hidden state, new_mems, (all hidden states), (all attentions)
@add_start_docstrings(
    """The Transformer-XL Model with a language modeling head on top
    (adaptive softmax with weights tied to the adaptive input embeddings)""",
    TRANSFO_XL_START_DOCSTRING,
)
class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
    # TransfoXLModel plus a ProjectedAdaptiveLogSoftmax LM head whose weights
    # are (optionally) tied to the adaptive input embeddings.
    def __init__(self, config):
        super().__init__(config)
        self.transformer = TransfoXLModel(config)
        self.sample_softmax = config.sample_softmax
        assert (
            self.sample_softmax <= 0
        ), "Sampling from the softmax is not implemented yet. Please look at issue: #3310: https://github.com/huggingface/transformers/issues/3310"
        self.crit = ProjectedAdaptiveLogSoftmax(
            config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
        )
        self.init_weights()
    def tie_weights(self):
        """
        Run this to be sure output and input (adaptive) softmax weights are tied
        """
        if self.config.tie_weight:
            # Tie each adaptive-softmax output layer to the matching input
            # embedding table.
            for i in range(len(self.crit.out_layers)):
                self._tie_or_clone_weights(self.crit.out_layers[i], self.transformer.word_emb.emb_layers[i])
        if self.config.tie_projs:
            # Tie (or clone, for torchscript) the output projections to the
            # input embedding projections.
            for i, tie_proj in enumerate(self.config.tie_projs):
                if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
                    if self.config.torchscript:
                        self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone())
                    else:
                        self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
                elif tie_proj and self.config.div_val != 1:
                    if self.config.torchscript:
                        self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone())
                    else:
                        self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
    def reset_length(self, tgt_len, ext_len, mem_len):
        self.transformer.reset_length(tgt_len, ext_len, mem_len)
    def init_mems(self, bsz):
        return self.transformer.init_mems(bsz)
    @add_start_docstrings_to_callable(TRANSFO_XL_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, mems=None, head_mask=None, inputs_embeds=None, labels=None):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-100, 0, ..., config.vocab_size]``
            All labels set to ``-100`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.TransfoXLConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
            Language modeling loss.
        prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import TransfoXLTokenizer, TransfoXLLMHeadModel
        import torch
        tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
        model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        prediction_scores, mems = outputs[:2]
        """
        if input_ids is not None:
            bsz, tgt_len = input_ids.size(0), input_ids.size(1)
        elif inputs_embeds is not None:
            bsz, tgt_len = inputs_embeds.size(0), inputs_embeds.size(1)
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        transformer_outputs = self.transformer(input_ids, mems=mems, head_mask=head_mask, inputs_embeds=inputs_embeds)
        last_hidden = transformer_outputs[0]
        # Only the last tgt_len positions are scored by the LM head.
        pred_hid = last_hidden[:, -tgt_len:]
        outputs = transformer_outputs[1:]
        softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), labels)
        if labels is None:
            # Inference: per-token log-probabilities over the vocabulary.
            softmax_output = softmax_output.view(bsz, tgt_len, -1)
            outputs = [softmax_output] + outputs
        else:
            # Training: per-token losses; logits slot is None (adaptive
            # softmax skips materializing full logits for speed).
            softmax_output = softmax_output.view(bsz, tgt_len)
            outputs = [softmax_output, None] + outputs
        return outputs  # (loss), logits or None if labels is not None (speed up adaptive softmax), new_mems, (all hidden states), (all attentions)
    def get_output_embeddings(self):
        """ Double-check if you are using adaptive softmax.
        """
        if self.sample_softmax > 0:
            return self.out_layer
        else:
            return self.crit.out_layers[-1]
    def prepare_inputs_for_generation(self, input_ids, past, **model_kwargs):
        inputs = {"input_ids": input_ids}
        # if past is defined in model kwargs then use it for faster decoding
        if past:
            inputs["mems"] = past
        return inputs
lm-intervention | lm-intervention-master/transformers_modified/modeling_xlnet.py | """ A copy of transformers/modeling_xlnet.py from the Huggingface
transformers library modified so that the attention module is called with
non-keyword arguments (to make those arguments accessible to the hook).
"""
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLNet model.
"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn import functional as F
from transformers.activations import gelu_new, swish
from transformers.configuration_xlnet import XLNetConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable
from transformers.modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary
logger = logging.getLogger(__name__)
# Map from pretrained shortcut names to the S3 URLs of their PyTorch weights.
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-pytorch_model.bin",
    "xlnet-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-pytorch_model.bin",
}
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
    """Build a map from TF checkpoint variable names to PyTorch parameters.

    A map is used to keep the PyTorch model as identical to the original
    PyTorch model as possible.

    Args:
        model: XLNet PyTorch model (possibly with heads such as ``lm_loss``).
        config: the model's ``XLNetConfig``.
        tf_weights: dict of TF variable name -> array, used to check which
            optional head variables exist in the checkpoint.

    Returns:
        dict mapping TF variable names to PyTorch parameters (or to lists of
        per-layer parameters when the relative-attention biases are untied).
    """
    tf_to_pt_map = {}
    if hasattr(model, "transformer"):
        if hasattr(model, "lm_loss"):
            # We will load also the output bias
            tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias
        # NOTE: "sequnece" is misspelled on purpose — it matches the variable
        # names used inside the original TF checkpoints.
        if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights:
            # We will load also the sequence summary
            tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight
            tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias
        if (
            hasattr(model, "logits_proj")
            and config.finetuning_task is not None
            and "model/regression_{}/logit/kernel".format(config.finetuning_task) in tf_weights
        ):
            tf_to_pt_map["model/regression_{}/logit/kernel".format(config.finetuning_task)] = model.logits_proj.weight
            tf_to_pt_map["model/regression_{}/logit/bias".format(config.finetuning_task)] = model.logits_proj.bias
        # Now load the rest of the transformer
        model = model.transformer
    # Embeddings and output
    tf_to_pt_map.update(
        {
            "model/transformer/word_embedding/lookup_table": model.word_embedding.weight,
            "model/transformer/mask_emb/mask_emb": model.mask_emb,
        }
    )
    # Transformer blocks: one TF scope per layer index.
    for i, b in enumerate(model.layer):
        layer_str = "model/transformer/layer_%d/" % i
        tf_to_pt_map.update(
            {
                layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
                layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
                layer_str + "rel_attn/o/kernel": b.rel_attn.o,
                layer_str + "rel_attn/q/kernel": b.rel_attn.q,
                layer_str + "rel_attn/k/kernel": b.rel_attn.k,
                layer_str + "rel_attn/r/kernel": b.rel_attn.r,
                layer_str + "rel_attn/v/kernel": b.rel_attn.v,
                layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
                layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
                layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
                layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
                layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
                layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
            }
        )
    # Relative positioning biases
    if config.untie_r:
        # Untied: one bias set per layer -> each TF variable maps to a list.
        r_r_list = []
        r_w_list = []
        r_s_list = []
        seg_embed_list = []
        for b in model.layer:
            r_r_list.append(b.rel_attn.r_r_bias)
            r_w_list.append(b.rel_attn.r_w_bias)
            r_s_list.append(b.rel_attn.r_s_bias)
            seg_embed_list.append(b.rel_attn.seg_embed)
    else:
        # Tied: a single bias set shared across all layers.
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
        r_s_list = [model.r_s_bias]
        seg_embed_list = [model.seg_embed]
    tf_to_pt_map.update(
        {
            "model/transformer/r_r_bias": r_r_list,
            "model/transformer/r_w_bias": r_w_list,
            "model/transformer/r_s_bias": r_s_list,
            "model/transformer/seg_embed": seg_embed_list,
        }
    )
    return tf_to_pt_map
def load_tf_weights_in_xlnet(model, config, tf_path):
    """Load TF checkpoint weights from ``tf_path`` into a PyTorch ``model``.

    Requires TensorFlow to read the checkpoint. Dense-layer kernels are
    transposed to match PyTorch's (out_features, in_features) layout.
    Returns the populated ``model``.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info("Importing {}".format(name))
        if name not in tf_weights:
            logger.info("{} not in tf pre-trained weights, skipping".format(name))
            continue
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        # TF stores dense kernels as (in, out); transpose for PyTorch.
        if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name):
            logger.info("Transposing")
            array = np.transpose(array)
        if isinstance(pointer, list):
            # Here we will split the TF weights (one slice per layer for
            # untied relative-attention biases).
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            logger.info("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        # Drop the copied weight (and its Adam optimizer slots) so only the
        # leftovers are reported below.
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/Adam", None)
        tf_weights.pop(name + "/Adam_1", None)
    logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
    return model
# Feed-forward activation functions selectable via `config.ff_activation`.
ACT2FN = {"gelu": gelu_new, "relu": torch.nn.functional.relu, "swish": swish}
# XLNet uses a standard LayerNorm; the alias mirrors the original naming.
XLNetLayerNorm = nn.LayerNorm
class XLNetRelativeAttention(nn.Module):
    """Two-stream multi-head attention with relative positional encoding.

    Projections (``q``, ``k``, ``v``, ``o``, ``r``) are stored as raw
    ``nn.Parameter`` tensors rather than ``nn.Linear`` modules so their names
    match the TF checkpoint layout (see ``build_tf_xlnet_to_pytorch_map``).
    Sequence tensors use the ``[len, bsz, ...]`` layout throughout.
    """

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        if config.d_model % config.n_head != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.d_model, config.n_head)
            )
        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        # 1/sqrt(d_head) scaling applied to the merged attention scores.
        self.scale = 1 / (config.d_head ** 0.5)
        # Projection tensors, each of shape [d_model, n_head, d_head].
        self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        # Learned biases for the content (w), position (r) and segment (s) terms.
        self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))
        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.dropout)

    def prune_heads(self, heads):
        # Head pruning is not supported for XLNet.
        raise NotImplementedError

    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        x_size = x.shape
        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
        # x = x[:, 0:klen, :, :]
        x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
        return x

    @staticmethod
    def rel_shift_bnij(x, klen=-1):
        """Relative shift for scores laid out as [batch, head, i, j]."""
        x_size = x.shape
        x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
        x = x[:, :, 1:, :]
        x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
        # Note: the tensor-slice form was faster in my testing than torch.index_select
        # However, tracing doesn't like the nature of the slice, and if klen changes
        # during the run then it'll fail, whereas index_select will be fine.
        x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
        # x = x[:, :, :, :klen]
        return x

    def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None):
        """Core relative positional attention operations."""
        # content based attention score
        ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)
        # position based attention score
        bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
        bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
            ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef)
        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            # 1e30 overflows in fp16, so a smaller additive penalty is used there.
            if attn_mask.dtype == torch.float16:
                attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask)
            else:
                attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask)
        # attention probability
        attn_prob = F.softmax(attn_score, dim=3)
        attn_prob = self.dropout(attn_prob)
        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask)
        # attention output
        attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)
        if self.output_attentions:
            return attn_vec, torch.einsum("bnij->ijbn", attn_prob)
        return attn_vec

    def post_attention(self, h, attn_vec, residual=True):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o)
        attn_out = self.dropout(attn_out)
        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)
        return output

    def forward(self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None):
        """Run relative attention over the content stream ``h`` and, when not
        None, the query stream ``g`` (used for partial prediction/generation).

        Returns ``(output_h, output_g)``, plus the attention probabilities when
        ``output_attentions`` is set.
        """
        if g is not None:
            # Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h
            # content-based key head
            k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
            # content-based value head
            v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
            # position-based key head
            k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
            # h-stream
            # content-stream query head
            q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask
            )
            if self.output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h
            # post processing
            output_h = self.post_attention(h, attn_vec_h)
            # g-stream
            # query-stream query head
            q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q)
            # core attention ops
            if target_mapping is not None:
                q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask
                )
                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g
                attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
            else:
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask
                )
                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g
            # post processing
            output_g = self.post_attention(g, attn_vec_g)
            if self.output_attentions:
                attn_prob = attn_prob_h, attn_prob_g
        else:
            # Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h
            # content heads
            q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
            k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
            v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
            # positional heads
            k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask
            )
            if self.output_attentions:
                attn_vec, attn_prob = attn_vec
            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None
        outputs = (output_h, output_g)
        if self.output_attentions:
            outputs = outputs + (attn_prob,)
        return outputs
class XLNetFeedForward(nn.Module):
    """Position-wise feed-forward sub-layer with residual + layer norm."""

    def __init__(self, config):
        super().__init__()
        # Attribute names mirror the TF checkpoint layout (ff/layer_1, ...)
        # and must not be renamed.
        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.layer_1 = nn.Linear(config.d_model, config.d_inner)
        self.layer_2 = nn.Linear(config.d_inner, config.d_model)
        self.dropout = nn.Dropout(config.dropout)
        # `ff_activation` is either a key into ACT2FN or a callable.
        if isinstance(config.ff_activation, str):
            self.activation_function = ACT2FN[config.ff_activation]
        else:
            self.activation_function = config.ff_activation

    def forward(self, inp):
        """linear -> activation -> dropout -> linear -> dropout, then
        layer-normalize the residual sum with the input."""
        hidden = self.dropout(self.activation_function(self.layer_1(inp)))
        hidden = self.dropout(self.layer_2(hidden))
        return self.layer_norm(hidden + inp)
class XLNetLayer(nn.Module):
    """One transformer block: relative attention followed by feed-forward."""

    def __init__(self, config):
        super().__init__()
        self.rel_attn = XLNetRelativeAttention(config)
        self.ff = XLNetFeedForward(config)
        self.dropout = nn.Dropout(config.dropout)

    def forward(
        self, output_h, output_g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None
    ):
        ### MODIFIED ###
        # Upstream transformers calls `rel_attn` with keyword arguments
        # (original call kept below for reference):
        # outputs = self.rel_attn(
        #     output_h,
        #     output_g,
        #     attn_mask_h,
        #     attn_mask_g,
        #     r,
        #     seg_mat,
        #     mems=mems,
        #     target_mapping=target_mapping,
        #     head_mask=head_mask,
        # )
        # Here all arguments are passed positionally so that forward hooks on
        # the attention module can see (and intervene on) every argument
        # (see the module docstring).
        outputs = self.rel_attn(
            output_h,
            output_g,
            attn_mask_h,
            attn_mask_g,
            r,
            seg_mat,
            mems,
            target_mapping,
            head_mask,
        )
        ### MODIFIED ###
        output_h, output_g = outputs[:2]
        if output_g is not None:
            output_g = self.ff(output_g)
        output_h = self.ff(output_h)
        outputs = (output_h, output_g) + outputs[2:]  # Add again attentions if there are there
        return outputs
class XLNetPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    config_class = XLNetConfig
    pretrained_model_archive_map = XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_xlnet
    base_model_prefix = "transformer"

    def _init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, XLNetLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, XLNetRelativeAttention):
            # Raw projection/bias parameters are not nn.Linear modules, so the
            # first branch does not cover them; initialize each explicitly.
            for param in [
                module.q,
                module.k,
                module.v,
                module.o,
                module.r,
                module.r_r_bias,
                module.r_s_bias,
                module.r_w_bias,
                module.seg_embed,
            ]:
                param.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, XLNetModel):
            module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)
XLNET_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
XLNET_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
given to this model should not be passed as input ids as they have already been computed.
perm_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
If ``perm_mask[k, i, j] = 0``, i attend to j in batch k;
if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
If None, each token attends to all the others (full bidirectional attention).
Only used during pretraining (to define factorization order) or for sequential decoding (generation).
target_mapping (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the output tokens to use.
If ``target_mapping[k, i, j] = 1``, the i-th predict in batch k is on the j-th token.
Only used during pretraining for partial prediction or for sequential decoding (generation).
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token. The classifier token should be represented by a ``2``.
`What are token type IDs? <../glossary.html#token-type-ids>`_
input_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
Kept for compatibility with the original code base.
You can only uses one of `input_mask` and `attention_mask`
Mask values selected in ``[0, 1]``:
``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
    "The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
    XLNET_START_DOCSTRING,
)
class XLNetModel(XLNetPreTrainedModel):
    """Bare XLNet transformer (no task head).

    Internally works with tensors in the [len, bsz, ...] layout; the public
    forward() accepts and returns batch-first tensors.
    """

    def __init__(self, config):
        super().__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.output_past = config.output_past
        self.mem_len = config.mem_len
        self.reuse_len = config.reuse_len
        self.d_model = config.d_model
        self.same_length = config.same_length
        self.attn_type = config.attn_type
        self.bi_data = config.bi_data
        self.clamp_len = config.clamp_len
        self.n_layer = config.n_layer
        self.word_embedding = nn.Embedding(config.vocab_size, config.d_model)
        # Embedding used for masked (to-be-predicted) positions in the g-stream.
        self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
        self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
        self.dropout = nn.Dropout(config.dropout)
        self.init_weights()

    def get_input_embeddings(self):
        return self.word_embedding

    def set_input_embeddings(self, new_embeddings):
        self.word_embedding = new_embeddings

    def _prune_heads(self, heads_to_prune):
        # Head pruning is not supported for XLNet.
        raise NotImplementedError

    def create_mask(self, qlen, mlen):
        """
        Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.
        Args:
            qlen: Sequence length
            mlen: Mask length
        ::
                  same_length=False:      same_length=True:
                  <mlen > <  qlen >       <mlen > <  qlen >
               ^ [0 0 0 0 0 1 1 1 1]     [0 0 0 0 0 1 1 1 1]
                 [0 0 0 0 0 0 1 1 1]     [1 0 0 0 0 0 1 1 1]
            qlen [0 0 0 0 0 0 0 1 1]     [1 1 0 0 0 0 0 1 1]
                 [0 0 0 0 0 0 0 0 1]     [1 1 1 0 0 0 0 0 1]
               v [0 0 0 0 0 0 0 0 0]     [1 1 1 1 0 0 0 0 0]
        """
        attn_mask = torch.ones([qlen, qlen])
        mask_up = torch.triu(attn_mask, diagonal=1)
        attn_mask_pad = torch.zeros([qlen, mlen])
        ret = torch.cat([attn_mask_pad, mask_up], dim=1)
        if self.same_length:
            # Additionally mask positions further back than qlen (lower triangle).
            mask_lo = torch.tril(attn_mask, diagonal=-1)
            ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)
        ret = ret.to(next(self.parameters()))
        return ret

    def cache_mem(self, curr_out, prev_mem):
        # cache hidden states into memory, keeping at most `mem_len` steps.
        if self.reuse_len is not None and self.reuse_len > 0:
            curr_out = curr_out[: self.reuse_len]
        if prev_mem is None:
            new_mem = curr_out[-self.mem_len :]
        else:
            new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len :]
        return new_mem.detach()

    @staticmethod
    def positional_embedding(pos_seq, inv_freq, bsz=None):
        # Sinusoidal embedding of shape [len, 1, d_model], optionally expanded
        # over the batch dimension.
        sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq)
        pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
        pos_emb = pos_emb[:, None, :]
        if bsz is not None:
            pos_emb = pos_emb.expand(-1, bsz, -1)
        return pos_emb

    def relative_positional_encoding(self, qlen, klen, bsz=None):
        # create relative positional encoding.
        freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
        inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))
        if self.attn_type == "bi":
            # beg, end = klen - 1, -qlen
            beg, end = klen, -qlen
        elif self.attn_type == "uni":
            # beg, end = klen - 1, -1
            beg, end = klen, -1
        else:
            raise ValueError("Unknown `attn_type` {}.".format(self.attn_type))
        if self.bi_data:
            # Bidirectional data: concatenate forward and backward encodings.
            fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
            bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)
            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
                bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
            if bsz is not None:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
            else:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
            pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
        else:
            fwd_pos_seq = torch.arange(beg, end, -1.0)
            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
            pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
        pos_emb = pos_emb.to(next(self.parameters()))
        return pos_emb

    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
    ):
        r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`):
            Sequence of hidden-states at the last layer of the model.
            `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import XLNetTokenizer, XLNetModel
        import torch
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetModel.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=False)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
        """
        # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
        # but we want a unified interface in the library with the batch size on the first dimension
        # so we move here the first dimension (batch) to the end
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_ids = input_ids.transpose(0, 1).contiguous()
            qlen, bsz = input_ids.shape[0], input_ids.shape[1]
        elif inputs_embeds is not None:
            inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
            qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
        input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
        attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
        perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
        target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None
        mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
        klen = mlen + qlen
        dtype_float = next(self.parameters()).dtype
        device = next(self.parameters()).device
        # Attention mask
        # causal attention mask
        if self.attn_type == "uni":
            attn_mask = self.create_mask(qlen, mlen)
            attn_mask = attn_mask[:, :, None, None]
        elif self.attn_type == "bi":
            attn_mask = None
        else:
            raise ValueError("Unsupported attention type: {}".format(self.attn_type))
        # data mask: input mask & perm mask
        # FIX: the second line of this message used to be a dangling string
        # literal (a no-op statement), so the assert message was truncated;
        # it is now part of the assert (and "compatbility" typo corrected).
        assert input_mask is None or attention_mask is None, (
            "You can only use one of input_mask (uses 1 for padding) "
            "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one."
        )
        if input_mask is None and attention_mask is not None:
            input_mask = 1.0 - attention_mask
        if input_mask is not None and perm_mask is not None:
            data_mask = input_mask[None] + perm_mask
        elif input_mask is not None and perm_mask is None:
            data_mask = input_mask[None]
        elif input_mask is None and perm_mask is not None:
            data_mask = perm_mask
        else:
            data_mask = None
        if data_mask is not None:
            # all mems can be attended to
            if mlen > 0:
                mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
                data_mask = torch.cat([mems_mask, data_mask], dim=1)
            if attn_mask is None:
                attn_mask = data_mask[:, :, :, None]
            else:
                attn_mask += data_mask[:, :, :, None]
        if attn_mask is not None:
            attn_mask = (attn_mask > 0).to(dtype_float)
        if attn_mask is not None:
            # h-stream mask additionally blocks attention to the query position
            # itself (the -eye term), unlike the g-stream mask.
            non_tgt_mask = -torch.eye(qlen).to(attn_mask)
            if mlen > 0:
                non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
            non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
        else:
            non_tgt_mask = None
        # Word embeddings and prepare h & g hidden states
        if inputs_embeds is not None:
            word_emb_k = inputs_embeds
        else:
            word_emb_k = self.word_embedding(input_ids)
        output_h = self.dropout(word_emb_k)
        if target_mapping is not None:
            word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
            # else:  # We removed the inp_q input which was same as target mapping
            #     inp_q_ext = inp_q[:, :, None]
            #     word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
            output_g = self.dropout(word_emb_q)
        else:
            output_g = None
        # Segment embedding
        if token_type_ids is not None:
            # Convert `token_type_ids` to one-hot `seg_mat`
            if mlen > 0:
                mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
                cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
            else:
                cat_ids = token_type_ids
            # `1` indicates not in the same segment [qlen x klen x bsz]
            seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
            seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
        else:
            seg_mat = None
        # Positional encoding
        pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
        pos_emb = self.dropout(pos_emb)
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
                head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer
        new_mems = ()
        if mems is None:
            mems = [None] * len(self.layer)
        attentions = []
        hidden_states = []
        for i, layer_module in enumerate(self.layer):
            if self.mem_len is not None and self.mem_len > 0 and self.output_past:
                # cache new mems
                new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
            if self.output_hidden_states:
                hidden_states.append((output_h, output_g) if output_g is not None else output_h)
            outputs = layer_module(
                output_h,
                output_g,
                attn_mask_h=non_tgt_mask,
                attn_mask_g=attn_mask,
                r=pos_emb,
                seg_mat=seg_mat,
                mems=mems[i],
                target_mapping=target_mapping,
                head_mask=head_mask[i],
            )
            output_h, output_g = outputs[:2]
            if self.output_attentions:
                attentions.append(outputs[2])
        # Add last hidden state
        if self.output_hidden_states:
            hidden_states.append((output_h, output_g) if output_g is not None else output_h)
        output = self.dropout(output_g if output_g is not None else output_h)
        # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
        outputs = (output.permute(1, 0, 2).contiguous(),)
        if self.mem_len is not None and self.mem_len > 0 and self.output_past:
            outputs = outputs + (new_mems,)
        if self.output_hidden_states:
            if output_g is not None:
                hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
            else:
                hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
            outputs = outputs + (hidden_states,)
        if self.output_attentions:
            if target_mapping is not None:
                # when target_mapping is provided, there are 2-tuple of attentions
                attentions = tuple(
                    tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions
                )
            else:
                attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
            outputs = outputs + (attentions,)
        return outputs  # outputs, (new_mems), (hidden_states), (attentions)
@add_start_docstrings(
    """XLNet Model with a language modeling head on top
    (linear layer with weights tied to the input embeddings). """,
    XLNET_START_DOCSTRING,
)
class XLNetLMHeadModel(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.attn_type = config.attn_type
        self.same_length = config.same_length
        self.transformer = XLNetModel(config)
        # Output projection: its weight is tied to the input embeddings (via
        # get_output_embeddings), but it keeps its own bias term.
        self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True)
        self.init_weights()

    def get_output_embeddings(self):
        """Return the LM head so the base class can tie its weight to the input embeddings."""
        return self.lm_loss

    def prepare_inputs_for_generation(self, input_ids, past, **model_kwargs):
        """Assemble XLNet-specific inputs for autoregressive generation.

        A dummy token is appended (XLNet predicts through a masked target
        position), a permutation mask hides that last position from every
        token, and a target mapping selects only the last position for
        prediction. When ``past`` (cached mems) is provided it is forwarded
        for faster decoding.
        """
        # Add dummy token at the end (no attention on this one)
        effective_batch_size = input_ids.shape[0]
        dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device)
        input_ids = torch.cat([input_ids, dummy_token], dim=1)
        # Build permutation mask so that previous tokens don't see last token
        sequence_length = input_ids.shape[1]
        perm_mask = torch.zeros(
            (effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device
        )
        perm_mask[:, :, -1] = 1.0
        # We'll only predict the last token
        target_mapping = torch.zeros(
            (effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device
        )
        # Bug fix: mark the last position for EVERY batch row. The previous
        # `target_mapping[0, 0, -1] = 1.0` only set batch row 0, leaving rows
        # 1..B-1 with an all-zero mapping (i.e. no prediction) whenever
        # generation is run with batch size > 1.
        target_mapping[:, 0, -1] = 1.0
        inputs = {"input_ids": input_ids, "perm_mask": perm_mask, "target_mapping": target_mapping}
        # if past is defined in model kwargs then use it for faster decoding
        if past:
            inputs["mems"] = past
        return inputs

    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_predict)`, `optional`, defaults to :obj:`None`):
            Labels for masked language modeling.
            `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
            The labels should correspond to the masked input words that should be predicted and depends on `target_mapping`. Note in order to perform standard auto-regressive language modeling a `<mask>` token has to be added to the `input_ids` (see `prepare_inputs_for_generation` fn and examples below)
            Indices are selected in ``[-100, 0, ..., config.vocab_size]``
            All labels set to ``-100`` are ignored, the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``

        Return:
            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
            loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided):
                Language modeling loss.
            prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`):
                Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
                `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
            mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
                Contains pre-computed hidden-states (key and values in the attention blocks).
                Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
                should not be passed as input ids as they have already been computed.
            hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
                Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
                of shape :obj:`(batch_size, sequence_length, hidden_size)`.
                Hidden-states of the model at the output of each layer plus the initial embedding outputs.
            attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
                Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
                :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
                Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
                heads.

        Examples::

            from transformers import XLNetTokenizer, XLNetLMHeadModel
            import torch

            tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
            model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased')

            # Predict the masked token using bi-directional context.
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0)
            perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
            perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
            target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)
            target_mapping[0, 0, -1] = 1.0  # Predict the last (masked) token
            outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
            next_token_logits = outputs[0]

            # The same setup with `labels` trains by standard auto-regressive language modeling.
            labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
            outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
            loss, next_token_logits = outputs[:2]
        """
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Project the transformer output onto the vocabulary.
        logits = self.lm_loss(transformer_outputs[0])
        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it
        if labels is not None:
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
    """XLNet Model with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    XLNET_START_DOCSTRING,
)
class XLNetForSequenceClassification(XLNetPreTrainedModel):
    """XLNet with a sequence-level classification/regression head.

    The transformer's last hidden state is pooled by ``SequenceSummary`` and
    projected to ``config.num_labels`` logits. With a single label a
    regression (MSE) loss is computed, otherwise cross-entropy.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = XLNetModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.logits_proj = nn.Linear(config.d_model, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
            If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
            If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).

        Return:
            Tuple ``(loss?, logits, mems?, hidden_states?, attentions?)``:
            ``logits`` has shape ``(batch_size, config.num_labels)``; ``loss``
            is present only when ``labels`` is given; the trailing elements
            only when the corresponding ``mem_len`` / ``config.output_*``
            options are enabled.
        """
        base_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Pool the sequence representation, then project to label space.
        pooled = self.sequence_summary(base_outputs[0])
        logits = self.logits_proj(pooled)
        # Keep mems / hidden states / attentions behind the logits.
        results = (logits,) + base_outputs[1:]
        if labels is None:
            return results
        if self.num_labels == 1:
            # Single output unit => regression objective.
            loss = MSELoss()(logits.view(-1), labels.view(-1))
        else:
            loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
        return (loss,) + results  # (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
    """XLNet Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    XLNET_START_DOCSTRING,
)
class XLNetForTokenClassification(XLNetPreTrainedModel):
    # Per-token classifier: one linear layer applied to every position of the
    # transformer's last hidden state.
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = XLNetModel(config)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.

        Return:
            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
            loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
                Classification loss.
            logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
                Classification scores for each token (before SoftMax).
            mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
                Contains pre-computed hidden-states (key and values in the attention blocks).
                Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
                should not be passed as input ids as they have already been computed.
            hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
                Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
                of shape :obj:`(batch_size, sequence_length, hidden_size)`.
                Hidden-states of the model at the output of each layer plus the initial embedding outputs.
            attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
                Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
                :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
                Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
                heads.

        Examples::

            from transformers import XLNetTokenizer, XLNetForTokenClassification
            import torch

            tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
            model = XLNetForTokenClassification.from_pretrained('xlnet-large-cased')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
            labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0)  # Batch size 1
            outputs = model(input_ids, labels=labels)
            scores = outputs[0]
        """
        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)
        outputs = (logits,) + outputs[1:]  # Keep mems, hidden states, attentions if there are in it
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                # Replace labels at padded positions with ignore_index so they
                # do not contribute to the loss.
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
    """XLNet Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RACE/SWAG tasks. """,
    XLNET_START_DOCSTRING,
)
class XLNetForMultipleChoice(XLNetPreTrainedModel):
    """XLNet with a multiple-choice head.

    Inputs arrive as ``(batch, num_choices, seq_len)``. The choice dimension
    is folded into the batch dimension for the transformer pass, each flattened
    sequence is pooled and scored, and the scores are reshaped back to
    ``(batch, num_choices)``.
    """

    def __init__(self, config):
        super().__init__(config)
        self.transformer = XLNetModel(config)
        self.sequence_summary = SequenceSummary(config)
        # One score per (example, choice); the softmax over choices lives in the loss.
        self.logits_proj = nn.Linear(config.d_model, 1)
        self.init_weights()

    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        input_mask=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        labels=None,
        head_mask=None,
        inputs_embeds=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Index of the correct choice for each example, in ``[0, ..., num_choices - 1]``
            where `num_choices` is the size of the second dimension of the input tensors.

        Return:
            Tuple ``(loss?, classification_scores, mems?, hidden_states?, attentions?)``:
            ``classification_scores`` has shape ``(batch_size, num_choices)``;
            ``loss`` is present only when ``labels`` is given.
        """
        num_choices = input_ids.shape[1]

        def _fold_choices(tensor):
            # Fold the choice dimension into the batch dimension; pass None through.
            return tensor.view(-1, tensor.size(-1)) if tensor is not None else None

        transformer_outputs = self.transformer(
            _fold_choices(input_ids),
            token_type_ids=_fold_choices(token_type_ids),
            input_mask=_fold_choices(input_mask),
            attention_mask=_fold_choices(attention_mask),
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        pooled = self.sequence_summary(transformer_outputs[0])
        choice_scores = self.logits_proj(pooled).view(-1, num_choices)
        # Keep mems / hidden states / attentions behind the scores.
        results = (choice_scores,) + transformer_outputs[1:]
        if labels is not None:
            loss = CrossEntropyLoss()(choice_scores, labels.view(-1))
            results = (loss,) + results
        return results  # (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
    """XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel):
    """XLNet with a BERT-style extractive QA head.

    A single linear layer produces two logits per token (span start / span
    end). Training loss is the mean of the start- and end-position
    cross-entropies; positions outside the sequence are clamped to an
    ignored index.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = XLNetModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.

        Return:
            Tuple ``(loss?, start_logits, end_logits, ...)``: ``start_logits``
            and ``end_logits`` each have shape ``(batch_size, sequence_length)``;
            ``loss`` is present only when both position labels are given.
        """
        base_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Two logits per token: column 0 = span start, column 1 = span end.
        span_logits = self.qa_outputs(base_outputs[0])
        start_logits, end_logits = span_logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        # NOTE(review): this keeps base_outputs[2:], which drops
        # base_outputs[1] (mems) unlike the sibling heads' [1:] slice —
        # behavior preserved from the original; confirm it is intended.
        results = (start_logits, end_logits,) + base_outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, strip the extra dimension added by batch splitting.
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1)
            # Positions outside the sequence are clamped onto an index the loss ignores.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
            results = (total_loss,) + results
        return results  # (loss), start_logits, end_logits, (mems), (hidden_states), (attentions)
@add_start_docstrings(
    """XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnswering(XLNetPreTrainedModel):
    # Beam-search style QA head (as in the original XLNet implementation):
    # start logits are computed for every position; end logits are conditioned
    # on the start position (ground truth at train time, top-k beams at
    # inference); a separate pooler scores answerability (SQuAD 2.0).
    def __init__(self, config):
        super().__init__(config)
        self.start_n_top = config.start_n_top  # beam width over start positions
        self.end_n_top = config.end_n_top  # beam width over end positions per start
        self.transformer = XLNetModel(config)
        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)
        self.init_weights()

    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        is_impossible=None,
        cls_index=None,
        p_mask=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        is_impossible (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
            Labels whether a question has an answer or no answer (SQuAD 2.0)
        cls_index (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
        p_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
            Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).
            1.0 means token should be masked. 0.0 mean token is not masked.

        Returns:
            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
            loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
                Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
            start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
                Log probabilities for the top config.start_n_top start token possibilities (beam-search).
            start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
                Indices for the top config.start_n_top start token possibilities (beam-search).
            end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
                Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
            end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
                Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
            cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
                Log probabilities for the ``is_impossible`` label of the answers.
            mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
                Contains pre-computed hidden-states (key and values in the attention blocks).
                Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
                should not be passed as input ids as they have already been computed.
            hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
                Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
                of shape :obj:`(batch_size, sequence_length, hidden_size)`.
                Hidden-states of the model at the output of each layer plus the initial embedding outputs.
            attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
                Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
                :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
                Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
                heads.

        Examples::

            from transformers import XLNetTokenizer, XLNetForQuestionAnswering
            import torch

            tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
            model = XLNetForQuestionAnswering.from_pretrained('xlnet-base-cased')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
            start_positions = torch.tensor([1])
            end_positions = torch.tensor([3])
            outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
            loss = outputs[0]
        """
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        hidden_states = transformer_outputs[0]
        # Start logits depend only on the hidden states (and the optional p_mask).
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)
        outputs = transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)
            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)
                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5
            outputs = (total_loss,) + outputs
        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            start_log_probs = F.softmax(start_logits, dim=-1)  # shape (bsz, slen)
            # Keep the top `start_n_top` candidate start positions.
            start_top_log_probs, start_top_index = torch.topk(
                start_log_probs, self.start_n_top, dim=-1
            )  # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz)  # shape (bsz, start_n_top, hsz)
            # Gather the hidden state at each candidate start position, then
            # broadcast it against every possible end position.
            start_states = torch.gather(hidden_states, -2, start_top_index_exp)  # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1)  # shape (bsz, slen, start_n_top, hsz)
            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
                start_states
            )  # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            # End logits are conditioned on each candidate start state.
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = F.softmax(end_logits, dim=1)  # shape (bsz, slen, start_n_top)
            end_top_log_probs, end_top_index = torch.topk(
                end_log_probs, self.end_n_top, dim=1
            )  # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
            # Soft start representation: probability-weighted sum of hidden
            # states, used only by the answerability classifier.
            start_states = torch.einsum(
                "blh,bl->bh", hidden_states, start_log_probs
            )  # get the representation of START as weighted sum of hidden states
            cls_logits = self.answer_class(
                hidden_states, start_states=start_states, cls_index=cls_index
            )  # Shape (batch size,): one single `cls_logits` for each sample
            outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
        # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
        # or (if labels are provided) (total_loss,)
        return outputs
FPI | FPI-master/self_sup_task.py | import numpy as np
import tensorflow as tf
to_categorical = tf.keras.utils.to_categorical
'''
def to_categorical(y,num_classes):
onehot = np.zeros((len(y), num_classes))
onehot[np.arange(len(y)),y] = 1
return onehot
'''
def create_interp_mask(ima, patch_center, patch_width, patch_interp):
    """Build a per-frame mask holding each patch's interpolation factor.

    Args:
        ima: reference array of shape (frames, H, W, ...); only its shape is used.
        patch_center: per-frame (row, col) patch centres, shape (frames, 2).
        patch_width: per-frame half-width of the square patch, shape (frames,).
        patch_interp: per-frame value written inside the patch.

    Returns:
        Array shaped like ``ima``: zero everywhere except inside each frame's
        (border-clipped) patch, which holds ``patch_interp[frame]``.
    """
    shape = np.shape(ima)
    mask = np.zeros_like(ima)
    for frame, (center, half_w, factor) in enumerate(zip(patch_center, patch_width, patch_interp)):
        # Clip the patch corners so the slice stays within the spatial dims.
        lo = np.clip(center - half_w, 0, shape[1:3])
        hi = np.clip(center + half_w, 0, shape[1:3])
        mask[frame, lo[0]:hi[0], lo[1]:hi[1]] = factor
    return mask
def patch_ex(ima1, ima2, num_classes=None, core_percent=0.8, tolerance=None):
    """Swap a random patch between two image batches with a random blend factor (FPI).

    Args:
        ima1, ima2: two batches of equal shape (frames, H, W, C).
        num_classes: if None, blend factors are continuous in [0.05, 0.95] and a
            continuous per-pixel label is returned; otherwise factors come from
            ``num_classes - 1`` discrete levels and the label is one-hot encoded.
        core_percent: fraction of the image in which patch centres may fall.
        tolerance: if set, a pixel only counts as anomalous when the two source
            patches differ by more than this quantisation step.

    Returns:
        ((patchex1, label), (patchex2, label)): the two blended batches plus the
        shared per-pixel anomaly label.
    """
    dims = np.array(np.shape(ima1))
    core = core_percent * dims  # width of core region
    core_offset = (1 - core_percent) * dims / 2  # offset to center core
    min_width = np.round(0.05 * dims[1])
    max_width = np.round(0.2 * dims[1])
    center_dim1 = np.random.randint(core_offset[1], core_offset[1] + core[1], size=dims[0])
    center_dim2 = np.random.randint(core_offset[2], core_offset[2] + core[2], size=dims[0])
    patch_center = np.stack((center_dim1, center_dim2), 1)
    patch_width = np.random.randint(min_width, max_width, size=dims[0])
    if num_classes is None:
        # interpolation factor between 5 and 95%
        patch_interp = np.random.uniform(0.05, 0.95, size=dims[0])
        # Bug fix: 'offset' previously still held the per-axis geometric array in
        # this branch, breaking 'patch_interp + offset' below. Continuous factors
        # are strictly positive already, so no separation offset is needed.
        offset = 0.0
    else:
        # interpolation between 0 and 1, num_classes options
        # (subtract 1 to exclude the default/background class)
        patch_interp = np.random.choice(num_classes - 1, size=dims[0]) / (num_classes - 1)
        offset = 1E-5  # offset to separate 0-factor patches from background
    mask_i = create_interp_mask(ima1, patch_center, patch_width, patch_interp + offset)
    patch_mask = np.clip(np.ceil(mask_i), 0, 1)  # all patches set to 1
    mask_i = mask_i - patch_mask * offset  # get rid of offset
    mask_inv = patch_mask - mask_i
    zero_mask = 1 - patch_mask  # zero in the region of the patch
    patch_set1 = mask_i * ima1 + mask_inv * ima2  # interpolate between patches
    patch_set2 = mask_inv * ima1 + mask_i * ima2
    patchex1 = ima1 * zero_mask + patch_set1
    patchex2 = ima2 * zero_mask + patch_set2
    if tolerance:
        # Quantise both source patches; only differences beyond tolerance count.
        valid_label = np.any(
            np.floor(patch_mask * ima1 * tolerance ** -1) * tolerance != \
            np.floor(patch_mask * ima2 * tolerance ** -1) * tolerance,
            axis=3)
    else:
        valid_label = np.any(patch_mask * ima1 != patch_mask * ima2, axis=3)
    label = valid_label[..., None] * mask_inv
    if num_classes is not None:
        label = label * (num_classes - 1)
        label = to_categorical(label, num_classes)
    return (patchex1, label), (patchex2, label)
| 2,950 | 34.987805 | 119 | py |
FPI | FPI-master/fpiSubmit.py | import numpy as np
import itertools
import copy
from datetime import datetime
import os
import pickle
from sklearn.metrics import average_precision_score
import tensorflow as tf
import readData
import self_sup_task
from models.wide_residual_network import create_wide_residual_network_selfsup
from scipy.signal import savgol_filter
from utils import save_roc_pr_curve_data
import gc
def train_folder(input_dir, output_dir, mode, data):
    """Entry point: fit a self-supervised model on every scan in ``input_dir``."""
    # Let TensorFlow grow GPU memory on demand instead of reserving it all.
    gpus = tf.config.list_physical_devices('GPU')
    tf.config.experimental.set_memory_growth(gpus[0], True)
    frame = get_data_frame(data, input_dir, shuffle_order=True)
    model = get_mdl(data, frame, restore=False)
    submit_train(model, frame, output_dir, data)
    return
def predict_folder(input_dir, output_dir, mode, data):
    """Entry point: restore a trained model and score every scan in ``input_dir``."""
    # Let TensorFlow grow GPU memory on demand instead of reserving it all.
    gpus = tf.config.list_physical_devices('GPU')
    tf.config.experimental.set_memory_growth(gpus[0], True)
    frame = get_data_frame(data, input_dir, shuffle_order=False)
    model = get_mdl(data, frame, restore=True)
    submit_test(model, frame, output_dir, mode)
    return
def get_data_frame(data, input_dir, shuffle_order=False, load_labels=False):
    """Create a readData.data_frame for ``data`` and load every file in ``input_dir``.

    'brain' data uses 256^3 volumes (primary axis 2); 'abdom' uses 512^3
    volumes (primary axis 1). Any other tag raises ValueError.
    """
    if 'brain' in data:
        batch_dim, primary_axis = [256, 256, 256, 1], 2
    elif 'abdom' in data:
        batch_dim, primary_axis = [512, 512, 512, 1], 1
    else:
        raise ValueError("data type not correctly defined. Either choose 'brain','abdom', or add a new definition")
    frame = readData.data_frame(batch_dim, primary_axis)
    frame.load_data(os.listdir(input_dir), input_dir,
                    shuffle_order=shuffle_order, load_labels=load_labels)
    return frame
def get_mdl(data, data_frame, restore=False):
    """Build (or restore from checkpoint) the wide-residual network for ``data``.

    'brain' uses a WRN-16-4 decoder net with a single sigmoid output; 'abdom'
    uses a deeper WRN-19-4 variant with 5 output classes.
    """
    if 'brain' in data:
        n, k = 16, 4  # network size
        net_f = 'create_wide_residual_network_dec'
        n_classes = 1
        model_dir = '/workspace/restore_dir/brain/'
    elif 'abdom' in data:
        n, k = 19, 4  # network size
        net_f = 'create_wide_residual_network_decdeeper'
        n_classes = 5
        model_dir = '/workspace/restore_dir/abdom/'
    else:
        raise ValueError("data type not correctly defined. Either choose 'brain','abdom', or add a new definition")
    if not restore:
        # build a fresh model
        return create_wide_residual_network_selfsup(data_frame.batch_dim[1:],
                                                    n_classes, n, k, net_f=net_f)
    # Restore: load the first checkpoint whose name contains 'weights'.
    weight_file = [fn for fn in os.listdir(model_dir) if 'weights' in fn][0]
    model_path = os.path.join(model_dir, weight_file)
    print(model_path)
    return tf.keras.models.load_model(model_path)
@tf.function
def train_step(mdl, x, y):
    """One optimisation step: forward pass, loss, backprop, metric update."""
    with tf.GradientTape() as tape:
        preds = mdl(x, training=True)
        loss = mdl.compiled_loss(y, preds)
    grads = tape.gradient(loss, mdl.trainable_weights)
    mdl.optimizer.apply_gradients(zip(grads, mdl.trainable_weights))
    mdl.compiled_metrics.update_state(y, preds)
    return loss
@tf.function
def test_step(mdl, x, y):
    """Inference-mode forward pass; returns the loss without touching weights."""
    preds = mdl(x, training=False)
    return mdl.compiled_loss(y, preds)
@tf.function
def pred_step(mdl, x):
    """Inference-mode forward pass returning the raw model output."""
    return mdl(x, training=False)
def grouped(iterable, n):
    """Yield successive non-overlapping n-tuples; a trailing partial group is dropped."""
    it = iter(iterable)
    # zip pulls from the same iterator n times per output tuple.
    return zip(*(it for _ in range(n)))
def submit_train(mdl,data_frame,output_dir,data,epochs=50,cyclic_epochs=0,save_name='selfsup_mdl',training_batch_size=32):
    """Train ``mdl`` with the FPI self-supervised patch-interpolation task.

    Args:
        mdl: compiled Keras model; its final channel count selects continuous
            (1 channel) vs discrete (>1 channels) interpolation labels.
        data_frame: loaded data_frame whose ``tf_dataset`` yields one subject per batch.
        output_dir: directory that checkpoints are written to via ``save_model``.
        data: dataset tag ('brain'/'abdom'); selects patch-extraction arguments.
        epochs: number of normal training epochs.
        cyclic_epochs: extra epochs intended to use a cyclic learning rate.
        save_name: checkpoint file-name stem.
        training_batch_size: slice mini-batch size drawn from each subject pair.
    """
    print('training start: {}'.format(datetime.now().strftime('%Y-%m-%d-%H%M')))
    # A single output channel means continuous interpolation targets.
    num_classes = mdl.output_shape[-1]
    num_classes = None if num_classes <= 1 else num_classes
    fpi_args = {'num_classes':num_classes,
                'core_percent':0.5 if 'brain' in data else 0.8,
                'tolerance': None if 'brain' in data else 1E-3
                }
    elem_in_epoch = len(data_frame.file_list)
    if cyclic_epochs>0:
        # Triangular learning-rate schedule: linear ramp up then back down.
        half_cycle_len = elem_in_epoch//4
        lr_min = 1E-4
        lr_max = 1E-1
        half1 = np.linspace(lr_min,lr_max,half_cycle_len)
        half2 = np.linspace(lr_max,lr_min,half_cycle_len)
        lr_cycle = np.concatenate((half1,half2),0)
    for epoch_i in range(epochs+cyclic_epochs):
        # NOTE(review): 'elem_i' is never assigned anywhere in this function, so
        # entering this branch (epoch_i > epochs) raises NameError — confirm the
        # intended per-batch counter before running with cyclic_epochs > 0.
        if epoch_i>epochs and elem_i < len(lr_cycle):
            #cyclic training portion, adjust learning rate
            tf.keras.backend.set_value(mdl.optimizer.lr, lr_cycle[elem_i])
        #get subjects in pairs for mixing
        for batch_in,batch_in2 in grouped(data_frame.tf_dataset,2):
            #apply fpi on batch
            pex1,pex2 = self_sup_task.patch_ex(batch_in,batch_in2,**fpi_args)
            ind_sampler = index_sampling(len(pex1[0]))#randomize slices in batch
            for _ in range(len(pex1[0])//training_batch_size):
                cur_inds = ind_sampler.get_inds(training_batch_size)
                train_step(mdl,tf.gather(pex1[0],cur_inds),tf.gather(pex1[1],cur_inds))
                train_step(mdl,tf.gather(pex2[0],cur_inds),tf.gather(pex2[1],cur_inds))
        print('epoch {}: {}'.format(str(epoch_i),datetime.now().strftime('%Y-%m-%d-%H%M')))
        #measure loss
        # The loop below immediately breaks: it only grabs the first subject
        # pair of the dataset as a loss probe for checkpoint selection.
        for batch_in,batch_in2 in grouped(data_frame.tf_dataset,2):
            break
        pex1,pex2 = self_sup_task.patch_ex(batch_in,batch_in2,**fpi_args)
        avg_loss = []
        ind_sampler = index_sampling(len(pex1[0]))#randomize slices in batch
        for _ in range(len(pex1[0])//training_batch_size):
            cur_inds = ind_sampler.get_inds(training_batch_size)
            avg_loss.append(test_step(mdl,tf.gather(pex1[0],cur_inds),tf.gather(pex1[1],cur_inds)))
            avg_loss.append(test_step(mdl,tf.gather(pex2[0],cur_inds),tf.gather(pex2[1],cur_inds)))
        avg_loss = np.mean(avg_loss)
        print('Avg loss: {}'.format(avg_loss))
        if epoch_i == 0:
            best_loss = avg_loss
        elif avg_loss < best_loss:
            best_loss = avg_loss
            print('new best loss')
            save_model(mdl,output_dir,save_name+'_bestLoss',time_stamp=False)
        if epoch_i % 10 == 0 or epoch_i>epochs:
            #save every 10 epochs or every epoch in cyclic mode
            save_model(mdl,output_dir,save_name)
    #save final model
    save_model(mdl,output_dir,save_name+'_final')
    return
def submit_test(mdl,data_frame,output_dir,mode,batch_size=1,save_name='selfsup_mdl'):
    """Predict anomaly maps for every loaded subject and write the requested outputs.

    Args:
        mdl: trained Keras model.
        data_frame: data_frame whose ``tf_dataset`` yields one subject per batch.
        output_dir: destination for .txt sample scores and .nii pixel maps.
        mode: string containing 'sample' and/or 'pixel' selecting the outputs.
        batch_size: number of slices fed through the model per forward pass.
        save_name: unused here; kept for interface compatibility.
    """
    print('testing start: {}'.format(datetime.now().strftime('%Y-%m-%d-%H%M')))
    nii_file = 0
    for batch_in in data_frame.tf_dataset:
        # predict for subject, batch_size slices at a time.
        # Bug fix: the buffer previously used the input's channel count; allocate
        # with the model's output channels so multi-class predictions fit.
        pred = np.zeros(np.shape(batch_in)[:-1] + (mdl.output_shape[-1],))
        for ind in range(len(batch_in)//batch_size):
            # Bug fix: slices were indexed [ind:(ind+1)*batch_size], which is only
            # correct for batch_size == 1; use explicit lo/hi bounds instead.
            lo = ind * batch_size
            hi = (ind + 1) * batch_size
            pred[lo:hi] = pred_step(mdl, batch_in[lo:hi])
        output_chan = np.shape(pred)[-1]
        if output_chan > 1:
            # Collapse class probabilities into an expected interpolation factor.
            pred *= np.arange(output_chan)/(output_chan-1)
            pred = np.sum(pred,-1,keepdims=True)
        #save output as nifti and label with label suffix
        fname_i = data_frame.file_list[nii_file].split('/')[-1]
        if 'sample' in mode:
            #derive subject-level score
            im_level_score = np.mean(pred,axis=(1,2,3))
            window_size = int((len(im_level_score)*0.1)//2)*2+1#take 10% sliding filter window
            im_level_score_f = savgol_filter(im_level_score,window_size,3)#order 3 polynomial
            im_level_score_s = sorted(im_level_score_f)
            im_level_score_s = im_level_score_s[int(len(im_level_score_s)*0.75):]
            sample_score = np.mean(im_level_score_s)#mean of top quartile values
            with open(os.path.join(output_dir,fname_i + ".txt"), "w") as write_file:
                write_file.write(str(sample_score))
        if 'pixel' in mode:
            data_frame.save_nii(pred,output_dir,fname_i)
        nii_file += 1
    return
def save_model(mdl, results_dir, fname, time_stamp=True):
    """Save ``mdl`` under ``results_dir``, optionally tagging the name with a timestamp."""
    suffix = '_weights'
    if time_stamp:
        suffix = '_{}_weights'.format(datetime.now().strftime('%Y-%m-%d-%H%M'))
    mdl.save(os.path.join(results_dir, fname + suffix))
    return
class index_sampling(object):
    """Draw shuffled indices in fixed-size batches, reshuffling once exhausted."""

    def __init__(self, total_len):
        self.total_len = total_len
        self.ind_generator = rand_ind_fisheryates(self.total_len)

    def get_inds(self, batch_size):
        """Return ``batch_size`` indices, restarting the shuffle when depleted."""
        cur_inds = list(itertools.islice(self.ind_generator, batch_size))
        if len(cur_inds) < batch_size:
            # end of iterator - reset/shuffle and draw a fresh batch
            self.ind_generator = rand_ind_fisheryates(self.total_len)
            cur_inds = list(itertools.islice(self.ind_generator, batch_size))
        return cur_inds

    def reset(self):
        # Bug fix: 'self' was missing from the signature, so any call to
        # reset() raised TypeError and 'self' inside was an unresolved name.
        self.ind_generator = rand_ind_fisheryates(self.total_len)
        return
def rand_ind_fisheryates(num_inds):
    """Lazily yield 0..num_inds-1 in random order (incremental Fisher-Yates shuffle)."""
    pool = np.arange(num_inds, dtype=np.uint32)
    for i in range(num_inds):
        # Pick a random element from the not-yet-emitted tail and swap it forward.
        j = np.random.randint(i, num_inds)
        pool[i], pool[j] = pool[j], pool[i]
        yield pool[i]
| 9,435 | 34.078067 | 122 | py |
FPI | FPI-master/var_ops.py | """
Tools for manipulating sets of variables.
"""
import numpy as np
from keras import backend as K
import tensorflow as tf
import copy
def interpolate_vars(old_vars, new_vars, epsilon):
    """
    Interpolate between two sequences of variables:
    element-wise old + epsilon * (new - old).
    """
    return [old + (new - old) * epsilon for old, new in zip(old_vars, new_vars)]
def average_vars(var_seqs):
    """
    Average a sequence of variable sequences element-wise:
    result[i] is the mean of var_seqs[*][i].
    """
    return [np.mean(group, axis=0) for group in zip(*var_seqs)]
def subtract_vars(var_seq_1, var_seq_2):
    """
    Subtract one variable sequence from another, element-wise.
    """
    result = []
    for a, b in zip(var_seq_1, var_seq_2):
        result.append(a - b)
    return result
def add_vars(var_seq_1, var_seq_2):
    """
    Add two variable sequences element-wise.
    """
    result = []
    for a, b in zip(var_seq_1, var_seq_2):
        result.append(a + b)
    return result
def scale_vars(var_seq, scale):
    """
    Multiply every variable in a sequence by ``scale``.
    """
    result = []
    for v in var_seq:
        result.append(v * scale)
    return result
def update_aux(var_seq_train, var_seq_aux, var_list, tvar_list):
    """
    Merge two variable sequences: positions whose layer is listed in
    ``tvar_list`` take their value from the train sequence, all other
    positions keep the auxiliary value.
    """
    merged = []
    for train_v, aux_v, layer in zip(var_seq_train, var_seq_aux, var_list):
        merged.append(train_v if layer in tvar_list else aux_v)
    return merged
| 1,366 | 23.410714 | 143 | py |
FPI | FPI-master/models/wide_residual_network.py | #using code from https://github.com/asmith26/wide_resnets_keras.git
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import os
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
#sys.stdout = sys.stderr
# Prevent reaching to maximum recursion depth in `theano.tensor.grad`
#sys.setrecursionlimit(2 ** 20)
import numpy as np
np.random.seed(2 ** 10)
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, AveragePooling2D, BatchNormalization, Dropout, Input, Activation, Add, Dense, Flatten, UpSampling2D, Lambda, Concatenate
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras import losses
from tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import backend as K
import tensorflow as tf
from functools import partial
USE_BIAS = False # no bias in conv
WEIGHT_INIT = "he_normal"
WEIGHT_DECAY = 0.0005
CHANNEL_AXIS = -1
# Wide residual network http://arxiv.org/abs/1605.07146
def _wide_basic(n_input_plane, n_output_plane, stride, dropout_probability=0.0, direction='down'):
    """Return a wide-resnet basic-block builder (two 3x3 convs + shortcut).

    ``direction='down'`` strides the first conv; ``direction='up'`` keeps unit
    stride and appends UpSampling2D instead, mirroring the block for decoders.
    """
    def f(net):
        # format of conv_params:
        #               [ [nb_col="kernel width", nb_row="kernel height",
        #               "stride", border_mode="same" or "valid"] ]
        # B(3,3): orignal <<basic>> block
        if direction == 'up':
            conv_params = [ [3,3,(1,1),"same"],
                            [3,3,(1,1),"same"] ]
        else:
            conv_params = [ [3,3,stride,"same"],
                            [3,3,(1,1),"same"] ]
        n_bottleneck_plane = n_output_plane
        # Residual block: pre-activation (BN -> ReLU -> Conv) ordering.
        for i, v in enumerate(conv_params):
            if i == 0:
                if n_input_plane != n_output_plane:
                    # Widening block: shared pre-activation feeds both branch and shortcut.
                    net = BatchNormalization(axis=CHANNEL_AXIS)(net)
                    net = Activation("relu")(net)
                    convs = net
                else:
                    convs = BatchNormalization(axis=CHANNEL_AXIS)(net)
                    convs = Activation("relu")(convs)
                convs = Conv2D(n_bottleneck_plane,
                                     (v[0],v[1]),
                                     strides=v[2],
                                     padding=v[3],
                                     kernel_initializer=WEIGHT_INIT,
                                     kernel_regularizer=l2(WEIGHT_DECAY),
                                     use_bias=USE_BIAS)(convs)
                if direction == 'up':
                    convs = UpSampling2D(stride)(convs)
            else:
                convs = BatchNormalization(axis=CHANNEL_AXIS)(convs)
                convs = Activation("relu")(convs)
                if dropout_probability > 0:
                    convs = Dropout(dropout_probability)(convs)
                convs = Conv2D(n_bottleneck_plane,
                                     (v[0],v[1]),
                                     strides=v[2],
                                     padding=v[3],
                                     kernel_initializer=WEIGHT_INIT,
                                     kernel_regularizer=l2(WEIGHT_DECAY),
                                     use_bias=USE_BIAS)(convs)
        # Shortcut Conntection: identity function or 1x1 convolutional
        #  (depends on difference between input & output shape - this
        #   corresponds to whether we are using the first block in each
        #   group; see _layer() ).
        if n_input_plane != n_output_plane:
            shortcut_stride = 1 if direction == 'up' else stride
            shortcut = Conv2D(n_output_plane,
                                    (1,1),
                                    strides=shortcut_stride,
                                    padding="same",
                                    kernel_initializer=WEIGHT_INIT,
                                    kernel_regularizer=l2(WEIGHT_DECAY),
                                    use_bias=USE_BIAS)(net)
            if direction == 'up':
                shortcut = UpSampling2D(stride)(shortcut)
        else:
            # NOTE(review): callers pass tuple strides like (1,1), and
            # (1,1) == 1 is False, so this branch falls through to an
            # AveragePooling2D((1,1)) identity layer — confirm intended.
            if stride == 1:
                shortcut = net
            elif direction == 'up':
                shortcut = UpSampling2D(stride)(net)
            else:
                shortcut = AveragePooling2D(stride)(net)
        return Add()([convs, shortcut])
    return f
# "Stacking Residual Units on the same stage"
def _layer(block, n_input_plane, n_output_plane, count, stride, **kwargs):
def f(net):
net = block(n_input_plane, n_output_plane, stride, **kwargs)(net)
for i in range(2,int(count+1)):
net = block(n_output_plane, n_output_plane, stride=(1,1), **kwargs)(net)
return net
return f
def create_model():
    """Build the original WRN classifier (legacy reference code).

    NOTE(review): this function reads module-level names (depth, input_shape,
    k, weight_init, weight_decay, use_bias, nb_classes) that are not defined
    in this module — only upper-case variants exist — so calling it raises
    NameError as-is. Confirm whether it is dead code before relying on it.
    """
    logging.debug("Creating model...")
    assert((depth - 4) % 6 == 0)
    n = (depth - 4) / 6
    inputs = Input(shape=input_shape)
    n_stages=[16, 16*k, 32*k, 64*k]
    conv1 = Conv2D(n_stages[0],
                        (3, 3),
                        strides=1,
                        padding="same",
                        kernel_initializer=weight_init,
                        kernel_regularizer=l2(weight_decay),
                        use_bias=use_bias)(inputs) # "One conv at the beginning (spatial size: 32x32)"
    # Add wide residual blocks
    block_fn = _wide_basic
    conv2 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1,1))(conv1)# "Stage 1 (spatial size: 32x32)"
    conv3 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2,2))(conv2)# "Stage 2 (spatial size: 16x16)"
    conv4 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2,2))(conv3)# "Stage 3 (spatial size: 8x8)"
    batch_norm = BatchNormalization(axis=CHANNEL_AXIS)(conv4)
    relu = Activation("relu")(batch_norm)
    # Classifier block
    pool = AveragePooling2D(pool_size=(8, 8), strides=(1, 1), padding="same")(relu)
    flatten = Flatten()(pool)
    predictions = Dense(units=nb_classes, kernel_initializer=weight_init, use_bias=use_bias,
                        kernel_regularizer=l2(weight_decay), activation="softmax")(flatten)
    model = Model(inputs=inputs, outputs=predictions)
    return model
def create_wide_residual_network_dec(input_shape,num_classes,depth,k=4,dropout_probability=0.0,final_activation=None):
    """WRN encoder plus lightweight upsampling decoder producing a per-pixel map.

    Args:
        input_shape: HWC input shape.
        num_classes: output channels; 1 selects sigmoid/binary CE, >1 softmax/CCE.
        depth: network depth; must satisfy depth = 10n + 6.
        k: widening factor.
        dropout_probability: dropout inside residual units.
        final_activation: optional override for the output activation
            ('linear' skips the activation layer entirely).

    Returns:
        (keras Model, loss-function name) tuple.
    """
    if final_activation is None:#unspecified
        final_activation = 'softmax' if num_classes > 1 \
                else 'sigmoid'
    assert((depth - 6) % 10 == 0), 'depth should be 10n+6'
    n = (depth - 6) // 10
    inputs = Input(shape=input_shape)
    n_stages=[16, 16*k, 32*k, 64*k, 64*k, 64*k]
    conv1 = Conv2D(n_stages[0],
                        (3, 3),
                        strides=1,
                        padding="same",
                        kernel_initializer=WEIGHT_INIT,
                        kernel_regularizer=l2(WEIGHT_DECAY),
                        use_bias=USE_BIAS)(inputs) # "One conv at the beginning (spatial size: 32x32)"
    # Add wide residual blocks
    block_fn = _wide_basic
    conv2 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1,1))(conv1)# "Stage 1 (spatial size: 32x32)"
    conv3 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2,2))(conv2)# "Stage 2 (spatial size: 16x16)"
    conv4 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2,2))(conv3)# "Stage 3 (spatial size: 8x8)"
    conv5 = _layer(block_fn, n_input_plane=n_stages[3], n_output_plane=n_stages[4], count=n, stride=(2,2))(conv4)# "Stage 4 (spatial size: 4x4)"
    conv6 = _layer(block_fn, n_input_plane=n_stages[4], n_output_plane=n_stages[5], count=n, stride=(2,2))(conv5)# "Stage 5 (spatial size: 2x2)"
    # Decoder: single 'up' residual unit per stage restores the spatial size.
    block_fn = partial(_wide_basic,direction='up')#decoder blocks,keep n=1
    upconv1 = _layer(block_fn, n_input_plane=n_stages[5], n_output_plane=n_stages[2], count=1, stride=(2,2))(conv6)# "Stage 1up (spatial size: 4x4)"
    upconv2 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[1], count=1, stride=(2,2))(upconv1)# "Stage 2up (spatial size: 8x8)"
    upconv3 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[0], count=1, stride=(2,2))(upconv2)# "Stage 3up (spatial size: 16x16)"
    upconv4 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=num_classes, count=1, stride=(2,2))(upconv3)# "Stage 4up (spatial size: 32x32)"
    # Named identity layer so callers can retrieve pre-activation logits.
    logit = Lambda(lambda x:x,name='logit')(upconv4)
    if final_activation == 'linear':
        outputs = logit
    else:
        outputs = Activation(final_activation)(logit)
    loss_f = 'categorical_crossentropy' if num_classes > 1 \
            else 'binary_crossentropy'
    return Model(inputs, outputs), loss_f
def create_wide_residual_network_decdeeper(input_shape,num_classes,depth,k=4,dropout_probability=0.0,final_activation=None):
    """Deeper WRN encoder-decoder variant with one extra down/up stage.

    Args:
        input_shape: HWC input shape.
        num_classes: output channels; 1 selects sigmoid/binary CE, >1 softmax/CCE.
        depth: network depth; must satisfy depth = 12n + 7.
        k: widening factor.
        dropout_probability: dropout inside residual units.
        final_activation: optional override for the output activation
            ('linear' skips the activation layer entirely).

    Returns:
        (keras Model, loss-function name) tuple.
    """
    if final_activation is None:#unspecified
        final_activation = 'softmax' if num_classes > 1 \
                else 'sigmoid'
    assert((depth - 7) % 12 == 0), 'depth should be 12n+7'
    n = (depth - 7) // 12
    inputs = Input(shape=input_shape)
    n_stages=[16, 16*k, 32*k, 64*k, 64*k, 64*k, 64*k]
    conv1 = Conv2D(n_stages[0],
                        (3, 3),
                        strides=1,
                        padding="same",
                        kernel_initializer=WEIGHT_INIT,
                        kernel_regularizer=l2(WEIGHT_DECAY),
                        use_bias=USE_BIAS)(inputs) # "One conv at the beginning (spatial size: 32x32)"
    # Add wide residual blocks
    block_fn = _wide_basic
    conv2 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1,1))(conv1)# "Stage 1 (spatial size: 32x32)"
    conv3 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2,2))(conv2)# "Stage 2 (spatial size: 16x16)"
    conv4 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2,2))(conv3)# "Stage 3 (spatial size: 8x8)"
    conv5 = _layer(block_fn, n_input_plane=n_stages[3], n_output_plane=n_stages[4], count=n, stride=(2,2))(conv4)# "Stage 4 (spatial size: 4x4)"
    conv6 = _layer(block_fn, n_input_plane=n_stages[4], n_output_plane=n_stages[5], count=n, stride=(2,2))(conv5)# "Stage 5 (spatial size: 2x2)"
    conv7 = _layer(block_fn, n_input_plane=n_stages[5], n_output_plane=n_stages[6], count=n, stride=(2,2))(conv6)# "Stage 6 (spatial size: 1x1)"
    # Decoder: single 'up' residual unit per stage restores the spatial size.
    block_fn = partial(_wide_basic,direction='up')#decoder blocks,keep n=1
    upconv1 = _layer(block_fn, n_input_plane=n_stages[6], n_output_plane=n_stages[2], count=1, stride=(2,2))(conv7)# "Stage 1up (spatial size: 2x2)"
    upconv2 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[2], count=1, stride=(2,2))(upconv1)# "Stage 1up (spatial size: 4x4)"
    upconv3 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[1], count=1, stride=(2,2))(upconv2)# "Stage 2up (spatial size: 8x8)"
    upconv4 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[0], count=1, stride=(2,2))(upconv3)# "Stage 3up (spatial size: 16x16)"
    upconv5 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=num_classes, count=1, stride=(2,2))(upconv4)# "Stage 4up (spatial size: 32x32)"
    # Named identity layer so callers can retrieve pre-activation logits.
    logit = Lambda(lambda x:x,name='logit')(upconv5)
    if final_activation == 'linear':
        outputs = logit
    else:
        outputs = Activation(final_activation)(logit)
    loss_f = 'categorical_crossentropy' if num_classes > 1 \
            else 'binary_crossentropy'
    return Model(inputs, outputs), loss_f
def create_wide_residual_network_selfsup(input_shape, *args, **kwargs):
    """Build and compile the self-supervised WRN.

    The optional ``net_f`` kwarg names which builder function in this module to
    use; remaining args/kwargs are forwarded to it.
    """
    if 'net_f' in kwargs:
        builder = globals()[kwargs.pop('net_f')]
    else:
        builder = create_wide_residual_network_dec
    print('Building with network: ' + builder.__name__ + '\n')
    net_ss, loss_f = builder(input_shape, *args, **kwargs)
    # 'lr' is the legacy Keras optimizer argument name used here.
    net_ss.compile(Adam(lr=0.001), [loss_f], ['acc'])
    return net_ss
| 12,583 | 43.624113 | 164 | py |
Age-and-Gender-Recognition | Age-and-Gender-Recognition-main/Age and Gender Recognition using Caffe Model - Youtube.py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import cv2
import os
os.chdir('D:\Python37\Projects\Gender-and-Age-Detection- Youtube\Gender-and-Age-Detection\models')
# In[33]:
def detectFace(net, frame, confidence_threshold=0.7):
    """Run the face-detector DNN on ``frame``.

    Returns a copy of the frame with green rectangles drawn around detections
    plus the list of [x1, y1, x2, y2] face boxes above the confidence threshold.
    """
    annotated = frame.copy()
    print(annotated.shape)
    frame_h = annotated.shape[0]
    frame_w = annotated.shape[1]
    blob = cv2.dnn.blobFromImage(annotated, 1.0, (227, 227), [124.96, 115.97, 106.13],
                                 swapRB=True, crop=False)
    net.setInput(blob)
    detections = net.forward()
    faceBoxes = []
    for det in range(detections.shape[2]):
        confidence = detections[0, 0, det, 2]
        if confidence > confidence_threshold:
            # Detections are normalised coordinates; scale back to pixels.
            box = [int(detections[0, 0, det, 3] * frame_w),
                   int(detections[0, 0, det, 4] * frame_h),
                   int(detections[0, 0, det, 5] * frame_w),
                   int(detections[0, 0, det, 6] * frame_h)]
            faceBoxes.append(box)
            cv2.rectangle(annotated, (box[0], box[1]), (box[2], box[3]),
                          (0, 255, 0), int(round(frame_h / 150)), 8)
    return annotated, faceBoxes
# Model definition/weight files for the three DNNs (face, age, gender).
faceProto='opencv_face_detector.pbtxt'
faceModel='opencv_face_detector_uint8.pb'
ageProto='age_deploy.prototxt'
ageModel='age_net.caffemodel'
genderProto='gender_deploy.prototxt'
genderModel='gender_net.caffemodel'
# Output label sets; age is predicted as one of eight buckets.
genderList=['Male','Female']
ageList=['(0-2)','(4-6)','(8-12)','(15-20)','(25-32)','(38-43)','(48-53)','(60-100)']
faceNet=cv2.dnn.readNet(faceModel,faceProto)
ageNet=cv2.dnn.readNet(ageModel,ageProto)
genderNet=cv2.dnn.readNet(genderModel,genderProto)
video=cv2.VideoCapture(0)  # default webcam
padding=20  # pixels of context kept around each detected face crop
# Main loop: grab webcam frames until the stream ends or 'q' is pressed.
while cv2.waitKey(1)<0:
    hasFrame,frame=video.read()
    if not hasFrame:
        cv2.waitKey()
        break
    resultImg,faceBoxes=detectFace(faceNet,frame)
    if not faceBoxes:
        print("No face detected")
    for faceBox in faceBoxes:
        # Crop the face with padding, clamped to the frame borders.
        face=frame[max(0,faceBox[1]-padding):min(faceBox[3]+padding,frame.shape[0]-1),max(0,faceBox[0]-padding):min(faceBox[2]+padding, frame.shape[1]-1)]
        blob=cv2.dnn.blobFromImage(face,1.0,(227,227),[124.96,115.97,106.13],swapRB=True,crop=False)
        genderNet.setInput(blob)
        genderPreds=genderNet.forward()
        gender=genderList[genderPreds[0].argmax()]
        ageNet.setInput(blob)
        agePreds=ageNet.forward()
        age=ageList[agePreds[0].argmax()]
        # Annotate the frame just above the face box.
        cv2.putText(resultImg,f'{gender},{age}',(faceBox[0],faceBox[1]-10),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,255,255),2,cv2.LINE_AA)
    cv2.imshow("Detecting age and Gender",resultImg)
    if cv2.waitKey(33) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
# In[ ]:
| 2,680 | 29.123596 | 154 | py |
linbp-attack | linbp-attack-master/attack/imagenet/test.py | import os, sys
import torch
import models as MODEL
import torchvision.transforms as T
import torchvision
import argparse
from torch.backends import cudnn
import numpy as np
import torch.nn.functional as F
# Command-line interface: --dir points at the folder of saved adversarial batches.
parser = argparse.ArgumentParser(description='test')
parser.add_argument('--dir', type=str, default='')
args = parser.parse_args()
print(args)
# Deterministic cuDNN behaviour plus fixed seeds for reproducible evaluation.
cudnn.benchmark = False
cudnn.deterministic = True
SEED = 0
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
# Run on GPU when available, otherwise CPU.
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
def normalize(x, ms=None):
    """In-place per-channel normalisation: x[:, i] = (x[:, i] - mean_i) / std_i.

    Args:
        x: tensor of shape (N, C, ...); modified in place and also returned.
        ms: optional (means, stds) pair with one value per channel;
            defaults to the standard ImageNet statistics.

    Returns:
        The same tensor ``x`` after normalisation.
    """
    if ms is None:  # fixed: identity comparison; '== None' is unidiomatic
        ms = [(0.485, 0.456, 0.406), (0.229, 0.224, 0.225)]
    for i in range(x.shape[1]):
        x[:, i] = (x[:, i] - ms[0][i]) / ms[1][i]
    return x
def test(model, trans):
    """Evaluate saved adversarial batches from ``args.dir`` against ``model``.

    Loads ``labels.npy`` plus ``batch_<i>.npy`` files; for a targeted run
    ('target' in the dir name) the labels are shifted by 500 classes and the
    targeted success rate is returned, otherwise the untargeted fooling rate.
    ``trans`` preprocesses each image batch before the forward pass.
    Returns a percentage rounded to two decimals.
    """
    target = torch.from_numpy(np.load(args.dir + '/labels.npy')).long()
    if 'target' in args.dir:
        # Targeted attacks aim at (label + 500) mod 1000.
        label_switch = torch.tensor(list(range(500, 1000)) + list(range(0, 500))).long()
        target = label_switch[target]
    img_num = 0
    count = 0
    advfile_ls = os.listdir(args.dir)
    # len - 1 batch files; presumably the remaining file is labels.npy — confirm.
    for advfile_ind in range(len(advfile_ls)-1):
        # Batches are stored as uint8 images; rescale to [0, 1].
        adv_batch = torch.from_numpy(np.load(args.dir + '/batch_{}.npy'.format(advfile_ind))).float() / 255
        if advfile_ind == 0:
            adv_batch_size = adv_batch.shape[0]
        img = adv_batch
        img_num += img.shape[0]
        label = target[advfile_ind * adv_batch_size : advfile_ind*adv_batch_size + adv_batch.shape[0]]
        label = label.to(device)
        img = img.to(device)
        with torch.no_grad():
            pred = torch.argmax(model(trans(img)), dim=1).view(1,-1)
        # count accumulates mispredictions w.r.t. the (possibly switched) labels.
        count += (label != pred.squeeze(0)).sum().item()
        del pred, img
        del adv_batch
    # Targeted: success = 100 - error rate; untargeted: success = error rate.
    return round(100. - 100. * count / img_num, 2) if 'target' in args.dir else round(100. * count / img_num, 2)
# InceptionV3 transfer-target: build, load pretrained weights, eval mode.
inceptionv3 = MODEL.inceptionv3.Inception3()
inceptionv3.to(device)
inceptionv3.load_state_dict(torch.load('attack/imagenet/models/ckpt/inception_v3_google-1a9a5a14.pth'))
inceptionv3.eval()
def trans_incep(x):
    """Preprocess a batch for InceptionV3; the pipeline depends on the source model."""
    if 'incep' in args.dir:
        # Batches already match Inception's input; just map to [-1, 1].
        return normalize(x, ms=[[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]).data
    # Mimic resize-256 -> center-crop-224 -> resize-299 before normalising.
    resized = F.interpolate(x, size=(256, 256), mode='bilinear', align_corners=False)
    crop = (256 - 224) // 2
    cropped = resized[:, :, crop: crop + 224, crop: crop + 224]
    upscaled = F.interpolate(cropped, size=(299, 299))
    return normalize(upscaled, ms=[[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]).data
# Evaluate on InceptionV3, then free it and load PNASNet-5-Large.
print('inceptionv3:', test(model = inceptionv3, trans = trans_incep))
del inceptionv3
pnasnet = MODEL.pnasnet.pnasnet5large(ckpt_dir ='attack/imagenet/models/ckpt/pnasnet5large-bf079911.pth', num_classes=1000, pretrained='imagenet')
pnasnet.to(device)
pnasnet.eval()
def trans_pnas(x):
    """Resize to 256, center-crop 224, upscale to 331, normalise to [-1, 1] (PNASNet)."""
    resized = F.interpolate(x, size=(256, 256), mode='bilinear', align_corners=False)
    crop = (256 - 224) // 2
    cropped = resized[:, :, crop: crop + 224, crop: crop + 224]
    upscaled = F.interpolate(cropped, size=(331, 331))
    return normalize(upscaled, ms=[[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]).data
# Evaluate on PNASNet, then free it and load SENet-154.
print('pnasnet:', test(model = pnasnet, trans = trans_pnas))
del pnasnet
senet = MODEL.senet.senet154(ckpt_dir ='attack/imagenet/models/ckpt/senet154-c7b49a05.pth')
senet.to(device)
senet.eval()
def trans_se(x):
    """Resize to 256, center-crop 224 and apply the default ImageNet normalisation."""
    resized = F.interpolate(x, size=(256, 256), mode='bilinear', align_corners=False)
    crop = (256 - 224) // 2
    cropped = resized[:, :, crop: crop + 224, crop: crop + 224]
    return normalize(cropped, ms=None).data
# Evaluate on SENet, then move on to torchvision models.
print('senet:', test(model = senet, trans = trans_se))
del senet
# DenseNet-121: the legacy checkpoint uses dotted BN/conv key names
# ('norm.1') which must be flattened ('norm1') before load_state_dict.
densenet = torchvision.models.densenet121(pretrained=False)
densenet.to(device)
import re
pattern = re.compile(
    r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
state_dict = torch.load('attack/imagenet/models/ckpt/densenet121-a639ec97.pth')
for key in list(state_dict.keys()):
    res = pattern.match(key)
    if res:
        # Rewrite 'prefix.norm.1.weight' -> 'prefix.norm1.weight'.
        new_key = res.group(1) + res.group(2)
        state_dict[new_key] = state_dict[key]
        del state_dict[key]
densenet.load_state_dict(state_dict)
densenet.eval()
print('densenet:', test(model = densenet, trans = trans_se))
del densenet
# MobileNetV2 evaluation with the same 256->224 preprocessing.
mobilenet = torchvision.models.mobilenet_v2(pretrained=False)
mobilenet.to(device)
mobilenet.load_state_dict(torch.load('attack/imagenet/models/ckpt/mobilenet_v2-b0353104.pth'))
mobilenet.eval()
print('mobilenet:', test(model = mobilenet, trans = trans_se))
del mobilenet
def trans_ori(x):
    """ImageNet normalisation; resize/crop first only for Inception-sized inputs."""
    if 'incep' in args.dir:
        resized = F.interpolate(x, size=(256, 256), mode='bilinear', align_corners=False)
        crop = (256 - 224) // 2
        x = resized[:, :, crop: crop + 224, crop: crop + 224]
    return normalize(x, ms=None).data
# Finally evaluate on ResNet-50 (the usual white-box surrogate architecture).
resnet50 = MODEL.resnet.resnet50(state_dict_dir ='attack/imagenet/models/ckpt/resnet50-19c8e357.pth')
resnet50.eval()
resnet50.to(device)
print('resnet50:', test(model = resnet50, trans = trans_ori))
del resnet50 | 4,941 | 35.880597 | 146 | py |
linbp-attack | linbp-attack-master/attack/imagenet/utils.py | import torch
import torch.nn.functional as F
import torch.nn as nn
import torchvision
import numpy as np
from torch.utils.data import Dataset
import csv
import PIL.Image as Image
import os
import torchvision.transforms as T
import pickle
# Selected imagenet. The .csv file format:
# class_index, class, image_name
# 0,n01440764,ILSVRC2012_val_00002138.JPEG
# 2,n01484850,ILSVRC2012_val_00004329.JPEG
# ...
class SelectedImagenet(Dataset):
    """Dataset over a hand-picked subset of the ImageNet validation split.

    The selection csv has a header row followed by rows of
    ``class_index,class,image_name``, e.g.
    ``0,n01440764,ILSVRC2012_val_00002138.JPEG``.
    """

    def __init__(self, imagenet_val_dir, selected_images_csv, transform=None):
        super(SelectedImagenet, self).__init__()
        self.imagenet_val_dir = imagenet_val_dir
        self.selected_images_csv = selected_images_csv
        self.transform = transform
        self._load_csv()

    def _load_csv(self):
        # Bug fix: the file handle was previously opened inline and never
        # closed; use a context manager. The header row is skipped.
        with open(self.selected_images_csv, 'r') as fp:
            rows = list(csv.reader(fp))
        self.selected_list = rows[1:]

    def __getitem__(self, item):
        target, target_name, image_name = self.selected_list[item]
        image = Image.open(os.path.join(self.imagenet_val_dir, target_name, image_name))
        if image.mode != 'RGB':
            image = image.convert('RGB')
        if self.transform is not None:
            image = self.transform(image)
        return image, int(target)

    def __len__(self):
        return len(self.selected_list)
class Normalize(nn.Module):
    """Module form of ImageNet mean/std channel normalisation.

    The input tensor is cloned, so the caller's tensor is left untouched.
    """

    def __init__(self):
        super(Normalize, self).__init__()
        self.ms = [(0.485, 0.456, 0.406), (0.229, 0.224, 0.225)]

    def forward(self, input):
        means, stds = self.ms
        out = input.clone()
        for c in range(out.shape[1]):
            out[:, c] = (out[:, c] - means[c]) / stds[c]
        return out
def input_diversity(img):
    """With probability 1/2 return ``img`` unchanged; otherwise apply random
    resize-and-pad input diversity before shrinking back to 224x224.

    Random draws happen in a fixed order (coin, size, top pad, left pad) so
    results are reproducible under a fixed torch seed.
    """
    if torch.randint(0, 2, (1,)).item() == 0:
        return img
    # Random square resize into [224, 256], nearest-neighbour.
    side = torch.randint(224, 257, (1,)).item()
    resized = F.interpolate(img, (side, side), mode='nearest')
    # Zero-pad the remainder randomly split between the two edges of each axis.
    slack = 256 - side
    top = torch.randint(0, slack + 1, (1,)).item()
    left = torch.randint(0, slack + 1, (1,)).item()
    padded = F.pad(resized, pad=(left, slack - left, top, slack - top))
    # Bring the jittered 256x256 canvas back to the network input size.
    return F.interpolate(padded, (224, 224), mode='nearest')
def linbp_forw_resnet50(model, x, do_linbp, linbp_layer):
    """Forward pass through a two-part model [preprocess, resnet50] with LinBP bookkeeping.

    ``linbp_layer`` is '<stage>_<block>' (e.g. '3_1'); from that bottleneck
    onward each block runs with linear-BP ReLUs and records its intermediate
    tensors so the backward pass can be modified later.

    Returns:
        (logits, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls)

    NOTE(review): 'do_linbp' is threaded through but never read here or in
    layer_forw — the jj/kk position alone decides the mode; confirm intended.
    """
    jj = int(linbp_layer.split('_')[0])
    kk = int(linbp_layer.split('_')[1])
    # model[0] is applied first (presumably input normalisation — confirm);
    # model[1] is a torchvision-style ResNet with conv1/bn1/.../layer4/fc.
    x = model[0](x)
    x = model[1].conv1(x)
    x = model[1].bn1(x)
    x = model[1].relu(x)
    x = model[1].maxpool(x)
    ori_mask_ls = []
    conv_out_ls = []
    relu_out_ls = []
    conv_input_ls = []
    def layer_forw(jj, kk, jj_now, kk_now, x, mm, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls, do_linbp):
        # Run block mm in LinBP mode (recording tensors) once the current
        # (stage, block) position reaches (jj, kk); plain mode before that.
        if jj < jj_now:
            x, ori_mask, conv_out, relu_out, conv_in = block_func(mm, x, linbp=True)
            ori_mask_ls.append(ori_mask)
            conv_out_ls.append(conv_out)
            relu_out_ls.append(relu_out)
            conv_input_ls.append(conv_in)
        elif jj == jj_now:
            if kk_now >= kk:
                x, ori_mask, conv_out, relu_out, conv_in = block_func(mm, x, linbp=True)
                ori_mask_ls.append(ori_mask)
                conv_out_ls.append(conv_out)
                relu_out_ls.append(relu_out)
                conv_input_ls.append(conv_in)
            else:
                x, _, _, _, _ = block_func(mm, x, linbp=False)
        else:
            x, _, _, _, _ = block_func(mm, x, linbp=False)
        return x, ori_mask_ls
    for ind, mm in enumerate(model[1].layer1):
        x, ori_mask_ls = layer_forw(jj, kk, 1, ind, x, mm, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls, do_linbp)
    for ind, mm in enumerate(model[1].layer2):
        x, ori_mask_ls = layer_forw(jj, kk, 2, ind, x, mm, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls, do_linbp)
    for ind, mm in enumerate(model[1].layer3):
        x, ori_mask_ls = layer_forw(jj, kk, 3, ind, x, mm, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls, do_linbp)
    for ind, mm in enumerate(model[1].layer4):
        x, ori_mask_ls = layer_forw(jj, kk, 4, ind, x, mm, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls, do_linbp)
    x = model[1].avgpool(x)
    x = torch.flatten(x, 1)
    x = model[1].fc(x)
    return x, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls
def block_func(block, x, linbp):
    """Run one resnet50 Bottleneck block, optionally with LinBP ReLUs.

    Besides the block output, returns the bookkeeping tensors that
    linbp_backw_resnet50 uses to recompute gradients:
      - three 0/1 ReLU activation masks,
      - (identity_out, x_out): skip-branch and residual-branch outputs,
      - (out_0, out_1): the two pre-ReLU activations inside the block,
      - (0, conv_in): placeholder and the block input.
    """
    identity = x
    conv_in = x+0  # `+0` creates a distinct autograd node to grad against
    out = block.conv1(conv_in)
    out = block.bn1(out)
    out_0 = out + 0  # pre-ReLU activation, kept as its own node
    if linbp:
        out = linbp_relu(out_0)
    else:
        out = block.relu(out_0)
    ori_mask_0 = out.data.bool().int()  # 1 where the unit was active
    out = block.conv2(out)
    out = block.bn2(out)
    out_1 = out + 0
    if linbp:
        out = linbp_relu(out_1)
    else:
        out = block.relu(out_1)
    ori_mask_1 = out.data.bool().int()
    out = block.conv3(out)
    out = block.bn3(out)
    if block.downsample is not None:
        identity = block.downsample(identity)
    identity_out = identity + 0
    x_out = out + 0
    out = identity_out + x_out  # residual addition
    out = block.relu(out)  # the final ReLU is always the standard one
    ori_mask_2 = out.data.bool().int()
    return out, (ori_mask_0, ori_mask_1, ori_mask_2), (identity_out, x_out), (out_0, out_1), (0, conv_in)
def linbp_relu(x):
    """ReLU whose forward equals F.relu(x) but whose backward is the identity.

    The negative part F.relu(-x) is added back as a detached constant, so the
    forward value is max(x, 0) while gradients flow through x unchanged (the
    LinBP trick).
    """
    negative_part = F.relu(-x)
    return x + negative_part.detach()
def linbp_backw_resnet50(img, loss, conv_out_ls, ori_mask_ls, relu_out_ls, conv_input_ls, xp):
    """Custom LinBP backward pass through the recorded resnet50 blocks.

    Walks the LinBP blocks from last to first.  For each block it
    (a) back-propagates the loss through the linearised graph, and
    (b) recomputes the "normal" residual-branch gradient by re-applying the
        recorded ReLU masks, so the linear gradient can be rescaled to the
        same per-sample norm, times the SGM-style factor ``xp``.

    Returns the resulting gradient of ``loss`` w.r.t. ``img`` (detached).
    """
    for i in range(-1, -len(conv_out_ls)-1, -1):
        if i == -1:
            # Last LinBP block: seed the backward from the loss.
            grads = torch.autograd.grad(loss, conv_out_ls[i])
        else:
            # Propagate (skip-branch grad, rescaled residual grad) from the
            # block above into this block's two branch outputs.
            grads = torch.autograd.grad((conv_out_ls[i+1][0], conv_input_ls[i+1][1]), conv_out_ls[i], grad_outputs=(grads[0], main_grad_norm))
        # Residual-branch gradient as standard backprop would give it,
        # re-applying the recorded ReLU masks one hop at a time.
        normal_grad_2 = torch.autograd.grad(conv_out_ls[i][1], relu_out_ls[i][1], grads[1]*ori_mask_ls[i][2],retain_graph=True)[0]
        normal_grad_1 = torch.autograd.grad(relu_out_ls[i][1], relu_out_ls[i][0], normal_grad_2 * ori_mask_ls[i][1], retain_graph=True)[0]
        normal_grad_0 = torch.autograd.grad(relu_out_ls[i][0], conv_input_ls[i][1], normal_grad_1 * ori_mask_ls[i][0], retain_graph=True)[0]
        del normal_grad_2, normal_grad_1
        # Linear-backprop gradient of the residual branch (no ReLU masking).
        main_grad = torch.autograd.grad(conv_out_ls[i][1], conv_input_ls[i][1], grads[1])[0]
        # Per-sample rescale of the linear gradient to the normal one's norm.
        alpha = normal_grad_0.norm(p=2, dim = (1,2,3), keepdim = True) / main_grad.norm(p=2,dim = (1,2,3), keepdim=True)
        main_grad_norm = xp * alpha * main_grad
    # Final hop from the first LinBP block down to the input image.
    input_grad = torch.autograd.grad((conv_out_ls[0][0], conv_input_ls[0][1]), img, grad_outputs=(grads[0], main_grad_norm))
    return input_grad[0].data
def ila_forw_resnet50(model, x, ila_layer):
    """Forward a (Normalize, resnet50) nn.Sequential up to the ILA layer.

    ``ila_layer`` is "j_k": stage j (0 = stem, 1..4 = layer1..layer4) and
    block index k within that stage.  Returns the activation at that point,
    or False if the requested position was never reached.
    """
    stage, block_idx = (int(part) for part in ila_layer.split('_'))
    x = model[0](x)
    net = model[1]
    # Stem: conv1 -> bn1 -> relu; "0_0" means the post-relu stem output.
    x = net.conv1(x)
    x = net.bn1(x)
    x = net.relu(x)
    if stage == 0 and block_idx == 0:
        return x
    x = net.maxpool(x)
    # Walk the four stages block by block, stopping at the requested one.
    for stage_now, blocks in enumerate((net.layer1, net.layer2, net.layer3, net.layer4), start=1):
        for ind, blk in enumerate(blocks):
            x = blk(x)
            if stage == stage_now and ind == block_idx:
                return x
    return False
class ILAProjLoss(torch.nn.Module):
    """ILA projection loss.

    Maximises the dot product between the current mid-layer perturbation
    (new_mid - original_mid) and the perturbation direction produced by a
    reference attack (old_attack_mid - original_mid), averaged over the
    batch.  ``coeff`` is accepted for API compatibility but unused.
    """
    # NOTE(review): the original final line had dataset-export residue
    # ("| 7,806 | 36 | 142 | py |") fused onto it, which was a syntax
    # error; removed here.
    def __init__(self):
        super(ILAProjLoss, self).__init__()
    def forward(self, old_attack_mid, new_mid, original_mid, coeff):
        n = old_attack_mid.shape[0]
        # Flatten per-sample feature maps to vectors.
        x = (old_attack_mid - original_mid).view(n, -1)
        y = (new_mid - original_mid).view(n, -1)
        # Batch-averaged dot product of the two perturbation directions.
        proj_loss = torch.sum(y * x) / n
        return proj_loss
import os, sys
import torch
import torchvision.transforms as T
import torch.nn as nn
import argparse
import torch.nn.functional as F
import torchvision
import models as MODEL
from torch.backends import cudnn
import numpy as np
from utils import SelectedImagenet, Normalize, input_diversity, \
linbp_forw_resnet50, linbp_backw_resnet50, ila_forw_resnet50, ILAProjLoss
parser = argparse.ArgumentParser()
# Maximum L-inf perturbation size (images are in [0, 1]).
parser.add_argument('--epsilon', type=float, default=0.03)
# SGM-style factor applied to the residual-branch gradient in LinBP backward.
parser.add_argument('--sgm_lambda', type=float, default=1.0)
# Number of iterations of the base attack.
parser.add_argument('--niters', type=int, default=300)
# Number of ILA fine-tuning iterations (used only when 'ila' is in --method).
parser.add_argument('--ila_niters', type=int, default=100)
# Attack recipe; substrings select behaviour: 'pgd', 'linbp', 'ila',
# 'mdi2fgsm'/'mifgsm' (momentum), 'fgsm' (single big step).
parser.add_argument('--method', type=str, default = 'linbp_ila_pgd')
parser.add_argument('--batch_size', type=int, default=200)
# "j_k": first (stage, block) of resnet50 from which LinBP ReLUs are used.
parser.add_argument('--linbp_layer', type=str, default='3_1')
# "j_k": intermediate layer whose activations ILA matches against.
parser.add_argument('--ila_layer', type=str, default='2_3')
# Directory where adversarial batches (.npy) and labels are written.
parser.add_argument('--save_dir', type=str, default = '')
# If set, run a targeted attack (labels shifted by 500 mod 1000).
parser.add_argument('--target_attack', default=False, action='store_true')
args = parser.parse_args()
if __name__ == '__main__':
    # Craft transferable adversarial examples on ImageNet with a resnet50
    # surrogate, optionally using LinBP forward/backward and ILA fine-tuning,
    # then save each adversarial batch as uint8 .npy files.
    print(args)
    # Deterministic cuDNN + fixed seeds for reproducibility.
    cudnn.benchmark = False
    cudnn.deterministic = True
    SEED = 0
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    os.makedirs(args.save_dir, exist_ok=True)
    # Unpack CLI arguments into locals.
    epsilon = args.epsilon
    batch_size = args.batch_size
    method = args.method
    ila_layer = args.ila_layer
    linbp_layer = args.linbp_layer
    save_dir = args.save_dir
    niters = args.niters
    ila_niters = args.ila_niters
    target_attack = args.target_attack
    sgm_lambda = args.sgm_lambda
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    # Standard ImageNet eval preprocessing.  No normalisation here: the
    # Normalize module is prepended to the model instead, so the attack
    # operates directly in [0, 1] pixel space.
    trans = T.Compose([
        T.Resize((256,256)),
        T.CenterCrop((224,224)),
        T.ToTensor()
    ])
    dataset = SelectedImagenet(imagenet_val_dir='data/imagenet/ILSVRC2012_img_val',
                               selected_images_csv='data/imagenet/selected_imagenet.csv',
                               transform=trans
                               )
    ori_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers = 8, pin_memory = False)
    # White-box surrogate: resnet50 with pretrained weights, wrapped so the
    # normalisation happens inside the model.
    model = MODEL.resnet.resnet50(state_dict_dir ='attack/imagenet/models/ckpt/resnet50-19c8e357.pth')
    model.eval()
    model = nn.Sequential(
        Normalize(),
        model
    )
    model.to(device)
    if target_attack:
        # Fixed target mapping: class c attacks towards class (c + 500) % 1000.
        label_switch = torch.tensor(list(range(500,1000))+list(range(0,500))).long()
    label_ls = []
    for ind, (ori_img, label)in enumerate(ori_loader):
        label_ls.append(label)
        if target_attack:
            label = label_switch[label]
        ori_img = ori_img.to(device)
        img = ori_img.clone()
        m = 0  # momentum accumulator (used by mifgsm / mdi2fgsm variants)
        for i in range(niters):
            # In our implementation of PGD, we incorporate randomness at each iteration to further enhance the transferability
            if 'pgd' in method:
                img_x = img + img.new(img.size()).uniform_(-epsilon, epsilon)
            else:
                img_x = img
            img_x.requires_grad_(True)
            if 'linbp' in method:
                # Forward with LinBP bookkeeping, then the custom backward
                # that linearises ReLUs and rescales the residual gradient.
                att_out, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls = linbp_forw_resnet50(model, img_x, True, linbp_layer)
                pred = torch.argmax(att_out, dim=1).view(-1)
                loss = nn.CrossEntropyLoss()(att_out, label.to(device))
                model.zero_grad()
                input_grad = linbp_backw_resnet50(img_x, loss, conv_out_ls, ori_mask_ls, relu_out_ls, conv_input_ls, xp=sgm_lambda)
            else:
                if method == 'mdi2fgsm' or method == 'linbp_mdi2fgsm':
                    # Apply the random resize-and-pad input transform.
                    att_out = model(input_diversity(img_x))
                else:
                    att_out = model(img_x)
                pred = torch.argmax(att_out, dim=1).view(-1)
                loss = nn.CrossEntropyLoss()(att_out, label.to(device))
                model.zero_grad()
                loss.backward()
                input_grad = img_x.grad.data
            model.zero_grad()
            if 'mdi2fgsm' in method or 'mifgsm' in method:
                # Momentum update (decay 1) on the L1-normalised gradient.
                input_grad = 1 * m + input_grad / torch.norm(input_grad, dim=(1, 2, 3), p=1, keepdim=True)
                m = input_grad
            if target_attack:
                # Targeted attack descends the loss towards the target label.
                input_grad = - input_grad
            if method == 'fgsm' or '_fgsm' in method:
                # Single-step variants take one large 2*epsilon step ...
                img = img.data + 2 * epsilon * torch.sign(input_grad)
            else:
                # ... iterative variants take fixed 1/255 steps.
                img = img.data + 1./255 * torch.sign(input_grad)
            # Project back into the epsilon-ball around the original image
            # and clamp to the valid pixel range.
            img = torch.where(img > ori_img + epsilon, ori_img + epsilon, img)
            img = torch.where(img < ori_img - epsilon, ori_img - epsilon, img)
            img = torch.clamp(img, min=0, max=1)
        if 'ila' in method:
            # ILA stage: restart from the clean image and maximise the
            # projection of the current mid-layer perturbation onto the
            # direction produced by the attack above.
            attack_img = img.clone()
            img = ori_img.clone().to(device)
            with torch.no_grad():
                # Snapshot mid-layer features of the clean image ...
                mid_output = ila_forw_resnet50(model, ori_img, ila_layer)
                mid_original = torch.zeros(mid_output.size()).to(device)
                mid_original.copy_(mid_output)
                # ... and of the adversarial image from the first stage.
                mid_output = ila_forw_resnet50(model, attack_img, ila_layer)
                mid_attack_original = torch.zeros(mid_output.size()).to(device)
                mid_attack_original.copy_(mid_output)
            for _ in range(ila_niters):
                img.requires_grad_(True)
                mid_output = ila_forw_resnet50(model, img, ila_layer)
                loss = ILAProjLoss()(
                    mid_attack_original.detach(), mid_output, mid_original.detach(), 1.0
                )
                model.zero_grad()
                loss.backward()
                input_grad = img.grad.data
                model.zero_grad()
                if method == 'ila_fgsm':
                    img = img.data + 2 * epsilon * torch.sign(input_grad)
                else:
                    img = img.data + 1./255 * torch.sign(input_grad)
                img = torch.where(img > ori_img + epsilon, ori_img + epsilon, img)
                img = torch.where(img < ori_img - epsilon, ori_img - epsilon, img)
                img = torch.clamp(img, min=0, max=1)
            del mid_output, mid_original, mid_attack_original
        # Save this batch as uint8 images in [0, 255].
        # NOTE(review): np.uint8() is a scalar instance; the conventional
        # dtype argument is np.uint8 itself -- works, but worth normalising.
        np.save(save_dir + '/batch_{}.npy'.format(ind), torch.round(img.data*255).cpu().numpy().astype(np.uint8()))
        del img, ori_img, input_grad
        print('batch_{}.npy saved'.format(ind))
    label_ls = torch.cat(label_ls)
    np.save(save_dir + '/labels.npy', label_ls.numpy())
    print('images saved')