repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
ADaPTION | ADaPTION-master/frcnn/lib/rpn/anchor_target_layer.py | # --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import os
import caffe
import yaml
from fast_rcnn.config import cfg
import numpy as np
import numpy.random as npr
from generate_anchors import generate_anchors
from utils.cython_bbox import bbox_overlaps
from fast_rcnn.bbox_transform import bbox_transform
DEBUG = True
class AnchorTargetLayer(caffe.Layer):
    """
    Assign anchors to ground-truth targets. Produces anchor classification
    labels and bounding-box regression targets.

    Bottoms: [0] an RPN score map (only its H, W are read), [1] gt_boxes as
    (x1, y1, x2, y2, label) rows, [2] im_info = (height, width, scale).
    Tops: [0] labels (1/0/-1 per anchor), [1] bbox_targets,
    [2] bbox_inside_weights, [3] bbox_outside_weights, all reshaped into
    Caffe (N, C, H, W) layout at the end of forward().
    """
    def setup(self, bottom, top):
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted strings; param_str comes from the prototxt here,
        # but yaml.safe_load would be the safer choice.
        layer_params = yaml.load(self.param_str)
        anchor_scales = layer_params.get('scales', (8, 16, 32))
        # Base anchors centered at the origin; forward() tiles shifted copies
        # of them across the feature map.
        self._anchors = generate_anchors(scales=np.array(anchor_scales))
        self._num_anchors = self._anchors.shape[0]
        # Stride in image pixels between adjacent feature-map cells.
        self._feat_stride = layer_params['feat_stride']
        if DEBUG:
            print 'anchors:'
            print self._anchors
            print 'anchor shapes:'
            print np.hstack((
                self._anchors[:, 2::4] - self._anchors[:, 0::4],
                self._anchors[:, 3::4] - self._anchors[:, 1::4],
            ))
            # Running statistics consumed only by the DEBUG prints in
            # forward(); cfg.EPS avoids a divide-by-zero before any positive
            # anchor has been seen.
            self._counts = cfg.EPS
            self._sums = np.zeros((1, 4))
            self._squared_sums = np.zeros((1, 4))
            self._fg_sum = 0
            self._bg_sum = 0
            self._count = 0
        # allow boxes to sit over the edge by a small amount
        self._allowed_border = layer_params.get('allowed_border', 0)
        height, width = bottom[0].data.shape[-2:]
        if DEBUG:
            print 'AnchorTargetLayer: height', height, 'width', width
        A = self._num_anchors
        # Top shapes set here are placeholders; forward() reshapes them again
        # with the actual blob dimensions.
        # labels
        top[0].reshape(1, 1, A * height, width)
        # bbox_targets
        top[1].reshape(1, A * 4, height, width)
        # bbox_inside_weights
        top[2].reshape(1, A * 4, height, width)
        # bbox_outside_weights
        top[3].reshape(1, A * 4, height, width)
    def forward(self, bottom, top):
        """Label every anchor (fg/bg/ignore) and compute regression targets."""
        # Algorithm:
        #
        # for each (H, W) location i
        #   generate 9 anchor boxes centered on cell i
        #   apply predicted bbox deltas at cell i to each of the 9 anchors
        # filter out-of-image anchors
        # measure GT overlap
        assert bottom[0].data.shape[0] == 1, \
            'Only single item batches are supported'
        # map of shape (..., H, W)
        height, width = bottom[0].data.shape[-2:]
        # GT boxes (x1, y1, x2, y2, label)
        gt_boxes = bottom[1].data
        # im_info
        im_info = bottom[2].data[0, :]
        if DEBUG:
            print ''
            print 'im_size: ({}, {})'.format(im_info[0], im_info[1])
            print 'scale: {}'.format(im_info[2])
            print 'height, width: ({}, {})'.format(height, width)
            print 'rpn: gt_boxes.shape', gt_boxes.shape
            print 'rpn: gt_boxes', gt_boxes
        # 1. Generate proposals from bbox deltas and shifted anchors
        shift_x = np.arange(0, width) * self._feat_stride
        shift_y = np.arange(0, height) * self._feat_stride
        shift_x, shift_y = np.meshgrid(shift_x, shift_y)
        # One (x, y, x, y) shift per feature-map cell, so a shift moves both
        # corners of an anchor by the same amount.
        shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                            shift_x.ravel(), shift_y.ravel())).transpose()
        # add A anchors (1, A, 4) to
        # cell K shifts (K, 1, 4) to get
        # shift anchors (K, A, 4)
        # reshape to (K*A, 4) shifted anchors
        A = self._num_anchors
        K = shifts.shape[0]
        all_anchors = (self._anchors.reshape((1, A, 4)) +
                       shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
        all_anchors = all_anchors.reshape((K * A, 4))
        total_anchors = int(K * A)
        # only keep anchors inside the image
        inds_inside = np.where(
            (all_anchors[:, 0] >= -self._allowed_border) &
            (all_anchors[:, 1] >= -self._allowed_border) &
            (all_anchors[:, 2] < im_info[1] + self._allowed_border) &  # width
            (all_anchors[:, 3] < im_info[0] + self._allowed_border)    # height
        )[0]
        if DEBUG:
            print 'total_anchors', total_anchors
            print 'all_anchors', all_anchors
            print 'allowed borders', self._allowed_border
            print 'Shape of all_anchors', np.shape(all_anchors)
            print 'inds_inside', len(inds_inside)
            print 'GT Boxes:', gt_boxes
        # keep only inside anchors
        anchors = all_anchors[inds_inside, :]
        if DEBUG:
            print 'anchors.shape', anchors.shape
        # label: 1 is positive, 0 is negative, -1 is dont care
        labels = np.empty((len(inds_inside), ), dtype=np.float32)
        labels.fill(-1)
        # overlaps between the anchors and the gt boxes
        # overlaps (ex, gt)
        overlaps = bbox_overlaps(
            np.ascontiguousarray(anchors, dtype=np.float),
            np.ascontiguousarray(gt_boxes, dtype=np.float))
        # For each anchor: index and value of its best-overlapping gt box.
        argmax_overlaps = overlaps.argmax(axis=1)
        max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]
        # For each gt box: its best-overlapping anchor.
        gt_argmax_overlaps = overlaps.argmax(axis=0)
        gt_max_overlaps = overlaps[gt_argmax_overlaps,
                                   np.arange(overlaps.shape[1])]
        # Recompute via np.where so that *every* anchor tying for a gt's best
        # overlap is kept, not just the first argmax hit.
        gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
        if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:
            # assign bg labels first so that positive labels can clobber them
            labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
        # fg label: for each gt, anchor with highest overlap
        labels[gt_argmax_overlaps] = 1
        # fg label: above threshold IOU
        labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1
        if cfg.TRAIN.RPN_CLOBBER_POSITIVES:
            # assign bg labels last so that negative labels can clobber positives
            labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
        # subsample positive labels if we have too many
        num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)
        fg_inds = np.where(labels == 1)[0]
        if len(fg_inds) > num_fg:
            disable_inds = npr.choice(
                fg_inds, size=(len(fg_inds) - num_fg), replace=False)
            labels[disable_inds] = -1
        # subsample negative labels if we have too many; the bg budget is
        # whatever the fg examples left unused in the batch
        num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)
        bg_inds = np.where(labels == 0)[0]
        if len(bg_inds) > num_bg:
            disable_inds = npr.choice(
                bg_inds, size=(len(bg_inds) - num_bg), replace=False)
            labels[disable_inds] = -1
            # print "was %s inds, disabling %s, now %s inds" % (
            # len(bg_inds), len(disable_inds), np.sum(labels == 0))
        # NOTE(review): this zero allocation is dead code — it is immediately
        # overwritten by the _compute_targets result on the next line.
        bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)
        bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])
        # Inside weights gate the regression loss so only fg anchors count.
        bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
        bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)
        bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
        if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:
            # uniform weighting of examples (given non-uniform sampling)
            num_examples = np.sum(labels >= 0)
            positive_weights = np.ones((1, 4)) * 1.0 / num_examples
            negative_weights = np.ones((1, 4)) * 1.0 / num_examples
        else:
            assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &
                    (cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))
            # Split RPN_POSITIVE_WEIGHT / (1 - RPN_POSITIVE_WEIGHT) evenly
            # over the positive / negative examples respectively.
            positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /
                                np.sum(labels == 1))
            negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /
                                np.sum(labels == 0))
        bbox_outside_weights[labels == 1, :] = positive_weights
        bbox_outside_weights[labels == 0, :] = negative_weights
        if DEBUG:
            self._sums += bbox_targets[labels == 1, :].sum(axis=0)
            self._squared_sums += (bbox_targets[labels == 1, :] ** 2).sum(axis=0)
            self._counts += np.sum(labels == 1)
            means = self._sums / self._counts
            stds = np.sqrt(self._squared_sums / self._counts - means ** 2)
            print 'means:'
            print means
            print 'stdevs:'
            print stds
        # map up to original set of anchors; out-of-image anchors get -1
        # (ignore) labels and zero targets/weights
        labels = _unmap(labels, total_anchors, inds_inside, fill=-1)
        bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)
        bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)
        bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)
        if DEBUG:
            print 'rpn: max max_overlap', np.max(max_overlaps)
            print 'rpn: num_positive', np.sum(labels == 1)
            print 'rpn: num_negative', np.sum(labels == 0)
            self._fg_sum += np.sum(labels == 1)
            self._bg_sum += np.sum(labels == 0)
            self._count += 1
            print 'rpn: num_positive avg', self._fg_sum / self._count
            print 'rpn: num_negative avg', self._bg_sum / self._count
        # labels: (H, W, A) order -> Caffe (1, 1, A*height, width) layout
        labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)
        labels = labels.reshape((1, 1, A * height, width))
        top[0].reshape(*labels.shape)
        top[0].data[...] = labels
        # bbox_targets
        bbox_targets = bbox_targets \
            .reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
        top[1].reshape(*bbox_targets.shape)
        top[1].data[...] = bbox_targets
        # bbox_inside_weights
        bbox_inside_weights = bbox_inside_weights \
            .reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
        assert bbox_inside_weights.shape[2] == height
        assert bbox_inside_weights.shape[3] == width
        top[2].reshape(*bbox_inside_weights.shape)
        top[2].data[...] = bbox_inside_weights
        # bbox_outside_weights
        bbox_outside_weights = bbox_outside_weights \
            .reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
        assert bbox_outside_weights.shape[2] == height
        assert bbox_outside_weights.shape[3] == width
        top[3].reshape(*bbox_outside_weights.shape)
        top[3].data[...] = bbox_outside_weights
    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass
    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass
def _unmap(data, count, inds, fill=0):
    """Scatter `data` back into an array covering all `count` items.

    Positions listed in `inds` receive the rows of `data`; every other
    position is set to `fill`. Handles both 1-D label vectors and 2-D
    per-anchor target matrices; the result is always float32.
    """
    out_shape = (count, ) + data.shape[1:]
    ret = np.full(out_shape, fill, dtype=np.float32)
    if data.ndim == 1:
        ret[inds] = data
    else:
        ret[inds, :] = data
    return ret
def _compute_targets(ex_rois, gt_rois):
    """Compute bounding-box regression targets for an image.

    `ex_rois` holds (x1, y1, x2, y2) anchors; `gt_rois` holds the matched
    ground truths with a trailing class label, which is dropped before the
    transform. Returns float32 deltas, one row per anchor.
    """
    n_ex = ex_rois.shape[0]
    n_gt = gt_rois.shape[0]
    assert n_ex == n_gt
    assert ex_rois.shape[1] == 4
    assert gt_rois.shape[1] == 5
    deltas = bbox_transform(ex_rois, gt_rois[:, :4])
    return deltas.astype(np.float32, copy=False)
| 11,575 | 39.055363 | 95 | py |
ADaPTION | ADaPTION-master/frcnn/lib/transform/torch_image_transform_layer.py | # --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
""" Transform images for compatibility with models trained with
https://github.com/facebook/fb.resnet.torch.
Usage in model prototxt:
layer {
name: 'data_xform'
type: 'Python'
bottom: 'data_caffe'
top: 'data'
python_param {
module: 'transform.torch_image_transform_layer'
layer: 'TorchImageTransformLayer'
}
}
"""
import caffe
from fast_rcnn.config import cfg
import numpy as np
class TorchImageTransformLayer(caffe.Layer):
    """Re-normalize Caffe-style BGR images into fb.resnet.torch RGB format.

    The bottom blob holds BGR images (0-255 scale) from which cfg.PIXEL_MEANS
    was already subtracted; the top blob holds RGB images scaled to [0, 1]
    and standardized with the Torch per-channel means and stds.
    """

    def setup(self, bottom, top):
        # (1, 3, 1, 1) shaped arrays: per-channel RGB means/stds used by
        # fb.resnet.torch pre-trained models.
        self.PIXEL_MEANS = \
            np.array([[[[0.48462227599918]],
                       [[0.45624044862054]],
                       [[0.40588363755159]]]])
        self.PIXEL_STDS = \
            np.array([[[[0.22889466674951]],
                       [[0.22446679341259]],
                       [[0.22495548344775]]]])
        # The default ("old") pixel means that were already subtracted,
        # transposed to (1, 3, 1, 1) so they broadcast per channel.
        channel_swap = (0, 3, 1, 2)
        self.OLD_PIXEL_MEANS = \
            cfg.PIXEL_MEANS[np.newaxis, :, :, :].transpose(channel_swap)
        top[0].reshape(*(bottom[0].shape))

    def forward(self, bottom, top):
        # BUGFIX: copy before the in-place ops below. The original aliased
        # bottom[0].data, so `ims += ...` mutated the bottom blob itself,
        # corrupting 'data_caffe' for any other layer consuming it.
        ims = bottom[0].data.copy()
        # Invert the channel means that were already subtracted
        ims += self.OLD_PIXEL_MEANS
        # 1. Permute BGR to RGB and normalize to [0, 1]
        ims = ims[:, [2, 1, 0], :, :] / 255.0
        # 2. Remove channel means
        ims -= self.PIXEL_MEANS
        # 3. Standardize channels
        ims /= self.PIXEL_STDS
        top[0].reshape(*(ims.shape))
        top[0].data[...] = ims

    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass

    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass
| 2,000 | 29.784615 | 72 | py |
selectionfunctions | selectionfunctions-main/docs/conf.py | # -*- coding: utf-8 -*-
#
# selectionfunctions documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 14 17:20:58 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.join(os.path.dirname(__name__), '..'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
# 'sphinxcontrib.googleanalytics'
# 'sphinxcontrib.programoutput'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'selectionfunctions'
copyright = u'2019, Douglas Boubert & Andrew Everall'
author = u'Douglas Boubert'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'v0.1'
# The full version, including alpha/beta/rc tags.
release = u'v0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'selectionfunctions v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'selectionfunctionsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'selectionfunctions.tex', u'selectionfunctions Documentation',
u'Douglas Boubert', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'selectionfunctions', u'selectionfunctions Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'selectionfunctions', u'selectionfunctions Documentation',
author, 'selectionfunctions', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Google Analytics
# googleanalytics_id = 'UA-57454625-3'
# Mock modules, rather than importing them.
# import sys
# from mock import Mock as MagicMock
#
# class Mock(MagicMock):
# @classmethod
# def __getattr__(cls, name):
# return Mock()
autodoc_mock_imports = [
'astropy',
'astropy.coordinates',
'astropy.coordinates.SkyCoord',
'astropy.io',
'astropy.io.fits',
'astropy.units',
'astropy.wcs',
'contextlib',
'contextlib.closing',
'h5py',
'hashlib',
'healpy',
'numpy',
'PIL',
'PIL.Image',
'scipy',
'scipy.ndimage',
'scipy.ndimage.map_coordinates',
'scipy.spatial',
'scipy.spatial.cKDTree',
'shutil']
# 'progressbar',
# 'progressbar.ProgressBar',
# 'progressbar.widgets',
# 'progressbar.widgets.DataSize',
# 'progressbar.widgets.AdaptiveTransferSpeed',
# 'progressbar.widgets.Bar',
# 'progressbar.widgets.AdaptiveETA',
# 'progressbar.widgets.Percentage',
# 'progressbar.widgets.FormatCustomText',
# 'progressbar.utils',
# 'progressbar.utils.scale_1024']
# sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
| 11,441 | 27.605 | 80 | py |
stancy | stancy-master/run_classifier.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import argparse
import csv
import logging
import os
import random
import sys
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from sklearn.metrics import classification_report
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from modeling import BertForSequenceClassification, BertConfig, WEIGHTS_NAME, CONFIG_NAME, BertForSequenceClassificationDualLoss
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
    """One labeled (or unlabeled) sequence-classification example."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store the raw fields of a single example.

        Args:
            guid: Unique identifier for the example.
            text_a: Untokenized text of the first sequence; always required.
            text_b: Untokenized text of the second sequence; only needed for
                sequence-pair tasks.
            label: Gold label; set for train/dev examples, absent for test.
        """
        self.guid = guid
        self.label = label
        self.text_a = text_a
        self.text_b = text_b
class InputFeatures(object):
    """Model-ready features for one example: padded id tensors plus the
    classification target and the auxiliary similarity target."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id, sim_label_id):
        fields = (
            ("input_ids", input_ids),
            ("input_mask", input_mask),
            ("segment_ids", segment_ids),
            ("label_id", label_id),
            ("sim_label_id", sim_label_id),
        )
        for name, value in fields:
            setattr(self, name, value)
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated file; each row becomes a list of cell strings."""
        lines = []
        with open(input_file, "r") as f:
            for row in csv.reader(f, delimiter="\t", quotechar=quotechar):
                if sys.version_info[0] == 2:
                    # Legacy Python 2 interpreters need an explicit decode.
                    row = [unicode(cell, 'utf-8') for cell in row]  # noqa: F821
                lines.append(row)
        return lines
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """Return `InputExample`s built from <data_dir>/train.tsv."""
        train_path = os.path.join(data_dir, "train.tsv")
        logger.info("LOOKING AT {}".format(train_path))
        return self._create_examples(self._read_tsv(train_path), "train")

    def get_dev_examples(self, data_dir):
        """Return `InputExample`s built from <data_dir>/dev.tsv."""
        dev_path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev")

    def get_labels(self):
        """MRPC is a binary paraphrase task."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into `InputExample`s, skipping the header row."""
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue  # column-name header
            examples.append(InputExample(
                guid="%s-%s" % (set_type, idx),
                text_a=row[3],
                text_b=row[4],
                label=row[0]))
        return examples
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """Return `InputExample`s built from <data_dir>/train.tsv."""
        rows = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return self._create_examples(rows, "train")

    def get_dev_examples(self, data_dir):
        """Return `InputExample`s built from the matched dev split."""
        rows = self._read_tsv(os.path.join(data_dir, "dev_matched.tsv"))
        return self._create_examples(rows, "dev_matched")

    def get_labels(self):
        """Three-way NLI labels."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into `InputExample`s, skipping the header row."""
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue  # column-name header
            examples.append(InputExample(
                guid="%s-%s" % (set_type, row[0]),
                text_a=row[8],
                text_b=row[9],
                label=row[-1]))
        return examples
class StanceProcessor(DataProcessor):
    """Processor for the Stance data set."""

    def get_train_examples(self, data_dir):
        """Return `InputExample`s built from <data_dir>/train.tsv."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """Return `InputExample`s built from <data_dir>/dev.tsv."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """Return `InputExample`s built from <data_dir>/test.tsv."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """Binary stance labels."""
        return ["supports", "refutes"]

    def _create_examples(self, lines, set_type):
        """Build claim/evidence pairs; unlike GLUE files there is no header row."""
        examples = []
        for row in lines:
            row_id, claim, evidence, label = row[0], row[1], row[2], row[3]
            examples.append(InputExample(
                guid="%s-%s" % (set_type, row_id),
                text_a=claim,
                text_b=evidence,
                label=label))
        return examples
class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """Return `InputExample`s built from <data_dir>/train.tsv."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """Return `InputExample`s built from <data_dir>/dev.tsv."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """Binary acceptability labels."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Build single-sentence examples; CoLA files carry no header row."""
        examples = []
        for idx, row in enumerate(lines):
            examples.append(InputExample(
                guid="%s-%s" % (set_type, idx),
                text_a=row[3],
                text_b=None,
                label=row[1]))
        return examples
class Sst2Processor(DataProcessor):
    """Processor for the SST-2 data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """Return `InputExample`s built from <data_dir>/train.tsv."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """Return `InputExample`s built from <data_dir>/dev.tsv."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """Binary sentiment labels."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Build single-sentence examples, skipping the header row."""
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue  # column-name header
            examples.append(InputExample(
                guid="%s-%s" % (set_type, idx),
                text_a=row[0],
                text_b=None,
                label=row[1]))
        return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s.

    Args:
        examples: list of InputExample instances.
        label_list: ordered list of label strings; the index defines the id.
        max_seq_length: total wordpiece budget, including special tokens.
        tokenizer: object exposing ``tokenize`` and ``convert_tokens_to_ids``.

    Returns:
        A list of InputFeatures with padded ids/masks/segment ids, the class
        label id, and a cosine-similarity target (+1.0 for label id 0,
        -1.0 for every other label).
    """
    label_map = {label : i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)

        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0   0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambigiously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)

        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            segment_ids += [1] * (len(tokens_b) + 1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        label_id = label_map[example.label]
        # Cosine-similarity target: +1.0 for the positive class (id 0), -1.0
        # otherwise. The previous if/elif only covered ids 0 and 1 and left
        # sim_label_id unbound (UnboundLocalError) for multi-class tasks such
        # as MNLI; the explicit else keeps binary behaviour identical while
        # covering every label id.
        if label_id == 0:
            sim_label_id = 1.0
        else:
            sim_label_id = -1.0

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join(
                    [str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))
            logger.info("sim_label: %s (id = %d)" % (example.label, sim_label_id))

        features.append(
                InputFeatures(input_ids=input_ids,
                              input_mask=input_mask,
                              segment_ids=segment_ids,
                              label_id=label_id,
                              sim_label_id=sim_label_id))
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def accuracy(out, labels):
    """Count how many row-wise argmax predictions of `out` equal `labels`."""
    predictions = np.argmax(out, axis=1)
    return (predictions == labels).sum()
def main():
    """CLI entry point: fine-tune and/or evaluate a dual-loss BERT classifier.

    Parses command-line arguments, sets up (optionally distributed / fp16)
    training, runs the training loop when --do_train is set, saves the model,
    and when --do_eval is set writes per-example predictions plus aggregate
    metrics to the output directory.
    """
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                        "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--cache_dir",
                        default="",
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                        "Sequences longer than this will be truncated, and sequences shorter \n"
                        "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--alpha",
                        default=0.5,
                        type=float,
                        help="Weight given to cross entropy loss. (1-alpha) weight for the cosine similarity.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                        "0 (default value): dynamic loss scaling.\n"
                        "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()

    # Optional remote-debugger hook (VS Code / ptvsd); blocks until attach.
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Registry of supported tasks -> data processors and label counts.
    processors = {
        "cola": ColaProcessor,
        "mnli": MnliProcessor,
        "mrpc": MrpcProcessor,
        "sst-2": Sst2Processor,
        "stance": StanceProcessor,
    }

    num_labels_task = {
        "cola": 2,
        "sst-2": 2,
        "mnli": 3,
        "mrpc": 2,
        "stance": 2,
    }

    # Device selection: single-process (possibly multi-GPU DataParallel) vs
    # one-GPU-per-process distributed training.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))

    # The effective optimizer batch stays constant; each forward pass uses a
    # smaller micro-batch when accumulating gradients.
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    # Seed all RNGs for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()

    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    num_labels = num_labels_task[task_name]
    label_list = processor.get_labels()

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir)
        num_train_optimization_steps = int(
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()

    # Prepare model
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))
    # CHANGE HERE
    # Dual-loss variant: cross-entropy plus a cosine-similarity objective
    # (weighted by --alpha); the plain classifier is kept below for reference.
    model = BertForSequenceClassificationDualLoss.from_pretrained(args.bert_model,
              cache_dir=cache_dir,
              num_labels = num_labels)
    #model = BertForSequenceClassification.from_pretrained(args.bert_model,
              #cache_dir=cache_dir,
              #num_labels = num_labels)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    # Weight decay is disabled for biases and LayerNorm parameters, following
    # the original BERT training recipe.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0
    if args.do_train:
        # ---- Training ----
        train_features = convert_examples_to_features(
            train_examples, label_list, args.max_seq_length, tokenizer)
        logger.info("***** Running training *****")
        logger.info(" Num examples = %d", len(train_examples))
        logger.info(" Batch size = %d", args.train_batch_size)
        logger.info(" Num steps = %d", num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        all_sim_label_ids = torch.tensor([f.sim_label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_sim_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            process_bar = tqdm(train_dataloader)
            for step, batch in enumerate(process_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids, sim_label_ids = batch
                # CHANGE HERE
                loss = model(input_ids, segment_ids, input_mask, label_ids, sim_label_ids)
                #loss = model(input_ids, segment_ids, input_mask, label_ids)
                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                process_bar.set_description("Loss: %0.8f" % (loss.sum().item()))
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                # Only step the optimizer once every accumulation window.
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used that handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
            print("\nLoss: {}\n".format(tr_loss / nb_tr_steps))

        # Save a trained model and the associated configuration
        model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        with open(output_config_file, 'w') as f:
            f.write(model_to_save.config.to_json_string())

        # Load a trained model and config that you have fine-tuned
        config = BertConfig(output_config_file)
        # CHANGE HERE
        model = BertForSequenceClassificationDualLoss(config, num_labels=num_labels)
        #model = BertForSequenceClassification(config, num_labels=num_labels)
        model.load_state_dict(torch.load(output_model_file))
    else:
        model = BertForSequenceClassificationDualLoss.from_pretrained(args.bert_model, num_labels=num_labels)
    model.to(device)

    # ---- Evaluation (only on rank 0 when distributed) ----
    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # NOTE(review): evaluation runs on the *test* split, not dev, despite
        # the --do_eval help text - confirm this is intended.
        eval_examples = processor.get_test_examples(args.data_dir)
        eval_features = convert_examples_to_features(
            eval_examples, label_list, args.max_seq_length, tokenizer)
        logger.info("***** Running evaluation *****")
        logger.info(" Num examples = %d", len(eval_examples))
        logger.info(" Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        all_sim_label_ids = torch.tensor([f.sim_label_id for f in eval_features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_sim_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0

        predicted_labels = []
        predicted_prob = []
        gold_labels = []

        for input_ids, input_mask, segment_ids, label_ids, sim_label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)
            sim_label_ids = sim_label_ids.to(device)

            with torch.no_grad():
                # CHANGE HERE
                # First call (with labels) returns the loss; second call
                # (without labels) returns the logits.
                tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids, sim_label_ids)
                logits = model(input_ids, segment_ids, input_mask)
                #tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
                #logits = model(input_ids, segment_ids, input_mask)

            predicted_prob.extend(torch.nn.functional.softmax(logits, dim=1))
            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(logits, label_ids)

            tmp_predicted = np.argmax(logits, axis=1)
            predicted_labels.extend(tmp_predicted.tolist())
            gold_labels.extend(label_ids.tolist())

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        loss = tr_loss/nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss,
                  'eval_accuracy': eval_accuracy,
                  'global_step': global_step,
                  'loss': loss}

        # Per-example dump: softmax probabilities, predicted and gold labels.
        output_eval_log_file = os.path.join(args.output_dir, "eval_details.txt")
        writer = open(output_eval_log_file, "w")
        for prob, pred_label, gold_label in zip(predicted_prob, predicted_labels, gold_labels):
            writer.write("{}\t{}\t{}\n".format(prob.cpu().tolist(), pred_label, gold_label))
        writer.close()

        print(classification_report(gold_labels, predicted_labels, target_names=label_list, digits=4))

        # Aggregate results: all CLI args, the metric dict, and the report.
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for k,v in sorted(vars(args).items()):
                writer.write("{}={}\n".format(k,v))
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
            writer.write(classification_report(gold_labels, predicted_labels, target_names=label_list, digits=4))
# Standard CLI guard: importing this module has no side effects.
if __name__ == "__main__":
    main()
| 32,594 | 41.607843 | 144 | py |
stancy | stancy-master/modeling.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, CosineEmbeddingLoss, MSELoss
from file_utils import cached_path
# Module-level logger for this file.
logger = logging.getLogger(__name__)

# Download URLs for the publicly released pre-trained BERT checkpoints,
# keyed by the shortcut names accepted by `from_pretrained`.
PRETRAINED_MODEL_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
# File names expected inside a (possibly extracted) model archive.
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
TF_WEIGHTS_NAME = 'model.ckpt'
def load_tf_weights_in_bert(model, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model

    Walks every variable in the TensorFlow checkpoint, maps its slash-separated
    name onto the corresponding attribute path of the PyTorch `model`, and
    copies the weights in place. Returns the same `model` instance.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(n in ["adam_v", "adam_m"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        # Walk the name path segment by segment, descending through module
        # attributes; "layer_3"-style segments index into module lists.
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            # TF naming -> PyTorch naming: kernel/gamma are weights,
            # output_bias/beta are biases.
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        # m_name here is the *last* path segment from the loop above.
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            # TF stores dense kernels transposed relative to nn.Linear.
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
def gelu(x):
    """Gaussian Error Linear Unit activation, exact erf formulation.

    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel`, OR a path to a JSON config file to load instead.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probabilitiy for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The sttdev of the truncated_normal_initializer for
                initializing all weight matrices.

        Raises:
            ValueError: if the first argument is neither an int nor a string path.
        """
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            # A file path was supplied: adopt every key/value from the JSON file.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use `cls` (not a hard-coded class name) so subclasses of BertConfig
        # deserialize into their own type.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a sorted, indented JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
try:
    # Prefer NVIDIA apex's fused LayerNorm kernel when apex is installed.
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
    class BertLayerNorm(nn.Module):
        def __init__(self, hidden_size, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(BertLayerNorm, self).__init__()
            # Learned elementwise affine parameters (gain and shift).
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps
        def forward(self, x):
            # Normalise over the last dimension, then apply the learned affine.
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_len = input_ids.size(1)
        # One position id per token, broadcast across the batch.
        position_ids = torch.arange(seq_len, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        summed = (self.word_embeddings(input_ids)
                  + self.position_embeddings(position_ids)
                  + self.token_type_embeddings(token_type_ids))
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (no output projection)."""

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, hidden) -> (batch, heads, seq, head_size)
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))

        # Raw logits scaled by sqrt(head size), plus the (precomputed,
        # additive) attention mask from BertModel's forward().
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask

        # Softmax to probabilities, then dropout: this drops whole tokens to
        # attend to, as in the original Transformer paper.
        probs = self.dropout(nn.Softmax(dim=-1)(scores))

        ctx = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        merged_shape = ctx.size()[:-2] + (self.all_head_size,)
        return ctx.view(*merged_shape)
class BertSelfOutput(nn.Module):
    """Projects attention output, then applies dropout + residual LayerNorm."""

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalisation.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Full attention sublayer: self-attention plus its output projection."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        return self.output(attended, input_tensor)
class BertIntermediate(nn.Module):
    """Feed-forward expansion with the configured activation function."""

    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be a name string (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward contraction with dropout + residual LayerNorm."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalisation.
        return self.LayerNorm(contracted + input_tensor)
class BertLayer(nn.Module):
    """One transformer encoder layer: attention block followed by the
    position-wise feed-forward block."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attn_out = self.attention(hidden_states, attention_mask)
        ffn_out = self.intermediate(attn_out)
        # The output block adds attn_out back as the residual.
        return self.output(ffn_out, attn_out)
class BertEncoder(nn.Module):
    """Stack of num_hidden_layers identical (but independently parameterized)
    BertLayer modules."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        prototype = BertLayer(config)
        # Deep-copy so each layer owns its own parameters.
        self.layer = nn.ModuleList(
            [copy.deepcopy(prototype) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        # Collect every layer's output when requested; otherwise only the last.
        all_encoder_layers = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers
class BertPooler(nn.Module):
    """Pools a sequence into a single vector by passing the hidden state of
    its first ([CLS]) token through a dense layer and tanh."""

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" is simply dense + tanh over the first token's state.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm transform applied to hidden states
    before the LM decoder projection."""

    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Accept an activation name (looked up in ACT2FN) or a callable.
        act = config.hidden_act
        named = isinstance(act, str) or (sys.version_info[0] == 2 and isinstance(act, unicode))
        self.transform_act_fn = ACT2FN[act] if named else act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class BertLMPredictionHead(nn.Module):
    """Masked-LM head whose decoder weight is tied to the input embedding
    matrix, with a separate output-only bias per vocabulary token."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        vocab_size = bert_model_embedding_weights.size(0)
        hidden = bert_model_embedding_weights.size(1)
        # Weight tying: reuse the embedding matrix as the output projection.
        self.decoder = nn.Linear(hidden, vocab_size, bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(vocab_size))

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return self.decoder(transformed) + self.bias
class BertOnlyMLMHead(nn.Module):
    """Pre-training head exposing only the masked-LM objective."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        # Per-position vocabulary scores.
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Pre-training head exposing only the binary next-sentence-prediction
    objective."""

    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        # Two logits: is-next vs. not-next.
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Joint pre-training heads: masked-LM vocabulary scores plus
    next-sentence-prediction scores."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score
class BertPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.

    Subclasses build their architecture in `__init__` and typically call
    `self.apply(self.init_bert_weights)`; pretrained weights are obtained via
    the `from_pretrained` classmethod.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(BertPreTrainedModel, self).__init__()
        # Guard against passing e.g. a path where a BertConfig is expected.
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config
    def init_bert_weights(self, module):
        """ Initialize the weights.

        Intended to be used with `nn.Module.apply`, so it is called once per
        submodule: Linear/Embedding weights get a normal init, LayerNorm gets
        ones/zeros, and Linear biases are zeroed.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None,
                        from_tf=False, *inputs, **kwargs):
        """
        Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `bert-base-uncased`
                    . `bert-large-uncased`
                    . `bert-base-cased`
                    . `bert-large-cased`
                    . `bert-base-multilingual-uncased`
                    . `bert-base-multilingual-cased`
                    . `bert-base-chinese`
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `model.chkpt` a TensorFlow checkpoint
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)

        Returns:
            The instantiated model with pretrained weights loaded, or None
            when the archive could not be resolved (see NOTE below).
        """
        # Map a well-known model name to its download url; anything else is
        # treated as a path/url directly.
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            archive_file = pretrained_model_name_or_path
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                    archive_file))
            # NOTE(review): returns None instead of re-raising; callers must
            # check for a None result.
            return None
        if resolved_archive_file == archive_file:
            logger.info("loading archive file {}".format(archive_file))
        else:
            logger.info("loading archive file {} from cache at {}".format(
                archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file) or from_tf:
            serialization_dir = resolved_archive_file
        else:
            # Extract archive to temp dir
            tempdir = tempfile.mkdtemp()
            logger.info("extracting archive file {} to temp dir {}".format(
                resolved_archive_file, tempdir))
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        # Load config
        config_file = os.path.join(serialization_dir, CONFIG_NAME)
        config = BertConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
            # Load to CPU when CUDA is unavailable so GPU-saved weights still work.
            state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None)
        if tempdir:
            # Clean up temp dir
            shutil.rmtree(tempdir)
        if from_tf:
            # Directly load from a TensorFlow checkpoint
            weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
            return load_tf_weights_in_bert(model, weights_path)
        # Load from a PyTorch state_dict.
        # Rename legacy TF-style LayerNorm parameters: gamma -> weight, beta -> bias.
        # (A key containing both substrings would only get the 'beta' rename; such
        # keys do not occur in standard BERT checkpoints.)
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata
        def load(module, prefix=''):
            # Recursively load parameters, collecting problems instead of raising
            # (strict=False semantics implemented manually).
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        # Checkpoints saved from a bare BertModel carry a 'bert.' prefix that a
        # bare BertModel instance does not have -- compensate here.
        start_prefix = ''
        if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
            start_prefix = 'bert.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                model.__class__.__name__, "\n\t".join(error_msgs)))
        return model
class BertModel(BertPreTrainedModel):
    """Bare BERT transformer ("Bidirectional Embedding Representations from a
    Transformer"): embeddings + encoder stack + pooler.

    Params:
        config: a BertConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: torch.LongTensor [batch_size, sequence_length] of WordPiece token ids.
        `token_type_ids`: optional torch.LongTensor of the same shape with segment
            ids in [0, 1] (0 = sentence A, 1 = sentence B). Defaults to all zeros.
        `attention_mask`: optional torch.LongTensor of the same shape with 1 on
            real tokens and 0 on padding. Defaults to all ones.
        `output_all_encoded_layers`: when True (default) return every layer's
            hidden states; otherwise only the last layer's.

    Outputs: tuple (encoded_layers, pooled_output)
        `encoded_layers`: a list of [batch_size, sequence_length, hidden_size]
            tensors, one per encoder layer, when `output_all_encoded_layers=True`;
            otherwise just the single last-layer tensor.
        `pooled_output`: a [batch_size, hidden_size] tensor produced by the
            pooler from the first ([CLS]) token, trained on the Next-Sentence task.

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        # Defaults: attend everywhere, single segment.
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # Build a [batch, 1, 1, seq] additive mask that broadcasts over
        # [batch, heads, from_seq, to_seq]: 0.0 on kept positions, -10000.0 on
        # masked ones. Added to the raw scores before softmax, this is
        # effectively the same as removing masked positions entirely.
        extended_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_mask = extended_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_mask = (1.0 - extended_mask) * -10000.0

        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output,
                                      extended_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertForSequenceClassification(BertPreTrainedModel):
    """BERT with a linear classification layer on top of the pooled output.

    Params:
        `config`: a BertConfig class instance.
        `num_labels`: the number of target classes.

    Inputs:
        `input_ids`, `token_type_ids`, `attention_mask`: as for BertModel.
        `labels`: optional torch.LongTensor [batch_size] with class indices in
            [0, num_labels - 1].

    Outputs:
        The CrossEntropy classification loss when `labels` is given, otherwise
        the classification logits of shape [batch_size, num_labels].

    Example usage:
    ```python
    model = BertForSequenceClassification(config, num_labels)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, num_labels):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        # Only the pooled [CLS] representation feeds the classifier.
        _, pooled = self.bert(input_ids, token_type_ids, attention_mask,
                              output_all_encoded_layers=False)
        logits = self.classifier(self.dropout(pooled))
        if labels is None:
            return logits
        loss_fct = CrossEntropyLoss()
        return loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
class BertForSequenceClassificationIntentLoss(BertPreTrainedModel):
    """Sequence-pair classifier with an auxiliary cosine "intent" loss.

    The classifier consumes the pooled pair representation concatenated with
    the cosine similarity between the pair encoding and a sentence-A-only
    encoding.  With labels the result is
    `alpha * cross_entropy + (1 - alpha) * cosine_embedding_loss`;
    without labels the logits are returned.
    """
    def __init__(self, config, num_labels, alpha):
        super(BertForSequenceClassificationIntentLoss, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # +1 input feature for the cosine-similarity scalar.
        self.classifier = nn.Linear(config.hidden_size + 1, num_labels)
        self.cosine = nn.CosineSimilarity()
        self.alpha = alpha
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, sim_labels=None):
        # Attention mask restricted to sentence A (token_type_id == 0) positions.
        sen1_attention_mask = (1 - token_type_ids) * attention_mask
        _, pooled_pair = self.bert(input_ids, token_type_ids, attention_mask,
                                   output_all_encoded_layers=False)
        pooled_pair = self.dropout(pooled_pair)
        _, pooled_sen1 = self.bert(input_ids, token_type_ids, sen1_attention_mask,
                                   output_all_encoded_layers=False)
        cos_sim = self.cosine(pooled_pair, pooled_sen1).unsqueeze(1)
        logits = self.classifier(torch.cat([pooled_pair, cos_sim], dim=1))
        if labels is None:
            return logits
        ce = CrossEntropyLoss()
        loss_bert = ce(logits.view(-1, self.num_labels), labels.view(-1))
        loss_intent = CosineEmbeddingLoss()(pooled_pair, pooled_sen1, sim_labels.float())
        return self.alpha * loss_bert + (1 - self.alpha) * loss_intent
class BertForSequenceClassificationDualLoss(BertPreTrainedModel):
    """Sequence-pair classifier trained with cross-entropy plus a cosine
    embedding loss between the pair encoding and a sentence-A-only encoding.

    With labels the two losses are summed unweighted.  NOTE(review): the
    `alpha` attribute is set but never read in `forward` -- presumably kept
    for symmetry with the IntentLoss variant; confirm before relying on it.
    Without labels the logits are returned.
    """
    def __init__(self, config, num_labels):
        super(BertForSequenceClassificationDualLoss, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # +1 input feature for the cosine-similarity scalar.
        self.classifier = nn.Linear(config.hidden_size + 1, num_labels)
        self.cosine = nn.CosineSimilarity()
        self.alpha = 0.5
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, sim_labels=None):
        # Attention mask restricted to sentence A (token_type_id == 0) positions.
        sen1_attention_mask = (1 - token_type_ids) * attention_mask
        _, pooled_pair = self.bert(input_ids, token_type_ids, attention_mask,
                                   output_all_encoded_layers=False)
        pooled_pair = self.dropout(pooled_pair)
        _, pooled_sen1 = self.bert(input_ids, token_type_ids, sen1_attention_mask,
                                   output_all_encoded_layers=False)
        cos_sim = self.cosine(pooled_pair, pooled_sen1).unsqueeze(1)
        logits = self.classifier(torch.cat([pooled_pair, cos_sim], dim=1))
        if labels is None:
            return logits
        loss_bert = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
        loss_intent = CosineEmbeddingLoss()(pooled_pair, pooled_sen1, sim_labels.float())
        return loss_bert + loss_intent
| 40,433 | 45.475862 | 139 | py |
stancy | stancy-master/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert'))
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
    """
    Map `url` (and optionally `etag`) to a stable, repeatable cache filename:
    the sha256 hex digest of the url, with '.' plus the sha256 hex digest of
    the etag appended when one is given.
    """
    name = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        name = name + '.' + sha256(etag.encode('utf-8')).hexdigest()
    return name
def filename_to_url(filename, cache_dir=None):
    """
    Look up the url and etag (which may be ``None``) recorded for a cached
    `filename`.  Raises ``EnvironmentError`` when the cached file or its
    ``.json`` metadata sidecar does not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    # Check the data file first, then its metadata sidecar.
    for required in (cache_path, meta_path):
        if not os.path.exists(required):
            raise EnvironmentError("file {} not found".format(required))

    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
def cached_path(url_or_filename, cache_dir=None):
    """
    Resolve `url_or_filename` to a local file path.

    http/https/s3 URLs are served from the cache (downloading on a miss) and
    the cached path is returned; existing local paths are returned unchanged.
    Raises ``EnvironmentError`` for a missing local file and ``ValueError``
    for anything that is neither a recognized URL nor a path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if sys.version_info[0] == 3:
        if isinstance(url_or_filename, Path):
            url_or_filename = str(url_or_filename)
        if isinstance(cache_dir, Path):
            cache_dir = str(cache_dir)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: serve from the cache, downloading if necessary.
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # Already a local file.
        return url_or_filename
    if scheme == '':
        # Looks like a path, but nothing is there.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    # Unrecognized scheme.
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
    """Split a full s3 path ("s3://bucket/key") into (bucket_name, s3_path)."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    # urlparse keeps a leading '/' on the key; drop it.
    key = parsed.path
    if key.startswith("/"):
        key = key[1:]
    return parsed.netloc, key
def s3_request(func):
    """
    Decorator for s3 helpers that rewrites a 404 ``ClientError`` into a more
    helpful ``EnvironmentError`` naming the missing url; any other client
    error is re-raised unchanged.
    """
    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            if int(exc.response["Error"]["Code"]) == 404:
                raise EnvironmentError("file {} not found".format(url))
            raise
    return wrapper
@s3_request
def s3_etag(url):
    """Return the ETag of the S3 object at `url` (404 becomes EnvironmentError
    via the s3_request decorator)."""
    bucket_name, s3_path = split_s3_path(url)
    s3_object = boto3.resource("s3").Object(bucket_name, s3_path)
    return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
    """Stream the S3 object at `url` into the open file object `temp_file`."""
    bucket_name, s3_path = split_s3_path(url)
    boto3.resource("s3").Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
    """Stream `url` over HTTP into `temp_file`, updating a tqdm progress bar
    (total is taken from Content-Length when the server provides it)."""
    response = requests.get(url, stream=True)
    content_length = response.headers.get('Content-Length')
    progress = tqdm(unit="B", total=int(content_length) if content_length is not None else None)
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    The cache key is sha256(url) plus the resource's ETag (when available),
    so a changed remote file produces a new cache entry.  A ``<file>.json``
    sidecar records the url and etag for `filename_to_url`.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        # HEAD request only -- the body is fetched later, and only on a miss.
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w', encoding="utf-8") as meta_file:
                json.dump(meta, meta_file)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
def read_set_from_file(filename):
    '''
    Read `filename` (expected format: one item per line) and return the
    de-duplicated set of its lines with trailing whitespace stripped.
    '''
    with open(filename, 'r', encoding='utf-8') as handle:
        return {line.rstrip() for line in handle}
def get_file_extension(path, dot=True, lower=True):
    """Return `path`'s extension, optionally without the leading dot
    (`dot=False`) and optionally case-preserving (`lower=False`)."""
    ext = os.path.splitext(path)[1]
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
| 8,295 | 32.184 | 112 | py |
thermo | thermo-master/docs/conf.py | #
# thermo documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 2 17:15:23 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import time
#import sys
#from mock import Mock as MagicMock
#
#
#class Mock(MagicMock):
# @classmethod
# def __getattr__(cls, name):
# return Mock()
#
#MOCK_MODULES = ['scipy', 'scipy.interpolate', 'scipy.constants', 'argparse',
#'numpy', 'pandas', 'scipy.optimize', 'fluids', 'costing', 'fluids.friction',
#'fluids.piping', 'fluids.friction_factor']
#sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
#'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'sphinx.ext.intersphinx',
'nbsphinx',
'matplotlib.sphinxext.plot_directive',
'sphinxcontrib.katex',
'sphinx_sitemap',
]
katex_css_path = \
'https://cdn.jsdelivr.net/npm/katex@0.12.0/dist/katex.min.css'
katex_js_path = 'katex.min.js'
katex_autorender_path = 'auto-render.min.js'
nbsphinx_requirejs_path = '' # fixes katex not working
# 'sphinx.ext.napoleon'
html_baseurl = 'https://thermo.readthedocs.io/'
sitemap_url_scheme = "{link}"
sitemap_filename = 'sitemap2.xml' # readthedocs generates its own
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Thermo'
import datetime
build_date = datetime.datetime.utcfromtimestamp(
int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
)
copyright = '2016 - %s, Caleb Bell and contributors' %build_date.year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import thermo
from thermo import *
# The short X.Y version.
version = thermo.__version__
# The full version, including alpha/beta/rc tags.
release = thermo.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation output.
html_extra_path = ['robots.txt']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# NOTE(review): the basename contains a space ('thermo doc'); Sphinx
# projects usually use a single token (e.g. 'thermodoc') — confirm intended.
htmlhelp_basename = 'thermo doc'
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX defaults are kept; uncomment a key below to override it.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'thermo.tex', 'thermo Documentation',
     'Caleb Bell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'thermo', 'thermo Documentation',
     ['Caleb Bell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the description below is still the sphinx-quickstart
# placeholder text ("One line description of project.").
texinfo_documents = [
    ('index', 'thermo', 'thermo Documentation',
     'Caleb Bell', 'thermo', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Cross-project reference targets for sphinx.ext.intersphinx.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None),
                       'numpy': ('http://docs.scipy.org/doc/numpy', None),
                       'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
                       'matplotlib': ('http://matplotlib.sourceforge.net', None),
                       'chemicals': ('https://chemicals.readthedocs.io/', None),
                       'fluids': ('https://fluids.readthedocs.io/', None)}
# (Removed a redundant reassignment of html_theme here; it is already set
# to 'nature' in the HTML options section above.)
# numpydoc rendering options.
numpydoc_show_class_members = True
numpydoc_show_inherited_class_members = True
numpydoc_class_members_toctree = False
numpydoc_attributes_as_param_list = True
autosummary_generate = True
add_function_parentheses = False
autodoc_default_flags = ['undoc-members', 'show-inheritance']
nbsphinx_requirejs_path = ''  # fixes katex not working
plot_rcparams = {'savefig.bbox': 'tight'}
plot_apply_rcparams = True  # if context option is used
# (Removed a block of long-dead commented-out autodoc-skip-member
# experiments; the live docstring filtering is done in setup() below.)
katex_prerender = True
import thermo
try:
    # thermo.numba is optional (it needs numba); docs should still build
    # without it, so failures are ignored.
    import thermo.numba
except Exception:
    # Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the intended broad catch only.
    pass
from sphinx.ext.autodoc import between
def setup(app):
    """Sphinx extension hook: strip license boilerplate from docstrings.

    Registers a :func:`sphinx.ext.autodoc.between` listener that deletes
    everything matching the 'Chemical Engineering Design Library' license
    header / trailing 'SOFTWARE.' sentence from processed docstrings.
    """
    # Register a sphinx.ext.autodoc.between listener to ignore everything
    # between lines that contain the word IGNORE
    app.connect('autodoc-process-docstring', between('(^Chemical Engineering Design Library).*|(^SOFTWARE.$).*', exclude=True))
    #app.connect('autodoc-skip-member', maybe_skip_member)
    # Sphinx expects setup() to return None or a metadata dict; the old
    # code returned `app`, which makes Sphinx log an "unsupported object"
    # warning at startup.
    return {}
| 11,870 | 30.571809 | 127 | py |
FeatureRE | FeatureRE-main/reverse_engineering.py | import torch
from torch import Tensor, nn
import torchvision
import os
import numpy as np
from resnet_nole import *
from models import meta_classifier_cifar10_model,lenet,ULP_model,preact_resnet
import torch.nn.functional as F
import unet_model
import random
import pilgram
from PIL import Image
from functools import reduce
class RegressionModel(nn.Module):
    """Trojan reverse-engineering wrapper.

    Combines a frozen, pretrained victim classifier with (a) a trainable
    U-Net autoencoder ``AE`` applied to the input image and (b) a learnable
    mask over an internal feature layer (selected by ``opt.internal_index``).
    The ``forward_*`` methods route inputs through different combinations
    of AE / feature mixing / feature flipping.
    """
    def __init__(self, opt, init_mask):
        self._EPSILON = opt.EPSILON
        super(RegressionModel, self).__init__()
        if init_mask is not None:
            # Unbounded mask parameter; squashed into (0, 1) by get_raw_mask().
            self.mask_tanh = nn.Parameter(torch.tensor(init_mask))
        self.classifier = self._get_classifier(opt)
        self.example_features = None
        # MNIST is single-channel; the other supported datasets are RGB.
        if opt.dataset == "mnist":
            self.AE = unet_model.UNet(n_channels=1,num_classes=1,base_filter_num=opt.ae_filter_num, num_blocks=opt.ae_num_blocks)
        else:
            self.AE = unet_model.UNet(n_channels=3,num_classes=3,base_filter_num=opt.ae_filter_num, num_blocks=opt.ae_num_blocks)
        self.AE.train()
        # Kept for inspection/visualization of the last processed batch.
        self.example_ori_img = None
        self.example_ae_img = None
        self.opt = opt

    def forward_ori(self, x,opt):
        # Plain forward pass of the frozen classifier, split at the
        # internal layer so the feature map can be returned to the caller.
        features = self.classifier.from_input_to_features(x, opt.internal_index)
        out = self.classifier.from_features_to_output(features, opt.internal_index)
        return out, features

    def forward_flip_mask(self, x,opt):
        # Forward pass with the activations selected by opt.flip_mask
        # sign-flipped ("flip") or zeroed ("zero"); only "flip" is used.
        strategy = "flip"
        features = self.classifier.from_input_to_features(x, opt.internal_index)
        if strategy == "flip":
            features = (1 - opt.flip_mask) * features - opt.flip_mask * features
        elif strategy == "zero":
            features = (1 - opt.flip_mask) * features
        out = self.classifier.from_features_to_output(features, opt.internal_index)
        return out, features

    def forward_ae(self, x,opt):
        # Forward pass with the input first transformed by the autoencoder.
        self.example_ori_img = x
        x_before_ae = x
        x = self.AE(x)
        x_after_ae = x
        self.example_ae_img = x
        features = self.classifier.from_input_to_features(x, opt.internal_index)
        out = self.classifier.from_features_to_output(features, opt.internal_index)
        self.example_features = features
        return out, features, x_before_ae, x_after_ae

    def forward_ae_mask_p(self, x,opt):
        # AE forward pass where, in the feature layer, the positions NOT
        # selected by the mask are replaced with features sampled from the
        # reference pool opt.all_features (precomputed by get_range()).
        mask = self.get_raw_mask(opt)
        self.example_ori_img = x
        x_before_ae = x
        x = self.AE(x)
        x_after_ae = x
        self.example_ae_img = x
        features = self.classifier.from_input_to_features(x, opt.internal_index)
        reference_features_index_list = np.random.choice(range(opt.all_features.shape[0]), features.shape[0], replace=True)
        reference_features = opt.all_features[reference_features_index_list]
        features_ori = features
        features = mask * features + (1-mask) * reference_features.reshape(features.shape)
        out = self.classifier.from_features_to_output(features, opt.internal_index)
        self.example_features = features_ori
        return out, features, x_before_ae, x_after_ae, features_ori

    def forward_ae_mask_p_test(self, x,opt):
        # Test-time variant of forward_ae_mask_p: reference features come
        # from a random shuffle of the current batch instead of the pool.
        mask = self.get_raw_mask(opt)
        self.example_ori_img = x
        x_before_ae = x
        x = self.AE(x)
        x_after_ae = x
        self.example_ae_img = x
        features = self.classifier.from_input_to_features(x, opt.internal_index)
        bs = features.shape[0]
        index_1 = list(range(bs))
        random.shuffle(index_1)
        reference_features = features[index_1]
        features_ori = features
        features = mask * features + (1-mask) * reference_features.reshape(features.shape)
        out = self.classifier.from_features_to_output(features, opt.internal_index)
        self.example_features = features_ori
        return out, features, x_before_ae, x_after_ae, features_ori

    def get_raw_mask(self,opt):
        # Map the unbounded parameter through tanh into the open interval
        # (0, 1); _EPSILON keeps the bounds strictly open.
        mask = nn.Tanh()(self.mask_tanh)
        bounded = mask / (2 + self._EPSILON) + 0.5
        return bounded

    def _get_classifier(self, opt):
        """Build the victim architecture named by ``opt.set_arch``, load its
        checkpoint from ``opt.hand_set_model_path`` (probing several common
        state-dict layouts), freeze all parameters, and return it in eval
        mode on ``opt.device``."""
        if opt.set_arch:
            if opt.set_arch == "resnet18":
                classifier = resnet18(num_classes = opt.num_classes, in_channels = opt.input_channel)
            elif opt.set_arch=="preact_resnet18":
                classifier = preact_resnet.PreActResNet18(num_classes=opt.num_classes)
            elif opt.set_arch=="meta_classifier_cifar10_model":
                classifier = meta_classifier_cifar10_model.MetaClassifierCifar10Model()
            elif opt.set_arch=="mnist_lenet":
                classifier = lenet.LeNet5()
            elif opt.set_arch=="ulp_vgg":
                classifier = ULP_model.CNN_classifier()
            else:
                print("invalid arch")
        if opt.hand_set_model_path:
            ckpt_path = opt.hand_set_model_path
            state_dict = torch.load(ckpt_path)
            # NOTE(review): the bare excepts below exist to try several
            # checkpoint formats in turn, but they also hide genuine
            # load_state_dict errors (e.g. shape mismatches).
            try:
                classifier.load_state_dict(state_dict["net_state_dict"])
            except:
                try:
                    classifier.load_state_dict(state_dict["netC"])
                except:
                    try:
                        from collections import OrderedDict
                        new_state_dict = OrderedDict()
                        for k, v in state_dict["model"].items():
                            name = k[7:] # remove `module.` (DataParallel prefix)
                            new_state_dict[name] = v
                        classifier.load_state_dict(new_state_dict)
                    except:
                        classifier.load_state_dict(state_dict)
        # The victim model is never trained during reverse engineering.
        for param in classifier.parameters():
            param.requires_grad = False
        classifier.eval()
        return classifier.to(opt.device)
class Recorder:
    """Tracks the best (lowest) mixed selection metric seen across epochs.

    ``train_step`` lowers ``mixed_value_best`` whenever an epoch produces a
    smaller combined objective; it starts at +inf so the first epoch is
    always recorded.
    """

    def __init__(self, opt):
        super().__init__()
        # No value has been observed yet, so nothing can beat this.
        self.mixed_value_best = float("inf")
def test_ori(opt, regression_model, testloader, flip=False):
    """Print the model's clean-data ("BA", benign accuracy) test accuracy.

    Runs the frozen classifier over ``testloader`` — through the flipped
    feature mask (``forward_flip_mask``) when ``flip`` is True, otherwise
    unmodified (``forward_ori``) — and prints correct/total counts and the
    percentage accuracy.

    Fix vs. original: removed a per-batch cross-entropy loss that was
    computed against the attack target label and then discarded (dead code).
    """
    regression_model.eval()
    regression_model.AE.eval()
    regression_model.classifier.eval()
    total_pred = 0
    true_pred = 0
    for inputs, labels in testloader:
        inputs = inputs.to(opt.device)
        labels = labels.to(opt.device)
        sample_num = inputs.shape[0]
        total_pred += sample_num
        if flip:
            out, features = regression_model.forward_flip_mask(inputs, opt)
        else:
            out, features = regression_model.forward_ori(inputs, opt)
        # Count predictions matching the ground-truth labels.
        true_pred += torch.sum(torch.argmax(out, dim=1) == labels).detach()
    print("BA true_pred:", true_pred)
    print("BA total_pred:", total_pred)
    print(
        "BA test acc:", true_pred * 100.0 / total_pred
    )
def test_ori_attack(opt, regression_model, testloader, flip=False):
    """Print the attack success rate (ASR): fraction of test inputs that
    the model classifies as ``opt.target_label`` after the trigger
    transform is applied.

    ``opt.asr_test_type`` selects the trigger: "filter" applies the
    Instagram-style ``pilgram._1977`` filter in pixel space; "wanet" warps
    the image with the precomputed grid ``opt.grid_temps``; any other value
    leaves inputs unchanged. ``flip`` routes inference through the flipped
    feature mask instead of the plain classifier.
    """
    regression_model.eval()
    regression_model.AE.eval()
    regression_model.classifier.eval()
    total_pred = 0
    true_pred = 0
    cross_entropy = nn.CrossEntropyLoss()
    for inputs,labels in testloader:
        inputs = inputs.to(opt.device)
        if opt.asr_test_type == "filter":
            t_mean = opt.t_mean.cuda()
            t_std = opt.t_std.cuda()
            GT_img = inputs
            # Un-normalize to uint8 pixels so the PIL filter can be applied.
            GT_img = (torch.clamp(GT_img*t_std+t_mean, min=0, max=1).detach().cpu().numpy()*255).astype(np.uint8)
            for j in range(GT_img.shape[0]):
                ori_pil_img = Image.fromarray(GT_img[j].transpose((1,2,0)))
                convered_pil_img = pilgram._1977(ori_pil_img)
                GT_img[j] = np.asarray(convered_pil_img).transpose((2,0,1))
            GT_img = GT_img.astype(np.float32)
            GT_img = GT_img/255
            GT_img = torch.from_numpy(GT_img).cuda()
            # Re-normalize back to the model's input distribution.
            GT_img = (GT_img - t_mean)/t_std
            inputs = GT_img
        elif opt.asr_test_type == "wanet":
            # WaNet trigger: warp every image with the shared sampling grid.
            inputs = F.grid_sample(inputs, opt.grid_temps.repeat(inputs.shape[0], 1, 1, 1), align_corners=True)
            inputs = inputs.to(opt.device)
        labels = labels.to(opt.device)
        sample_num = inputs.shape[0]
        total_pred += sample_num
        target_labels = torch.ones((sample_num), dtype=torch.int64).to(opt.device) * opt.target_label
        if flip:
            out, features = regression_model.forward_flip_mask(inputs,opt)
        else:
            out, features = regression_model.forward_ori(inputs,opt)
        predictions = out
        # ASR counts predictions equal to the attack target label.
        true_pred += torch.sum(torch.argmax(predictions, dim=1) == target_labels).detach()
        loss_ce = cross_entropy(predictions, target_labels)  # NOTE(review): computed but unused
    print("ASR true_pred:",true_pred)
    print("ASR total_pred:",total_pred)
    print(
        "ASR test acc:",true_pred * 100.0 / total_pred
    )
def fix_neuron_flip(opt,trainloader,testloader,testloader_asr):
    """Evaluate trojan mitigation by flipping the masked feature neurons.

    Uses the trained model on ``opt.trained_regression_model``: measures
    BA/ASR before mitigation, selects the compromised neurons (here taken
    directly from the learned hyperplane mask), stores the selection on
    ``opt.flip_mask``, and measures BA/ASR again with those activations
    sign-flipped via ``forward_flip_mask``.
    """
    trained_regression_model = opt.trained_regression_model
    trained_regression_model.eval()
    trained_regression_model.AE.eval()
    trained_regression_model.classifier.eval()
    if opt.asr_test_type == "wanet":
        # Rebuild the WaNet warping grid from the attack checkpoint.
        ckpt_path = opt.hand_set_model_path
        state_dict = torch.load(ckpt_path)
        identity_grid = state_dict["identity_grid"]
        noise_grid = state_dict["noise_grid"]
        grid_temps = (identity_grid + 0.5 * noise_grid / opt.input_height) * 1
        grid_temps = torch.clamp(grid_temps, -1, 1)
        opt.grid_temps = grid_temps
    # Baseline accuracy / attack success rate without mitigation.
    test_ori(opt, trained_regression_model,testloader,flip=False)
    test_ori_attack(opt, trained_regression_model,testloader_asr,flip=False)
    neuron_finding_strategy = "hyperplane"
    cross_entropy = nn.CrossEntropyLoss()
    for batch_idx, (inputs, labels) in enumerate(trainloader):
        inputs = inputs.to(opt.device)
        labels = labels.to(opt.device)
        out, features_reversed, x_before_ae, x_after_ae = trained_regression_model.forward_ae(inputs,opt)
        loss_ce_transformed = cross_entropy(out, labels)
        out, features_ori = trained_regression_model.forward_ori(inputs,opt)
        loss_ce_ori = cross_entropy(out, labels)
        # Per-neuron mean squared gap between original and reversed features.
        feature_dist = torch.nn.MSELoss(reduction='none').cuda()(features_ori,features_reversed).mean(0)
        print(feature_dist)
        if neuron_finding_strategy == "diff":
            # Alternative strategy: flip the top-3% most-shifted neurons.
            values, indices = feature_dist.reshape(-1).topk(int(0.03*torch.numel(feature_dist)), largest=True, sorted=True)
            flip_mask = torch.zeros(feature_dist.reshape(-1).shape).to(opt.device)
            for index in indices:
                flip_mask[index] = 1
            flip_mask = flip_mask.reshape(feature_dist.shape)
        elif neuron_finding_strategy == "hyperplane":
            # Use the mask learned during reverse engineering directly.
            flip_mask = trained_regression_model.get_raw_mask(opt)
        # NOTE(review): flip_mask is overwritten every batch; only the last
        # batch's selection is used for the flipped evaluation below.
        opt.flip_mask = flip_mask
        print("loss_ce_transformed:",loss_ce_transformed)
        print("loss_ce_ori:",loss_ce_ori)
    # Accuracy / ASR with the selected neurons flipped.
    test_ori(opt, trained_regression_model,testloader,flip=True)
    test_ori_attack(opt, trained_regression_model,testloader_asr,flip=True)
def train(opt, init_mask):
    """Run mask/autoencoder reverse-engineering for the current target label.

    A 100-epoch warm-up trains only the autoencoder, then the main loop
    alternates AE and mask optimization for ``opt.epoch`` epochs (400 by
    default, overridable through ``opt.override_epoch``).

    Returns ``(recorder, opt)``; the trained model is also stored on
    ``opt.trained_regression_model``.

    Fix vs. original: ``Recorder(opt)`` was constructed twice and the first
    instance silently discarded; it is now created once.
    """
    data_now = opt.data_now
    # Adaptive penalty weights, updated each epoch by train_step().
    opt.weight_p = 1
    opt.weight_acc = 1
    opt.weight_std = 1
    opt.init_mask = init_mask
    regression_model = RegressionModel(opt, init_mask).to(opt.device)
    opt.epoch = 400
    if opt.override_epoch:
        opt.epoch = opt.override_epoch
    # Separate optimizers: one for the AE weights, one for the feature mask.
    optimizerR = torch.optim.Adam(regression_model.AE.parameters(), lr=opt.lr, betas=(0.5, 0.9))
    optimizerR_mask = torch.optim.Adam([regression_model.mask_tanh], lr=1e-1, betas=(0.5, 0.9))
    regression_model.AE.train()
    recorder = Recorder(opt)
    process = train_step
    warm_up_epoch = 100
    for epoch in range(warm_up_epoch):
        process(regression_model, optimizerR, optimizerR_mask, data_now, recorder, epoch, opt, warm_up=True)
    for epoch in range(opt.epoch):
        process(regression_model, optimizerR, optimizerR_mask, data_now, recorder, epoch, opt)
    opt.trained_regression_model = regression_model
    return recorder, opt
def get_range(opt, init_mask):
    """Precompute feature statistics over the fixed RE dataloader.

    Stores two things on ``opt``:
      * ``all_features`` — every sample's internal feature map, used as the
        reference pool when blending masked features, and
      * ``weight_map_class`` — the per-class mean feature map.
    """
    test_dataloader = opt.re_dataloader_total_fixed
    # Fresh model only used here to extract features with forward_ori().
    inversion_engine = RegressionModel(opt, init_mask).to(opt.device)
    features_list = []
    features_list_class = [[] for i in range(opt.num_classes)]
    for batch_idx, (inputs, labels) in enumerate(test_dataloader):
        inputs = inputs.to(opt.device)
        out, features = inversion_engine.forward_ori(inputs,opt)
        print(torch.argmax(out,dim=1))
        features_list.append(features)
        for i in range(inputs.shape[0]):
            features_list_class[labels[i].item()].append(features[i].unsqueeze(0))
    all_features = torch.cat(features_list,dim=0)
    opt.all_features = all_features
    print(all_features.shape)
    # Drop local references; the tensors live on in opt.all_features.
    del features_list
    del test_dataloader
    weight_map_class = []
    for i in range(opt.num_classes):
        feature_mean_class = torch.cat(features_list_class[i],dim=0).mean(0)
        weight_map_class.append(feature_mean_class)
    opt.weight_map_class = weight_map_class
    del all_features
    del features_list_class
def train_step(regression_model, optimizerR, optimizerR_mask, data_now, recorder, epoch, opt, warm_up=False):
    """One optimization epoch over ``data_now``; returns the epoch's average
    attack success rate (ASR).

    Warm-up (``warm_up=True``): trains only the autoencoder to hit the
    target label while keeping the reconstruction loss (p_loss) bounded.
    Main phase: per batch, (1) updates the AE against target-class CE,
    feature cosine distance, masked-feature std and reconstruction bounds,
    then (2) updates the feature mask against CE plus an L-norm size bound.
    At epoch end it prints diagnostics, updates ``recorder`` and the
    adaptive weights ``opt.weight_p/weight_acc/weight_std``.
    """
    print("Epoch {} - Label: {} | {} - {}:".format(epoch, opt.target_label, opt.dataset, opt.attack_mode))
    cross_entropy = nn.CrossEntropyLoss()
    total_pred = 0
    true_pred = 0
    # Per-batch statistics accumulated for the epoch summary.
    loss_ce_list = []
    loss_dist_list = []
    loss_list = []
    acc_list = []
    p_loss_list = []
    loss_mask_norm_list = []
    loss_std_list = []
    for inputs in data_now:
        # ---- Step 1: update the autoencoder (mask frozen). ----
        regression_model.AE.train()
        regression_model.mask_tanh.requires_grad = False
        optimizerR.zero_grad()
        inputs = inputs.to(opt.device)
        sample_num = inputs.shape[0]
        total_pred += sample_num
        # Every sample is pushed towards the attack target label.
        target_labels = torch.ones((sample_num), dtype=torch.int64).to(opt.device) * opt.target_label
        if warm_up:
            predictions, features, x_before_ae, x_after_ae = regression_model.forward_ae(inputs,opt)
        else:
            predictions, features, x_before_ae, x_after_ae, features_ori = regression_model.forward_ae_mask_p(inputs,opt)
        loss_ce = cross_entropy(predictions, target_labels)
        # Reconstruction loss between AE input and output.
        mse_loss = torch.nn.MSELoss(size_average = True).cuda()(x_after_ae,x_before_ae)
        # Cosine similarity between the batch-mean features and the target
        # class's mean feature map (precomputed by get_range()).
        if warm_up:
            dist_loss = torch.cosine_similarity(opt.weight_map_class[opt.target_label].reshape(-1),features.mean(0).reshape(-1),dim=0)
        else:
            dist_loss = torch.cosine_similarity(opt.weight_map_class[opt.target_label].reshape(-1),features_ori.mean(0).reshape(-1),dim=0)
        acc_list_ = []
        minibatch_accuracy_ = torch.sum(torch.argmax(predictions, dim=1) == target_labels).detach() / sample_num
        acc_list_.append(minibatch_accuracy_)
        acc_list_ = torch.stack(acc_list_)
        avg_acc_G = torch.mean(acc_list_)
        acc_list.append(minibatch_accuracy_)
        p_loss = mse_loss
        # Bounds below which the corresponding penalty is not applied.
        p_loss_bound = opt.p_loss_bound
        loss_std_bound = opt.loss_std_bound
        atk_succ_threshold = opt.ae_atk_succ_t
        if opt.ignore_dist:
            dist_loss = dist_loss*0
        if warm_up:
            # Warm-up objective: CE (+ reconstruction penalty if violated).
            if (p_loss>p_loss_bound):
                total_loss = loss_ce + p_loss*100
            else:
                total_loss = loss_ce
        else:
            # Std of masked features across the batch, normalized by mask size.
            loss_std = (features_ori*regression_model.get_raw_mask(opt)).std(0).sum()
            loss_std = loss_std/(torch.norm(regression_model.get_raw_mask(opt), 1))
            total_loss = dist_loss*5
            if dist_loss<0:
                # Keep the distance term non-negative.
                total_loss = total_loss - dist_loss*5
            # Penalties activate only above their bound; opt.weight_* are
            # the adaptive weights refreshed at epoch end.
            if loss_std>loss_std_bound:
                total_loss = total_loss + loss_std*10*(1+opt.weight_std)
            if (p_loss>p_loss_bound):
                total_loss = total_loss + p_loss*10*(1+opt.weight_p)
            if avg_acc_G.item()<atk_succ_threshold:
                total_loss = total_loss + 1*loss_ce*(1+opt.weight_acc)
        total_loss.backward()
        optimizerR.step()
        # Upper bound on the mask norm: fraction opt.mask_size of all neurons.
        mask_norm_bound = int(reduce(lambda x,y:x*y,opt.feature_shape)*opt.mask_size)
        if not warm_up:
            # ---- Step 2: update the mask (AE frozen in eval mode). ----
            for k in range(1):
                regression_model.AE.eval()
                regression_model.mask_tanh.requires_grad = True
                optimizerR_mask.zero_grad()
                predictions, features, x_before_ae, x_after_ae, features_ori = regression_model.forward_ae_mask_p(inputs,opt)
                loss_mask_ce = cross_entropy(predictions, target_labels)
                loss_mask_norm = torch.norm(regression_model.get_raw_mask(opt), opt.use_norm)
                loss_mask_total = loss_mask_ce
                if loss_mask_norm>mask_norm_bound:
                    loss_mask_total = loss_mask_total + loss_mask_norm
                loss_mask_total.backward()
                optimizerR_mask.step()
        loss_ce_list.append(loss_ce.detach())
        loss_dist_list.append(dist_loss.detach())
        loss_list.append(total_loss.detach())
        true_pred += torch.sum(torch.argmax(predictions, dim=1) == target_labels).detach()
        if not warm_up:
            p_loss_list.append(p_loss)
            loss_mask_norm_list.append(loss_mask_norm)
            loss_std_list.append(loss_std)
    # ---- Epoch summary. ----
    # NOTE(review): everything below assumes data_now is non-empty
    # (atk_succ_threshold, mask_norm_bound and the stacked lists are all
    # produced inside the loop body).
    loss_ce_list = torch.stack(loss_ce_list)
    loss_dist_list = torch.stack(loss_dist_list)
    loss_list = torch.stack(loss_list)
    acc_list = torch.stack(acc_list)
    avg_loss_ce = torch.mean(loss_ce_list)
    avg_loss_dist = torch.mean(loss_dist_list)
    avg_loss = torch.mean(loss_list)
    avg_acc = torch.mean(acc_list)
    if not warm_up:
        p_loss_list = torch.stack(p_loss_list)
        loss_mask_norm_list = torch.stack(loss_mask_norm_list)
        loss_std_list = torch.stack(loss_std_list)
        avg_p_loss = torch.mean(p_loss_list)
        avg_loss_mask_norm = torch.mean(loss_mask_norm_list)
        avg_loss_std = torch.mean(loss_std_list)
        print("avg_ce_loss:",avg_loss_ce)
        print("avg_asr:",avg_acc)
        print("avg_p_loss:",avg_p_loss)
        print("avg_loss_mask_norm:",avg_loss_mask_norm)
        print("avg_loss_std:",avg_loss_std)
        if avg_acc.item()<atk_succ_threshold:
            print("@avg_asr lower than bound")
        if avg_p_loss>1.0*p_loss_bound:
            print("@avg_p_loss larger than bound")
        if avg_loss_mask_norm>1.0*mask_norm_bound:
            print("@avg_loss_mask_norm larger than bound")
        if avg_loss_std>1.0*loss_std_bound:
            print("@avg_loss_std larger than bound")
        # Model-selection scalar: lower is better; combines (negated) ASR
        # with the normalized amount by which each bound is violated.
        mixed_value = avg_loss_dist.detach() - avg_acc + max(avg_p_loss.detach()-p_loss_bound,0)/p_loss_bound + max(avg_loss_mask_norm.detach()-mask_norm_bound,0)/mask_norm_bound + max(avg_loss_std.detach()-loss_std_bound,0)/loss_std_bound
        print("mixed_value:",mixed_value)
        if mixed_value < recorder.mixed_value_best:
            recorder.mixed_value_best = mixed_value
        # Adapt penalty weights for the next epoch from current violations.
        opt.weight_p = max(avg_p_loss.detach()-p_loss_bound,0)/p_loss_bound
        opt.weight_acc = max(atk_succ_threshold-avg_acc,0)/atk_succ_threshold
        opt.weight_std = max(avg_loss_std.detach()-loss_std_bound,0)/loss_std_bound
    print(
        " Result: ASR: {:.3f} | Cross Entropy Loss: {:.6f} | Dist Loss: {:.6f} | Mixed_value best: {:.6f}".format(
            true_pred * 100.0 / total_pred, avg_loss_ce, avg_loss_dist, recorder.mixed_value_best
        )
    )
    recorder.final_asr = avg_acc
    return avg_acc
if __name__ == "__main__":
    # Library module: the entry points live in the driver scripts.
    pass
| 19,453 | 35.914611 | 239 | py |
FeatureRE | FeatureRE-main/unet_blocks.py | """
Class definitions for a standard U-Net Up-and Down-sampling blocks
http://arxiv.org/abs/1505.04597
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class EncoderBlock(nn.Module):
    """
    Instances the Encoder block that forms a part of a U-Net

    Parameters:
        in_channels (int): Depth (or number of channels) of the tensor that the block acts on
        filter_num (int) : Number of filters used in the convolution ops inside the block,
                           depth of the output of the enc block
        dropout(bool) : Flag to decide whether a dropout layer should be applied
        dropout_rate (float) : Probability of dropping a convolution output feature channel
    """
    def __init__(self, filter_num=64, in_channels=1, dropout=False, dropout_rate=0.3):
        super(EncoderBlock,self).__init__()
        self.filter_num = int(filter_num)
        self.in_channels = int(in_channels)
        self.dropout = dropout
        self.dropout_rate = dropout_rate
        # Two 3x3 same-padding convolutions: spatial size is preserved,
        # channel depth goes in_channels -> filter_num -> filter_num.
        self.conv1 = nn.Conv2d(in_channels=self.in_channels,
                               out_channels=self.filter_num,
                               kernel_size=3,
                               padding=1)
        self.conv2 = nn.Conv2d(in_channels=self.filter_num,
                               out_channels=self.filter_num,
                               kernel_size=3,
                               padding=1)
        self.bn_op_1 = nn.InstanceNorm2d(num_features=self.filter_num, affine=True)
        self.bn_op_2 = nn.InstanceNorm2d(num_features=self.filter_num, affine=True)
        # Use Dropout ops as nn.Module instead of nn.functional definition
        # So using .train() and .eval() flags, can modify their behavior for MC-Dropout
        if dropout is True:
            self.dropout_1 = nn.Dropout(p=dropout_rate)
            self.dropout_2 = nn.Dropout(p=dropout_rate)

    def apply_manual_dropout_mask(self, x, seed):
        # Mask size : [Batch_size, Channels, Height, Width]
        # NOTE(review): bernoulli on a tensor filled with dropout_rate KEEPS
        # each unit with probability dropout_rate and applies no rescaling —
        # the inverse of nn.Dropout's semantics. Confirm this is intended.
        dropout_mask = torch.bernoulli(input=torch.empty(x.shape[0], x.shape[1], x.shape[2], x.shape[3]).fill_(self.dropout_rate),
                                       generator=torch.Generator().manual_seed(seed))
        x = x*dropout_mask.to(x.device)
        return x

    def forward(self, x, seeds=None):
        # seeds: optional length-2 tensor of RNG seeds for reproducible
        # manual dropout masks (one per conv stage); None uses nn.Dropout.
        if seeds is not None:
            assert(seeds.shape[0] == 2)
        x = self.conv1(x)
        x = self.bn_op_1(x)
        x = F.leaky_relu(x)
        if self.dropout is True:
            if seeds is None:
                x = self.dropout_1(x)
            else:
                x = self.apply_manual_dropout_mask(x, seeds[0].item())
        x = self.conv2(x)
        x = self.bn_op_2(x)
        x = F.leaky_relu(x)
        if self.dropout is True:
            if seeds is None:
                x = self.dropout_2(x)
            else:
                x = self.apply_manual_dropout_mask(x, seeds[1].item())
        return x
class DecoderBlock(nn.Module):
    """
    Decoder block used in the U-Net

    Parameters:
        in_channels (int) : Number of channels of the incoming tensor for the upsampling op
        concat_layer_depth (int) : Number of channels to be concatenated via skip connections
        filter_num (int) : Number of filters used in convolution, the depth of the output of the dec block
        interpolate (bool) : Decides if upsampling needs to performed via interpolation or transposed convolution
        dropout(bool) : Flag to decide whether a dropout layer should be applied
        dropout_rate (float) : Probability of dropping a convolution output feature channel
    """
    def __init__(self, in_channels, concat_layer_depth, filter_num, interpolate=False, dropout=False, dropout_rate=0.3):
        # Up-sampling (interpolation or transposed conv) --> EncoderBlock
        super(DecoderBlock, self).__init__()
        self.filter_num = int(filter_num)
        self.in_channels = int(in_channels)
        self.concat_layer_depth = int(concat_layer_depth)
        self.interpolate = interpolate
        self.dropout = dropout
        self.dropout_rate = dropout_rate
        # Upsample by interpolation followed by a 3x3 convolution to obtain desired depth
        self.up_sample_interpolate = nn.Sequential(nn.Upsample(scale_factor=2,
                                                               mode='bilinear',
                                                               align_corners=True),
                                                   nn.Conv2d(in_channels=self.in_channels,
                                                             out_channels=self.in_channels,
                                                             kernel_size=3,
                                                             padding=1)
                                                   )
        # Upsample via transposed convolution (known to produce artifacts)
        self.up_sample_tranposed = nn.ConvTranspose2d(in_channels=self.in_channels,
                                                      out_channels=self.in_channels,
                                                      kernel_size=3,
                                                      stride=2,
                                                      padding=1,
                                                      output_padding=1)
        # Fuses (upsampled + skip) channels back down to filter_num.
        self.down_sample = EncoderBlock(in_channels=self.in_channels+self.concat_layer_depth,
                                        filter_num=self.filter_num,
                                        dropout=self.dropout,
                                        dropout_rate=self.dropout_rate)

    def forward(self, x, skip_layer, seeds=None):
        # `interpolate` selects the artifact-free upsampling path;
        # `seeds` is forwarded to the inner EncoderBlock's manual dropout.
        if self.interpolate is True:
            up_sample_out = F.leaky_relu(self.up_sample_interpolate(x))
        else:
            up_sample_out = F.leaky_relu(self.up_sample_tranposed(x))
        # Concatenate the encoder skip connection along the channel dim.
        merged_out = torch.cat([up_sample_out, skip_layer], dim=1)
        out = self.down_sample(merged_out, seeds=seeds)
        return out
class EncoderBlock3D(nn.Module):
    """
    Instances the 3D Encoder block that forms a part of a 3D U-Net

    Parameters:
        in_channels (int): Depth (or number of channels) of the tensor that the block acts on
        filter_num (int) : Number of filters used in the convolution ops inside the block,
                           depth of the output of the enc block
    """
    def __init__(self, filter_num=64, in_channels=1, dropout=False):
        super(EncoderBlock3D, self).__init__()
        self.filter_num = int(filter_num)
        self.in_channels = int(in_channels)
        self.dropout = dropout
        self.conv1 = nn.Conv3d(in_channels=self.in_channels,
                               out_channels=self.filter_num,
                               kernel_size=3,
                               padding=1)
        # Unlike the 2D block, the second conv doubles the channel count,
        # so this block outputs filter_num*2 channels.
        self.conv2 = nn.Conv3d(in_channels=self.filter_num,
                               out_channels=self.filter_num*2,
                               kernel_size=3,
                               padding=1)
        self.bn_op_1 = nn.InstanceNorm3d(num_features=self.filter_num)
        self.bn_op_2 = nn.InstanceNorm3d(num_features=self.filter_num*2)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn_op_1(x)
        x = F.leaky_relu(x)
        if self.dropout is True:
            # NOTE(review): F.dropout3d defaults to training=True, so this
            # stays active even after model.eval() — confirm whether that
            # is intended (it may be deliberate for MC-Dropout).
            x = F.dropout3d(x, p=0.3)
        x = self.conv2(x)
        x = self.bn_op_2(x)
        x = F.leaky_relu(x)
        if self.dropout is True:
            x = F.dropout3d(x, p=0.3)
        return x
class DecoderBlock3D(nn.Module):
    """
    Decoder block used in the 3D U-Net

    Parameters:
        in_channels (int) : Number of channels of the incoming tensor for the upsampling op
        concat_layer_depth (int) : Number of channels to be concatenated via skip connections
        filter_num (int) : Number of filters used in convolution, the depth of the output of the dec block
        interpolate (bool) : Decides if upsampling needs to performed via interpolation or transposed convolution
    """
    def __init__(self, in_channels, concat_layer_depth, filter_num, interpolate=False, dropout=False):
        super(DecoderBlock3D, self).__init__()
        self.filter_num = int(filter_num)
        self.in_channels = int(in_channels)
        self.concat_layer_depth = int(concat_layer_depth)
        self.interpolate = interpolate
        self.dropout = dropout
        # Upsample by interpolation followed by a 3x3x3 convolution to obtain desired depth
        self.up_sample_interpolate = nn.Sequential(nn.Upsample(scale_factor=2,
                                                               mode='nearest'),
                                                   nn.Conv3d(in_channels=self.in_channels,
                                                             out_channels=self.in_channels,
                                                             kernel_size=3,
                                                             padding=1)
                                                   )
        # Upsample via transposed convolution (known to produce artifacts)
        self.up_sample_transposed = nn.ConvTranspose3d(in_channels=self.in_channels,
                                                       out_channels=self.in_channels,
                                                       kernel_size=3,
                                                       stride=2,
                                                       padding=1,
                                                       output_padding=1)
        # Fusion path (conv-norm-relu x2), with or without Dropout3d layers.
        if self.dropout is True:
            self.down_sample = nn.Sequential(nn.Conv3d(in_channels=self.in_channels+self.concat_layer_depth,
                                                       out_channels=self.filter_num,
                                                       kernel_size=3,
                                                       padding=1),
                                             nn.InstanceNorm3d(num_features=self.filter_num),
                                             nn.LeakyReLU(),
                                             nn.Dropout3d(p=0.3),
                                             nn.Conv3d(in_channels=self.filter_num,
                                                       out_channels=self.filter_num,
                                                       kernel_size=3,
                                                       padding=1),
                                             nn.InstanceNorm3d(num_features=self.filter_num),
                                             nn.LeakyReLU(),
                                             nn.Dropout3d(p=0.3))
        else:
            self.down_sample = nn.Sequential(nn.Conv3d(in_channels=self.in_channels+self.concat_layer_depth,
                                                       out_channels=self.filter_num,
                                                       kernel_size=3,
                                                       padding=1),
                                             nn.InstanceNorm3d(num_features=self.filter_num),
                                             nn.LeakyReLU(),
                                             nn.Conv3d(in_channels=self.filter_num,
                                                       out_channels=self.filter_num,
                                                       kernel_size=3,
                                                       padding=1),
                                             nn.InstanceNorm3d(num_features=self.filter_num),
                                             nn.LeakyReLU())

    def forward(self, x, skip_layer):
        if self.interpolate is True:
            up_sample_out = F.leaky_relu(self.up_sample_interpolate(x))
        else:
            up_sample_out = F.leaky_relu(self.up_sample_transposed(x))
        # Concatenate the encoder skip connection along the channel dim.
        merged_out = torch.cat([up_sample_out, skip_layer], dim=1)
        out = self.down_sample(merged_out)
        return out
return out | 12,064 | 43.356618 | 130 | py |
FeatureRE | FeatureRE-main/unet_model.py | """
A PyTorch Implementation of a U-Net.
Supports 2D (https://arxiv.org/abs/1505.04597) and 3D(https://arxiv.org/abs/1606.06650) variants
Author: Ishaan Bhat
Email: ishaan@isi.uu.nl
"""
from unet_blocks import *
from math import pow
class UNet(nn.Module):
    """
    PyTorch class definition for the U-Net architecture for image segmentation

    Parameters:
        n_channels (int) : Number of image channels
        base_filter_num (int) : Number of filters for the first convolution (doubled for every subsequent block)
        num_blocks (int) : Number of encoder/decoder blocks
        num_classes (int) : Number of classes that need to be segmented
        mode (str) : '2D' or '3D'
        use_pooling (bool) : Set to 'True' to use MaxPool as the downsampling op.
                             If 'False', strided convolution is used to downsample feature maps (http://arxiv.org/abs/1908.02182)
        dropout (bool) : Whether dropout should be added to central encoder and decoder blocks (eg: BayesianSegNet)
        dropout_rate (float) : Dropout probability

    Returns:
        out (torch.Tensor) : Prediction of the segmentation map
    """
    def __init__(self, n_channels=1, base_filter_num=64, num_blocks=4, num_classes=5, mode='2D', dropout=False, dropout_rate=0.3, use_pooling=True):
        super(UNet, self).__init__()
        self.contracting_path = nn.ModuleList()
        self.expanding_path = nn.ModuleList()
        self.downsampling_ops = nn.ModuleList()

        self.num_blocks = num_blocks
        self.n_channels = int(n_channels)
        self.n_classes = int(num_classes)
        self.base_filter_num = int(base_filter_num)
        self.enc_layer_depths = []  # Keep track of the output depth of each encoder block
        self.mode = mode
        self.pooling = use_pooling
        self.dropout = dropout
        self.dropout_rate = dropout_rate

        if mode == '2D':
            self.encoder = EncoderBlock
            self.decoder = DecoderBlock
            self.pool = nn.MaxPool2d
        elif mode == '3D':
            self.encoder = EncoderBlock3D
            self.decoder = DecoderBlock3D
            self.pool = nn.MaxPool3d
        else:
            # BUG FIX: previously only printed a message and then crashed later
            # with an AttributeError (self.encoder undefined). Fail fast instead.
            raise ValueError('{} mode is invalid'.format(mode))

        # ----------------------------- Encoder path ----------------------------- #
        for block_id in range(num_blocks):
            # Due to GPU mem constraints, we cap the filter depth at 512
            enc_block_filter_num = min(int(pow(2, block_id)*self.base_filter_num), 512)  # Output depth of the current encoder stage (2D variant)
            if block_id == 0:
                enc_in_channels = self.n_channels
            else:
                if self.mode == '2D':
                    if int(pow(2, block_id)*self.base_filter_num) <= 512:
                        enc_in_channels = enc_block_filter_num//2
                    else:
                        enc_in_channels = 512
                else:
                    enc_in_channels = enc_block_filter_num  # In the 3D U-Net arch, the encoder doubles the features in its 2nd convolution op

            # Dropout only applied to central encoder blocks -- See BayesianSegNet by Kendall et al.
            if self.dropout is True and block_id >= num_blocks-2:
                self.contracting_path.append(self.encoder(in_channels=enc_in_channels,
                                                          filter_num=enc_block_filter_num,
                                                          dropout=True,
                                                          dropout_rate=self.dropout_rate))
            else:
                self.contracting_path.append(self.encoder(in_channels=enc_in_channels,
                                                          filter_num=enc_block_filter_num,
                                                          dropout=False))

            if self.mode == '2D':
                self.enc_layer_depths.append(enc_block_filter_num)
                if self.pooling is False:
                    # Strided-conv downsampling used in place of max-pooling.
                    self.downsampling_ops.append(nn.Sequential(nn.Conv2d(in_channels=self.enc_layer_depths[-1],
                                                                         out_channels=self.enc_layer_depths[-1],
                                                                         kernel_size=3,
                                                                         stride=2,
                                                                         padding=1),
                                                               # BUG FIX: was `num_features=self.filter_num`, an attribute
                                                               # that does not exist on UNet (AttributeError at build time).
                                                               # The 3D branch below shows the intended value.
                                                               nn.InstanceNorm2d(num_features=self.enc_layer_depths[-1]),
                                                               nn.LeakyReLU()))
            else:
                self.enc_layer_depths.append(enc_block_filter_num*2)  # Specific to 3D U-Net architecture (due to doubling of #feature_maps inside the 3-D Encoder)
                if self.pooling is False:
                    self.downsampling_ops.append(nn.Sequential(nn.Conv3d(in_channels=self.enc_layer_depths[-1],
                                                                         out_channels=self.enc_layer_depths[-1],
                                                                         kernel_size=3,
                                                                         stride=2,
                                                                         padding=1),
                                                               nn.InstanceNorm3d(num_features=self.enc_layer_depths[-1]),
                                                               nn.LeakyReLU()))

        # ----------------------------- Bottleneck layer ----------------------------- #
        if self.mode == '2D':
            bottle_neck_filter_num = self.enc_layer_depths[-1]*2
            bottle_neck_in_channels = self.enc_layer_depths[-1]
            self.bottle_neck_layer = self.encoder(filter_num=bottle_neck_filter_num,
                                                  in_channels=bottle_neck_in_channels)
        else:  # Modified for the 3D U-Net architecture
            bottle_neck_in_channels = self.enc_layer_depths[-1]
            bottle_neck_filter_num = self.enc_layer_depths[-1]*2
            self.bottle_neck_layer = nn.Sequential(nn.Conv3d(in_channels=bottle_neck_in_channels,
                                                             out_channels=bottle_neck_in_channels,
                                                             kernel_size=3,
                                                             padding=1),
                                                   nn.InstanceNorm3d(num_features=bottle_neck_in_channels),
                                                   nn.LeakyReLU(),
                                                   nn.Conv3d(in_channels=bottle_neck_in_channels,
                                                             out_channels=bottle_neck_filter_num,
                                                             kernel_size=3,
                                                             padding=1),
                                                   nn.InstanceNorm3d(num_features=bottle_neck_filter_num),
                                                   nn.LeakyReLU())

        # ----------------------------- Decoder path ----------------------------- #
        dec_in_channels = int(bottle_neck_filter_num)
        for block_id in range(num_blocks):
            # Dropout only on the first (deepest) two decoder blocks.
            if self.dropout is True and block_id < 2:
                self.expanding_path.append(self.decoder(in_channels=dec_in_channels,
                                                        filter_num=self.enc_layer_depths[-1-block_id],
                                                        concat_layer_depth=self.enc_layer_depths[-1-block_id],
                                                        interpolate=False,
                                                        dropout=True,
                                                        dropout_rate=self.dropout_rate))
            else:
                self.expanding_path.append(self.decoder(in_channels=dec_in_channels,
                                                        filter_num=self.enc_layer_depths[-1-block_id],
                                                        concat_layer_depth=self.enc_layer_depths[-1-block_id],
                                                        interpolate=False,
                                                        dropout=False))
            dec_in_channels = self.enc_layer_depths[-1-block_id]

        # ----------------------------- Output layer ----------------------------- #
        if mode == '2D':
            self.output = nn.Conv2d(in_channels=int(self.enc_layer_depths[0]),
                                    out_channels=self.n_classes,
                                    kernel_size=1)
        else:
            self.output = nn.Conv3d(in_channels=int(self.enc_layer_depths[0]),
                                    out_channels=self.n_classes,
                                    kernel_size=1)

    def forward(self, x, seeds=None):
        """Run the full encoder/bottleneck/decoder pass.

        ``seeds`` (optional) supplies 2 RNG seeds per dropout-bearing block
        (the last two encoder and first two decoder blocks) -- presumably for
        reproducible MC-dropout sampling; TODO confirm with the block classes.
        """
        if self.mode == '2D':
            h, w = x.shape[-2:]
        else:
            d, h, w = x.shape[-3:]

        # Encoder
        enc_outputs = []
        seed_index = 0
        for stage, enc_op in enumerate(self.contracting_path):
            if stage >= len(self.contracting_path) - 2:
                if seeds is not None:
                    x = enc_op(x, seeds[seed_index:seed_index+2])
                else:
                    x = enc_op(x)
                seed_index += 2  # 2 seeds required per block
            else:
                x = enc_op(x)
            enc_outputs.append(x)
            if self.pooling is True:
                x = self.pool(kernel_size=2)(x)
            else:
                x = self.downsampling_ops[stage](x)

        # Bottle-neck layer
        x = self.bottle_neck_layer(x)

        # Decoder
        for block_id, dec_op in enumerate(self.expanding_path):
            if block_id < 2:
                if seeds is not None:
                    x = dec_op(x, enc_outputs[-1-block_id], seeds[seed_index:seed_index+2])
                else:
                    x = dec_op(x, enc_outputs[-1-block_id])
                seed_index += 2
            else:
                x = dec_op(x, enc_outputs[-1-block_id])

        # Output
        x = self.output(x)
        return x
FeatureRE | FeatureRE-main/dataloader.py | import torch.utils.data as data
import torch
import torchvision
import torchvision.transforms as transforms
import os
import csv
import random
import numpy as np
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import Dataset
from io import BytesIO
def get_transform(opt, train=True, pretensor_transform=False):
    """Build the torchvision preprocessing pipeline for ``opt.dataset``.

    Args:
        opt: options namespace with input geometry, dataset name, and
            augmentation parameters.
        train: when True (and ``pretensor_transform``), add random crop/rotation.
        pretensor_transform: enable PIL-space augmentations before ToTensor.

    Returns:
        A ``transforms.Compose`` pipeline ending in dataset normalization.

    Raises:
        Exception: for an unrecognized ``opt.dataset``.
    """
    add_nad_transform = False  # NAD-style augmentation (Cutout etc.), disabled by default
    # Per-dataset normalization statistics (mean, std).
    norm_stats = {
        "cifar10": ([0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261]),
        "mnist": ([0.1307], [0.3081]),
        "gtsrb": ((0.3403, 0.3121, 0.3214), (0.2724, 0.2608, 0.2669)),
        "celeba": ((0.3403, 0.3121, 0.3214), (0.2724, 0.2608, 0.2669)),
        "imagenet": ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    }
    if opt.dataset not in norm_stats:
        raise Exception("Invalid Dataset")

    steps = [transforms.Resize((opt.input_height, opt.input_width))]
    if pretensor_transform and train:
        steps.append(transforms.RandomCrop((opt.input_height, opt.input_width), padding=opt.random_crop))
        steps.append(transforms.RandomRotation(opt.random_rotation))
        if opt.dataset == "cifar10":
            steps.append(transforms.RandomHorizontalFlip(p=0.5))
    if add_nad_transform:
        steps.append(transforms.RandomCrop(opt.input_height, padding=4))
        steps.append(transforms.RandomHorizontalFlip())
    steps.append(transforms.ToTensor())

    mean, std = norm_stats[opt.dataset]
    steps.append(transforms.Normalize(mean, std))
    if add_nad_transform:
        steps.append(Cutout(1, 9))
    return transforms.Compose(steps)
class GTSRB(data.Dataset):
    """GTSRB traffic-sign dataset backed by the on-disk CSV annotation files.

    Args:
        opt: options namespace providing ``data_root``.
        train (bool): load the training split when True, else the test split.
        transforms: callable applied to each loaded PIL image.
    """

    def __init__(self, opt, train, transforms):
        super(GTSRB, self).__init__()
        if train:
            self.data_folder = os.path.join(opt.data_root, "GTSRB/Train")
            self.images, self.labels = self._get_data_train_list()
        else:
            self.data_folder = os.path.join(opt.data_root, "GTSRB/Test")
            self.images, self.labels = self._get_data_test_list()
        self.transforms = transforms

    def _get_data_train_list(self):
        """Collect (image path, class id) pairs from the per-class training CSVs."""
        images = []
        labels = []
        for c in range(0, 43):
            prefix = self.data_folder + "/" + format(c, "05d") + "/"
            # `with` guarantees the CSV handle is closed even if parsing fails.
            with open(prefix + "GT-" + format(c, "05d") + ".csv") as gtFile:
                gtReader = csv.reader(gtFile, delimiter=";")
                next(gtReader)  # skip header row
                for row in gtReader:
                    images.append(prefix + row[0])
                    labels.append(int(row[7]))
        return images, labels

    def _get_data_test_list(self):
        """Collect (image path, class id) pairs from the single test CSV."""
        images = []
        labels = []
        prefix = os.path.join(self.data_folder, "GT-final_test.csv")
        # BUG FIX: this file handle was previously never closed (resource leak).
        with open(prefix) as gtFile:
            gtReader = csv.reader(gtFile, delimiter=";")
            next(gtReader)  # skip header row
            for row in gtReader:
                images.append(self.data_folder + "/" + row[0])
                labels.append(int(row[7]))
        return images, labels

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        image = Image.open(self.images[index])
        image = self.transforms(image)
        label = self.labels[index]
        return image, label
def get_dataloader_partial_split(opt, train_fraction=0.1, train=True, pretensor_transform=False,shuffle=True,return_index = False):
    """Split a dataset into a small class-balanced subset (train transforms)
    and the remaining samples (test transforms).

    Args:
        opt: options namespace (dataset name, batch size, workers, ...).
        train_fraction: fraction of the dataset kept for the balanced subset.
        train: which split of the underlying dataset to load.
        pretensor_transform: forwarded to get_transform.
        shuffle: shuffle both returned loaders.
        return_index: also return the index lists of both subsets.

    Returns:
        (trainset, transform, trainloader, testset, testloader) and, when
        ``return_index`` is True, additionally (idx, test_idx).

    Raises:
        Exception: for an unrecognized ``opt.dataset``.
    """
    data_fraction = train_fraction
    transform_train = get_transform(opt, True, pretensor_transform)
    transform_test = get_transform(opt, False, pretensor_transform)
    transform = transform_train
    if opt.dataset == "gtsrb":
        dataset = GTSRB(opt, train, transform_train)
        dataset_test = GTSRB(opt, train, transform_test)
        class_num = 43
    elif opt.dataset == "mnist":
        dataset = torchvision.datasets.MNIST(opt.data_root, train, transform=transform_train, download=True)
        dataset_test = torchvision.datasets.MNIST(opt.data_root, train, transform=transform_test, download=True)
        class_num = 10
    elif opt.dataset == "cifar10":
        dataset = torchvision.datasets.CIFAR10(opt.data_root, train, transform=transform_train, download=True)
        dataset_test = torchvision.datasets.CIFAR10(opt.data_root, train, transform=transform_test, download=True)
        class_num = 10
    elif opt.dataset == "celeba":
        if train:
            split = "train"
        else:
            split = "test"
        dataset = CelebA_attr(opt, split, transform)
        # BUG FIX: dataset_test was previously never assigned for CelebA, so the
        # Subset(dataset_test, ...) call below raised NameError for this dataset.
        dataset_test = CelebA_attr(opt, split, transform_test)
        class_num = 8
    elif opt.dataset == "imagenet":
        if train==True:
            file_dir = "/workspace/data/imagenet/train"
        elif train==False:
            file_dir = "/workspace/data/imagenet/val"
        dataset = torchvision.datasets.ImageFolder(
            file_dir,
            transform
        )
        dataset_test = torchvision.datasets.ImageFolder(
            file_dir,
            transform
        )
        class_num = 1000
    else:
        raise Exception("Invalid dataset")

    # Scan the dataset once (batch size 1) and keep a class-balanced index set
    # covering `data_fraction` of the samples.
    dataloader_total = torch.utils.data.DataLoader(dataset, batch_size=1, pin_memory=True,num_workers=opt.num_workers, shuffle=False)
    idx = []
    counts = [0]*class_num
    per_class_quota = int(dataset.__len__()*data_fraction/class_num)
    for batch_idx, (inputs, targets) in enumerate(dataloader_total):
        if counts[targets.item()] < per_class_quota:
            idx.append(batch_idx)
            counts[targets.item()] = counts[targets.item()] + 1
    del dataloader_total

    trainset = torch.utils.data.Subset(dataset, idx)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=opt.bs,pin_memory=True, num_workers=opt.num_workers, shuffle=shuffle)
    # Everything not in the balanced subset becomes the (test-transform) remainder.
    test_idx = list(set(range(dataset.__len__())) - set(idx))
    testset = torch.utils.data.Subset(dataset_test, test_idx)
    testloader = torch.utils.data.DataLoader(testset, batch_size=opt.bs,pin_memory=True, num_workers=opt.num_workers, shuffle=shuffle)
    if return_index:
        return trainset, transform, trainloader, testset, testloader, idx, test_idx
    else:
        return trainset, transform, trainloader, testset, testloader
def get_dataloader_label_partial(opt, dataset_total, label=0):
    """Return a shuffled DataLoader over only the samples whose label equals ``label``.

    The dataset is scanned once with batch size 1 to collect matching indices,
    then wrapped in a Subset and served with ``opt.bs`` batches.
    """
    scan_loader = torch.utils.data.DataLoader(
        dataset_total, batch_size=1, pin_memory=True,
        num_workers=opt.num_workers, shuffle=False)
    kept = [i for i, (_, target) in enumerate(scan_loader) if target.item() == label]
    del scan_loader
    subset = torch.utils.data.Subset(dataset_total, kept)
    return torch.utils.data.DataLoader(
        subset, batch_size=opt.bs, pin_memory=True,
        num_workers=opt.num_workers, shuffle=True)
def get_dataloader_label_remove(opt, dataset_total, label=0, idx=None):
    """Return a shuffled DataLoader over every sample whose label differs from ``label``.

    If ``idx`` is provided it is used directly as the list of kept indices,
    skipping the full scan of the dataset.
    """
    scan_loader = torch.utils.data.DataLoader(
        dataset_total, batch_size=1, pin_memory=True,
        num_workers=opt.num_workers, shuffle=False)
    if idx is None:
        idx = [i for i, (_, target) in enumerate(scan_loader) if target.item() != label]
    del scan_loader
    kept_subset = torch.utils.data.Subset(dataset_total, idx)
    return torch.utils.data.DataLoader(
        kept_subset, batch_size=opt.bs, pin_memory=True,
        num_workers=opt.num_workers, shuffle=True)
def main():
    """Placeholder CLI entry point; this module is meant to be imported."""
    return None
if __name__ == "__main__":
main()
| 8,265 | 37.990566 | 142 | py |
FeatureRE | FeatureRE-main/models.py | import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torch.nn import Module
from torchvision import transforms
from .blocks import *
class Normalize:
    """Per-channel input normalization for NCHW tensors: (x - mean) / std."""

    def __init__(self, opt, expected_values, variance):
        self.n_channels = opt.input_channel
        self.expected_values = expected_values
        self.variance = variance
        # One (mean, std) pair is required per input channel.
        assert self.n_channels == len(self.expected_values)

    def __call__(self, x):
        # Operate on a copy so the caller's tensor stays untouched.
        normalized = x.clone()
        for ch in range(self.n_channels):
            normalized[:, ch] = (x[:, ch] - self.expected_values[ch]) / self.variance[ch]
        return normalized
class Denormalize:
    """Per-channel inverse of Normalize for NCHW tensors: x * std + mean.

    (Commented-out debug print statements removed from the original.)
    """

    def __init__(self, opt, expected_values, variance):
        self.n_channels = opt.input_channel
        self.expected_values = expected_values
        self.variance = variance
        # One (mean, std) pair is required per input channel.
        assert self.n_channels == len(self.expected_values)

    def __call__(self, x):
        # Operate on a copy so the caller's tensor stays untouched.
        x_clone = x.clone()
        for channel in range(self.n_channels):
            x_clone[:, channel] = x[:, channel] * self.variance[channel] + self.expected_values[channel]
        return x_clone
class Normalizer:
    """Apply the dataset-appropriate Normalize op; identity when none is defined."""

    def __init__(self, opt):
        self.normalizer = self._get_normalizer(opt)

    def _get_normalizer(self, opt):
        """Return the Normalize instance for ``opt.dataset`` (None = no normalization)."""
        builders = {
            "cifar10": lambda: Normalize(opt, [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261]),
            "mnist": lambda: Normalize(opt, [0.5], [0.5]),
            "gtsrb": lambda: None,
            "celeba": lambda: None,
            "imagenet_sub200": lambda: Normalize(opt, [0.4802, 0.4481, 0.3975], [0.2302, 0.2265, 0.2262]),
        }
        if opt.dataset not in builders:
            raise Exception("Invalid dataset")
        return builders[opt.dataset]()

    def __call__(self, x):
        transform = self.normalizer
        return transform(x) if transform else x
class Denormalizer:
    """Apply the dataset-appropriate Denormalize op; identity when none is defined.

    (Stray debug ``print(opt.dataset)`` removed from the original.)
    """

    def __init__(self, opt):
        self.denormalizer = self._get_denormalizer(opt)

    def _get_denormalizer(self, opt):
        """Return the Denormalize instance for ``opt.dataset`` (None = no-op)."""
        if opt.dataset == "cifar10":
            denormalizer = Denormalize(opt, [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
        elif opt.dataset == "mnist":
            denormalizer = Denormalize(opt, [0.5], [0.5])
        elif opt.dataset == "gtsrb" or opt.dataset == "celeba":
            denormalizer = None
        elif opt.dataset in ["imagenet_sub200"]:
            denormalizer = Denormalize(opt, [0.4802, 0.4481, 0.3975], [0.2302, 0.2265, 0.2262])
        else:
            raise Exception("Invalid dataset")
        return denormalizer

    def __call__(self, x):
        if self.denormalizer:
            x = self.denormalizer(x)
        return x
# ---------------------------- Classifiers ----------------------------#
class MNISTBlock(nn.Module):
    """Pre-activation conv block: BatchNorm -> ReLU -> 3x3 conv (no bias)."""

    def __init__(self, in_planes, planes, stride=1):
        super(MNISTBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        # Placeholder slot -- presumably filled by external pruning/analysis
        # utilities; TODO confirm with callers.
        self.ind = None

    def forward(self, x):
        activated = F.relu(self.bn1(x))
        return self.conv1(activated)
class NetC_MNIST(nn.Module):
    """MNIST classifier: strided conv stem, two MNISTBlocks, then an MLP head.

    forward() applies the registered children in registration order, so the
    attribute definition order below IS the network topology.
    """

    def __init__(self):
        super(NetC_MNIST, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, (3, 3), 2, 1)  # spatial size -> 14
        self.relu1 = nn.ReLU(inplace=True)
        self.layer2 = MNISTBlock(32, 64, 2)  # -> 7
        self.layer3 = MNISTBlock(64, 64, 2)  # -> 4
        self.flatten = nn.Flatten()
        self.linear6 = nn.Linear(64 * 4 * 4, 512)
        self.relu7 = nn.ReLU(inplace=True)
        self.dropout8 = nn.Dropout(0.3)
        self.linear9 = nn.Linear(512, 10)

    def forward(self, x):
        out = x
        for layer in self.children():
            out = layer(out)
        return out
| 4,338 | 32.898438 | 106 | py |
FeatureRE | FeatureRE-main/resnet_nole.py | import torch.nn as nn
import math
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1 (spatial size preserved at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block. A second, distinct ReLU module (`relu2`) is
    kept after the residual add so the two activations can be hooked separately."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, when given) carry the stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
        self.relu2 = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        shortcut = x if self.downsample is None else self.downsample(x)
        out = out + shortcut
        return self.relu2(out)
class Bottleneck(nn.Module):
    """Standard 1x1 -> 3x3 -> 1x1 bottleneck residual block (output depth = planes * 4)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # The middle 3x3 conv carries the stride.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = out + shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet for small images: 3x3 stem without max-pooling, four stages,
    global average pooling, and a linear classifier.

    Args:
        block: residual block class exposing an ``expansion`` attribute.
        layers: number of blocks per stage, e.g. [2, 2, 2, 2] for ResNet-18.
        num_classes: output dimension of the final linear layer.
        in_channels: number of input image channels.
    """

    def __init__(self, block, layers, num_classes=10, in_channels=3):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Generalized from nn.AvgPool2d(kernel_size=4): identical for the
        # original 32x32 inputs (layer4 emits 4x4 maps) while also supporting
        # other input resolutions without a shape mismatch at `fc`.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He-style init for conv weights; BatchNorm affine params to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first one may downsample/expand."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection so the shortcut matches the block output.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x

    def from_input_to_features(self, x, index):
        """Run the convolutional trunk only (``index`` is unused; kept for API compat)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def from_features_to_output(self, x, index):
        """Run the classifier head on trunk features (``index`` is unused; kept for API compat)."""
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def resnet18(**kwargs):
    """Construct a ResNet-18 (BasicBlock, stage depths [2, 2, 2, 2])."""
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    return model
def resnet34(**kwargs):
    """Construct a ResNet-34 (BasicBlock, stage depths [3, 4, 6, 3])."""
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    return model
def resnet50(**kwargs):
    """Construct a ResNet-50 (Bottleneck, stage depths [3, 4, 6, 3])."""
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    return model
def resnet101(**kwargs):
    """Construct a ResNet-101 (Bottleneck, stage depths [3, 4, 23, 3])."""
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    return model
def resnet152(**kwargs):
    """Construct a ResNet-152 (Bottleneck, stage depths [3, 8, 36, 3])."""
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    return model
FeatureRE | FeatureRE-main/detection.py | from reverse_engineering import *
from config import get_argument
from dataloader import get_dataloader_label_remove, get_dataloader_partial_split
import time
def _configure_dataset(opt):
    """Populate per-dataset input geometry, class counts, and normalization
    statistics (CUDA tensors ``opt.t_mean`` / ``opt.t_std``) on ``opt``.

    Raises:
        Exception: for an unrecognized ``opt.dataset``.
    """
    if opt.dataset == "cifar10":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
        opt.num_classes = 10
        opt.total_label = 10
        mean = [0.4914, 0.4822, 0.4465]
        std = [0.2023, 0.1994, 0.2010]
    elif opt.dataset == "gtsrb":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
        opt.num_classes = 43
        opt.total_label = 43
        mean = [0, 0, 0]
        std = [1, 1, 1]
    elif opt.dataset == "mnist":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 1
        opt.num_classes = 10
        opt.total_label = 10
        mean = [0.5]
        std = [0.5]
    else:
        raise Exception("Invalid Dataset")
    size = opt.input_height
    channel = opt.input_channel
    opt.t_mean = torch.FloatTensor(mean).view(channel, 1, 1).expand(channel, size, size).cuda()
    opt.t_std = torch.FloatTensor(std).view(channel, 1, 1).expand(channel, size, size).cuda()


def _probe_feature_shape(opt, trainloader):
    """Run one clean batch through the classifier to record ``opt.feature_shape``
    (the per-sample shape of the internal feature map at ``opt.internal_index``)."""
    dummy_model = RegressionModel(opt, None).to(opt.device)
    opt.feature_shape = []
    for batch_idx, (inputs, labels) in enumerate(trainloader):
        features = dummy_model.classifier.from_input_to_features(inputs.cuda(), opt.internal_index)
        for i in range(1, len(features.shape)):
            opt.feature_shape.append(features.shape[i])
        break
    del dummy_model


def _reverse_engineer_target(opt, init_mask, target):
    """Reverse-engineer a feature-space trigger for one all2one target label.

    Returns:
        (mixed_value, opt): the best mixed value for the target and the opt
        object returned by train() (propagated so later targets see updates).
    """
    print("----------------- Analyzing all2one: target{} -----------------".format(target))
    opt.target_label = target
    re_dataloader = get_dataloader_label_remove(opt, opt.re_dataset_total_fixed, label=opt.target_label)
    data_list = []
    for batch_idx, (inputs, labels) in enumerate(re_dataloader):
        print(batch_idx)
        print(inputs.shape)
        data_list.append(inputs)
    opt.data_now = data_list
    recorder, opt = train(opt, init_mask)
    return recorder.mixed_value_best.item(), opt


def main():
    """Trojan-detection entry point.

    Reverse-engineers feature-space triggers for the requested target label(s)
    and reports "Trojaned" when any mixed value falls below
    ``opt.mixed_value_threshold``, otherwise "Benign".
    """
    start_time = time.time()
    opt = get_argument().parse_args()
    _configure_dataset(opt)

    trainset, transform, trainloader, testset, testloader = get_dataloader_partial_split(
        opt, train_fraction=opt.data_fraction, train=False)
    opt.total_label = opt.num_classes
    opt.re_dataset_total_fixed = trainset
    opt.re_dataloader_total_fixed = trainloader

    _probe_feature_shape(opt, trainloader)
    init_mask = torch.ones(opt.feature_shape)
    opt.pretrain_AE = None
    get_range(opt, init_mask)

    final_mixed_value_list = []
    if opt.set_all2one_target == "all":
        targets = range(opt.num_classes)
    else:
        targets = [int(opt.set_all2one_target)]
    for target in targets:
        mixed_value, opt = _reverse_engineer_target(opt, init_mask, target)
        final_mixed_value_list.append(mixed_value)

    end_time = time.time()
    print("total time: ", end_time - start_time)
    print("final_mixed_value_list:", final_mixed_value_list)
    if min(final_mixed_value_list) < opt.mixed_value_threshold:
        print("Trojaned")
    else:
        print("Benign")
if __name__ == "__main__":
main()
| 4,155 | 36.107143 | 140 | py |
FeatureRE | FeatureRE-main/mitigation.py | from reverse_engineering import *
from config import get_argument
from dataloader import get_dataloader_label_remove, get_dataloader_partial_split
import time
def _configure_dataset(opt):
    """Populate per-dataset input geometry, class counts, and normalization
    statistics (CUDA tensors ``opt.t_mean`` / ``opt.t_std``) on ``opt``.

    Raises:
        Exception: for an unrecognized ``opt.dataset``.
    """
    if opt.dataset == "cifar10":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
        opt.num_classes = 10
        opt.total_label = 10
        mean = [0.4914, 0.4822, 0.4465]
        std = [0.2023, 0.1994, 0.2010]
    elif opt.dataset == "gtsrb":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
        opt.num_classes = 43
        opt.total_label = 43
        mean = [0, 0, 0]
        std = [1, 1, 1]
    elif opt.dataset == "mnist":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 1
        opt.num_classes = 10
        opt.total_label = 10
        mean = [0.5]
        std = [0.5]
    else:
        raise Exception("Invalid Dataset")
    size = opt.input_height
    channel = opt.input_channel
    opt.t_mean = torch.FloatTensor(mean).view(channel, 1, 1).expand(channel, size, size).cuda()
    opt.t_std = torch.FloatTensor(std).view(channel, 1, 1).expand(channel, size, size).cuda()


def _collect_non_target_batches(opt, dataset, target_label):
    """Gather every batch that does not carry the target label (prints progress)."""
    loader = get_dataloader_label_remove(opt, dataset, label=target_label)
    batches = []
    for batch_idx, (inputs, labels) in enumerate(loader):
        print(batch_idx)
        print(inputs.shape)
        batches.append(inputs)
    return batches


def main():
    """Backdoor-mitigation entry point.

    Reverse-engineers the feature-space trigger for the requested all2one
    target label, then repairs the model via neuron flipping
    (``fix_neuron_flip``).
    """
    start_time = time.time()
    opt = get_argument().parse_args()
    _configure_dataset(opt)

    trainset, transform, trainloader, testset, testloader = get_dataloader_partial_split(
        opt, train_fraction=opt.data_fraction, train=False)
    opt.total_label = opt.num_classes
    opt.re_dataset_total_fixed = trainset
    opt.re_dataloader_total_fixed = trainloader

    # Probe one clean batch to record the internal feature-map shape.
    dummy_model = RegressionModel(opt, None).to(opt.device)
    opt.feature_shape = []
    for batch_idx, (inputs, labels) in enumerate(trainloader):
        features = dummy_model.classifier.from_input_to_features(inputs.cuda(), opt.internal_index)
        for i in range(1, len(features.shape)):
            opt.feature_shape.append(features.shape[i])
        break
    del dummy_model

    init_mask = torch.ones(opt.feature_shape)
    opt.pretrain_AE = None
    get_range(opt, init_mask)

    final_mixed_value_list = []
    if opt.set_all2one_target:
        target = int(opt.set_all2one_target)
        print("----------------- Analyzing all2one: target{} -----------------".format(target))
        opt.target_label = target
        opt.data_now = _collect_non_target_batches(opt, opt.re_dataset_total_fixed, opt.target_label)
        recorder, opt = train(opt, init_mask)
        # ASR is evaluated on test samples that do not already carry the target label.
        testloader_fix_asr = get_dataloader_label_remove(opt, testset, label=opt.target_label)
        fix_neuron_flip(opt, trainloader, testloader, testloader_fix_asr)
        final_mixed_value_list.append(recorder.mixed_value_best)

    end_time = time.time()
    print("total time: ", end_time - start_time)
if __name__ == "__main__":
main()
| 3,433 | 35.924731 | 140 | py |
FeatureRE | FeatureRE-main/train_models/dataloader.py | import torch.utils.data as data
import torch
import torchvision
import torchvision.transforms as transforms
import os
import csv
import kornia.augmentation as A
import random
import numpy as np
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import Dataset
from natsort import natsorted
from io import BytesIO
class ToNumpy:
    """Convert a PIL image / array-like to a numpy array, giving 2-D
    (grayscale) inputs an explicit trailing channel axis (H, W, 1)."""

    def __call__(self, x):
        arr = np.array(x)
        if arr.ndim == 2:
            arr = np.expand_dims(arr, axis=2)
        return arr
class ProbTransform(torch.nn.Module):
    """Apply transform ``f`` with probability ``p``; otherwise pass the input through."""

    def __init__(self, f, p=1):
        super(ProbTransform, self).__init__()
        self.f = f
        self.p = p

    def forward(self, x):
        # Draw once per call; skip the transform when the draw misses.
        if random.random() >= self.p:
            return x
        return self.f(x)
def get_transform(opt, train=True, pretensor_transform=False):
    """Build the torchvision preprocessing pipeline for ``opt.dataset``.

    Normalization statistics depend on the target architecture: models whose
    ``opt.set_arch`` contains "nole" or "mnist_lenet" use dataset statistics,
    while the default path uses 0.5/0.5 for MNIST and leaves GTSRB/CelebA
    unnormalized. TrojAI inputs are only center-cropped and tensorized.

    Raises:
        Exception: for an unrecognized ``opt.dataset``.
    """
    add_nad_transform = False  # NAD-style Cutout augmentation, disabled by default
    if opt.dataset == "trojai":
        return transforms.Compose([transforms.CenterCrop(opt.input_height), transforms.ToTensor()])

    cifar_stats = ([0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
    gtsrb_stats = ((0.3403, 0.3121, 0.3214), (0.2724, 0.2608, 0.2669))
    imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    use_arch_stats = (opt.set_arch is not None) and (
        ("nole" in opt.set_arch) or ("mnist_lenet" in opt.set_arch))
    if use_arch_stats:
        # Architectures trained with dataset statistics baked into the input.
        stats = {
            "cifar10": cifar_stats,
            "mnist": ([0.1307], [0.3081]),
            "gtsrb": gtsrb_stats,
            "celeba": gtsrb_stats,
            "imagenet": imagenet_stats,
        }
    else:
        # Default path: MNIST uses 0.5/0.5; GTSRB/CelebA stay unnormalized (None).
        stats = {
            "cifar10": cifar_stats,
            "mnist": ([0.5], [0.5]),
            "gtsrb": None,
            "celeba": None,
            "imagenet": imagenet_stats,
        }
    if opt.dataset not in stats:
        raise Exception("Invalid Dataset")

    steps = [transforms.Resize((opt.input_height, opt.input_width))]
    if pretensor_transform and train:
        steps.append(transforms.RandomCrop((opt.input_height, opt.input_width), padding=opt.random_crop))
        steps.append(transforms.RandomRotation(opt.random_rotation))
        if opt.dataset == "cifar10":
            steps.append(transforms.RandomHorizontalFlip(p=0.5))
    if add_nad_transform:
        steps.append(transforms.RandomCrop(opt.input_height, padding=4))
        steps.append(transforms.RandomHorizontalFlip())
    steps.append(transforms.ToTensor())

    dataset_stats = stats[opt.dataset]
    if dataset_stats is not None:
        steps.append(transforms.Normalize(*dataset_stats))
        if add_nad_transform:
            steps.append(Cutout(1, 9))
    return transforms.Compose(steps)
class Cutout(object):
    """Randomly mask out one or more patches from an image.

    Args:
        n_holes (int): Number of patches to cut out of each image.
        length (int): The length (in pixels) of each square patch.
    """

    def __init__(self, n_holes, length):
        self.n_holes = n_holes
        self.length = length

    def __call__(self, img):
        """Zero out ``n_holes`` random ``length x length`` squares of ``img``.

        Args:
            img (Tensor): Tensor image of size (C, H, W).

        Returns:
            Tensor: Image with the patches zeroed (same shape as input).
        """
        height = img.size(1)
        width = img.size(2)
        mask = np.ones((height, width), np.float32)
        half = self.length // 2
        for _ in range(self.n_holes):
            # Patch center; edges are clipped so patches may be truncated.
            cy = np.random.randint(height)
            cx = np.random.randint(width)
            top = np.clip(cy - half, 0, height)
            bottom = np.clip(cy + half, 0, height)
            left = np.clip(cx - half, 0, width)
            right = np.clip(cx + half, 0, width)
            mask[top:bottom, left:right] = 0.
        # Broadcast the 2-D mask across all channels.
        mask_t = torch.from_numpy(mask).expand_as(img)
        return img * mask_t
class PostTensorTransform(torch.nn.Module):
    """Stochastic tensor-space augmentations (kornia ops) applied after ToTensor.

    forward() applies the registered children in registration order, so the
    attribute definition order below IS the augmentation order.
    """

    def __init__(self, opt):
        super(PostTensorTransform, self).__init__()
        self.random_crop = ProbTransform(
            A.RandomCrop((opt.input_height, opt.input_width), padding=opt.random_crop), p=0.8
        )
        self.random_rotation = ProbTransform(A.RandomRotation(opt.random_rotation), p=0.5)
        # Horizontal flipping is only meaningful for CIFAR-10 (not e.g. digits/signs).
        if opt.dataset == "cifar10":
            self.random_horizontal_flip = A.RandomHorizontalFlip(p=0.5)

    def forward(self, x):
        out = x
        for transform in self.children():
            out = transform(out)
        return out
def get_dataloader(opt, train=True, pretensor_transform=False, shuffle=True, return_dataset=False):
    """Create the CIFAR-10 dataset/dataloader pair described by ``opt``.

    Returns ``(dataloader, transform)`` or, when ``return_dataset`` is True,
    ``(dataset, dataloader, transform)``.

    Raises:
        Exception: for any dataset other than "cifar10".
    """
    transform = get_transform(opt, train, pretensor_transform)
    if opt.dataset != "cifar10":
        raise Exception("Invalid dataset")
    dataset = torchvision.datasets.CIFAR10(opt.data_root, train, transform=transform, download=True)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.bs, num_workers=opt.num_workers, shuffle=shuffle)
    if return_dataset:
        return dataset, dataloader, transform
    return dataloader, transform
def get_dataloader_random_ratio(opt, train=True, pretensor_transform=False, shuffle=True):
    """Build a DataLoader over a random subset of CIFAR-10.

    A fraction ``opt.random_ratio`` of the chosen split is sampled without
    replacement and wrapped in a ``torch.utils.data.Subset``.

    Args:
        opt: experiment options; reads ``dataset``, ``data_root``, ``bs``,
            ``num_workers`` and ``random_ratio``.
        train (bool): load the training split when True, test split otherwise.
        pretensor_transform (bool): forwarded to ``get_transform``.
        shuffle (bool): whether the loader shuffles each epoch.

    Returns:
        ``(dataloader, transform)``.

    Raises:
        Exception: if ``opt.dataset`` is not ``"cifar10"``.
    """
    transform = get_transform(opt, train, pretensor_transform)
    if opt.dataset == "cifar10":
        dataset = torchvision.datasets.CIFAR10(opt.data_root, train, transform=transform, download=True)
    else:
        raise Exception("Invalid dataset")
    # Idiomatic len() instead of calling dataset.__len__() directly.
    subset_size = int(len(dataset) * opt.random_ratio)
    idx = random.sample(range(len(dataset)), subset_size)
    dataset = torch.utils.data.Subset(dataset, idx)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.bs, num_workers=opt.num_workers, shuffle=shuffle)
    return dataloader, transform
def main():
    """No-op entry point; this module is meant to be imported for its loaders."""
    pass


if __name__ == "__main__":
    main()
| 6,855 | 36.26087 | 119 | py |
FeatureRE | FeatureRE-main/train_models/train_model.py | import json
import os
import shutil
from time import time
import config
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torchvision.transforms import RandomErasing
from dataloader import PostTensorTransform, get_dataloader,get_dataloader_random_ratio
from resnet_nole import *
import random
class Normalize:
    """Per-channel normalization ``(x - mean) / std`` for batched tensors.

    Operates on tensors shaped (N, C, ...), one (mean, std) pair per channel.
    """
    def __init__(self, opt, expected_values, variance):
        # One mean/std pair per input channel; mismatch is a config error.
        self.n_channels = opt.input_channel
        self.expected_values = expected_values
        self.variance = variance
        assert self.n_channels == len(self.expected_values)
    def __call__(self, x):
        # Work on a copy so the caller's tensor is left untouched.
        out = x.clone()
        for ch in range(self.n_channels):
            out[:, ch] = (x[:, ch] - self.expected_values[ch]) / self.variance[ch]
        return out
class Denormalize:
    """Per-channel inverse of :class:`Normalize`: ``x * std + mean``."""
    def __init__(self, opt, expected_values, variance):
        # One mean/std pair per input channel; mismatch is a config error.
        self.n_channels = opt.input_channel
        self.expected_values = expected_values
        self.variance = variance
        assert self.n_channels == len(self.expected_values)
    def __call__(self, x):
        # Work on a copy so the caller's tensor is left untouched.
        out = x.clone()
        for ch in range(self.n_channels):
            out[:, ch] = x[:, ch] * self.variance[ch] + self.expected_values[ch]
        return out
class Normalizer:
    """Dataset-aware wrapper that selects the right ``Normalize`` transform."""
    def __init__(self, opt):
        self.normalizer = self._get_normalizer(opt)
    def _get_normalizer(self, opt):
        if opt.dataset != "cifar10":
            raise Exception("Invalid dataset")
        # CIFAR-10 per-channel mean and std.
        return Normalize(opt, [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
    def __call__(self, x):
        if self.normalizer:
            x = self.normalizer(x)
        return x
class Denormalizer:
    """Dataset-aware wrapper that selects the right ``Denormalize`` transform.

    Raises:
        Exception: if ``opt.dataset`` is not ``"cifar10"``.
    """
    def __init__(self, opt):
        self.denormalizer = self._get_denormalizer(opt)
    def _get_denormalizer(self, opt):
        # Removed a stray debug print(opt.dataset) left over from development.
        if opt.dataset == "cifar10":
            # CIFAR-10 per-channel mean and std.
            denormalizer = Denormalize(opt, [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
        else:
            raise Exception("Invalid dataset")
        return denormalizer
    def __call__(self, x):
        if self.denormalizer:
            x = self.denormalizer(x)
        return x
def get_model(opt):
    """Create the classifier network, SGD optimizer and LR scheduler.

    Returns:
        ``(netC, optimizerC, schedulerC)``; all three are ``None`` when
        ``opt.set_arch`` is unset or not ``"resnet18"``.
    """
    netC = None
    optimizerC = None
    schedulerC = None
    if opt.set_arch == "resnet18":
        netC = resnet18(num_classes=opt.num_classes, in_channels=opt.input_channel)
        netC = netC.to(opt.device)
        # SGD with momentum + milestone LR decay, as configured in opt.
        optimizerC = torch.optim.SGD(netC.parameters(), opt.lr_C, momentum=0.9, weight_decay=5e-4)
        schedulerC = torch.optim.lr_scheduler.MultiStepLR(optimizerC, opt.schedulerC_milestones, opt.schedulerC_lambda)
    return netC, optimizerC, schedulerC
def train(train_transform, netC, optimizerC, schedulerC, train_dl, noise_grid, identity_grid, tf_writer, epoch, opt):
    """Run one epoch of WaNet-style backdoor training.

    Each batch is split three ways: the first ``num_bd`` samples are warped
    with the backdoor grid and relabelled, the next ``num_cross`` samples get
    a noise-perturbed ("cross") warp with labels kept, and the rest stays
    clean.  Prints running accuracies every 50 batches, dumps debug images,
    logs to TensorBoard once per epoch and steps the LR scheduler once.
    """
    print(" Train:")
    netC.train()
    rate_bd = opt.pc
    total_loss_ce = 0
    total_sample = 0
    total_clean = 0
    total_bd = 0
    total_cross = 0
    total_clean_correct = 0
    total_bd_correct = 0
    total_cross_correct = 0
    criterion_CE = torch.nn.CrossEntropyLoss()
    # NOTE(review): criterion_BCE is never used in this function.
    criterion_BCE = torch.nn.BCELoss()
    denormalizer = Denormalizer(opt)
    transforms = PostTensorTransform(opt).to(opt.device)
    total_time = 0
    avg_acc_cross = 0
    for batch_idx, (inputs, targets) in enumerate(train_dl):
        optimizerC.zero_grad()
        inputs, targets = inputs.to(opt.device), targets.to(opt.device)
        bs = inputs.shape[0]
        # num_cross is derived from num_bd, so num_bd == 0 implies num_cross == 0.
        num_bd = int(bs * rate_bd)
        num_cross = int(num_bd * opt.cross_ratio)
        # Backdoor warping field: identity grid plus scaled noise, clamped to [-1, 1].
        grid_temps = (identity_grid + opt.s * noise_grid / opt.input_height) * opt.grid_rescale
        grid_temps = torch.clamp(grid_temps, -1, 1)
        # Per-sample random perturbation of the warp for the "cross" samples.
        ins = torch.rand(num_cross, opt.input_height, opt.input_height, 2).to(opt.device) * 2 - 1
        grid_temps2 = grid_temps.repeat(num_cross, 1, 1, 1) + ins / opt.input_height
        grid_temps2 = torch.clamp(grid_temps2, -1, 1)
        if num_bd!=0:
            inputs_bd = F.grid_sample(inputs[:num_bd], grid_temps.repeat(num_bd, 1, 1, 1), align_corners=True)
            # NOTE(review): if opt.attack_mode matches neither branch,
            # targets_bd stays unbound and the torch.cat below raises NameError.
            if opt.attack_mode == "all2one":
                targets_bd = torch.ones_like(targets[:num_bd]) * opt.target_label
            if opt.attack_mode == "all2all":
                targets_bd = torch.remainder(targets[:num_bd] + 1, opt.num_classes)
        inputs_cross = F.grid_sample(inputs[num_bd : (num_bd + num_cross)], grid_temps2, align_corners=True)
        if (num_bd==0 and num_cross==0):
            total_inputs = inputs
            total_targets = targets
        else:
            # Order: [backdoor | cross | clean]; cross samples keep their labels.
            total_inputs = torch.cat([inputs_bd, inputs_cross, inputs[(num_bd + num_cross) :]], dim=0)
            total_targets = torch.cat([targets_bd, targets[num_bd:]], dim=0)
        total_inputs = transforms(total_inputs)
        start = time()
        total_preds = netC(total_inputs)
        total_time += time() - start
        loss_ce = criterion_CE(total_preds, total_targets)
        loss = loss_ce
        loss.backward()
        optimizerC.step()
        total_sample += bs
        total_loss_ce += loss_ce.detach()
        total_clean += bs - num_bd - num_cross
        total_bd += num_bd
        total_cross += num_cross
        # Accuracy on the clean tail of the batch.
        total_clean_correct += torch.sum(
            torch.argmax(total_preds[(num_bd + num_cross) :], dim=1) == total_targets[(num_bd + num_cross) :]
        )
        if num_bd:
            total_bd_correct += torch.sum(torch.argmax(total_preds[:num_bd], dim=1) == targets_bd)
            avg_acc_bd = total_bd_correct * 100.0 / total_bd
        else:
            avg_acc_bd = 0
        if num_cross:
            total_cross_correct += torch.sum(
                torch.argmax(total_preds[num_bd : (num_bd + num_cross)], dim=1)
                == total_targets[num_bd : (num_bd + num_cross)]
            )
            avg_acc_cross = total_cross_correct * 100.0 / total_cross
        else:
            avg_acc_cross = 0
        avg_acc_clean = total_clean_correct * 100.0 / total_clean
        avg_loss_ce = total_loss_ce / total_sample
        # Save image for debugging
        if not batch_idx % 50:
            if not os.path.exists(opt.temps):
                os.makedirs(opt.temps)
            path = os.path.join(opt.temps, "backdoor_image.png")
            path_cross = os.path.join(opt.temps, "cross_image.png")
            if num_bd>0:
                torchvision.utils.save_image(inputs_bd, path, normalize=True)
            if num_cross>0:
                torchvision.utils.save_image(inputs_cross, path_cross, normalize=True)
            # NOTE(review): the else below pairs with the second if only, so when
            # num_bd>0 and num_cross==0 BOTH the first and the last print run.
            if (num_bd>0 and num_cross==0):
                print(
                    batch_idx,
                    len(train_dl),
                    "CE Loss: {:.4f} | Clean Acc: {:.4f} | Bd Acc: {:.4f}".format(
                        avg_loss_ce, avg_acc_clean, avg_acc_bd,
                    ))
            if (num_bd>0 and num_cross>0):
                print(
                    batch_idx,
                    len(train_dl),
                    "CE Loss: {:.4f} | Clean Acc: {:.4f} | Bd Acc: {:.4f} | Cross Acc: {:.4f}".format(
                        avg_loss_ce, avg_acc_clean, avg_acc_bd, avg_acc_cross
                    ))
            else:
                print(
                    batch_idx,
                    len(train_dl),
                    "CE Loss: {:.4f} | Clean Acc: {:.4f}".format(avg_loss_ce, avg_acc_clean))
        # Image for tensorboard
        if batch_idx == len(train_dl) - 2:
            if num_bd>0:
                residual = inputs_bd - inputs[:num_bd]
                batch_img = torch.cat([inputs[:num_bd], inputs_bd, total_inputs[:num_bd], residual], dim=2)
                batch_img = denormalizer(batch_img)
                batch_img = F.upsample(batch_img, scale_factor=(4, 4))
                grid = torchvision.utils.make_grid(batch_img, normalize=True)
                path = os.path.join(opt.temps, "batch_img.png")
                torchvision.utils.save_image(batch_img, path, normalize=True)
    # for tensorboard
    if not epoch % 1:
        tf_writer.add_scalars(
            "Clean Accuracy", {"Clean": avg_acc_clean, "Bd": avg_acc_bd, "Cross": avg_acc_cross}, epoch
        )
        # NOTE(review): `grid` is only bound if the batch_idx == len(train_dl)-2
        # branch above ran with num_bd>0 — otherwise this raises NameError.
        if num_bd>0:
            tf_writer.add_image("Images", grid, global_step=epoch)
    schedulerC.step()
def eval(
    test_transform,
    netC,
    optimizerC,
    schedulerC,
    test_dl,
    noise_grid,
    identity_grid,
    best_clean_acc,
    best_bd_acc,
    best_cross_acc,
    tf_writer,
    epoch,
    opt,
):
    """Evaluate clean, backdoor and (optionally) cross accuracy; checkpoint bests.

    Saves a checkpoint (model/optimizer/scheduler state plus the warp grids)
    whenever clean accuracy improves, or stays within 0.1 of the best while
    backdoor accuracy improves.  Returns the updated
    ``(best_clean_acc, best_bd_acc, best_cross_acc)`` triple.
    NOTE(review): the accumulators below are only assigned inside the loop, so
    an empty ``test_dl`` would leave ``acc_clean``/``acc_bd`` unbound.
    """
    print(" Eval:")
    netC.eval()
    total_sample = 0
    total_clean_correct = 0
    total_bd_correct = 0
    total_cross_correct = 0
    # NOTE(review): total_ae_loss and criterion_BCE are never used here.
    total_ae_loss = 0
    criterion_BCE = torch.nn.BCELoss()
    for batch_idx, (inputs, targets) in enumerate(test_dl):
        with torch.no_grad():
            inputs, targets = inputs.to(opt.device), targets.to(opt.device)
            #inputs = test_transform(inputs)
            bs = inputs.shape[0]
            total_sample += bs
            # Evaluate Clean
            preds_clean = netC(inputs)
            total_clean_correct += torch.sum(torch.argmax(preds_clean, 1) == targets)
            # Evaluate Backdoor
            # Same warp construction as in train(): identity + scaled noise.
            grid_temps = (identity_grid + opt.s * noise_grid / opt.input_height) * opt.grid_rescale
            grid_temps = torch.clamp(grid_temps, -1, 1)
            ins = torch.rand(bs, opt.input_height, opt.input_height, 2).to(opt.device) * 2 - 1
            grid_temps2 = grid_temps.repeat(bs, 1, 1, 1) + ins / opt.input_height
            grid_temps2 = torch.clamp(grid_temps2, -1, 1)
            inputs_bd = F.grid_sample(inputs, grid_temps.repeat(bs, 1, 1, 1), align_corners=True)
            if opt.attack_mode == "all2one":
                targets_bd = torch.ones_like(targets) * opt.target_label
            if opt.attack_mode == "all2all":
                targets_bd = torch.remainder(targets + 1, opt.num_classes)
            preds_bd = netC(inputs_bd)
            total_bd_correct += torch.sum(torch.argmax(preds_bd, 1) == targets_bd)
            acc_clean = total_clean_correct * 100.0 / total_sample
            acc_bd = total_bd_correct * 100.0 / total_sample
            # Evaluate cross
            if opt.cross_ratio:
                inputs_cross = F.grid_sample(inputs, grid_temps2, align_corners=True)
                preds_cross = netC(inputs_cross)
                total_cross_correct += torch.sum(torch.argmax(preds_cross, 1) == targets)
                acc_cross = total_cross_correct * 100.0 / total_sample
                # NOTE(review): five placeholders but six arguments —
                # best_cross_acc is silently ignored by str.format.
                info_string = (
                    "Clean Acc: {:.4f} - Best: {:.4f} | Bd Acc: {:.4f} - Best: {:.4f} | Cross: {:.4f}".format(
                        acc_clean, best_clean_acc, acc_bd, best_bd_acc, acc_cross, best_cross_acc
                    )
                )
            else:
                info_string = "Clean Acc: {:.4f} - Best: {:.4f} | Bd Acc: {:.4f} - Best: {:.4f}".format(
                    acc_clean, best_clean_acc, acc_bd, best_bd_acc
                )
            print(batch_idx, len(test_dl), info_string)
    # tensorboard
    if not epoch % 1:
        tf_writer.add_scalars("Test Accuracy", {"Clean": acc_clean, "Bd": acc_bd}, epoch)
    # Save checkpoint
    if acc_clean > best_clean_acc or (acc_clean > best_clean_acc - 0.1 and acc_bd > best_bd_acc):
        print(" Saving...")
        best_clean_acc = acc_clean
        best_bd_acc = acc_bd
        if opt.cross_ratio:
            best_cross_acc = acc_cross
        else:
            best_cross_acc = torch.tensor([0])
        # Checkpoint includes the warp grids so the attack can be reproduced.
        state_dict = {
            "netC": netC.state_dict(),
            "schedulerC": schedulerC.state_dict(),
            "optimizerC": optimizerC.state_dict(),
            "best_clean_acc": best_clean_acc,
            "best_bd_acc": best_bd_acc,
            "best_cross_acc": best_cross_acc,
            "epoch_current": epoch,
            "identity_grid": identity_grid,
            "noise_grid": noise_grid,
        }
        torch.save(state_dict, opt.ckpt_path)
        with open(os.path.join(opt.ckpt_folder, "results.txt"), "w+") as f:
            results_dict = {
                "clean_acc": best_clean_acc.item(),
                "bd_acc": best_bd_acc.item(),
                "cross_acc": best_cross_acc.item(),
            }
            json.dump(results_dict, f, indent=2)
    return best_clean_acc, best_bd_acc, best_cross_acc
def main():
    """End-to-end WaNet backdoor training driver.

    Parses options, builds the (subsampled) train loader and test loader,
    constructs the model, then either resumes from a checkpoint or starts
    fresh (generating the warp grids and wiping the checkpoint folder), and
    finally runs the train/eval loop for ``opt.n_iters`` epochs.
    NOTE(review): the module-level ``eval`` used below shadows the builtin.
    """
    opt = config.get_arguments().parse_args()
    if opt.dataset in ["cifar10"]:
        opt.num_classes = 10
    if opt.dataset == "cifar10":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
    # Dataset
    # Train on a fixed 95% random subset of the training split.
    opt.random_ratio = 0.95
    train_dl, train_transform = get_dataloader_random_ratio(opt, True)
    test_dl, test_transform = get_dataloader(opt, False)
    # prepare model
    netC, optimizerC, schedulerC = get_model(opt)
    # Load pretrained model
    mode = opt.attack_mode
    opt.ckpt_folder = os.path.join(opt.checkpoints, opt.dataset)
    if opt.set_arch:
        opt.ckpt_folder = opt.ckpt_folder + "/neurips_wanet/" + opt.set_arch + "_" + opt.extra_flag+"_"+str(opt.target_label)
    else:
        opt.ckpt_folder = opt.ckpt_folder + "/neurips_wanet/" + opt.extra_flag+"_"+str(opt.target_label)
    opt.ckpt_path = os.path.join(opt.ckpt_folder, "{}_{}_morph_wanet.pth.tar".format(opt.dataset, mode))
    opt.log_dir = os.path.join(opt.ckpt_folder, "log_dir")
    if not os.path.exists(opt.log_dir):
        os.makedirs(opt.log_dir)
    if opt.continue_training:
        if os.path.exists(opt.ckpt_path):
            print("Continue training!!")
            # Restore model/optimizer/scheduler state plus the warp grids.
            state_dict = torch.load(opt.ckpt_path)
            netC.load_state_dict(state_dict["netC"])
            optimizerC.load_state_dict(state_dict["optimizerC"])
            schedulerC.load_state_dict(state_dict["schedulerC"])
            best_clean_acc = state_dict["best_clean_acc"]
            best_bd_acc = state_dict["best_bd_acc"]
            best_cross_acc = state_dict["best_cross_acc"]
            epoch_current = state_dict["epoch_current"]
            identity_grid = state_dict["identity_grid"]
            noise_grid = state_dict["noise_grid"]
            tf_writer = SummaryWriter(log_dir=opt.log_dir)
        else:
            print("Pretrained model doesnt exist")
            exit()
    else:
        print("Train from scratch!!!")
        best_clean_acc = 0.0
        best_bd_acc = 0.0
        best_cross_acc = 0.0
        epoch_current = 0
        # Prepare grid
        # Low-res random flow field (k x k), normalized to unit mean |value|,
        # then upsampled to the input resolution.
        ins = torch.rand(1, 2, opt.k, opt.k) * 2 - 1
        ins = ins / torch.mean(torch.abs(ins))
        noise_grid = (
            F.upsample(ins, size=opt.input_height, mode="bicubic", align_corners=True)
            .permute(0, 2, 3, 1)
            .to(opt.device)
        )
        array1d = torch.linspace(-1, 1, steps=opt.input_height)
        x, y = torch.meshgrid(array1d, array1d)
        identity_grid = torch.stack((y, x), 2)[None, ...].to(opt.device)
        # Fresh run: wipe any previous checkpoint folder, then recreate logs.
        shutil.rmtree(opt.ckpt_folder, ignore_errors=True)
        os.makedirs(opt.log_dir)
        with open(os.path.join(opt.ckpt_folder, "opt.json"), "w+") as f:
            json.dump(opt.__dict__, f, indent=2)
        tf_writer = SummaryWriter(log_dir=opt.log_dir)
    for epoch in range(epoch_current, opt.n_iters):
        print("Epoch {}:".format(epoch + 1))
        train(train_transform,netC, optimizerC, schedulerC, train_dl, noise_grid, identity_grid, tf_writer, epoch, opt)
        best_clean_acc, best_bd_acc, best_cross_acc = eval(
            test_transform,
            netC,
            optimizerC,
            schedulerC,
            test_dl,
            noise_grid,
            identity_grid,
            best_clean_acc,
            best_bd_acc,
            best_cross_acc,
            tf_writer,
            epoch,
            opt,
        )
        if opt.save_all:
            # Periodic snapshots independent of the best-accuracy checkpoint.
            if (epoch)%opt.save_freq == 0:
                state_dict = {
                    "netC": netC.state_dict(),
                    "schedulerC": schedulerC.state_dict(),
                    "optimizerC": optimizerC.state_dict(),
                    "epoch_current": epoch,
                }
                epoch_path = os.path.join(opt.ckpt_folder, "{}_{}_epoch{}.pth.tar".format(opt.dataset, mode,epoch))
                torch.save(state_dict, epoch_path)
if __name__ == "__main__":
    main()
| 16,640 | 34.107595 | 125 | py |
FeatureRE | FeatureRE-main/train_models/resnet_nole.py | import torch.nn as nn
import math
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding=1 (spatial size is
    preserved when ``stride`` is 1)."""
    return nn.Conv2d(
        in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
    )
'''class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
#print(downsample)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
if self.downsample is not None:
#print(x.shape)
residual = self.downsample(residual)
x += residual
x = self.relu(x)
return x
def input_to_residual(self, x):
residual = x
if self.downsample is not None:
residual = self.downsample(residual)
return residual
def residual_to_output(self, residual,conv2):
x = residual + conv2
x = self.relu(x)
return x
def input_to_conv2(self, x):
residual = x
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
return x
def conv2_to_output(self, x, residual):
x = self.bn2(x)
x = residual + x
x = self.relu(x)
return x
def conv2_to_output_mask(self, x, residual,mask,pattern):
x = self.bn2(x)
x = residual + x
x = (1 - mask) * x + mask * pattern
x = self.relu(x)
return x
def input_to_conv1(self, x):
x = self.conv1(x)
return x
def conv1_to_output(self, x, residual):
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x += residual
x = self.relu(x)
return x'''
class BasicBlock(nn.Module):
    """Two-conv residual block (expansion 1).

    A separate ``relu2`` is applied after the residual add so the pre-add and
    post-add activations are distinct modules (useful for forward hooks).
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, when provided) carry the spatial stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
        # Distinct ReLU applied after the skip connection.
        self.relu2 = nn.ReLU(inplace=True)
    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu2(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck block (expansion 4)."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 reduce, 3x3 spatial conv (carries the stride), 1x1 expand by 4.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem (stride 1, no max-pool), four residual
    stages, 4x4 average pool and a linear classifier.

    ``register_all_hooks`` installs forward hooks that capture intermediate
    conv outputs into ``self.inter_feature`` on every forward pass.  The many
    ``input_to_*`` / ``*_to_output`` methods re-run the network up to / from a
    chosen split point so callers can read or perturb internal features.
    NOTE(review): the per-block helpers further below call BasicBlock methods
    (``input_to_residual``, ``input_to_conv2``, ``conv2_to_output``, ...) that
    only exist on the commented-out BasicBlock variant above — the active
    BasicBlock does not define them; confirm before using those paths.
    """
    def __init__(self, block, layers, num_classes=10,in_channels=3):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(kernel_size=4)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # Hook outputs land here, keyed by the names set in register_all_hooks.
        self.inter_feature = {}
        self.inter_gradient = {}
        self.register_all_hooks()
        # He-style normal init for convs; BatchNorm affine params to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # 1x1 conv + BN projection shortcut when shape or channel count changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def get_fm(self, x):
        # Returns the layer4 feature map (before pooling / classification).
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        #x = self.avgpool(x)
        return x
    # The pairs below split forward() at a given module boundary:
    # input_to_X runs the prefix, X_to_output runs the suffix.
    def input_to_conv1(self, x):
        x = self.conv1(x)
        return x
    def conv1_to_output(self, x):
        #x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer1(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        return x
    def layer1_to_output(self, x):
        #x = self.conv1(x)
        #x = self.bn1(x)
        #x = self.relu(x)
        #x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer2(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        return x
    def layer2_to_output(self, x):
        #x = self.conv1(x)
        #x = self.bn1(x)
        #x = self.relu(x)
        #x = self.layer1(x)
        #x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer3(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x
    def layer3_to_output(self, x):
        #x = self.conv1(x)
        #x = self.bn1(x)
        #x = self.relu(x)
        #x = self.layer1(x)
        #x = self.layer2(x)
        #x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer4(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
    def layer4_to_output(self, x):
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def make_hook(self, name, flag):
        # Build a forward (feature-capturing) or backward (gradient-capturing)
        # hook that stores its output under `name`.
        if flag == 'forward':
            def hook(m, input, output):
                self.inter_feature[name] = output
            return hook
        elif flag == 'backward':
            def hook(m, input, output):
                self.inter_gradient[name] = output
            return hook
        else:
            assert False
    def register_all_hooks(self):
        # Capture the raw (pre-BN) conv outputs and downsample outputs of
        # every residual block, plus the stem conv.
        self.conv1.register_forward_hook(self.make_hook("Conv1_Conv1_Conv1_", 'forward'))
        self.layer1[0].conv1.register_forward_hook(self.make_hook("Layer1_0_Conv1_", 'forward'))
        self.layer1[0].conv2.register_forward_hook(self.make_hook("Layer1_0_Conv2_", 'forward'))
        self.layer1[1].conv1.register_forward_hook(self.make_hook("Layer1_1_Conv1_", 'forward'))
        self.layer1[1].conv2.register_forward_hook(self.make_hook("Layer1_1_Conv2_", 'forward'))
        self.layer2[0].conv1.register_forward_hook(self.make_hook("Layer2_0_Conv1_", 'forward'))
        self.layer2[0].downsample.register_forward_hook(self.make_hook("Layer2_0_Downsample_", 'forward'))
        self.layer2[0].conv2.register_forward_hook(self.make_hook("Layer2_0_Conv2_", 'forward'))
        self.layer2[1].conv1.register_forward_hook(self.make_hook("Layer2_1_Conv1_", 'forward'))
        self.layer2[1].conv2.register_forward_hook(self.make_hook("Layer2_1_Conv2_", 'forward'))
        self.layer3[0].conv1.register_forward_hook(self.make_hook("Layer3_0_Conv1_", 'forward'))
        self.layer3[0].downsample.register_forward_hook(self.make_hook("Layer3_0_Downsample_", 'forward'))
        self.layer3[0].conv2.register_forward_hook(self.make_hook("Layer3_0_Conv2_", 'forward'))
        self.layer3[1].conv1.register_forward_hook(self.make_hook("Layer3_1_Conv1_", 'forward'))
        self.layer3[1].conv2.register_forward_hook(self.make_hook("Layer3_1_Conv2_", 'forward'))
        self.layer4[0].conv1.register_forward_hook(self.make_hook("Layer4_0_Conv1_", 'forward'))
        self.layer4[0].downsample.register_forward_hook(self.make_hook("Layer4_0_Downsample_", 'forward'))
        self.layer4[0].conv2.register_forward_hook(self.make_hook("Layer4_0_Conv2_", 'forward'))
        self.layer4[1].conv1.register_forward_hook(self.make_hook("Layer4_1_Conv1_", 'forward'))
        self.layer4[1].conv2.register_forward_hook(self.make_hook("Layer4_1_Conv2_", 'forward'))
    '''def get_all_inner_activation(self, x):
        inner_output_index = [0,2,4,8,10,12,16,18]
        inner_output_list = []
        for i in range(23):
            x = self.classifier[i](x)
            if i in inner_output_index:
                inner_output_list.append(x)
        x = x.view(x.size(0), self.num_classes)
        return x,inner_output_list'''
    #############################################################################
    # NOTE(review): the two methods below duplicate (and, being later in the
    # class body, override) the identical input_to_conv1 / conv1_to_output
    # definitions earlier in this class.
    def input_to_conv1(self, x):
        x = self.conv1(x)
        return x
    def conv1_to_output(self, x):
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    #############################################################################
    # Split points inside individual residual blocks.  These rely on the
    # block-level helpers noted in the class docstring.
    def input_to_layer1_0_residual(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1[0].input_to_residual(x)
        return x
    def layer1_0_residual_to_output(self, residual, conv2):
        x = self.layer1[0].residual_to_output(residual,conv2)
        x = self.layer1[1](x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer1_0_conv2(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1[0].input_to_conv2(x)
        return x
    def layer1_0_conv2_to_output(self, x, residual):
        x = self.layer1[0].conv2_to_output(x, residual)
        x = self.layer1[1](x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer1_0_conv1(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1[0].input_to_conv1(x)
        return x
    def layer1_0_conv1_to_output(self, x, residual):
        x = self.layer1[0].conv1_to_output(x, residual)
        x = self.layer1[1](x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    #############################################################################
    def input_to_layer1_1_residual(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1[0](x)
        x = self.layer1[1].input_to_residual(x)
        return x
    def input_to_layer1_1_conv2(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1[0](x)
        x = self.layer1[1].input_to_conv2(x)
        return x
    def layer1_1_conv2_to_output(self, x, residual):
        x = self.layer1[1].conv2_to_output(x, residual)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def layer1_1_conv2_to_output_mask(self, x, residual,mask,pattern):
        x = self.layer1[1].conv2_to_output_mask(x, residual,mask,pattern)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer1_1_conv1(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1[0](x)
        x = self.layer1[1].input_to_conv1(x)
        return x
    def layer1_1_conv1_to_output(self, x, residual):
        x = self.layer1[1].conv1_to_output(x, residual)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    #############################################################################
    #############################################################################
    def input_to_layer2_0_residual(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2[0].input_to_residual(x)
        return x
    def layer2_0_residual_to_output(self, residual, conv2):
        x = self.layer2[0].residual_to_output(residual,conv2)
        x = self.layer2[1](x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer2_0_conv2(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2[0].input_to_conv2(x)
        return x
    def layer2_0_conv2_to_output(self, x, residual):
        x = self.layer2[0].conv2_to_output(x, residual)
        x = self.layer2[1](x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer2_0_conv1(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2[0].input_to_conv1(x)
        return x
    def layer2_0_conv1_to_output(self, x, residual):
        x = self.layer2[0].conv1_to_output(x, residual)
        x = self.layer2[1](x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    #############################################################################
    def input_to_layer2_1_residual(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2[0](x)
        x = self.layer2[1].input_to_residual(x)
        return x
    def input_to_layer2_1_conv2(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2[0](x)
        x = self.layer2[1].input_to_conv2(x)
        return x
    def layer2_1_conv2_to_output(self, x, residual):
        x = self.layer2[1].conv2_to_output(x, residual)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def layer2_1_conv2_to_output_mask(self, x, residual,mask,pattern):
        x = self.layer2[1].conv2_to_output_mask(x, residual,mask,pattern)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer2_1_conv1(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2[0](x)
        x = self.layer2[1].input_to_conv1(x)
        return x
    def layer2_1_conv1_to_output(self, x, residual):
        x = self.layer2[1].conv1_to_output(x, residual)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    #############################################################################
    #############################################################################
    def input_to_layer3_0_residual(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3[0].input_to_residual(x)
        return x
    def layer3_0_residual_to_output(self, residual, conv2):
        x = self.layer3[0].residual_to_output(residual,conv2)
        x = self.layer3[1](x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer3_0_conv2(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3[0].input_to_conv2(x)
        return x
    def layer3_0_conv2_to_output(self, x, residual):
        x = self.layer3[0].conv2_to_output(x, residual)
        x = self.layer3[1](x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer3_0_conv1(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3[0].input_to_conv1(x)
        return x
    def layer3_0_conv1_to_output(self, x, residual):
        x = self.layer3[0].conv1_to_output(x, residual)
        x = self.layer3[1](x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    #############################################################################
    def input_to_layer3_1_residual(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3[0](x)
        x = self.layer3[1].input_to_residual(x)
        return x
    def input_to_layer3_1_conv2(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3[0](x)
        x = self.layer3[1].input_to_conv2(x)
        return x
    def layer3_1_conv2_to_output(self, x, residual):
        x = self.layer3[1].conv2_to_output(x, residual)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def layer3_1_conv2_to_output_mask(self, x, residual,mask,pattern):
        x = self.layer3[1].conv2_to_output_mask(x, residual,mask,pattern)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer3_1_conv1(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3[0](x)
        x = self.layer3[1].input_to_conv1(x)
        return x
    def layer3_1_conv1_to_output(self, x, residual):
        x = self.layer3[1].conv1_to_output(x, residual)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    #############################################################################
    def input_to_layer4_0_residual(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4[0].input_to_residual(x)
        return x
    def layer4_0_residual_to_output(self, residual, conv2):
        x = self.layer4[0].residual_to_output(residual,conv2)
        x = self.layer4[1](x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer4_0_conv2(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4[0].input_to_conv2(x)
        return x
    def layer4_0_conv2_to_output(self, x, residual):
        x = self.layer4[0].conv2_to_output(x, residual)
        x = self.layer4[1](x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer4_0_conv1(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4[0].input_to_conv1(x)
        return x
    def layer4_0_conv1_to_output(self, x, residual):
        x = self.layer4[0].conv1_to_output(x, residual)
        x = self.layer4[1](x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    #############################################################################
    def input_to_layer4_1_residual(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4[0](x)
        x = self.layer4[1].input_to_residual(x)
        return x
    def input_to_layer4_1_conv2(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4[0](x)
        x = self.layer4[1].input_to_conv2(x)
        return x
    def layer4_1_conv2_to_output(self, x, residual):
        x = self.layer4[1].conv2_to_output(x, residual)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def layer4_1_conv2_to_output_mask(self, x, residual,mask,pattern):
        x = self.layer4[1].conv2_to_output_mask(x, residual,mask,pattern)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    def input_to_layer4_1_conv1(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4[0](x)
        x = self.layer4[1].input_to_conv1(x)
        return x
    def layer4_1_conv1_to_output(self, x, residual):
        x = self.layer4[1].conv1_to_output(x, residual)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
    #############################################################################
def resnet18(**kwargs):
    """ResNet-18: BasicBlock stages of depth [2, 2, 2, 2]."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
    """ResNet-34: BasicBlock stages of depth [3, 4, 6, 3]."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def resnet50(**kwargs):
    """ResNet-50: Bottleneck stages of depth [3, 4, 6, 3]."""
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet101(**kwargs):
    """ResNet-101: Bottleneck stages of depth [3, 4, 23, 3]."""
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
def resnet152(**kwargs):
    """ResNet-152: Bottleneck stages of depth [3, 8, 36, 3]."""
    # Fix: the original line carried extraction junk ("| 26,787 | ...")
    # fused onto the return statement, which is not valid Python.
    return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
FeatureRE | FeatureRE-main/models/meta_classifier_cifar10_model.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class MetaClassifierCifar10Model(nn.Module):
    """Small CNN classifier for CIFAR-10 (32x32 RGB inputs, 10 classes).

    Architecture: conv1/conv2 (32 channels) + 2x2 max pool, conv3/conv4
    (64 channels) + 2x2 max pool, then linear 64*8*8 -> 256 -> 256 -> 10
    with dropout (p=0.5) before the final layer.

    Cleanup vs. the original: removed dead commented-out GPU-placement code
    and a commented-out duplicate of the feature-split methods, and dropped
    unused locals.
    """
    def __init__(self):
        super(MetaClassifierCifar10Model, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.max_pool_0 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.linear = nn.Linear(64*8*8, 256)
        self.fc = nn.Linear(256, 256)
        self.output = nn.Linear(256, 10)
        self.relu = nn.ReLU()
    def forward(self, x):
        """Return class logits of shape (batch, 10)."""
        B = x.size()[0]
        x = F.relu(self.conv1(x))
        x = self.max_pool_0(F.relu(self.conv2(x)))
        x = F.relu(self.conv3(x))
        x = self.max_pool(F.relu(self.conv4(x)))
        x = self.relu(self.linear(x.view(B, 64*8*8)))
        # Dropout only fires in training mode (training=self.training).
        x = F.dropout(F.relu(self.fc(x)), 0.5, training=self.training)
        x = self.output(x)
        return x
    def from_input_to_features(self, x, index):
        """Return the 256-d hidden features after the first linear layer.

        ``index`` is unused; it is kept for interface compatibility with
        other models exposing the same feature-split API.
        """
        x = F.relu(self.conv1(x))
        x = self.max_pool_0(F.relu(self.conv2(x)))
        x = F.relu(self.conv3(x))
        x = self.max_pool(F.relu(self.conv4(x)))
        x = self.relu(self.linear(x.view(-1, 64*8*8)))
        return x
    def from_features_to_output(self, x, index):
        """Map 256-d features to class logits. ``index`` is unused."""
        x = F.dropout(F.relu(self.fc(x)), 0.5, training=self.training)
        x = self.output(x)
        return x
| 2,441 | 31.131579 | 70 | py |
FeatureRE | FeatureRE-main/models/preact_resnet.py | """Pre-activation ResNet in PyTorch.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv:1603.05027
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class PreActBlock(nn.Module):
    """Pre-activation BasicBlock: BN + ReLU precede each convolution."""
    expansion = 1
    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        # Optional channel-index selector for the shortcut; left as None
        # unless set externally after construction.
        self.ind = None
        # A 1x1 projection is only needed when shape or channel count changes.
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
            )
    def forward(self, x):
        pre = F.relu(self.bn1(x))
        # Projected shortcuts consume the pre-activated input; identity
        # shortcuts pass the raw input through.
        if hasattr(self, "shortcut"):
            identity = self.shortcut(pre)
        else:
            identity = x
        out = self.conv2(F.relu(self.bn2(self.conv1(pre))))
        if self.ind is not None:
            out = out + identity[:, self.ind, :, :]
        else:
            out = out + identity
        return out
class PreActBottleneck(nn.Module):
    """Pre-activation Bottleneck: 1x1 -> 3x3 -> 1x1 convs, each preceded
    by BN + ReLU; output channels are ``expansion * planes``."""
    expansion = 4
    def __init__(self, in_planes, planes, stride=1):
        super(PreActBottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        # A 1x1 projection is only needed when shape or channel count changes.
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
            )
    def forward(self, x):
        pre = F.relu(self.bn1(x))
        identity = self.shortcut(pre) if hasattr(self, "shortcut") else x
        out = self.conv1(pre)
        out = self.conv2(F.relu(self.bn2(out)))
        out = self.conv3(F.relu(self.bn3(out)))
        return out + identity
class PreActResNet(nn.Module):
    """Pre-activation ResNet with extra split points in its forward pass.

    Besides the standard ``forward``, paired ``input_to_<stage>`` /
    ``<stage>_to_output`` methods run the network up to (resp. from) a given
    stage so callers can inspect or edit intermediate activations and then
    resume the pass; ``forward_activations`` returns every stage output.
    """
    def __init__(self, block, num_blocks, num_classes=10):
        super(PreActResNet, self).__init__()
        # Channel count entering the next stage; updated by _make_layer.
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AvgPool2d(4)
        self.linear = nn.Linear(512 * block.expansion, num_classes)
    def _make_layer(self, block, planes, num_blocks, stride):
        """Build one residual stage; only its first block may downsample."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        """Full forward pass returning class logits."""
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
    def input_to_conv1(self, x):
        """Run only the stem convolution."""
        out = self.conv1(x)
        #out = self.layer1(out)
        #out = self.layer2(out)
        #out = self.layer3(out)
        #out = self.layer4(out)
        return out
    def conv1_to_output(self, x):
        """Resume after the stem convolution and finish the network."""
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
    def input_to_layer1(self, x):
        """Run the stem and stage 1."""
        out = self.conv1(x)
        out = self.layer1(out)
        #out = self.layer2(out)
        #out = self.layer3(out)
        #out = self.layer4(out)
        return out
    def layer1_to_output(self, x):
        """Resume after stage 1 and finish the network."""
        out = self.layer2(x)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
    def input_to_layer2(self, x):
        """Run the stem and stages 1-2."""
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        #out = self.layer3(out)
        #out = self.layer4(out)
        return out
    def layer2_to_output(self, x):
        """Resume after stage 2 and finish the network."""
        out = self.layer3(x)
        out = self.layer4(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
    def input_to_layer3(self, x):
        """Run the stem and stages 1-3."""
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        #out = self.layer4(out)
        return out
    def layer3_to_output(self, x):
        """Resume after stage 3 and finish the network."""
        out = self.layer4(x)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
    def input_to_layer4(self, x):
        """Run the stem and all four stages."""
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        return out
    def layer4_to_output(self, x):
        """Resume after stage 4: pool, flatten and classify."""
        out = self.avgpool(x)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
    def forward_activations(self, x):
        """Return each stage's output together with the final logits."""
        out = self.conv1(x)
        layer1 = self.layer1(out)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        out = self.avgpool(layer4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return layer1,layer2,layer3,layer4,out
    def from_input_to_features(self, x, index):
        """Feature-split API: image -> stage-4 feature map.

        ``index`` is accepted for interface compatibility and unused here.
        """
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        return out
    def from_features_to_output(self, x, index):
        """Feature-split API: stage-4 feature map -> logits. ``index`` unused."""
        out = self.avgpool(x)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def PreActResNet18(num_classes=10):
    """Pre-activation ResNet-18 (BasicBlock depths [2, 2, 2, 2])."""
    model = PreActResNet(PreActBlock, [2, 2, 2, 2], num_classes=num_classes)
    return model
def PreActResNet34():
    """Pre-activation ResNet-34 (BasicBlock depths [3, 4, 6, 3])."""
    model = PreActResNet(PreActBlock, [3, 4, 6, 3])
    return model
def PreActResNet50():
    """Pre-activation ResNet-50 (Bottleneck depths [3, 4, 6, 3])."""
    model = PreActResNet(PreActBottleneck, [3, 4, 6, 3])
    return model
def PreActResNet101():
    """Pre-activation ResNet-101 (Bottleneck depths [3, 4, 23, 3])."""
    model = PreActResNet(PreActBottleneck, [3, 4, 23, 3])
    return model
def PreActResNet152():
    """Pre-activation ResNet-152 (Bottleneck depths [3, 8, 36, 3])."""
    model = PreActResNet(PreActBottleneck, [3, 8, 36, 3])
    return model
def test():
    """Smoke test: push one random CIFAR-sized image through PreActResNet18."""
    net = PreActResNet18()
    out = net(torch.randn(1, 3, 32, 32))
    print(out.size())
# test()
| 7,366 | 29.316872 | 103 | py |
FeatureRE | FeatureRE-main/models/ULP_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
#from utils.stn import STN
class CNN_classifier(nn.Module):
    """CNN classifier: six 5x5 conv layers (in conv-conv-pool groups) plus a
    three-layer fully connected head.

    Args:
        init_num_filters (int): number of filters used by every conv layer
        lrelu_slope (float): stored in ``lrelu_slope_`` but not used by any
            layer here (no LeakyReLU in this module)
        inter_fc_dim (int): width of the first fully connected layer
        nofclasses (int): number of output classes
        nofchannels (int): number of input image channels
        use_stn (bool): if True, apply a spatial transformer ``STN()`` to the
            input first.
            NOTE(review): the ``from utils.stn import STN`` import is
            commented out at the top of this file, so constructing with
            use_stn=True would raise NameError — confirm intended usage.
    """
    def __init__(self, init_num_filters=32, lrelu_slope=0.2, inter_fc_dim=128, nofclasses=10,nofchannels=3,use_stn=True):
        super(CNN_classifier, self).__init__()
        self.use_stn=use_stn
        self.init_num_filters_ = init_num_filters
        self.lrelu_slope_ = lrelu_slope
        self.inter_fc_dim_ = inter_fc_dim
        self.nofclasses_ = nofclasses
        if self.use_stn:
            self.stn = STN()
        # Three conv-conv-pool groups; with 32x32 inputs the spatial size
        # shrinks 32 -> 16 -> 8 -> 4, matching the 4*4 flatten below.
        self.features = nn.Sequential(
            nn.Conv2d(nofchannels, self.init_num_filters_ * 1, kernel_size=5,padding=2),
            nn.BatchNorm2d(self.init_num_filters_ * 1),
            nn.ReLU(True),
            nn.Conv2d(self.init_num_filters_ * 1, self.init_num_filters_ * 1, kernel_size=5,padding=2),
            nn.BatchNorm2d(self.init_num_filters_ * 1),
            nn.ReLU(True),
            nn.MaxPool2d(2,2),
            nn.Conv2d(self.init_num_filters_ * 1, self.init_num_filters_ * 1, kernel_size=5,padding=2),
            nn.BatchNorm2d(self.init_num_filters_ * 1),
            nn.ReLU(True),
            nn.Conv2d(self.init_num_filters_ * 1, self.init_num_filters_ * 1, kernel_size=5,padding=2),
            nn.BatchNorm2d(self.init_num_filters_ * 1),
            nn.ReLU(True),
            nn.MaxPool2d(2,2),
            nn.Conv2d(self.init_num_filters_ * 1, self.init_num_filters_ * 1, kernel_size=5,padding=2),
            nn.BatchNorm2d(self.init_num_filters_ * 1),
            nn.ReLU(True),
            nn.Conv2d(self.init_num_filters_ * 1, self.init_num_filters_ * 1, kernel_size=5,padding=2),
            nn.BatchNorm2d(self.init_num_filters_ * 1),
            nn.ReLU(True),
            nn.MaxPool2d(2,2),
        )
        # Head: (filters * 4 * 4) -> inter_fc_dim -> inter_fc_dim/2 -> classes,
        # with batch norm and dropout between the linear layers.
        self.fc = nn.Sequential(
            nn.Linear(self.init_num_filters_ *4*4, self.inter_fc_dim_),
            nn.BatchNorm1d(self.inter_fc_dim_),
            nn.ReLU(True),
            nn.Dropout(p=.2),
            nn.Linear(self.inter_fc_dim_, int(self.inter_fc_dim_/2)),
            nn.BatchNorm1d(int(self.inter_fc_dim_/2)),
            nn.ReLU(True),
            nn.Dropout(p=.2),
            nn.Linear(int(self.inter_fc_dim_/2), self.nofclasses_)
        )
    def forward(self, x):
        """Return class logits; optionally rectifies the input via the STN."""
        if self.use_stn:
            x = self.stn(x)
        x = self.features(x)
        # print(x.shape)
        x = x.view(-1, self.init_num_filters_ *4*4)
        x = self.fc(x)
        return x
| 2,934 | 36.628205 | 121 | py |
FeatureRE | FeatureRE-main/models/lenet.py | # This part is borrowed from https://github.com/huawei-noah/Data-Efficient-Model-Compression
import torch.nn as nn
class LeNet5(nn.Module):
    """Classic LeNet-5 for 32x32 inputs (10 classes).

    Pipeline: conv1(->6) -> pool -> conv2(->16) -> pool -> conv3(->120)
    -> fc1(->84) -> fc2(->10), with ReLU nonlinearities throughout.

    Cleanup vs. the original: ``forward_activations`` now delegates to
    ``get_all_inner_activation`` (they duplicated the same pass verbatim),
    and ``== False`` comparisons were replaced with idiomatic ``not``.
    """
    def __init__(self,in_channels=1):
        super(LeNet5, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, 6, kernel_size=(5, 5))
        self.relu1 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=(5, 5))
        self.relu2 = nn.ReLU()
        self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
        self.conv3 = nn.Conv2d(16, 120, kernel_size=(5, 5))
        self.relu3 = nn.ReLU()
        self.fc1 = nn.Linear(120, 84)
        self.relu4 = nn.ReLU()
        self.fc2 = nn.Linear(84, 10)
    def forward(self, img, out_feature=False):
        """Return logits; if ``out_feature`` also return the 120-d conv feature."""
        output = self.conv1(img)
        output = self.maxpool1(self.relu1(output))
        output = self.conv2(output)
        output = self.maxpool2(self.relu2(output))
        output = self.relu3(self.conv3(output))
        # conv3 emits 120 channels at 1x1 spatial size for 32x32 inputs.
        feature = output.view(-1, 120)
        output = self.relu4(self.fc1(feature))
        output = self.fc2(output)
        if not out_feature:
            return output
        return output, feature
    def forward_activations(self, x):
        """Return the conv1/conv2/conv3 and fc1 pre-activations plus the logits."""
        out, inner = self.get_all_inner_activation(x)
        return inner[0], inner[1], inner[2], inner[3], out
    def get_all_inner_activation(self, img):
        """Return ``(logits, [conv1, conv2, conv3, fc1] pre-activation outputs)``."""
        inner_output_list = []
        output = self.conv1(img)
        inner_output_list.append(output)
        output = self.maxpool1(self.relu1(output))
        output = self.conv2(output)
        inner_output_list.append(output)
        output = self.maxpool2(self.relu2(output))
        output = self.conv3(output)
        inner_output_list.append(output)
        output = self.relu3(output)
        feature = output.view(-1, 120)
        output = self.fc1(feature)
        inner_output_list.append(output)
        output = self.relu4(output)
        output = self.fc2(output)
        return output, inner_output_list
    def from_input_to_features(self, x, index):
        """Feature-split API: image -> 84-d hidden activation (after fc1+ReLU).

        ``index`` is accepted for interface compatibility and unused here.
        """
        output = self.conv1(x)
        output = self.maxpool1(self.relu1(output))
        output = self.conv2(output)
        output = self.maxpool2(self.relu2(output))
        output = self.relu3(self.conv3(output))
        feature = output.view(-1, 120)
        return self.relu4(self.fc1(feature))
    def from_features_to_output(self, x, index):
        """Feature-split API: 84-d hidden activation -> logits. ``index`` unused."""
        return self.fc2(x)
class LeNet5Half(nn.Module):
    """Half-width LeNet-5 for 32x32 single-channel inputs (10 classes).

    Same layout as LeNet5 with channel counts halved:
    conv1(->3) -> pool -> conv2(->8) -> pool -> conv3(->60)
    -> fc1(->42) -> fc2(->10).

    Fixes vs. the original: the final return line carried extraction junk
    ("| 4,665 | ...") fused onto it, and ``== False`` was unidiomatic.
    """
    def __init__(self):
        super(LeNet5Half, self).__init__()
        self.conv1 = nn.Conv2d(1, 3, kernel_size=(5, 5))
        self.relu1 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
        self.conv2 = nn.Conv2d(3, 8, kernel_size=(5, 5))
        self.relu2 = nn.ReLU()
        self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
        self.conv3 = nn.Conv2d(8, 60, kernel_size=(5, 5))
        self.relu3 = nn.ReLU()
        self.fc1 = nn.Linear(60, 42)
        self.relu4 = nn.ReLU()
        self.fc2 = nn.Linear(42, 10)
    def forward(self, img, out_feature=False):
        """Return logits; if ``out_feature`` also return the 60-d conv feature."""
        output = self.conv1(img)
        output = self.maxpool1(self.relu1(output))
        output = self.conv2(output)
        output = self.maxpool2(self.relu2(output))
        output = self.relu3(self.conv3(output))
        # conv3 emits 60 channels at 1x1 spatial size for 32x32 inputs.
        feature = output.view(-1, 60)
        output = self.relu4(self.fc1(feature))
        output = self.fc2(output)
        if not out_feature:
            return output
        return output, feature
MRL-CQA | MRL-CQA-master/S2SRL/train_scst_nsm.py | #!/usr/bin/env python3
import os
import sys
import random
import argparse
import logging
import numpy as np
from tensorboardX import SummaryWriter
from libbots import data, model, utils
import torch
import torch.optim as optim
import torch.nn.functional as F
import time
import ptan
# Root directory where checkpoints and the vocabulary are written.
SAVES_DIR = "../data/saves"
# Mini-batch size for the policy-gradient updates.
BATCH_SIZE = 8
LEARNING_RATE = 1e-3
MAX_EPOCHS = 30
# Maximum output sequence length (tokens) used when decoding.
MAX_TOKENS = 40
MAX_TOKENS_INT = 43
# Fraction of the data used for training; the remainder is held out for testing.
TRAIN_RATIO = 0.985
GAMMA = 0.05
MAX_MEMORY_BUFFER_SIZE = 10
# ALPHA is the bonus scalar.
# The value of α depends on the scale of task rewards.
ALPHA = 0.1
# Vocabulary file shared between questions and programs.
DIC_PATH = '../data/auto_QA_data/share.question'
# Training questions with annotated answer info (NSM-masked 1.0% split).
TRAIN_QUESTION_ANSWER_PATH = '../data/auto_QA_data/nsm_mask_even_1.0%/RL_train_TR.question'
# Module-level logger used throughout this script.
log = logging.getLogger("train")
# Calculate 0-1 sparse reward for samples in test dataset to judge the performance of the model.
def run_test(test_data, net, rev_emb_dict, end_token, device="cuda"):
    """Greedy-decode every test pair and return the mean 0-1 (sparse) reward.

    Args:
        test_data: iterable of (question, qa_info) pairs.
        net: the seq2seq PhraseModel used for encoding/decoding.
        rev_emb_dict: index -> word mapping used to detokenize actions.
        end_token: token index that terminates greedy decoding.
        device: device string forwarded to net.pack_input.

    NOTE(review): reads the module-level ``beg_token`` created in the
    __main__ block, so it must only be called after that setup runs; it also
    divides by the pair count, so empty ``test_data`` raises
    ZeroDivisionError — confirm callers guarantee both.
    """
    argmax_reward_sum = 0.0
    argmax_reward_count = 0.0
    # p1 is one sentence, p2 is sentence list.
    for p1, p2 in test_data:
        # Transform sentence to padded embeddings.
        input_seq = net.pack_input(p1, net.emb, device)
        # Get hidden states from encoder.
        # enc = net.encode(input_seq)
        context, enc = net.encode_context(input_seq)
        # Decode sequence by feeding predicted token to the net again. Act greedily.
        # Return N*outputvocab, N output token indices.
        _, tokens = net.decode_chain_argmax(enc, net.emb(beg_token), seq_len=data.MAX_TOKENS, context=context[0],
                                            stop_at_token=end_token)
        # Show what the output action sequence is.
        action_tokens = []
        for temp_idx in tokens:
            if temp_idx in rev_emb_dict and rev_emb_dict.get(temp_idx) != '#END':
                action_tokens.append(str(rev_emb_dict.get(temp_idx)).upper())
        # Using 0-1 reward to compute accuracy.
        reward = utils.calc_True_Reward(action_tokens, p2, False)
        # reward = random.random()
        argmax_reward_sum += float(reward)
        argmax_reward_count += 1
    return float(argmax_reward_sum) / float(argmax_reward_count)
if __name__ == "__main__":
logging.basicConfig(format="%(asctime)-15s %(levelname)s %(message)s", level=logging.INFO)
# command line parameters
# -a=True means using adaptive reward to train the model. -a=False is using 0-1 reward.
sys.argv = ['train_scst_nsm.py', '--cuda',
'-l=../data/saves/crossent_even_1%/pre_bleu_0.946_55.dat',
'-n=rl_TR_1%_batch8_NSM', '-s=5', '-a=0', '--att=0', '--lstm=1',
'-w2v=50', '--beam_width=10', '--NSM']
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", action='store_true', default=False, help="Enable cuda")
parser.add_argument("-n", "--name", required=True, help="Name of the run")
parser.add_argument("-l", "--load", required=True,
help="Load the pre-trained model whereby continue training the RL mode")
# Number of decoding samples.
parser.add_argument("-s", "--samples", type=int, default=4, help="Count of samples in prob mode")
# The size of the beam search.
parser.add_argument("--beam_width", type=int, default=10, help="Size of beam search")
# The dimension of the word embeddings.
parser.add_argument("-w2v", "--word_dimension", type=int, default=50, help="The dimension of the word embeddings")
# Choose the function to compute reward (0-1 or adaptive reward).
# If a = true, 1 or yes, the adaptive reward is used. Otherwise 0-1 reward is used.
parser.add_argument("-a", "--adaptive", type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
help="0-1 or adaptive reward")
parser.add_argument("--disable-skip", default=False, action='store_true',
help="Disable skipping of samples with high argmax BLEU")
parser.add_argument("--NSM", default=False, action='store_true',
help="Neural Symbolic Machine")
# Choose the function to compute reward (0-1 or adaptive reward).
# If a = true, 1 or yes, the adaptive reward is used. Otherwise 0-1 reward is used.
parser.add_argument("--att", type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
help="Using attention mechanism in seq2seq")
parser.add_argument("--lstm", type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
help="Using LSTM mechanism in seq2seq")
# If false, the embedding tensors in the model do not need to be trained.
parser.add_argument('--embed-grad', action='store_false', help='optimizing word embeddings when training')
parser.add_argument("--MonteCarlo", action='store_true', default=False,
help="using Monte Carlo algorithm for REINFORCE")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
log.info("Device info: %s", str(device))
saves_path = os.path.join(SAVES_DIR, args.name)
os.makedirs(saves_path, exist_ok=True)
# # List of (question, {question information and answer}) pairs, the training pairs are in format of 1:1.
phrase_pairs, emb_dict = data.load_RL_data_TR(TRAIN_QUESTION_ANSWER_PATH, DIC_PATH, MAX_TOKENS, bool(args.NSM))
log.info("Obtained %d phrase pairs with %d uniq words from %s...", len(phrase_pairs),
len(emb_dict), TRAIN_QUESTION_ANSWER_PATH)
data.save_emb_dict(saves_path, emb_dict)
end_token = emb_dict[data.END_TOKEN]
train_data = data.encode_phrase_pairs_RLTR(phrase_pairs, emb_dict)
# # list of (seq1, [seq*]) pairs,把训练对做成1:N的形式;
# train_data = data.group_train_data(train_data)
train_data = data.group_train_data_RLTR(train_data)
rand = np.random.RandomState(data.SHUFFLE_SEED)
rand.shuffle(train_data)
train_data, test_data = data.split_train_test(train_data, TRAIN_RATIO)
log.info("Training data converted, got %d samples", len(train_data))
log.info("Train set has %d phrases, test %d", len(train_data), len(test_data))
log.info("Batch size is %d", BATCH_SIZE)
log.info("Beam search size is %d", args.beam_width)
if args.att:
log.info("Using attention mechanism to train the SEQ2SEQ model...")
else:
log.info("Train the SEQ2SEQ model without attention mechanism...")
if args.lstm:
log.info("Using LSTM mechanism to train the SEQ2SEQ model...")
else:
log.info("Using RNN mechanism to train the SEQ2SEQ model...")
if args.MonteCarlo:
log.info("Using Monte Carlo algorithm for Policy Gradient...")
if args.NSM:
log.info("Using Neural Symbolic Machine algorithm for RL...")
# Index -> word.
rev_emb_dict = {idx: word for word, idx in emb_dict.items()}
# PhraseModel.__init__() to establish a LSTM model.
net = model.PhraseModel(emb_size=args.word_dimension, dict_size=len(emb_dict), hid_size=model.HIDDEN_STATE_SIZE,
LSTM_FLAG=args.lstm, ATT_FLAG=args.att).to(device)
# Using cuda.
net.cuda()
log.info("Model: %s", net)
writer = SummaryWriter(comment="-" + args.name)
# Load the pre-trained seq2seq model.
net.load_state_dict(torch.load(args.load))
log.info("Model loaded from %s, continue training in RL mode...", args.load)
if (args.adaptive):
log.info("Using adaptive reward to train the REINFORCE model...")
else:
log.info("Using 0-1 sparse reward to train the REINFORCE model...")
# BEGIN token
beg_token = torch.LongTensor([emb_dict[data.BEGIN_TOKEN]]).to(device)
beg_token = beg_token.cuda()
# TBMeanTracker (TensorBoard value tracker):
# allows to batch fixed amount of historical values and write their mean into TB
with ptan.common.utils.TBMeanTracker(writer, batch_size=100) as tb_tracker:
# optimiser = optim.Adam(net.parameters(), lr=LEARNING_RATE, eps=1e-3)
optimiser = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=LEARNING_RATE, eps=1e-3)
batch_idx = 0
batch_count = 0
best_true_reward = None
time_start = time.time()
# Loop in epochs.
for epoch in range(MAX_EPOCHS):
random.shuffle(train_data)
dial_shown = False
# total_samples = 0
# skipped_samples = 0
true_reward_argmax = []
true_reward_sample = []
for batch in data.iterate_batches(train_data, BATCH_SIZE):
batch_idx += 1
# Each batch conduct one gradient upweight.
batch_count += 1
# optimizer.zero_grad() clears x.grad for every parameter x in the optimizer.
# It’s important to call this before loss.backward(),
# otherwise you’ll accumulate the gradients from multiple passes.
optimiser.zero_grad()
# input_seq: the padded and embedded batch-sized input sequence.
# input_batch: the token ID matrix of batch-sized input sequence. Each row is corresponding to one input sentence.
# output_batch: the token ID matrix of batch-sized output sequences. Each row is corresponding to a list of several output sentences.
input_seq, input_batch, output_batch = net.pack_batch_no_out(batch, net.emb, device)
input_seq = input_seq.cuda()
# Get (two-layer) hidden state of encoder of samples in batch.
# enc = net.encode(input_seq)
context, enc = net.encode_context(input_seq)
net_policies = []
net_actions = []
net_advantages = []
net_losses = []
probability_normalization = []
# Transform ID to embedding.
beg_embedding = net.emb(beg_token)
beg_embedding = beg_embedding.cuda()
nsm_net_losses = []
for idx, inp_idx in enumerate(input_batch):
# # Test whether the input sequence is correctly transformed into indices.
# input_tokens = [rev_emb_dict[temp_idx] for temp_idx in inp_idx]
# print (input_tokens)
# Get IDs of reference sequences' tokens corresponding to idx-th input sequence in batch.
qa_info = output_batch[idx]
# print("%s is training..." % (qa_info['qid']))
# print (qa_info['qid'])
# # Get the (two-layer) hidden state of encoder of idx-th input sequence in batch.
item_enc = net.get_encoded_item(enc, idx)
# # 'r_argmax' is the list of out_logits list and 'actions' is the list of output tokens.
# # The output tokens are generated greedily by using chain_argmax (using last setp's output token as current input token).
r_argmax, actions = net.decode_chain_argmax(item_enc, beg_embedding, data.MAX_TOKENS, context[idx],
stop_at_token=end_token)
# Show what the output action sequence is.
action_tokens = []
for temp_idx in actions:
if temp_idx in rev_emb_dict and rev_emb_dict.get(temp_idx) != '#END':
action_tokens.append(str(rev_emb_dict.get(temp_idx)).upper())
# Get the highest BLEU score as baseline used in self-critic.
# If the last parameter is false, it means that the 0-1 reward is used to calculate the accuracy.
# Otherwise the adaptive reward is used.
argmax_reward = utils.calc_True_Reward(action_tokens, qa_info, args.adaptive)
# argmax_reward = random.random()
true_reward_argmax.append(argmax_reward)
if args.NSM and 'pseudo_gold_program_reward' not in qa_info:
pseudo_program_tokens = str(qa_info['pseudo_gold_program']).strip().split()
pseudo_program_reward = utils.calc_True_Reward(pseudo_program_tokens, qa_info, args.adaptive)
qa_info['pseudo_gold_program_reward'] = pseudo_program_reward
# # In this case, the BLEU score is so high that it is not needed to train such case with RL.
# if not args.disable_skip and argmax_reward > 0.99:
# skipped_samples += 1
# continue
# In one epoch, when model is optimized for the first time, the optimized result is displayed here.
# After that, all samples in this epoch don't display anymore.
if not dial_shown:
# data.decode_words transform IDs to tokens.
log.info("Input: %s", utils.untokenize(data.decode_words(inp_idx, rev_emb_dict)))
orig_response = qa_info['orig_response']
log.info("orig_response: %s", orig_response)
log.info("Argmax: %s, reward=%.4f", utils.untokenize(data.decode_words(actions, rev_emb_dict)),
argmax_reward)
sample_logits_list, action_sequence_list = net.beam_decode(hid=item_enc, seq_len=data.MAX_TOKENS,
context=context[idx],
start_token=beg_token,
stop_at_token=end_token,
beam_width=args.beam_width,
topk=args.samples)
qid = qa_info['qid']
# The data for each task in a batch of tasks.
inner_net_policies = []
inner_net_actions = []
inner_net_advantages = []
inner_probability_normalization = []
nsm_prob_list = []
nsm_advantage_list = []
nsm_alpha_list = []
for sample_index in range(args.samples):
# 'r_sample' is the list of out_logits list and 'actions' is the list of output tokens.
# The output tokens are sampled following probability by using chain_sampling.
actions = action_sequence_list[sample_index]
r_sample = sample_logits_list[sample_index]
# Show what the output action sequence is.
action_tokens = []
for temp_idx in actions:
if temp_idx in rev_emb_dict and rev_emb_dict.get(temp_idx) != '#END':
action_tokens.append(str(rev_emb_dict.get(temp_idx)).upper())
# If the last parameter is false, it means that the 0-1 reward is used to calculate the accuracy.
# Otherwise the adaptive reward is used.
sample_reward = utils.calc_True_Reward(action_tokens, qa_info, args.adaptive)
# sample_reward = random.random()
if not dial_shown:
log.info("Sample: %s, reward=%.4f",
utils.untokenize(data.decode_words(actions, rev_emb_dict)), sample_reward)
if args.MonteCarlo:
# Record the data for each task in a batch.
inner_net_policies.append(r_sample)
inner_net_actions.extend(actions)
elif args.NSM:
# Transform each action's logits to probabilities.
prob_v = F.softmax(r_sample, dim=1).to(device)
actions_t = torch.LongTensor(actions).to(device)
# Get each action's probability.
prob = prob_v[range(len(actions)), actions_t].to(device)
# Get the probability of the action sequence.
prob_prod = prob.prod().to(device)
# Get the unbiased reward.
advantage = sample_reward - argmax_reward
nsm_prob_list.append(prob_prod)
nsm_advantage_list.append(advantage)
# Handle alpha value used in NSM.
pseudo_program = str(qa_info['pseudo_gold_program']).strip()
predicted_program = (' '.join(action_tokens)).strip()
if pseudo_program == predicted_program:
nsm_alpha_list.append(float(ALPHA))
else:
nsm_alpha_list.append(0.0)
# When finding a better program, using the better one to replace the worse one.
# If R(C_j) > R(C*) then C* <- C_j;
if sample_reward > qa_info['pseudo_gold_program_reward']:
qa_info['pseudo_gold_program'] = predicted_program
qa_info['pseudo_gold_program_reward'] = sample_reward
else:
# Record the data for all tasks in a batch.
net_policies.append(r_sample)
net_actions.extend(actions)
if not args.NSM:
advantages = [sample_reward - argmax_reward] * len(actions)
if args.MonteCarlo:
inner_net_advantages.extend(advantages)
else:
net_advantages.extend(advantages)
true_reward_sample.append(sample_reward)
dial_shown = True
# Compute the loss for each task in a batch.
if args.NSM:
# Sum all the probabilities for all the generated action sequences as Σ(p_j')
question_prob_sum = torch.stack(nsm_prob_list).sum().to(device)
# p_j = (1-α)*p_j/Σ(p_j') + α (if C_j = C*)
# p_j = (1-α)*p_j/Σ(p_j') + 0.0 (if C_j ≠ C*)
nsm_prob = torch.stack(nsm_prob_list).to(device) * (1.0 - float(ALPHA))
nsm_prob_div = torch.div(nsm_prob, question_prob_sum)
nsm_prob_add = torch.add(nsm_prob_div, torch.FloatTensor(nsm_alpha_list).to(device))
prob_v = nsm_prob_add.to(device)
# p_j = log(p_j)
log_prob_v = torch.log(prob_v).to(device)
# J = Σ_j(p_j * adv_j)
log_prob_actions_v = torch.FloatTensor(nsm_advantage_list).to(device) * log_prob_v
log_prob_actions_v = log_prob_actions_v.to(device)
# Monte carlo simulation: 1/j * J.
# Loss is: - 1/j * J.
loss_policy_v = -log_prob_actions_v.mean().to(device)
nsm_net_losses.append(loss_policy_v)
elif args.MonteCarlo:
# Logits of all the output tokens whose size is 1 * N;
inner_policies_v = torch.cat(inner_net_policies).to(device)
# Indices of all the output tokens whose size is 1 * N;
inner_actions_t = torch.LongTensor(inner_net_actions).to(device)
# All output tokens reward whose size is 1 *pack_batch N;
inner_adv_v = torch.FloatTensor(inner_net_advantages).to(device)
# Compute log(softmax(logits)) of all output tokens in size of N * output vocab size;
inner_log_prob_v = F.log_softmax(inner_policies_v, dim=1).to(device)
# Q_1 = Q_2 =...= Q_n = BLEU(OUT,REF);
# ▽J = Σ_n[Q▽logp(T)] = ▽Σ_n[Q*logp(T)] = ▽[Q_1*logp(T_1)+Q_2*logp(T_2)+...+Q_n*logp(T_n)];
# log_prob_v[range(len(net_actions)), actions_t]: for each output, get the output token's log(softmax(logits)).
# adv_v * log_prob_v[range(len(net_actions)), actions_t]:
# get Q * logp(T) for all tokens of all decode_chain_sampling samples in size of 1 * N;
inner_log_prob_actions_v = inner_adv_v * inner_log_prob_v[
range(len(inner_net_actions)), inner_actions_t].to(device)
# For the optimizer is Adam (Adaptive Moment Estimation) which is a optimizer used for gradient descent.
# Therefore, to maximize ▽J (log_prob_actions_v) is to minimize -▽J.
# .mean() is to calculate Monte Carlo sampling.
inner_loss_policy_v = -inner_log_prob_actions_v.mean().to(device)
# Record the loss for each task in a batch.
net_losses.append(inner_loss_policy_v)
if not net_policies and not net_losses and not nsm_net_losses:
continue
# Data for decode_chain_sampling samples and the number of such samples is the same as args.samples parameter.
if args.MonteCarlo:
batch_net_losses = torch.stack(net_losses).to(device)
# .mean() is utilized to calculate Mini-Batch Gradient Descent.
loss_policy_v = batch_net_losses.mean().to(device)
elif args.NSM:
# Transform a list of tensors to a tensor.
batch_nsm_net_losses = torch.stack(nsm_net_losses).to(device)
# .mean() is utilized to calculate Mini-Batch Gradient Descent.
nsm_loss_policy_v = batch_nsm_net_losses.mean().to(device)
else:
# Logits of all output tokens whose size is N * output vocab size; N is the number of output tokens of decode_chain_sampling samples.
policies_v = torch.cat(net_policies).to(device)
# Indices of all output tokens whose size is 1 * N;
actions_t = torch.LongTensor(net_actions).to(device)
# All output tokens reward whose size is 1 *pack_batch N;
adv_v = torch.FloatTensor(net_advantages).to(device)
# Compute log(softmax(logits)) of all output tokens in size of N * output vocab size;
log_prob_v = F.log_softmax(policies_v, dim=1).to(device)
# Q_1 = Q_2 =...= Q_n = BLEU(OUT,REF);
# ▽J = Σ_n[Q▽logp(T)] = ▽Σ_n[Q*logp(T)] = ▽[Q_1*logp(T_1)+Q_2*logp(T_2)+...+Q_n*logp(T_n)];
# log_prob_v[range(len(net_actions)), actions_t]: for each output, get the output token's log(softmax(logits)).
# adv_v * log_prob_v[range(len(net_actions)), actions_t]:
# get Q * logp(T) for all tokens of all decode_chain_sampling samples in size of 1 * N;
log_prob_actions_v = adv_v * log_prob_v[range(len(net_actions)), actions_t].to(device)
# For the optimizer is Adam (Adaptive Moment Estimation) which is a optimizer used for gradient descent.
# Therefore, to maximize ▽J (log_prob_actions_v) is to minimize -▽J.
# .mean() is used to calculate Monte Carlo sampling.
loss_policy_v = -log_prob_actions_v.mean().to(device)
if args.NSM:
loss_v = nsm_loss_policy_v
else:
loss_v = loss_policy_v
# loss.backward() computes dloss/dx for every parameter x which has requires_grad=True.
# These are accumulated into x.grad for every parameter x. In pseudo-code:
# x.grad += dloss/dx
loss_v.backward()
# To conduct a gradient ascent to minimize the loss (which is to maximize the reward).
# optimizer.step updates the value of x using the gradient x.grad.
# For example, the SGD optimizer performs:
# x += -lr * x.grad
optimiser.step()
if not args.MonteCarlo and not args.NSM:
tb_tracker.track("advantage", adv_v, batch_idx)
tb_tracker.track("loss_total", loss_v, batch_idx)
log.info("Epoch %d, Batch %d is trained!", epoch, batch_count)
# After one epoch, compute the bleus for samples in test dataset.
true_reward_test = run_test(test_data, net, rev_emb_dict, end_token, str(device))
# After one epoch, get the average of the decode_chain_argmax bleus for samples in training dataset.
true_reward_armax = np.mean(true_reward_argmax)
writer.add_scalar("true_reward_test", true_reward_test, batch_idx)
writer.add_scalar("true_reward_armax", true_reward_armax, batch_idx)
# After one epoch, get the average of the decode_chain_sampling bleus for samples in training dataset.
writer.add_scalar("true_reward_sample", np.mean(true_reward_sample), batch_idx)
# writer.add_scalar("skipped_samples", skipped_samples/total_samples if total_samples!=0 else 0, batch_idx)
# log.info("Batch %d, skipped_samples: %d, total_samples: %d", batch_idx, skipped_samples, total_samples)
writer.add_scalar("epoch", batch_idx, epoch)
log.info("Epoch %d, test reward: %.3f", epoch, true_reward_test)
if best_true_reward is None or best_true_reward < true_reward_test:
best_true_reward = true_reward_test
log.info("Best true reward updated: %.4f", true_reward_test)
# Save the updated seq2seq parameters trained by RL.
torch.save(net.state_dict(),
os.path.join(saves_path, "truereward_%.3f_%02d.dat" % (true_reward_test, epoch)))
# if epoch % 10 == 0:
# # The parameters are stored after each epoch.
torch.save(net.state_dict(), os.path.join(saves_path, "epoch_%03d_%.3f_%.3f.dat" % (
epoch, float(true_reward_armax), true_reward_test)))
time_end = time.time()
log.info("Training time is %.3fs." % (time_end - time_start))
print("Training time is %.3fs." % (time_end - time_start))
writer.close()
| 27,125 | 57.461207 | 153 | py |
MRL-CQA | MRL-CQA-master/S2SRL/data_test_maml.py | # !/usr/bin/env python3
# The file is used to predict the action sequences for full-data test dataset.
import argparse
import logging
import sys
from libbots import data, model, utils, metalearner
import torch
log = logging.getLogger("data_test")
DIC_PATH = '../data/auto_QA_data/share.question'
TRAIN_944K_QUESTION_ANSWER_PATH = '../data/auto_QA_data/CSQA_DENOTATIONS_full_944K.json'
DICT_944K = '../data/auto_QA_data/CSQA_result_question_type_944K.json'
DICT_944K_WEAK = '../data/auto_QA_data/CSQA_result_question_type_count944K.json'
MAX_TOKENS = 40
if __name__ == "__main__":
    logging.basicConfig(format="%(asctime)-15s %(levelname)s %(message)s", level=logging.INFO)
    # Command line parameters for final test.
    # NOTE(review): sys.argv is overwritten here, so any real command-line
    # arguments are ignored -- confirm this hard-coded configuration is intended.
    sys.argv = ['data_test_maml.py', '-m=epoch_020_0.784_0.741.dat', '-p=sample_final_maml',
                '--n=maml_reptile', '--cuda', '-s=5', '-a=0', '--att=0', '--lstm=1',
                '--fast-lr=1e-4', '--meta-lr=1e-4', '--steps=5', '--batches=1', '--weak=1', '--embed-grad']
    parser = argparse.ArgumentParser()
    # parser.add_argument("--data", required=True,
    #                     help="Category to use for training. Empty string to train on full processDataset")
    parser.add_argument("-m", "--model", required=True, help="Model name to load")
    parser.add_argument("-p", "--pred", required=True, help="the test processDataset format, " \
                        "py is one-to-one (one sentence with one reference), rl is one-to-many")
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    # Choose the function to compute reward (0-1 or adaptive reward).
    # If a = true, 1 or yes, the adaptive reward is used. Otherwise 0-1 reward is used.
    parser.add_argument("--att", type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
                        help="Using attention mechanism in seq2seq")
    parser.add_argument("--lstm", type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
                        help="Using LSTM mechanism in seq2seq")
    parser.add_argument("--cuda", action='store_true', default=False, help="Enable cuda")
    # Number of decoding samples.
    parser.add_argument("-s", "--samples", type=int, default=4, help="Count of samples in prob mode")
    # The action='store_true' means once the parameter is assigned a value, the action is to mark it as 'True';
    # If there is no value of the parameter, the value is assigned as 'False'.
    # Conversely, if action is 'store_false', if the parameter has a value, the parameter is viewed as 'False'.
    parser.add_argument('--first-order', action='store_true', help='use the first-order approximation of MAML')
    parser.add_argument('--fast-lr', type=float, default=0.0001,
                        help='learning rate for the 1-step gradient update of MAML')
    parser.add_argument('--meta-lr', type=float, default=0.0001,
                        help='learning rate for the meta optimization')
    parser.add_argument('--steps', type=int, default=5, help='steps in inner loop of MAML')
    parser.add_argument('--batches', type=int, default=5, help='tasks of a batch in outer loop of MAML')
    # If weak is true, it means when searching for support set, the questions with same number of E/R/T but different relation will be retrieved if the questions in this pattern is less than the number of steps.
    parser.add_argument("--weak", type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
                        help="Using weak mode to search for support set")
    # If a = true, 1 or yes, the adaptive reward is used. Otherwise 0-1 reward is used.
    parser.add_argument("-a", "--adaptive", type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
                        help="0-1 or adaptive reward")
    # If false, the embedding tensors in the model do not need to be trained.
    parser.add_argument('--embed-grad', action='store_false', help='fix embeddings when training')
    parser.add_argument('--retriever-random', action='store_true', help='randomly get support set for the retriever')
    parser.add_argument("--MonteCarlo", action='store_true', default=False,
                        help="using Monte Carlo algorithm for REINFORCE")
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")
    log.info("Device info: %s", str(device))
    # Output file that receives one predicted action sequence per question.
    PREDICT_PATH = '../data/saves/' + str(args.name) + '/' + str(args.pred) + '_predict.actions'
    fwPredict = open(PREDICT_PATH, 'w', encoding="UTF-8")
    TEST_QUESTION_PATH = '../data/auto_QA_data/mask_test/' + str(args.pred).upper() + '_test.question'
    log.info("Open: %s", '../data/auto_QA_data/mask_test/' + str(args.pred).upper() + '_test.question')
    phrase_pairs, emb_dict = data.load_data_MAML_TEST(QUESTION_PATH=TEST_QUESTION_PATH, DIC_PATH=DIC_PATH)
    log.info("Obtained %d phrase pairs with %d uniq words", len(phrase_pairs), len(emb_dict))
    phrase_pairs_944K = data.load_data_MAML(TRAIN_944K_QUESTION_ANSWER_PATH, max_tokens=MAX_TOKENS)
    log.info("Obtained %d phrase pairs from %s.", len(phrase_pairs_944K), TRAIN_944K_QUESTION_ANSWER_PATH)
    if args.retriever_random:
        log.info("Using random support set for test.")
    end_token = emb_dict[data.END_TOKEN]
    # Transform token into index in dictionary.
    test_data = data.encode_phrase_pairs_RLTR(phrase_pairs, emb_dict)
    # # list of (seq1, [seq*]) pairs; groups the training pairs into 1:N form.
    # train_data = data.group_train_data(train_data)
    test_data = data.group_train_data_RLTR(test_data)
    train_data_944K = data.encode_phrase_pairs_RLTR(phrase_pairs_944K, emb_dict)
    train_data_944K = data.group_train_data_RLTR_for_support(train_data_944K)
    dict944k = data.get944k(DICT_944K)
    log.info("Reading dict944k from %s is done. %d pairs in dict944k.", DICT_944K, len(dict944k))
    dict944k_weak = data.get944k(DICT_944K_WEAK)
    log.info("Reading dict944k_weak from %s is done. %d pairs in dict944k_weak", DICT_944K_WEAK, len(dict944k_weak))
    # Index -> word mapping used to de-tokenize model output.
    rev_emb_dict = {idx: word for word, idx in emb_dict.items()}
    net = model.PhraseModel(emb_size=model.EMBEDDING_DIM, dict_size=len(emb_dict), hid_size=model.HIDDEN_STATE_SIZE,
                            LSTM_FLAG=args.lstm, ATT_FLAG=args.att, EMBED_FLAG=args.embed_grad).to(device)
    model_path = '../data/saves/' + str(args.name) + '/' + str(args.model)
    net.load_state_dict((torch.load(model_path)))
    log.info("Model loaded from %s, continue testing in MAML first-order mode...", model_path)
    # BEGIN token
    beg_token = torch.LongTensor([emb_dict[data.BEGIN_TOKEN]]).to(device)
    # NOTE(review): .cuda() is unconditional and will fail on a CPU-only run --
    # confirm that --cuda is always supplied when launching this script.
    beg_token = beg_token.cuda()
    metaLearner = metalearner.MetaLearner(net, device=device, beg_token=beg_token, end_token=end_token,
                                          adaptive=args.adaptive, samples=args.samples,
                                          train_data_support_944K=train_data_944K, rev_emb_dict=rev_emb_dict,
                                          first_order=args.first_order, fast_lr=args.fast_lr,
                                          meta_optimizer_lr=args.meta_lr, dial_shown=False, dict=dict944k,
                                          dict_weak=dict944k_weak, steps=args.steps, weak_flag=args.weak)
    log.info("Meta-learner: %d inner steps, %f inner learning rate, "
             "%d outer steps, %f outer learning rate, using weak mode:%s"
             % (args.steps, args.fast_lr, args.batches, args.meta_lr, str(args.weak)))
    seq_count = 0
    correct_count = 0
    sum_bleu = 0.0
    test_dataset_count = 0
    token_string_list = list()
    refer_string_list = list()
    batch_count = 0
    # seq_1 is the input; targets are the references (there may be several).
    # The dict stores the initial parameters in the modules.
    old_param_dict = metaLearner.get_net_named_parameter()
    for test_task in test_data:
        batch_count += 1
        # Batch is represented for a batch of tasks in MAML.
        # In each task, a batch of support set is established.
        token_string = metaLearner.first_order_sampleForTest(test_task, old_param_dict=old_param_dict, random=args.retriever_random, mc=args.MonteCarlo)
        test_dataset_count += 1
        # log.info("%d PREDICT: %s", test_dataset_count, token_string)
        token_string_list.append(str(test_task[1]['qid']) + ': ' + token_string+'\n')
        # Progress heartbeat every 100 questions.
        if test_dataset_count % 100 == 0:
            print (test_dataset_count)
    fwPredict.writelines(token_string_list)
    fwPredict.close()
    log.info("Writing to file %s is done!", PREDICT_PATH)
MRL-CQA | MRL-CQA-master/S2SRL/train_reptile_maml_true_reward.py | #!/usr/bin/env python3
import os
import sys
import random
import argparse
import logging
import numpy as np
from tensorboardX import SummaryWriter
from libbots import data, model, utils, metalearner
import torch
import time
import ptan
SAVES_DIR = "../data/saves"
MAX_EPOCHES = 30
MAX_TOKENS = 40
TRAIN_RATIO = 0.985
GAMMA = 0.05
DIC_PATH = '../data/auto_QA_data/share.question'
TRAIN_QUESTION_ANSWER_PATH = '../data/auto_QA_data/mask_even_1.0%/RL_train_TR_new_2k.question'
TRAIN_944K_QUESTION_ANSWER_PATH = '../data/auto_QA_data/CSQA_DENOTATIONS_full_944K.json'
DICT_944K = '../data/auto_QA_data/CSQA_result_question_type_944K.json'
DICT_944K_WEAK = '../data/auto_QA_data/CSQA_result_question_type_count944K.json'
ORDERED_QID_QUESTION_DICT = '../data/auto_QA_data/CSQA_result_question_type_count944k_orderlist.json'
QTYPE_DOC_RANGE = '../data/auto_QA_data/944k_rangeDict.json'
log = logging.getLogger("train")
# Calculate 0-1 sparse reward for samples in test dataset to judge the performance of the model.
def run_test(test_data, net, rev_emb_dict, end_token, device="cuda"):
    """Greedy-decode every sample in ``test_data`` and return the mean reward.

    :param test_data: iterable of ``(input_sequence, reference_list)`` pairs.
    :param net: seq2seq PhraseModel whose argmax decoding is evaluated.
    :param rev_emb_dict: index -> word dictionary used to de-tokenize actions.
    :param end_token: token index that terminates decoding.
    :param device: device string forwarded to ``net.pack_input`` (default "cuda").
    :return: average reward over the dataset, or 0.0 for an empty dataset
        (previously this raised ``ZeroDivisionError``).
    """
    net.encoder.flatten_parameters()
    net.decoder.flatten_parameters()
    argmax_reward_sum = 0.0
    argmax_reward_count = 0.0
    # p1 is one sentence, p2 is sentence list.
    for p1, p2 in test_data:
        # Transform sentence to padded embeddings.
        input_seq = net.pack_input(p1, net.emb, device)
        # Get hidden states from encoder.
        # enc = net.encode(input_seq)
        context, enc = net.encode_context(input_seq)
        # Decode sequence by feeding the predicted token back into the net.
        # Acts greedily; returns N*output_vocab logits and N output token indices.
        # NOTE(review): ``beg_token`` is read from module scope (set in
        # __main__), not passed in -- confirm it is defined before calling.
        _, tokens = net.decode_chain_argmax(enc, net.emb(beg_token), seq_len=data.MAX_TOKENS, context=context[0], stop_at_token=end_token)
        # Reconstruct the action sequence in upper case, dropping the #END marker.
        action_tokens = []
        for temp_idx in tokens:
            if temp_idx in rev_emb_dict and rev_emb_dict.get(temp_idx) != '#END':
                action_tokens.append(str(rev_emb_dict.get(temp_idx)).upper())
        # Using 0-1 reward to compute accuracy.
        # argmax_reward_sum += float(utils.calc_True_Reward(action_tokens, p2, False))
        # NOTE(review): the true-reward computation above is commented out and a
        # random placeholder is used instead -- confirm this is intentional.
        argmax_reward_sum += random.random()
        argmax_reward_count += 1
    # Guard against an empty test set instead of raising ZeroDivisionError.
    if argmax_reward_count == 0:
        return 0.0
    return float(argmax_reward_sum) / float(argmax_reward_count)
if __name__ == "__main__":
    logging.basicConfig(format="%(asctime)-15s %(levelname)s %(message)s", level=logging.INFO)
    # # command line parameters
    # NOTE(review): sys.argv is overwritten here, so any real command-line
    # arguments are ignored -- confirm this hard-coded configuration is intended.
    sys.argv = ['train_reptile_maml_true_reward.py', '-l=../data/saves/rl_even_TR_batch8_1%/truereward_0.739_29.dat',
                '-n=maml_reptile', '--cuda', '-s=5', '-a=0', '--att=0', '--lstm=1', '--fast-lr=1e-4', '--meta-lr=1e-4', '--steps=5', '--batches=1', '--weak=1', '--beta=0.1']
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", action='store_true', default=False, help="Enable cuda")
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    parser.add_argument("-l", "--load", required=True, help="Load the pre-trained model whereby continue training the RL mode")
    # Number of decoding samples.
    parser.add_argument("-s", "--samples", type=int, default=4, help="Count of samples in prob mode")
    # Choose the function to compute reward (0-1 or adaptive reward).
    # If a = true, 1 or yes, the adaptive reward is used. Otherwise 0-1 reward is used.
    parser.add_argument("-a", "--adaptive", type=lambda x: (str(x).lower() in ['true', '1', 'yes']), help="0-1 or adaptive reward")
    parser.add_argument("--disable-skip", default=False, action='store_true', help="Disable skipping of samples with high argmax BLEU")
    # Choose the function to compute reward (0-1 or adaptive reward).
    # If a = true, 1 or yes, the adaptive reward is used. Otherwise 0-1 reward is used.
    parser.add_argument("--att", type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
                        help="Using attention mechanism in seq2seq")
    parser.add_argument("--lstm", type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
                        help="Using LSTM mechanism in seq2seq")
    # The action='store_true' means once the parameter appears in the command line, such as '--first-order',
    # the action is to mark it as 'True';
    # If there is no value of the parameter, the value is assigned as 'False'.
    # Conversely, if action is 'store_false', if the parameter has a value, the parameter is viewed as 'False'.
    parser.add_argument('--first-order', action='store_true', help='use the first-order approximation of MAML')
    # If false, the embedding tensors in the model do not need to be trained.
    parser.add_argument('--embed-grad', action='store_false', help='fix embeddings when training')
    parser.add_argument('--docembed-grad', action='store_false', help='fix doc embeddings when training')
    # If query_embed is true, using the sum of word embedding to represent the questions.
    parser.add_argument('--query-embed', action='store_false', help='using the sum of word embedding to represent the questions')
    parser.add_argument('--fast-lr', type=float, default=0.0001,
                        help='learning rate for the 1-step gradient update of MAML')
    parser.add_argument('--meta-lr', type=float, default=0.0001,
                        help='learning rate for the meta optimization')
    parser.add_argument('--beta', type=float, default=0.1,
                        help='learning rate for reptile')
    parser.add_argument('--steps', type=int, default=5, help='steps in inner loop of MAML')
    parser.add_argument('--batches', type=int, default=5, help='tasks of a batch in outer loop of MAML')
    # If weak is true, it means when searching for support set, the questions with same number of E/R/T but different relation will be retrieved if the questions in this pattern is less than the number of steps.
    parser.add_argument("--weak", type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
                        help="Using weak mode to search for support set")
    parser.add_argument('--retriever-random', action='store_true', help='randomly get support set for the retriever')
    parser.add_argument("--MonteCarlo", action='store_true', default=False,
                        help="using Monte Carlo algorithm for REINFORCE")
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")
    log.info("Device info: %s", str(device))
    saves_path = os.path.join(SAVES_DIR, args.name)
    os.makedirs(saves_path, exist_ok=True)
    # # List of (question, {question information and answer}) pairs, the training pairs are in format of 1:1.
    phrase_pairs, emb_dict = data.load_data_MAML(QUESTION_PATH=TRAIN_QUESTION_ANSWER_PATH, DIC_PATH=DIC_PATH, max_tokens=MAX_TOKENS)
    log.info("Obtained %d phrase pairs with %d uniq words from %s.", len(phrase_pairs), len(emb_dict), TRAIN_QUESTION_ANSWER_PATH)
    phrase_pairs_944K = data.load_data_MAML(QUESTION_PATH=TRAIN_944K_QUESTION_ANSWER_PATH, max_tokens = MAX_TOKENS)
    log.info("Obtained %d phrase pairs from %s.", len(phrase_pairs_944K), TRAIN_944K_QUESTION_ANSWER_PATH)
    data.save_emb_dict(saves_path, emb_dict)
    end_token = emb_dict[data.END_TOKEN]
    # Transform token into index in dictionary.
    train_data = data.encode_phrase_pairs_RLTR(phrase_pairs, emb_dict)
    # # list of (seq1, [seq*]) pairs; groups the training pairs into 1:N form.
    # train_data = data.group_train_data(train_data)
    train_data = data.group_train_data_RLTR(train_data)
    train_data_944K = data.encode_phrase_pairs_RLTR(phrase_pairs_944K, emb_dict)
    train_data_944K = data.group_train_data_RLTR_for_support(train_data_944K)
    dict944k = data.get944k(DICT_944K)
    log.info("Reading dict944k from %s is done. %d pairs in dict944k.", DICT_944K, len(dict944k))
    dict944k_weak = data.get944k(DICT_944K_WEAK)
    log.info("Reading dict944k_weak from %s is done. %d pairs in dict944k_weak", DICT_944K_WEAK, len(dict944k_weak))
    # Deterministic shuffle so the train/test split is reproducible across runs.
    rand = np.random.RandomState(data.SHUFFLE_SEED)
    rand.shuffle(train_data)
    train_data, test_data = data.split_train_test(train_data, TRAIN_RATIO)
    log.info("Training data converted, got %d samples", len(train_data))
    log.info("Train set has %d phrases, test %d", len(train_data), len(test_data))
    log.info("Batch size is %d", args.batches)
    if args.att:
        log.info("Using attention mechanism to train the SEQ2SEQ model...")
    else:
        log.info("Train the SEQ2SEQ model without attention mechanism...")
    if args.lstm:
        log.info("Using LSTM mechanism to train the SEQ2SEQ model...")
    else:
        log.info("Using RNN mechanism to train the SEQ2SEQ model...")
    if args.embed_grad:
        log.info("Word embedding in the model will be updated during the training...")
    else:
        log.info("Word embedding in the model will be fixed during the training...")
    if args.docembed_grad:
        log.info("Document embedding in the retriever model will be updated during the training...")
    else:
        log.info("Document embedding in the retriever model will be fixed during the training...")
    if args.query_embed:
        log.info("Using the sum of word embedding to represent the questions during the training...")
    else:
        log.info("Using the document_emb which is stored in the retriever model to represent the questions...")
    # Index -> word.
    rev_emb_dict = {idx: word for word, idx in emb_dict.items()}
    # PhraseModel.__init__() to establish a LSTM model.
    net = model.PhraseModel(emb_size=model.EMBEDDING_DIM, dict_size=len(emb_dict), hid_size=model.HIDDEN_STATE_SIZE, LSTM_FLAG=args.lstm, ATT_FLAG=args.att, EMBED_FLAG=args.embed_grad).to(device)
    # Using CUDA.
    # NOTE(review): net.cuda() is unconditional and will fail on CPU-only runs --
    # confirm --cuda is always supplied.
    net.cuda()
    log.info("Model: %s", net)
    # Load the pre-trained seq2seq model.
    net.load_state_dict(torch.load(args.load))
    # print("Pre-trained network params")
    # for name, param in net.named_parameters():
    #     print(name, param.shape)
    log.info("Model loaded from %s, continue training in RL mode...", args.load)
    if args.adaptive:
        log.info("Using adaptive reward to train the REINFORCE model...")
    else:
        log.info("Using 0-1 sparse reward to train the REINFORCE model...")
    docID_dict, _ = data.get_docID_indices(data.get_ordered_docID_document(ORDERED_QID_QUESTION_DICT))
    # Index -> qid.
    rev_docID_dict = {id: doc for doc, id in docID_dict.items()}
    qtype_docs_range = data.load_json(QTYPE_DOC_RANGE)
    writer = SummaryWriter(comment="-" + args.name)
    # BEGIN token
    beg_token = torch.LongTensor([emb_dict[data.BEGIN_TOKEN]]).to(device)
    beg_token = beg_token.cuda()
    metaLearner = metalearner.MetaLearner(net=net, device=device, beg_token=beg_token, end_token=end_token, adaptive=args.adaptive, samples=args.samples, train_data_support_944K=train_data_944K, rev_emb_dict=rev_emb_dict, first_order=args.first_order, fast_lr=args.fast_lr, meta_optimizer_lr=args.meta_lr, dial_shown=False, dict=dict944k, dict_weak=dict944k_weak, steps=args.steps, weak_flag=args.weak, query_embed = args.query_embed)
    log.info("Meta-learner: %d inner steps, %f inner learning rate, "
             "%d outer steps, %f outer learning rate, using weak mode:%s, retriever random model:%s"
             %(args.steps, args.fast_lr, args.batches, args.meta_lr, str(args.weak), str(args.retriever_random)))
    # TBMeanTracker (TensorBoard value tracker):
    # allows to batch fixed amount of historical values and write their mean into TB
    with ptan.common.utils.TBMeanTracker(writer, batch_size=100) as tb_tracker:
        batch_idx = 0
        batch_count = 0
        best_true_reward = None
        time_start = time.time()
        # Loop in epoches.
        for epoch in range(MAX_EPOCHES):
            dial_shown = False
            random.shuffle(train_data)
            total_samples = 0
            skipped_samples = 0
            true_reward_argmax = []
            true_reward_sample = []
            for batch in data.iterate_batches(train_data, args.batches):
                # The dict stores the initial parameters in the modules.
                old_param_dict = metaLearner.get_net_named_parameter()
                # temp_param_dict = metaLearner.get_net_parameter()
                batch_idx += 1
                # Each batch conducts one gradient update.
                batch_count += 1
                # Batch is represented for a batch of tasks in MAML.
                # In each task, a minibatch of support set is established.
                meta_losses, running_vars, meta_total_samples, meta_skipped_samples, true_reward_argmax_batch, true_reward_sample_batch = metaLearner.reptile_sample(batch, old_param_dict=old_param_dict, dial_shown=dial_shown, epoch_count=epoch, batch_count=batch_count, docID_dict=docID_dict, rev_docID_dict=rev_docID_dict, emb_dict=emb_dict, qtype_docs_range=qtype_docs_range, random=args.retriever_random, monte_carlo=args.MonteCarlo)
                total_samples += meta_total_samples
                skipped_samples += meta_skipped_samples
                true_reward_argmax.extend(true_reward_argmax_batch)
                true_reward_sample.extend(true_reward_sample_batch)
                # Reptile outer update: move the meta-parameters towards the adapted ones.
                metaLearner.reptile_meta_update(running_vars, old_param_dict, args.beta)
                metaLearner.net.zero_grad()
                temp_param_dict = metaLearner.get_net_parameter()
                dial_shown = True
                # (float)(...) is C-style but valid Python: a call to float().
                tb_tracker.track("meta_losses", (float)(meta_losses.cpu().detach().numpy()), batch_idx)
            # After one epoch, compute the bleus for samples in test dataset.
            true_reward_test = run_test(test_data, net, rev_emb_dict, end_token, device)
            # After one epoch, get the average of the decode_chain_argmax bleus for samples in training dataset.
            true_reward_armax = np.mean(true_reward_argmax)
            writer.add_scalar("true_reward_test", true_reward_test, batch_idx)
            writer.add_scalar("true_reward_armax", true_reward_armax, batch_idx)
            # After one epoch, get the average of the decode_chain_sampling bleus for samples in training dataset.
            writer.add_scalar("true_reward_sample", np.mean(true_reward_sample), batch_idx)
            writer.add_scalar("skipped_samples", skipped_samples / total_samples if total_samples != 0 else 0,
                              batch_idx)
            log.info("Batch %d, skipped_samples: %d, total_samples: %d", batch_idx, skipped_samples, total_samples)
            writer.add_scalar("epoch", batch_idx, epoch)
            log.info("Epoch %d, test reward: %.3f", epoch, true_reward_test)
            if best_true_reward is None or best_true_reward < true_reward_test:
                best_true_reward = true_reward_test
                log.info("Best true reward updated: %.4f", true_reward_test)
                # Save the updated seq2seq parameters trained by RL.
                torch.save(net.state_dict(), os.path.join(saves_path, "truereward_%.3f_%02d.dat" % (true_reward_test, epoch)))
            # # The parameters are stored after each epoch.
            torch.save(net.state_dict(), os.path.join(saves_path, "epoch_%03d_%.3f_%.3f.dat" % (epoch, float(true_reward_armax), true_reward_test)))
        time_end = time.time()
        log.info("Training time is %.3fs." % (time_end - time_start))
        print("Training time is %.3fs." % (time_end - time_start))
    writer.close()
| 15,550 | 59.984314 | 436 | py |
MRL-CQA | MRL-CQA-master/S2SRL/libbots/adabound.py | import math
import torch
from torch.optim import Optimizer
class AdaBound(Optimizer):
    """Implements AdaBound algorithm.

    It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of Learning Rate`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): Adam learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        final_lr (float, optional): final (SGD) learning rate (default: 0.1)
        gamma (float, optional): convergence speed of the bound functions (default: 1e-3)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsbound (boolean, optional): whether to use the AMSBound variant of this algorithm

    .. Adaptive Gradient Methods with Dynamic Bound of Learning Rate:
        https://openreview.net/forum?id=Bkg3g2R9FX
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), final_lr=0.1, gamma=1e-3,
                 eps=1e-8, weight_decay=0, amsbound=False):
        # Fail fast on invalid hyper-parameters.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= final_lr:
            raise ValueError("Invalid final learning rate: {}".format(final_lr))
        if not 0.0 <= gamma < 1.0:
            raise ValueError("Invalid gamma parameter: {}".format(gamma))
        defaults = dict(lr=lr, betas=betas, final_lr=final_lr, gamma=gamma, eps=eps,
                        weight_decay=weight_decay, amsbound=amsbound)
        super(AdaBound, self).__init__(params, defaults)

        # Remember each group's initial lr so that lr-scheduler decay can be
        # propagated proportionally onto final_lr inside step().
        self.base_lrs = [group['lr'] for group in self.param_groups]

    def __setstate__(self, state):
        super(AdaBound, self).__setstate__(state)
        # Checkpoints created before the amsbound option existed lack the flag;
        # backfill a default so step() can read it unconditionally.
        for group in self.param_groups:
            group.setdefault('amsbound', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group, base_lr in zip(self.param_groups, self.base_lrs):
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'Adam does not support sparse gradients, please consider SparseAdam instead')
                amsbound = group['amsbound']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsbound:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsbound:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                if group['weight_decay'] != 0:
                    # Fixed: the deprecated grad.add(Number, Tensor) overload was
                    # removed in modern PyTorch; use the alpha= keyword form.
                    grad = grad.add(p.data, alpha=group['weight_decay'])

                # Decay the first and second moment running average coefficient.
                # Keyword forms (alpha=/value=) replace the removed positional
                # Tensor.add_(Number, Tensor) / addcmul_(Number, t1, t2) overloads.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsbound:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                # Applies bounds on actual learning rate.
                # lr_scheduler cannot affect final_lr; this is a workaround to apply lr decay.
                final_lr = group['final_lr'] * group['lr'] / base_lr
                lower_bound = final_lr * (1 - 1 / (group['gamma'] * state['step'] + 1))
                upper_bound = final_lr * (1 + 1 / (group['gamma'] * state['step']))
                step_size = torch.full_like(denom, step_size)
                # Clamp the per-element Adam step into [lower_bound, upper_bound].
                step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(exp_avg)

                p.data.add_(-step_size)

        return loss
class AdaBoundW(Optimizer):
    """Implements AdaBound with decoupled weight decay (AdaBoundW).

    AdaBound behaves like Adam early in training and smoothly transitions
    towards SGD by clipping the per-element step size between dynamic
    lower/upper bounds that converge to ``final_lr``.  This variant applies
    weight decay in decoupled form (arxiv.org/abs/1711.05101) instead of
    folding it into the gradient.

    It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of Learning Rate`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): Adam learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        final_lr (float, optional): final (SGD) learning rate (default: 0.1)
        gamma (float, optional): convergence speed of the bound functions (default: 1e-3)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsbound (boolean, optional): whether to use the AMSBound variant of this algorithm
    .. Adaptive Gradient Methods with Dynamic Bound of Learning Rate:
        https://openreview.net/forum?id=Bkg3g2R9FX
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), final_lr=0.1, gamma=1e-3,
                 eps=1e-8, weight_decay=0, amsbound=False):
        # Validate hyper-parameters eagerly so misconfiguration fails at
        # construction time rather than mid-training.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= final_lr:
            raise ValueError("Invalid final learning rate: {}".format(final_lr))
        if not 0.0 <= gamma < 1.0:
            raise ValueError("Invalid gamma parameter: {}".format(gamma))
        defaults = dict(lr=lr, betas=betas, final_lr=final_lr, gamma=gamma, eps=eps,
                        weight_decay=weight_decay, amsbound=amsbound)
        super(AdaBoundW, self).__init__(params, defaults)
        # Remember each group's initial lr so that lr-scheduler decay can be
        # propagated onto final_lr inside step().
        self.base_lrs = list(map(lambda group: group['lr'], self.param_groups))
    def __setstate__(self, state):
        """Restore optimizer state; default 'amsbound' for old checkpoints."""
        super(AdaBoundW, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsbound', False)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        Returns:
            The loss returned by ``closure``, or None.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group, base_lr in zip(self.param_groups, self.base_lrs):
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'Adam does not support sparse gradients, please consider SparseAdam instead')
                amsbound = group['amsbound']
                state = self.state[p]
                # Lazy state initialization on the first step for this param.
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsbound:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsbound:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Decay the first and second moment running average coefficient.
                # Keyword form (alpha=/value=) replaces the deprecated
                # positional-scalar overloads add_(scalar, tensor) /
                # addcmul_(scalar, t1, t2), which newer torch releases reject.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsbound:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                # Applies bounds on actual learning rate.
                # lr_scheduler cannot affect final_lr; this is a workaround to
                # apply lr decay to final_lr as well.
                final_lr = group['final_lr'] * group['lr'] / base_lr
                lower_bound = final_lr * (1 - 1 / (group['gamma'] * state['step'] + 1))
                upper_bound = final_lr * (1 + 1 / (group['gamma'] * state['step']))
                # Per-element step: lr/denom clamped into [lower, upper] bounds,
                # then scaled by the first moment.
                step_size = torch.full_like(denom, step_size)
                step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(exp_avg)
                if group['weight_decay'] != 0:
                    # Decoupled decay: shrink weights directly instead of
                    # folding the penalty into the gradient.
                    # NOTE(review): the decay term is not scaled by lr here;
                    # confirm this matches the intended formulation.
                    decayed_weights = torch.mul(p.data, group['weight_decay'])
                    p.data.add_(-step_size)
                    p.data.sub_(decayed_weights)
                else:
                    p.data.add_(-step_size)
        return loss
| 11,340 | 47.465812 | 101 | py |
MRL-CQA | MRL-CQA-master/S2SRL/libbots/reparam_module.py | import torch
import torch.nn as nn
import warnings
import types
from collections import namedtuple
from contextlib import contextmanager
# A module is a container from which layers, model subparts (e.g. BasicBlock in resnet in torchvision) and models should inherit.
# Why should they? Because the inheritance from nn.Module allows you to call methods like to("cuda:0"), .eval(), .parameters() or register hooks easily.
class ReparamModule(nn.Module):
    """Wraps an nn.Module so all of its parameters are re-exposed as a single
    flat vector ``self.flat_param``.

    After construction, the wrapped module's original ``nn.Parameter``s are
    removed and replaced by plain-attribute views into ``flat_param``; the
    forward pass can then be evaluated under an arbitrary parameter vector
    via ``forward(..., flat_param=theta)``.  Buffers (e.g. BatchNorm running
    stats) are not reparametrized, only recorded so they can be swapped
    temporarily.
    """
    def __init__(self, module):
        super(ReparamModule, self).__init__()
        self.module = module
        # Filled by set_parameter_buffer(); holds the views that were active
        # before the last persistent swap so they can be restored later.
        self.saved_views = []
        param_infos = []    # (owning submodule, attribute name) per parameter
        params = []         # detached parameter tensors, in traversal order
        param_numels = []   # element count per parameter (for split)
        param_shapes = []   # original shape per parameter (for view)
        # Depth-first walk over self and every submodule; recurse=False makes
        # sure each parameter is visited exactly once, at its owning module.
        for m in self.modules():
            print('#############################')
            print(type(m))
            # print(m)
            print(m.named_parameters(recurse=False))
            for n, p in m.named_parameters(recurse=False):
                if p is not None:
                    param_infos.append((m, n))
                    # detach(): record the raw tensor without autograd history.
                    params.append(p.detach())
                    param_numels.append(p.numel())
                    param_shapes.append(p.size())
        # torch.cat below requires a uniform dtype across all parameters.
        assert len(set(p.dtype for p in params)) <= 1, \
            "expects all parameters in module to have same dtype"
        # Store the bookkeeping needed to unflatten later.
        self._param_infos = tuple(param_infos)
        self._param_numels = tuple(param_numels)
        self._param_shapes = tuple(param_shapes)
        # Flatten every parameter into one long 1-D vector; this becomes the
        # only real nn.Parameter on the wrapper.
        flat_param = nn.Parameter(torch.cat([p.reshape(-1) for p in params], 0))
        self.register_parameter('flat_param', flat_param)
        self.param_numel = flat_param.numel()
        # The detached copies are no longer needed once concatenated.
        del params
        # Deregister the original parameters from their modules ...
        for m, n in self._param_infos:
            delattr(m, n)
        # ... and re-attach them as plain-attribute views into flat_param.
        self._unflatten_param(self.flat_param)
        # Buffers are not reparametrized; just record (module, name, buffer)
        # so replaced_buffers() can swap them in and out.
        buffer_infos = []
        for m in self.modules():
            for n, b in m.named_buffers(recurse=False):
                if b is not None:
                    buffer_infos.append((m, n, b))
        self._buffer_infos = tuple(buffer_infos)
        self._traced_self = None
    def trace(self, example_input, **trace_kwargs):
        """JIT-trace both forward variants; later calls use the traced graphs."""
        assert self._traced_self is None, 'This ReparamModule is already traced'
        if isinstance(example_input, torch.Tensor):
            example_input = (example_input,)
        example_input = tuple(example_input)
        example_param = (self.flat_param.detach().clone(),)
        example_buffers = (tuple(b.detach().clone() for _, _, b in self._buffer_infos),)
        self._traced_self = torch.jit.trace_module(
            self,
            inputs=dict(
                _forward_with_param=example_param + example_input,
                _forward_with_param_and_buffers=example_param + example_buffers + example_input,
            ),
            **trace_kwargs,
        )
        # Replace forwards with the traced versions.
        self._forward_with_param = self._traced_self._forward_with_param
        self._forward_with_param_and_buffers = self._traced_self._forward_with_param_and_buffers
        return self
    def clear_views(self):
        """Drop the parameter views (sets every reparametrized attr to None)."""
        for m, n in self._param_infos:
            setattr(m, n, None)  # This will set as plain attr
    def _apply(self, *args, **kwargs):
        # Route .to()/.cuda()/.float() etc. through the traced module when present.
        if self._traced_self is not None:
            self._traced_self._apply(*args, **kwargs)
            return self
        return super(ReparamModule, self)._apply(*args, **kwargs)
    def _unflatten_param(self, flat_param):
        """Slice ``flat_param`` back into per-parameter views and attach them."""
        # split() by element counts, then view() each chunk to its original shape.
        ps = (t.view(s) for (t, s) in zip(flat_param.split(self._param_numels), self._param_shapes))
        for (m, n), p in zip(self._param_infos, ps):
            setattr(m, n, p)  # This will set as plain attr
    @contextmanager
    def unflattened_param(self, flat_param):
        """Context manager: evaluate the module under ``flat_param``; the
        previously attached views are restored when the context exits."""
        saved_views = [getattr(m, n) for m, n in self._param_infos]
        self._unflatten_param(flat_param)
        yield
        # Why not just `self._unflatten_param(self.flat_param)` on exit?
        # 1. https://github.com/pytorch/pytorch/issues/17583
        # 2. slightly faster: no need to rebuild the split+view graph.
        for (m, n), p in zip(self._param_infos, saved_views):
            setattr(m, n, p)
    @contextmanager
    def replaced_buffers(self, buffers):
        """Context manager: temporarily replace every recorded buffer."""
        for (m, n, _), new_b in zip(self._buffer_infos, buffers):
            setattr(m, n, new_b)
        yield
        for m, n, old_b in self._buffer_infos:
            setattr(m, n, old_b)
    def _forward_with_param_and_buffers(self, flat_param, buffers, *inputs, **kwinputs):
        # Forward pass under a substituted parameter vector AND buffers.
        with self.unflattened_param(flat_param):
            with self.replaced_buffers(buffers):
                return self.module(*inputs, **kwinputs)
    def _forward_with_param(self, flat_param, *inputs, **kwinputs):
        # Forward pass under a substituted parameter vector only.
        with self.unflattened_param(flat_param):
            return self.module(*inputs, **kwinputs)
    def forward(self, *inputs, flat_param=None, buffers=None, **kwinputs):
        """Run the wrapped module, optionally under a given flat parameter
        vector and/or replacement buffers (both default to the stored ones)."""
        if flat_param is None:
            flat_param = self.flat_param
        if buffers is None:
            return self._forward_with_param(flat_param, *inputs, **kwinputs)
        else:
            return self._forward_with_param_and_buffers(flat_param, tuple(buffers), *inputs, **kwinputs)
    def _set_param_and_buffers(self, flat_param, buffers, **kwinputs):
        # Persistently install flat_param and buffers (no automatic restore);
        # the previous views are kept in self.saved_views.
        self.saved_views = [getattr(m, n) for m, n in self._param_infos]
        self._unflatten_param(flat_param)
        for (m, n, _), new_b in zip(self._buffer_infos, buffers):
            setattr(m, n, new_b)
    def _set_param(self, flat_param, **kwinputs):
        # Persistently install flat_param (no automatic restore); the previous
        # views are kept in self.saved_views.
        self.saved_views = [getattr(m, n) for m, n in self._param_infos]
        self._unflatten_param(flat_param)
    def set_parameter_buffer(self, flat_param=None, buffers=None, **kwinputs):
        """Persistently set parameters (and optionally buffers); undo with
        reset_initial_parameter_buffer()."""
        if flat_param is None:
            flat_param = self.flat_param
        if buffers is None:
            self._set_param(flat_param, **kwinputs)
        else:
            self._set_param_and_buffers(flat_param, tuple(buffers), **kwinputs)
    # Restore the views saved by the last set_parameter_buffer() call and the
    # originally recorded buffers.
    def reset_initial_parameter_buffer(self):
        for (m, n), p in zip(self._param_infos, self.saved_views):
            setattr(m, n, p)
        for m, n, old_b in self._buffer_infos:
            setattr(m, n, old_b)
| 12,442 | 53.336245 | 152 | py |
MRL-CQA | MRL-CQA-master/S2SRL/libbots/bert_model.py | import numpy as np
import operator
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
import torch.nn.functional as F
from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup
from . import utils
from . import attention
from . import beam_search_node
from queue import PriorityQueue
# Module-level defaults; not referenced by the code visible in this file.
HIDDEN_STATE_SIZE = 128
EMBEDDING_DIM = 300
# nn.Module: Base class for all neural network modules.
# Your models should also subclass this class.
class CqaBertModel(nn.Module):
def __init__(self, pre_trained_model_name, fix_flag, emb_size, dict_size, hid_size, LSTM_FLAG, EMBED_FLAG=True, ATT_FLAG=False, BERT_TO_EMBEDDING_FLAG=False):
super(CqaBertModel, self).__init__()
# self.embedding = torch.nn.Embedding(num_embeddings=vocab_size, embedding_dim=embeding_dim)
# num_embeddings = vocab_size
# embedding_dim = embeding_dim
# If no pre-trained embeddings is designated, the random vectors will be initialized.
self.emb = nn.Embedding(num_embeddings=dict_size, embedding_dim=emb_size)
if not EMBED_FLAG:
self.freeze_embedding()
else:
self.unfreeze_embedding()
self.bert = BertModel.from_pretrained(pre_trained_model_name)
if fix_flag:
self.freeze_bert_encoder()
else:
self.unfreeze_bert_encoder()
self.drop = nn.Dropout(p=0.3)
self.bert_out = nn.Linear(self.bert.config.hidden_size, hid_size)
# self.encoder = nn.LSTM(input_size=emb_size, hidden_size=hid_size,
# num_layers=1, batch_first=True)
self.decoder = nn.LSTM(input_size=emb_size, hidden_size=hid_size,
num_layers=1, batch_first=True)
self.output = nn.Sequential(
nn.Linear(hid_size, dict_size)
)
self.lstm_flag = LSTM_FLAG
self.attention_flag = ATT_FLAG
if self.attention_flag:
self.attention = attention.Attention(hid_size)
print('Build attention layer.')
# Transform the pooled output of the BERT into the initial input of the LSTM decoder with the dimension of the word embedding.
if BERT_TO_EMBEDDING_FLAG:
self.bert_to_embedding = nn.Linear(hid_size, emb_size)
# The last_hidden_state is a sequence of hidden states of the last layer of the model.
# Obtaining the pooled_output is done by applying the BertPooler on last_hidden_state
# You can think of the pooled_output as a summary of the content, according to BERT.
def bert_encode(self, input_ids, attention_mask):
last_hidden_state, pooled_output = self.bert(
input_ids=input_ids,
attention_mask=attention_mask
)
output_pool = self.drop(pooled_output)
output_hidden_states = self.drop(last_hidden_state)
return self.bert_out(output_pool), self.bert_out(output_hidden_states)
def freeze_embedding(self):
for param in self.emb.parameters():
param.requires_grad = False
def unfreeze_embedding(self):
for param in self.emb.parameters():
param.requires_grad = True
def freeze_bert_encoder(self):
for param in self.bert.parameters():
param.requires_grad = False
def unfreeze_bert_encoder(self):
for param in self.bert.parameters():
param.requires_grad = True
def zero_grad(self, params=None):
if params is None:
for param in self.parameters():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
# print(param.grad)
param.grad.zero_()
else:
for name, param in params.items():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
# print(param.grad)
param.grad.zero_()
params[name].grad = None
# Using the parameters to insert into network and compute output.
def insert_new_parameter(self, state_dict, strict):
self.load_state_dict(state_dict, strict)
# Using the parameters to insert into network and compute output.
def insert_new_parameter_to_layers(self, old_param_dict):
for (name, param) in self.named_parameters():
if param.requires_grad:
param.data = old_param_dict[name].clone().detach()
# Return all the parameters that have grads.
def grad_parameters(self, recurse=True):
r"""Returns an iterator over module parameters.
This is typically passed to an optimizer.
Args:
recurse (bool): if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module.
Yields:
Parameter: module parameter
Example::
# >>> for param in model.parameters():
# >>> print(type(param.data), param.size())
<class 'torch.FloatTensor'> (20L,)
<class 'torch.FloatTensor'> (20L, 1L, 5L, 5L)
"""
for name, param in self.named_parameters(recurse=recurse):
if param.requires_grad:
yield param
# Return all the named_parameters that have grads.
def grad_named_parameters(self, recurse=True):
for name, param in self.named_parameters(recurse=recurse):
if param.requires_grad:
yield (name, param)
# hidden state;
# return hid: (h_n, c_n) is tensor containing the hidden state and cell state for t = seq_len.
def encode(self, x):
_, hid = self.encoder(x)
return hid
# Get each time step's hidden state for encoder;
# return outputs: output, (h_n, c_n) for LSTM;
# context is (seq_len, batch, num_directions * hidden_size):
# tensor containing the output features (h_t) from the last layer of the LSTM, for each t.
# hid is (h_n, c_n), which is tensor containing the hidden state and cell state for t = seq_len.
def encode_context(self, x):
packed_context, hid = self.encoder(x)
# It is an inverse operation to :func:`pack_padded_sequence`.
# Unpack your output if required.
unpack_context, input_sizes = rnn_utils.pad_packed_sequence(packed_context, batch_first=True)
return unpack_context, hid
def get_encoded_item(self, encoded, index):
# For RNN
if not self.lstm_flag:
return encoded[:, index:index+1]
# For LSTM
if self.lstm_flag:
return encoded[0][:, index:index+1].contiguous(), \
encoded[1][:, index:index+1].contiguous()
def decode_teacher(self, hid, input_seq, context):
# Method assumes batch of size=1
out, _ = self.decoder(input_seq, hid)
if self.attention_flag:
out, attn = self.attention(out, context)
out = self.output(out.data)
return out
def decode_one(self, hid, input_x, context):
# Example for unsqueeze:
# >>> x = torch.tensor([1, 2, 3, 4])
# >>> torch.unsqueeze(x, 0)
# tensor([[ 1, 2, 3, 4]])
# >>> torch.unsqueeze(x, 1)
# tensor([[ 1],
# [ 2],
# [ 3],
# [ 4]])
out, new_hid = self.decoder(input_x.unsqueeze(0), hid)
if (self.attention_flag):
out, attn = self.attention(out, context)
# Self.output(out) using nn.Linear(hid_size, dict_size) to transform hidden states into logits over output vocab.
out = self.output(out)
# squeeze: Returns a tensor with all the dimensions of :attr:`input` of size `1` removed.
return out.squeeze(dim=0), new_hid
def bert_decode_one(self, input_x):
# Example for unsqueeze:
# >>> x = torch.tensor([1, 2, 3, 4])
# >>> torch.unsqueeze(x, 0)
# tensor([[ 1, 2, 3, 4]])
# >>> torch.unsqueeze(x, 1)
# tensor([[ 1],
# [ 2]
# [ 3],
# [ 4]])
# Inputs of LSTM: input, (h_0, c_0)
# - **input** of shape `(seq_len, batch, input_size)`: tensor containing the features
# of the input sequence.
# The input can also be a packed variable length sequence.
# See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
# :func:`torch.nn.utils.rnn.pack_sequence` for details.
# - **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
# containing the initial hidden state for each element in the batch.
# If the LSTM is bidirectional, num_directions should be 2, else it should be 1.
# - **c_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
# containing the initial cell state for each element in the batch.
# If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero.
# batch_first – If True,
# then the input and output tensors are provided as (batch, seq, feature). Default: False
# In our LSTM, the batch_first is set as True.
out, hc = self.decoder(input_x.unsqueeze(1))
return hc
def decode_chain_argmax(self, hid, begin_emb, seq_len, context, stop_at_token=None):
"""
Decode sequence by feeding predicted token to the net again. Act greedily
"""
res_logits = []
res_tokens = []
# First cur_emb is the embedding of '#BEG'.
cur_emb = begin_emb
# At first using the '#BEG' as first input token and hidden states from encoder as initial hidden state to predict the first output token and first decoder hidden state.
# Then predict the output token by using last step's output token as current step's input and last step's decoder hidden state.
for _ in range(seq_len):
# The out_logits is the distribution over whole output vocabulary.
# The hid is new hidden state generated from current time step.
out_logits, hid = self.decode_one(hid, cur_emb, context)
# After torch.max operation, the result is a list.
# First element is the largest logit value in dimension-1 (each row), the second value is the index of the largest logit value.
# >>> a = torch.randn(4, 4)
# >>> a
# tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
# [ 1.1949, -1.1127, -2.2379, -0.6702],
# [ 1.5717, -0.9207, 0.1297, -1.8768],
# [-0.6172, 1.0036, -0.6060, -0.2432]])
# >>> torch.max(a, 1)
# (tensor([ 0.8475, 1.1949, 1.5717, 1.0036]), tensor([ 3, 0, 0, 1]))
out_token_v = torch.max(out_logits, dim=1)[1]
# Transform tensorflow to array and return array[0];
out_token = out_token_v.data.cpu().numpy()[0]
# Using current output token's embedding.
cur_emb = self.emb(out_token_v)
# The list of out_logits list.
res_logits.append(out_logits)
# The list of output tokens.
res_tokens.append(out_token)
# When the EOS is predicted the prediction is ended.
if stop_at_token is not None and out_token == stop_at_token:
break
# torch.cat(tensors, dim=0, out=None) → Tensor
# Concatenates the given sequence of seq tensors in the given dimension.
# All tensors must either have the same shape (except in the concatenating dimension) or be empty.
# >>> x = torch.randn(2, 3)
# >>> x
# tensor([[ 0.6580, -1.0969, -0.4614],
# [-0.1034, -0.5790, 0.1497]])
# >>> torch.cat((x, x, x), 0)
# tensor([[ 0.6580, -1.0969, -0.4614],
# [-0.1034, -0.5790, 0.1497],
# [ 0.6580, -1.0969, -0.4614],
# [-0.1034, -0.5790, 0.1497],
# [ 0.6580, -1.0969, -0.4614],
# [-0.1034, -0.5790, 0.1497]])
# >>> torch.cat((x, x, x), 1)
# tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
# -1.0969, -0.4614],
# [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
# -0.5790, 0.1497]])
# Concatenate follow rows.
return torch.cat(res_logits), res_tokens
def decode_chain_sampling(self, hid, begin_emb, seq_len, context, stop_at_token=None):
"""
Decode sequence by feeding predicted token to the net again.
Act according to probabilities
"""
res_logits = []
res_actions = []
cur_emb = begin_emb
for _ in range(seq_len):
out_logits, hid = self.decode_one(hid, cur_emb, context)
# Using softmax to transform logits to probabilities.
out_probs_v = F.softmax(out_logits, dim=1)
out_probs = out_probs_v.data.cpu().numpy()[0]
# np.random.choice(out_probs.shape[0], p=out_probs):
# choose one index from out_probs.shape[0] by the probabilities associated with each entry as out_probs.
action = int(np.random.choice(out_probs.shape[0], p=out_probs))
# Transform action to tensor and cast it to the device where begin_emb is in.
action_v = torch.LongTensor([action]).to(begin_emb.device)
action_v = action_v.cuda()
# Get the embedding of the sampled output token.
cur_emb = self.emb(action_v)
res_logits.append(out_logits)
res_actions.append(action)
if stop_at_token is not None and action == stop_at_token:
break
return torch.cat(res_logits), res_actions
def beam_decode(self, hid, seq_len, context, start_token, stop_at_token = None, beam_width = 10, topk = 5):
'''
:param target_tensor: target indexes tensor of shape [B, T] where B is the batch size and T is the maximum length of the output sentence
:param decoder_hidden: input tensor of shape [1, B, H] for start of the decoding
:param encoder_outputs: if you are using attention mechanism you can pass encoder outputs, [T, B, H] where T is the maximum length of input sentence
:return: decoded_batch
'''
endnodes = []
number_required = min((topk + 1), topk - len(endnodes))
# Start with the start of the sentence token: torch.LongTensor([[SOS_token]], device=device)
# starting node - hidden vector, previous node, word id, logp, length, logits
node = beam_search_node.BeamSearchNode(hid, None, start_token, 0, 1, None)
nodes = PriorityQueue()
# start the queue
# The smaller the value is, the higher the priority of the node is.
# The unique ID of a node is used to avoid conflict between elements in the heap.
nodes.put((-node.eval(), id(node), node))
qsize = 1
# start beam search
while True:
# give up when decoding takes too long
if qsize > 2000:
break
# fetch the best node
score, _, n = nodes.get()
action_v = n.wordid
# Get the embedding of the sampled output token.
decoder_input = self.emb(action_v)
decoder_hidden = n.h
# tensor.item(): if only one element is in the tensor, tensor.item() will return the value of the element.
if n.wordid.item() == stop_at_token or n.leng == seq_len and n.prevNode != None:
endnodes.append((score, id(n), n))
# if we reached maximum # of sentences required
if len(endnodes) >= number_required:
break
else:
continue
# decode for one step using decoder
# decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_output)
out_logits, decoder_hidden = self.decode_one(decoder_hidden, decoder_input, context)
decoder_output = F.log_softmax(out_logits, dim=1)
# out_probs = out_probs_v.data.cpu().numpy()[0]
# PUT HERE REAL BEAM SEARCH OF TOP
log_prob, indexes = torch.topk(decoder_output, beam_width)
nextnodes = []
for new_k in range(beam_width):
decoded_t = indexes[0][new_k].view(-1)
log_p = log_prob[0][new_k].item()
# hidden vector, previous node, word id, logp, length
node = beam_search_node.BeamSearchNode(decoder_hidden, n, decoded_t, n.logp + log_p, n.leng + 1, out_logits)
score = -node.eval()
nextnodes.append((score, node))
# put them into queue
for i in range(len(nextnodes)):
score, nn = nextnodes[i]
nodes.put((score, id(nn), nn))
# increase qsize
qsize += len(nextnodes) - 1
# choose nbest paths, back trace them
if len(endnodes) == 0:
endnodes = [nodes.get() for _ in range(topk)]
utterances = []
all_res_logits = []
# The sorted() function sorts the elements of a given iterable
# in a specific order (either ascending or descending).
# The syntax of sorted() is: sorted(iterable, key=None, reverse=False)
# This is a great way to sort a list of tuples on the second key:
# a = [ (1,'z'), (2, 'x'), (3, 'y') ]
# a.sort(key=operator.itemgetter(1))
# Or using lambda: a.sort(key=lambda x: x[1])
for score, _, n in sorted(endnodes, key=operator.itemgetter(0)):
utterance = []
res_logits = []
# back trace
while n.prevNode != None:
utterance.append(n.wordid.item())
res_logits.append(n.logits)
n = n.prevNode
# [::-1]: Reverse.
utterance = utterance[::-1]
res_logits = res_logits[::-1]
utterances.append(utterance)
all_res_logits.append(torch.cat(res_logits))
return all_res_logits, utterances
def pack_batch_no_out(self, batch, embeddings, device="cpu"):
# Asserting statements is a convenient way to insert debugging assertions into a program.
# To guarantee that the batch is a list.
assert isinstance(batch, list)
# The format of batch is a list of tuple: ((tuple),[[list of token ID list]])
# A lambda function is a small anonymous function, the example is as following.
# x = lambda a, b: a * b
# print(x(5, 6))
# Sort descending (CuDNN requirements) batch中第一个元素为最长的句子;
batch.sort(key=lambda s: len(s[0]), reverse=True)
# input_idx:一个batch的输入句子的tokens对应的ID矩阵;Each row is corresponding to one input sentence.
# output_idx:一个batch的输出句子的tokens对应的ID矩阵;Each row is corresponding to a list of several output sentences.
# zip wants a bunch of arguments to zip together, but what you have is a single argument (a list, whose elements are also lists).
# The * in a function call "unpacks" a list (or other iterable), making each of its elements a separate argument.
# For list p = [[1,2,3],[4,5,6]];
# So without the *, you're doing zip( [[1,2,3],[4,5,6]] ). With the *, you're doing zip([1,2,3], [4,5,6]) = [(1, 4), (2, 5), (3, 6)].
input_idx, output_idx = zip(*batch)
# create padded matrix of inputs
# map() function returns a list of the results after applying the given function to each item of a given iterable (list, tuple etc.)
# For example:
# numbers = (1, 2, 3, 4)
# result = map(lambda x: x + x, numbers)
# print(list(result))
# Output: {2, 4, 6, 8}
# 建立长度词典,为batch中每一个元素的长度;
lens = list(map(len, input_idx))
# 以最长的句子来建立batch*最长句子长度的全0矩阵;
input_mat = np.zeros((len(batch), lens[0]), dtype=np.int64)
# 将batch中每个句子的tokens对应的ID向量填入全0矩阵完成padding;
# idx:index,x:token ID 组成的向量;
for idx, x in enumerate(input_idx):
input_mat[idx, :len(x)] = x
# 将padding后的矩阵转换为tensor matrix;
input_v = torch.tensor(input_mat).to(device)
input_v = input_v.cuda()
# 封装成PackedSequence类型的对象;
# The padded sequence is the transposed matrix which is ``B x T x *``,
# where `T` is the length of the longest sequence and `B` is the batch size.
# Following the matrix is the list of lengths of each sequence in the batch (also in transposed format).
# For instance:
# [ a b c c d d d ]
# [ a b c d ]
# [ a b c ]
# [ a b ]
# could be transformed into [a,a,a,a,b,b,b,b,c,c,c,c,d,d,d,d] with batch size [4,4,3,2,1,1,1].
input_seq = rnn_utils.pack_padded_sequence(input_v, lens, batch_first=True)
input_seq = input_seq.cuda()
r = embeddings(input_seq.data)
# lookup embeddings;embeddings为模型已经建立的词向量矩阵;
# r: the [B x T x dimension] matrix of the embeddings of the occurred words in input sequence.
# The order is followed by the order in input_seq.
# Which is transforming [a,a,a,a,b,b,b,b,c,c,c,c,d,d,d,d] into [embedding(a), embedding(a), ..., embedding(d), embedding(d)]
r = r.cuda()
# 加入了词嵌入的input_seq;
# For instance, given data ``abc`` and `x`
# the :class:`PackedSequence` would contain data ``axbc`` with ``batch_sizes=[2,1,1]``.
# emb_input_seq is [B x T x dimension] matrix of the embeddings of the occurred words in input sequence with the batch size.
# For instance, emb_input_seq is the padded data: [embedding(a), embedding(a), ..., embedding(d), embedding(d)] with batch size [4,4,3,2,1,1,1].
emb_input_seq = rnn_utils.PackedSequence(r, input_seq.batch_sizes)
emb_input_seq = emb_input_seq.cuda()
return emb_input_seq, input_idx, output_idx
def pack_input(self, input_data, embeddings, device="cpu"):
    """Embed a single token-ID sequence and pack it for the RNN.

    :param input_data: list of token IDs (one sequence).
    :param embeddings: nn.Embedding used to look up the token vectors.
    :param device: torch device the created tensors should live on.
    :return: PackedSequence over a [1 x len(input_data) x emb_dim] batch.
    """
    # Fix: honour the requested device. The previous code called .cuda()
    # unconditionally right after .to(device), which ignored `device` and
    # crashed on CPU-only machines.
    input_v = torch.LongTensor([input_data]).to(device)
    r = embeddings(input_v)
    return rnn_utils.pack_padded_sequence(r, [len(input_data)], batch_first=True)
def pack_batch(self, batch, embeddings, device="cpu"):
    """Pack a batch of (input, outputs) samples for teacher forcing.

    :return: (embedded+packed inputs, one packed output sequence per sample
        with the trailing end token stripped, input ID lists, output lists).
    """
    emb_input_seq, input_idx, output_idx = self.pack_batch_no_out(batch, embeddings, device)
    # Each reference output loses its trailing end token before packing.
    output_seq_list = [
        self.pack_input(out[:-1], embeddings, device)
        for out in output_idx
    ]
    return emb_input_seq, output_seq_list, input_idx, output_idx
def seq_bleu(self, model_out, ref_seq):
    """Greedy-decode `model_out` logits and score BLEU against `ref_seq`."""
    # argmax over the vocabulary dimension gives the predicted token IDs.
    _, predicted = torch.max(model_out.data, dim=1)
    return utils.calc_bleu(predicted.cpu().numpy(), ref_seq)
| 23,511 | 46.595142 | 177 | py |
MRL-CQA | MRL-CQA-master/S2SRL/libbots/model.py | import numpy as np
import operator
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
import torch.nn.functional as F
from collections import OrderedDict
from . import utils
from . import attention
from . import beam_search_node
from queue import PriorityQueue
HIDDEN_STATE_SIZE = 128
EMBEDDING_DIM = 50
# nn.Module: Base class for all neural network modules.
# Your models should also subclass this class.
class PhraseModel(nn.Module):
def __init__(self, emb_size, dict_size, hid_size, LSTM_FLAG, ATT_FLAG, EMBED_FLAG=True):
# Call __init__ function of PhraseModel's parent class (nn.Module).
super(PhraseModel, self).__init__()
# self.embedding = torch.nn.Embedding(num_embeddings=vocab_size, embedding_dim=embeding_dim)
# num_embeddings = vocab_size
# embedding_dim = embeding_dim
# If no pre-trained embeddings is designated, the random vectors will be initialized.
self.emb = nn.Embedding(num_embeddings=dict_size, embedding_dim=emb_size)
if not EMBED_FLAG:
for p in self.parameters():
p.requires_grad = False
# # BiLSTM
# self.encoder = nn.LSTM(input_size=emb_size, hidden_size=hid_size,
# num_layers=1, batch_first=True, bidirectional=True)
# self.decoder = nn.LSTM(input_size=emb_size, hidden_size=hid_size,
# num_layers=2, batch_first=True)
# LSTM
# Inputs of LSTM are: input, (h_0, c_0).
# In which input is (seq_len, batch, input_size): tensor containing the features of the input sequence.
# (h_0, c_0) is the initial hidden state and cell state for each element in the batch.
# Outputs of LSTM are: output, (h_n, c_n).
# In which output is (seq_len, batch, num_directions * hidden_size):
# tensor containing the output features (h_t) from the last layer of the LSTM, for each t.
# (h_n, c_n) is tensor containing the hidden state and cell state for t = seq_len in the batch.
self.encoder = nn.LSTM(input_size=emb_size, hidden_size=hid_size,
num_layers=1, batch_first=True)
self.decoder = nn.LSTM(input_size=emb_size, hidden_size=hid_size,
num_layers=1, batch_first=True)
self.output = nn.Sequential(
nn.Linear(hid_size, dict_size)
)
self.lstm_flag = LSTM_FLAG
self.attention_flag = ATT_FLAG
if self.attention_flag:
self.attention = attention.Attention(hid_size)
print('Build attention layer.')
def zero_grad(self, params=None):
if params is None:
for param in self.parameters():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
# print(param.grad)
param.grad.zero_()
else:
for name, param in params.items():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
# print(param.grad)
param.grad.zero_()
params[name].grad = None
# Using the parameters to insert into network and compute output.
def insert_new_parameter(self, state_dict, strict):
self.load_state_dict(state_dict, strict)
# Using the parameters to insert into network and compute output.
def insert_new_parameter_to_layers(self, old_param_dict):
for (name, param) in self.named_parameters():
if param.requires_grad:
param.data = old_param_dict[name].clone().detach()
# Return all the parameters that have grads.
def grad_parameters(self, recurse=True):
r"""Returns an iterator over module parameters.
This is typically passed to an optimizer.
Args:
recurse (bool): if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module.
Yields:
Parameter: module parameter
Example::
# >>> for param in model.parameters():
# >>> print(type(param.data), param.size())
<class 'torch.FloatTensor'> (20L,)
<class 'torch.FloatTensor'> (20L, 1L, 5L, 5L)
"""
for name, param in self.named_parameters(recurse=recurse):
if param.requires_grad:
yield param
# Return all the named_parameters that have grads.
def grad_named_parameters(self, recurse=True):
for name, param in self.named_parameters(recurse=recurse):
if param.requires_grad:
yield (name, param)
# hidden state;
# return hid: (h_n, c_n) is tensor containing the hidden state and cell state for t = seq_len.
def encode(self, x):
_, hid = self.encoder(x)
return hid
# Get each time step's hidden state for encoder;
# return outputs: output, (h_n, c_n) for LSTM;
# context is (seq_len, batch, num_directions * hidden_size):
# tensor containing the output features (h_t) from the last layer of the LSTM, for each t.
# hid is (h_n, c_n), which is tensor containing the hidden state and cell state for t = seq_len.
def encode_context(self, x):
# Inputs of LSTM: input, (h_0, c_0)
# - **input** of shape `(seq_len, batch, input_size)`: tensor containing the features
# of the input sequence.
# The input can also be a packed variable length sequence.
# See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
# :func:`torch.nn.utils.rnn.pack_sequence` for details.
# - **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
# containing the initial hidden state for each element in the batch.
# If the LSTM is bidirectional, num_directions should be 2, else it should be 1.
# - **c_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
# containing the initial cell state for each element in the batch.
# If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero.
packed_context, hid = self.encoder(x)
# It is an inverse operation to :func:`pack_padded_sequence`.
# Unpack your output if required.
unpack_context, input_sizes = rnn_utils.pad_packed_sequence(packed_context, batch_first=True)
return unpack_context, hid
def get_encoded_item(self, encoded, index):
# For RNN
if not self.lstm_flag:
return encoded[:, index:index+1]
# For LSTM
if self.lstm_flag:
return encoded[0][:, index:index+1].contiguous(), \
encoded[1][:, index:index+1].contiguous()
def decode_teacher(self, hid, input_seq, context):
# Method assumes batch of size=1
out, _ = self.decoder(input_seq, hid)
if self.attention_flag:
out, attn = self.attention(out, context)
out = self.output(out.data)
return out
def decode_one(self, hid, input_x, context):
# Example for unsqueeze:
# >>> x = torch.tensor([1, 2, 3, 4])
# >>> torch.unsqueeze(x, 0)
# tensor([[ 1, 2, 3, 4]])
# >>> torch.unsqueeze(x, 1)
# tensor([[ 1],
# [ 2],
# [ 3],
# [ 4]])
out, new_hid = self.decoder(input_x.unsqueeze(0), hid)
if (self.attention_flag):
out, attn = self.attention(out, context)
# Self.output(out) using nn.Linear(hid_size, dict_size) to transform hidden states into logits over output vocab.
out = self.output(out)
# squeeze: Returns a tensor with all the dimensions of :attr:`input` of size `1` removed.
return out.squeeze(dim=0), new_hid
def decode_chain_argmax(self, hid, begin_emb, seq_len, context, stop_at_token=None):
"""
Decode sequence by feeding predicted token to the net again. Act greedily
"""
res_logits = []
res_tokens = []
# First cur_emb is the embedding of '#BEG'.
cur_emb = begin_emb
# At first using the '#BEG' as first input token and hidden states from encoder as initial hidden state to predict the first output token and first decoder hidden state.
# Then predict the output token by using last step's output token as current step's input and last step's decoder hidden state.
for _ in range(seq_len):
# The out_logits is the distribution over whole output vocabulary.
# The hid is new hidden state generated from current time step.
out_logits, hid = self.decode_one(hid, cur_emb, context)
# After torch.max operation, the result is a list.
# First element is the largest logit value in dimension-1 (each row), the second value is the index of the largest logit value.
# >>> a = torch.randn(4, 4)
# >>> a
# tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
# [ 1.1949, -1.1127, -2.2379, -0.6702],
# [ 1.5717, -0.9207, 0.1297, -1.8768],
# [-0.6172, 1.0036, -0.6060, -0.2432]])
# >>> torch.max(a, 1)
# (tensor([ 0.8475, 1.1949, 1.5717, 1.0036]), tensor([ 3, 0, 0, 1]))
out_token_v = torch.max(out_logits, dim=1)[1]
# Transform tensorflow to array and return array[0];
out_token = out_token_v.data.cpu().numpy()[0]
# Using current output token's embedding.
cur_emb = self.emb(out_token_v)
# The list of out_logits list.
res_logits.append(out_logits)
# The list of output tokens.
res_tokens.append(out_token)
# When the EOS is predicted the prediction is ended.
if stop_at_token is not None and out_token == stop_at_token:
break
# torch.cat(tensors, dim=0, out=None) → Tensor
# Concatenates the given sequence of seq tensors in the given dimension.
# All tensors must either have the same shape (except in the concatenating dimension) or be empty.
# >>> x = torch.randn(2, 3)
# >>> x
# tensor([[ 0.6580, -1.0969, -0.4614],
# [-0.1034, -0.5790, 0.1497]])
# >>> torch.cat((x, x, x), 0)
# tensor([[ 0.6580, -1.0969, -0.4614],
# [-0.1034, -0.5790, 0.1497],
# [ 0.6580, -1.0969, -0.4614],
# [-0.1034, -0.5790, 0.1497],
# [ 0.6580, -1.0969, -0.4614],
# [-0.1034, -0.5790, 0.1497]])
# >>> torch.cat((x, x, x), 1)
# tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
# -1.0969, -0.4614],
# [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
# -0.5790, 0.1497]])
# Concatenate follow rows.
return torch.cat(res_logits), res_tokens
def decode_chain_sampling(self, hid, begin_emb, seq_len, context, stop_at_token=None):
"""
Decode sequence by feeding predicted token to the net again.
Act according to probabilities
"""
res_logits = []
res_actions = []
cur_emb = begin_emb
for _ in range(seq_len):
out_logits, hid = self.decode_one(hid, cur_emb, context)
# Using softmax to transform logits to probabilities.
out_probs_v = F.softmax(out_logits, dim=1)
out_probs = out_probs_v.data.cpu().numpy()[0]
# np.random.choice(out_probs.shape[0], p=out_probs):
# choose one index from out_probs.shape[0] by the probabilities associated with each entry as out_probs.
action = int(np.random.choice(out_probs.shape[0], p=out_probs))
# Transform action to tensor and cast it to the device where begin_emb is in.
action_v = torch.LongTensor([action]).to(begin_emb.device)
action_v = action_v.cuda()
# Get the embedding of the sampled output token.
cur_emb = self.emb(action_v)
res_logits.append(out_logits)
res_actions.append(action)
if stop_at_token is not None and action == stop_at_token:
break
return torch.cat(res_logits), res_actions
def beam_decode(self, hid, seq_len, context, start_token, stop_at_token = None, beam_width = 10, topk = 5):
    '''Beam-search decoding over the decoder vocabulary.

    :param hid: encoder final state used to seed the decoder.
    :param seq_len: maximum length of a decoded sequence.
    :param context: encoder outputs for the attention layer (may be None
        when attention is disabled).
    :param start_token: tensor holding the BEG token ID that seeds the search.
    :param stop_at_token: token ID that finishes a hypothesis (e.g. END).
    :param beam_width: number of expansions kept per step.
    :param topk: number of finished hypotheses to return.
    :return: (list of stacked logits per hypothesis, list of token-ID lists),
        best-scoring hypothesis first.
    '''
    endnodes = []
    # With endnodes empty this is simply topk; expression kept as written.
    number_required = min((topk + 1), topk - len(endnodes))
    # Root node - arguments: hidden state, previous node, word id, logp,
    # length, logits.
    node = beam_search_node.BeamSearchNode(hid, None, start_token, 0, 1, None)
    nodes = PriorityQueue()
    # Lower value = higher priority, so scores are negated on insertion.
    # id(node) breaks ties between nodes with equal scores.
    nodes.put((-node.eval(), id(node), node))
    qsize = 1
    # Explore hypotheses best-first until enough of them finish.
    while True:
        # Give up when decoding takes too long.
        if qsize > 2000:
            break
        # Fetch the currently best-scoring partial hypothesis.
        score, _, n = nodes.get()
        action_v = n.wordid
        decoder_input = self.emb(action_v)
        decoder_hidden = n.h
        # NOTE(review): `and` binds tighter than `or`, so this finishes a
        # hypothesis either on the stop token, or on (reached seq_len AND
        # has a parent) - confirm the mixed-precedence condition is intended.
        if n.wordid.item() == stop_at_token or n.leng == seq_len and n.prevNode != None:
            endnodes.append((score, id(n), n))
            # Stop once enough finished hypotheses have been collected.
            if len(endnodes) >= number_required:
                break
            else:
                continue
        # Advance the decoder by one step for this hypothesis.
        out_logits, decoder_hidden = self.decode_one(decoder_hidden, decoder_input, context)
        decoder_output = F.log_softmax(out_logits, dim=1)
        # Expand the beam with the beam_width most likely next tokens.
        log_prob, indexes = torch.topk(decoder_output, beam_width)
        nextnodes = []
        for new_k in range(beam_width):
            decoded_t = indexes[0][new_k].view(-1)
            log_p = log_prob[0][new_k].item()
            # Child node accumulates log-probability and sequence length.
            node = beam_search_node.BeamSearchNode(decoder_hidden, n, decoded_t, n.logp + log_p, n.leng + 1, out_logits)
            score = -node.eval()
            nextnodes.append((score, node))
        # Queue every expansion for later best-first exploration.
        for i in range(len(nextnodes)):
            score, nn = nextnodes[i]  # NOTE(review): local `nn` shadows the torch.nn alias
            nodes.put((score, id(nn), nn))
        # One node was consumed, len(nextnodes) were added.
        qsize += len(nextnodes) - 1
    # If nothing finished, fall back to the best unfinished hypotheses.
    if len(endnodes) == 0:
        endnodes = [nodes.get() for _ in range(topk)]
    utterances = []
    all_res_logits = []
    # Sort ascending: scores were negated on insertion, so best comes first.
    for score, _, n in sorted(endnodes, key=operator.itemgetter(0)):
        utterance = []
        res_logits = []
        # Back-trace from the final node to the root.
        while n.prevNode != None:
            utterance.append(n.wordid.item())
            res_logits.append(n.logits)
            n = n.prevNode
        # Reverse so tokens run start -> end.
        utterance = utterance[::-1]
        res_logits = res_logits[::-1]
        utterances.append(utterance)
        all_res_logits.append(torch.cat(res_logits))
    return all_res_logits, utterances
def pack_batch_no_out(self, batch, embeddings, device="cpu"):
    """Pad, pack and embed a batch of (input_ids, output) samples.

    :param batch: list of (input token-ID list, output item) tuples; sorted
        in place by descending input length (a CuDNN packing requirement).
    :param embeddings: nn.Embedding used to look up token vectors.
    :param device: torch device for the created tensors.
    :return: (embedded PackedSequence, tuple of input ID lists,
        tuple of output items), both tuples in descending input length.

    Fix: tensors are placed on ``device`` instead of being forced onto CUDA;
    the previous unconditional ``.cuda()`` calls ignored the parameter and
    crashed on CPU-only machines.
    """
    assert isinstance(batch, list)
    # Sort by input length, longest first (CuDNN requirement for packing).
    batch.sort(key=lambda s: len(s[0]), reverse=True)
    # Split the (input, output) pairs into two parallel tuples.
    input_idx, output_idx = zip(*batch)
    # Build a zero-padded [batch x max_len] matrix of token IDs; lens[0]
    # is the longest sequence because of the sort above.
    lens = list(map(len, input_idx))
    input_mat = np.zeros((len(batch), lens[0]), dtype=np.int64)
    for idx, x in enumerate(input_idx):
        input_mat[idx, :len(x)] = x
    input_v = torch.tensor(input_mat).to(device)
    # Pack the padded batch so the LSTM skips the padding positions.
    input_seq = rnn_utils.pack_padded_sequence(input_v, lens, batch_first=True)
    # Embed the flattened token stream of the packed sequence and rewrap it
    # with the original batch sizes.
    r = embeddings(input_seq.data)
    emb_input_seq = rnn_utils.PackedSequence(r, input_seq.batch_sizes)
    return emb_input_seq, input_idx, output_idx
def pack_input(self, input_data, embeddings, device="cpu"):
    """Embed a single token-ID sequence and pack it for the RNN.

    :param input_data: list of token IDs (one sequence).
    :param embeddings: nn.Embedding used to look up the token vectors.
    :param device: torch device the created tensors should live on.
    :return: PackedSequence over a [1 x len(input_data) x emb_dim] batch.
    """
    # Fix: honour the requested device. The previous code called .cuda()
    # unconditionally right after .to(device), which ignored `device` and
    # crashed on CPU-only machines.
    input_v = torch.LongTensor([input_data]).to(device)
    r = embeddings(input_v)
    return rnn_utils.pack_padded_sequence(r, [len(input_data)], batch_first=True)
def pack_batch(self, batch, embeddings, device="cpu"):
    """Pack a batch of (input, outputs) samples for teacher forcing.

    :return: (embedded+packed inputs, one packed output sequence per sample
        with the trailing end token stripped, input ID lists, output lists).
    """
    emb_input_seq, input_idx, output_idx = self.pack_batch_no_out(batch, embeddings, device)
    # Strip the end token from every reference output before packing it.
    output_seq_list = [
        self.pack_input(out[:-1], embeddings, device)
        for out in output_idx
    ]
    return emb_input_seq, output_seq_list, input_idx, output_idx
def seq_bleu(self, model_out, ref_seq):
    """Greedy-decode `model_out` logits and score BLEU against `ref_seq`."""
    # argmax over the vocabulary dimension gives the predicted token IDs.
    _, predicted = torch.max(model_out.data, dim=1)
    return utils.calc_bleu(predicted.cpu().numpy(), ref_seq)
| 22,030 | 47.10262 | 177 | py |
MRL-CQA | MRL-CQA-master/S2SRL/libbots/data.py | import collections
import os
import sys
import logging
import itertools
import pickle
import json
import torch
from . import cornell
UNKNOWN_TOKEN = '#UNK'
BEGIN_TOKEN = "#BEG"
END_TOKEN = "#END"
MAX_TOKENS = 30
MIN_TOKEN_FEQ = 1
SHUFFLE_SEED = 1987
LINE_SIZE = 50000
EMB_DICT_NAME = "emb_dict.dat"
EMB_NAME = "emb.npy"
log = logging.getLogger("data")
from itertools import islice
def save_emb_dict(dir_name, emb_dict):
    """Pickle the embeddings dictionary into `dir_name`/EMB_DICT_NAME."""
    target = os.path.join(dir_name, EMB_DICT_NAME)
    with open(target, "wb") as fd:
        pickle.dump(emb_dict, fd)
def load_emb_dict(dir_name):
    """Unpickle and return the embeddings dictionary stored in `dir_name`."""
    source = os.path.join(dir_name, EMB_DICT_NAME)
    with open(source, "rb") as fd:
        return pickle.load(fd)
def encode_words(words, emb_dict):
    """Map a word list to embedding IDs, wrapped in BEG/END markers.

    Words missing from the dictionary fall back to the #UNK token's ID;
    lookup is case-insensitive (words are lower-cased first).

    :param words: list of strings
    :param emb_dict: word -> ID dictionary
    :return: list of IDs
    """
    unk_idx = emb_dict[UNKNOWN_TOKEN]
    body = [emb_dict.get(w.lower(), unk_idx) for w in words]
    return [emb_dict[BEGIN_TOKEN]] + body + [emb_dict[END_TOKEN]]
def encode_words_for_retriever(words, emb_dict):
    """Map a word list to embedding IDs without BEG/END markers.

    Words missing from the dictionary fall back to the #UNK token's ID;
    lookup is case-insensitive.

    :param words: list of strings
    :param emb_dict: word -> ID dictionary
    :return: list of IDs
    """
    unk_idx = emb_dict[UNKNOWN_TOKEN]
    return [emb_dict.get(w.lower(), unk_idx) for w in words]
def encode_phrase_pairs(phrase_pairs, emb_dict, filter_unknows=True):
    """Encode (phrase, phrase) pairs into (input_ids, output_ids) pairs.

    :param phrase_pairs: list of (word list, word list) tuples
    :param emb_dict: word -> ID dictionary
    :param filter_unknows: kept for interface compatibility; pairs that
        contain #UNK are intentionally NOT dropped here.
    :return: list of ([input_id_seq], [output_id_seq]) tuples
    """
    # Looked up for its KeyError side effect when #UNK is missing from the
    # dictionary, mirroring the original implementation.
    unk_token = emb_dict[UNKNOWN_TOKEN]
    return [
        (encode_words(left, emb_dict), encode_words(right, emb_dict))
        for left, right in phrase_pairs
    ]
def encode_phrase_pairs_RLTR(phrase_pairs, emb_dict, filter_unknows=True):
    """Encode each (phrase, info) pair; only the phrase is converted to IDs.

    :param phrase_pairs: list of (word list, per-question info) tuples
    :param emb_dict: word -> ID dictionary
    :param filter_unknows: kept for interface compatibility; samples with
        #UNK are intentionally NOT dropped here.
    :return: list of ([input_id_seq], info) tuples
    """
    # Raises KeyError when the dictionary lacks #UNK, as the original did.
    unk_token = emb_dict[UNKNOWN_TOKEN]
    return [(encode_words(left, emb_dict), info) for left, info in phrase_pairs]
def group_train_data_RLTR(training_data):
    """Freeze each sample's token list into a tuple: (ids, info) -> ((ids...), info)."""
    return [(tuple(token_ids), info) for token_ids, info in training_data]
def group_train_data_RLTR_for_support(training_data):
    """Index samples by question ID, keeping only the FIRST sample per qid.

    :param training_data: iterable of (token list, info dict with 'qid').
    :return: {qid: (tuple(token_ids), info)}
    """
    by_qid = {}
    for token_ids, info in training_data:
        qid = info['qid']
        if qid not in by_qid:
            by_qid[qid] = (tuple(token_ids), info)
    return by_qid
def group_train_data(training_data):
    """Group training pairs by their first phrase.

    :param training_data: list of (seq1, seq2) pairs
    :return: list of (tuple(seq1), [seq2, ...]) pairs, one entry per
        distinct first phrase, replies kept in encounter order.
    """
    grouped = collections.defaultdict(list)
    for first, second in training_data:
        # The tuple form of the phrase is hashable, so identical inputs
        # share one bucket of replies.
        grouped[tuple(first)].append(second)
    return list(grouped.items())
def group_train_data_one_to_one(training_data):
    """Reshape pairs to grouped format with exactly one reply per entry.

    Unlike group_train_data, identical first phrases are NOT merged.
    """
    return [(tuple(first), [second]) for first, second in training_data]
def iterate_batches(data, batch_size):
    """Yield consecutive slices of `data` of length `batch_size`.

    The final batch may be shorter; batches of size 1 are yielded as well.
    """
    assert isinstance(data, list)
    assert isinstance(batch_size, int)
    start = 0
    while True:
        chunk = data[start:start + batch_size]
        if not chunk:
            break
        yield chunk
        start += batch_size
def get_RL_question_action_list(qpath, apath):
    """Read questions and candidate actions keyed by question ID.

    Each line has the form ``<qid> tok1 tok2 ...``. Questions keep the
    first occurrence per qid; actions accumulate a list of token lists per
    qid. Files are consumed in LINE_SIZE chunks to bound memory use.

    :return: (qid -> question tokens, qid -> [action tokens, ...])
    """
    def _id_token_lines(path):
        # Stream (qid, tokens) pairs from a whitespace-separated file.
        with open(path, 'r', encoding="UTF-8") as infile:
            while True:
                chunk = list(islice(infile, LINE_SIZE))
                if not chunk:
                    return
                for line in chunk:
                    parts = str(line).strip().split(' ')
                    yield parts[0], parts[1:]

    qdict = {}
    for qid, tokens in _id_token_lines(qpath):
        # setdefault keeps the first question seen for a qid.
        qdict.setdefault(qid, tokens)
    adict = {}
    for qid, tokens in _id_token_lines(apath):
        adict.setdefault(qid, []).append(tokens)
    return qdict, adict
def get_question_token_list(path):
    """Read one token list per line from `path`, dropping the leading ID column.

    The file is consumed in LINE_SIZE chunks to bound memory use.
    """
    token_list = []
    with open(path, 'r', encoding="UTF-8") as infile:
        while True:
            chunk = list(islice(infile, LINE_SIZE))
            if not chunk:
                break
            for line in chunk:
                token_list.append(str(line).strip().split(' ')[1:])
    return token_list
def get_action_token_list(path):
    """Read one token list per line from `path`, dropping the leading ID column.

    This function duplicated get_question_token_list line for line; it now
    delegates to that helper so the parsing logic lives in one place.
    """
    return get_question_token_list(path)
def get_vocab(path):
    """Read one lower-cased vocabulary entry per line from `path`.

    The file is consumed in LINE_SIZE chunks to bound memory use.
    """
    vocab = []
    with open(path, 'r', encoding="UTF-8") as infile:
        while True:
            chunk = list(islice(infile, LINE_SIZE))
            if not chunk:
                break
            vocab.extend(str(line).strip().lower() for line in chunk)
    return vocab
def load_data(genre_filter, max_tokens=MAX_TOKENS, min_token_freq=MIN_TOKEN_FEQ):
    """Load Cornell dialogues and build training pairs plus a word dictionary.

    :param genre_filter: genre passed through to the Cornell loader
        (e.g. 'comedy').
    :param max_tokens: per-phrase token limit for training pairs.
    :param min_token_freq: words below this frequency are left out of the dict.
    :return: (phrase_pairs, phrase_dict)
    """
    dialogues = cornell.load_dialogues(genre_filter=genre_filter)
    if not dialogues:
        log.error("No dialogues found, exit!")
        sys.exit()
    log.info("Loaded %d dialogues with %d phrases, generating training pairs",
             len(dialogues), sum(map(len, dialogues)))
    # Consecutive utterances become (input, reply) training pairs.
    phrase_pairs = dialogues_to_pairs(dialogues, max_tokens=max_tokens)
    log.info("Counting freq of words...")
    word_counts = collections.Counter()
    for dial in dialogues:
        for phrase in dial:
            word_counts.update(phrase)
    # Keep only words that occur at least min_token_freq times.
    freq_set = {word for word, cnt in word_counts.items() if cnt >= min_token_freq}
    log.info("Data has %d uniq words, %d of them occur more than %d",
             len(word_counts), len(freq_set), min_token_freq)
    phrase_dict = phrase_pairs_dict(phrase_pairs, freq_set)
    return phrase_pairs, phrase_dict
def load_data_from_existing_data(QUESTION_PATH, ACTION_PATH, DIC_PATH, max_tokens=None):
    """Build (question, action) training pairs and a token dictionary from files.

    :param QUESTION_PATH: file of questions, one per line (ID + tokens).
    :param ACTION_PATH: file of action sequences aligned with the questions.
    :param DIC_PATH: vocabulary file, one token per line.
    :param max_tokens: when set, pairs where either side is longer are dropped.
    :return: (list of (question tokens, action tokens), token -> ID dict)
    """
    questions = get_question_token_list(QUESTION_PATH)
    actions = get_action_token_list(ACTION_PATH)
    result = []
    # Pairs are only formed when the two files line up one-to-one.
    if len(questions) == len(actions):
        for q, a in zip(questions, actions):
            if max_tokens is None or (len(q) <= max_tokens and len(a) <= max_tokens):
                result.append((q, a))
    # IDs 0-2 are reserved for the special tokens; real words start at 3.
    res = {UNKNOWN_TOKEN: 0, BEGIN_TOKEN: 1, END_TOKEN: 2}
    next_id = 3
    for w in get_vocab(DIC_PATH):
        if w not in res:
            res[w] = next_id
            next_id += 1
    return result, res
def load_RL_data(QUESTION_PATH, ACTION_PATH, DIC_PATH, max_tokens=None):
    """Build RL training pairs (question, action) and a token dictionary.

    A question may have several candidate actions; one pair is emitted per
    action. Pairs whose question or action exceeds `max_tokens` are dropped.

    :return: (pair list, token -> ID dictionary)
    """
    qdict, adict = get_RL_question_action_list(QUESTION_PATH, ACTION_PATH)
    result = []
    # Only combine when the two files describe the same set of questions.
    if len(qdict) == len(adict):
        for qid, question in qdict.items():
            if max_tokens is not None and len(question) > max_tokens:
                continue
            for action in adict.get(qid, []):
                if max_tokens is None or len(action) <= max_tokens:
                    result.append((question, action))
    # IDs 0-2 are reserved for the special tokens; real words start at 3.
    res = {UNKNOWN_TOKEN: 0, BEGIN_TOKEN: 1, END_TOKEN: 2}
    next_id = 3
    for w in get_vocab(DIC_PATH):
        if w not in res:
            res[w] = next_id
            next_id += 1
    return result, res
def load_RL_data_TR(QUESTION_PATH, DIC_PATH=None, max_tokens=None, NSM=False):
    """Load RL training samples from a JSON question file.

    Each qualifying record becomes (input token list, question-info dict);
    with NSM=True the info dict additionally carries 'pseudo_gold_program'.

    :param QUESTION_PATH: JSON file mapping qid -> question record.
    :param DIC_PATH: optional vocabulary file; when given, a token -> ID
        dictionary is built and returned as a second value.
    :param max_tokens: maximum input length.
        NOTE(review): `length <= max_tokens` is evaluated unconditionally,
        so passing max_tokens=None raises TypeError here - callers appear
        to always pass an int; confirm.
    :param NSM: include 'pseudo_gold_program' in the info dict when True.
    :return: result list, plus the token dict when DIC_PATH is not None.
    """
    result = []
    with open(QUESTION_PATH, 'r', encoding="UTF-8") as load_f:
        load_dict = json.load(load_f)
        for key, value in load_dict.items():
            # Length of the whitespace-tokenised input question.
            length = len(str(value['input']).strip().split(' '))
            # Keep only records that carry every required field and fit the limit.
            if 'entity_mask' in value and 'relation_mask' in value and 'type_mask' in value and 'response_bools' in value and 'response_entities' in value and 'orig_response' in value and 'question' in value and length <= max_tokens:
                if not NSM:
                    question_info = {'qid': key, 'entity_mask': value['entity_mask'],
                                     'relation_mask': value['relation_mask'],
                                     'type_mask': value['type_mask'], 'response_bools': value['response_bools'],
                                     'response_entities': value['response_entities'],
                                     'orig_response': value['orig_response']}
                else:
                    question_info = {'qid': key, 'entity_mask': value['entity_mask'],
                                     'relation_mask': value['relation_mask'],
                                     'type_mask': value['type_mask'], 'response_bools': value['response_bools'],
                                     'response_entities': value['response_entities'],
                                     'orig_response': value['orig_response'],
                                     'pseudo_gold_program': value['pseudo_gold_program']}
                result.append((str(value['input']).strip().split(' '), question_info))
            else:
                continue
    if (DIC_PATH != None):
        # IDs 0-2 are reserved for the special tokens.
        res = {UNKNOWN_TOKEN: 0, BEGIN_TOKEN: 1, END_TOKEN: 2}
        vocab_list = get_vocab(DIC_PATH)
        next_id = 3
        for w in vocab_list:
            if w not in res:
                res[w] = next_id
                next_id += 1
        return result, res
    else:
        return result
def load_RL_data_TR_INT(QUESTION_PATH, DIC_PATH=None, max_tokens=None, NSM=False):
    """Load RL training samples that additionally carry an integer mask.

    Same as load_RL_data_TR, but records must also contain 'int_mask' and
    that mask is copied into the per-question info dict.

    :param max_tokens: maximum input length.
        NOTE(review): `length <= max_tokens` is evaluated unconditionally,
        so passing max_tokens=None raises TypeError here - confirm callers
        always pass an int.
    :return: result list, plus the token dict when DIC_PATH is not None.
    """
    result = []
    with open(QUESTION_PATH, 'r', encoding="UTF-8") as load_f:
        load_dict = json.load(load_f)
        for key, value in load_dict.items():
            # Length of the whitespace-tokenised input question.
            length = len(str(value['input']).strip().split(' '))
            # Keep only records that carry every required field (incl. int_mask).
            if 'entity_mask' in value and 'relation_mask' in value and 'type_mask' in value and 'response_bools' in value and 'response_entities' in value and 'orig_response' in value and 'question' in value and 'int_mask' in value and length <= max_tokens:
                if not NSM:
                    question_info = {'qid': key, 'entity_mask': value['entity_mask'],
                                     'relation_mask': value['relation_mask'],
                                     'type_mask': value['type_mask'], 'response_bools': value['response_bools'],
                                     'response_entities': value['response_entities'],
                                     'orig_response': value['orig_response'], 'int_mask': value['int_mask']}
                else:
                    question_info = {'qid': key, 'entity_mask': value['entity_mask'],
                                     'relation_mask': value['relation_mask'],
                                     'type_mask': value['type_mask'], 'response_bools': value['response_bools'],
                                     'response_entities': value['response_entities'],
                                     'orig_response': value['orig_response'], 'int_mask': value['int_mask'],
                                     'pseudo_gold_program': value['pseudo_gold_program']}
                result.append((str(value['input']).strip().split(' '), question_info))
            else:
                continue
    if (DIC_PATH != None):
        # IDs 0-2 are reserved for the special tokens.
        res = {UNKNOWN_TOKEN: 0, BEGIN_TOKEN: 1, END_TOKEN: 2}
        vocab_list = get_vocab(DIC_PATH)
        next_id = 3
        for w in vocab_list:
            if w not in res:
                res[w] = next_id
                next_id += 1
        return result, res
    else:
        return result
def load_data_MAML(QUESTION_PATH, DIC_PATH=None, max_tokens=None):
    """Load MAML training samples from a JSON question file.

    Each qualifying record becomes (input token list, question-info dict);
    the info dict also carries the raw entity/relation/type fields and the
    natural-language question (used for task construction in MAML).

    :param QUESTION_PATH: JSON file mapping qid -> question record.
    :param DIC_PATH: optional vocabulary file; when given, a token -> ID
        dictionary is built and returned as a second value.
    :param max_tokens: optional maximum input length (None = no limit).
    :return: result list, plus the token dict when DIC_PATH is not None.
    """
    result = []
    with open(QUESTION_PATH, 'r', encoding="UTF-8") as load_f:
        load_dict = json.load(load_f)
        for key, value in load_dict.items():
            # Length of the whitespace-tokenised input question.
            length = len(str(value['input']).strip().split(' '))
            if max_tokens is None or length <= max_tokens:
                # Keep only records that carry every required field.
                if 'entity_mask' in value and 'relation_mask' in value and 'type_mask' in value and 'response_bools' in value and 'response_entities' in value and 'orig_response' in value and 'question' in value:
                    question_info = {'qid': key, 'entity_mask': value['entity_mask'],
                                     'relation_mask': value['relation_mask'],
                                     'type_mask': value['type_mask'], 'response_bools': value['response_bools'],
                                     'response_entities': value['response_entities'],
                                     'orig_response': value['orig_response'],
                                     'entity': value['entity'], 'relation': value['relation'], 'type': value['type'],
                                     'question': value['question']}
                    result.append((str(value['input']).strip().split(' '), question_info))
                else:
                    continue
    if (DIC_PATH != None):
        # IDs 0-2 are reserved for the special tokens.
        res = {UNKNOWN_TOKEN: 0, BEGIN_TOKEN: 1, END_TOKEN: 2}
        vocab_list = get_vocab(DIC_PATH)
        next_id = 3
        for w in vocab_list:
            if w not in res:
                res[w] = next_id
                next_id += 1
        return result, res
    else:
        return result
# TODO: unify load_data_MAML_TEST with load_data_MAML with using 'response_bools';
def load_data_MAML_TEST(QUESTION_PATH, DIC_PATH=None, max_tokens=None):
    """Load MAML test samples from a JSON question file.

    Identical to load_data_MAML except that 'response_bools' is neither
    required nor kept (test records do not carry it).

    :param QUESTION_PATH: path to the JSON file mapping qid -> question record.
    :param DIC_PATH: optional vocabulary file; when given, a token->ID dict
        (with #UNK/#BEG/#END reserved at IDs 0..2) is also returned.
    :param max_tokens: drop questions whose input has more tokens than this.
    :return: result list, or (result, vocab_dict) when DIC_PATH is given.
    """
    required_keys = ('entity_mask', 'relation_mask', 'type_mask',
                     'response_entities', 'orig_response', 'question')
    result = []
    with open(QUESTION_PATH, 'r', encoding="UTF-8") as load_f:
        load_dict = json.load(load_f)
    for key, value in load_dict.items():
        # Tokenize once; the original split the same string twice.
        tokens = str(value['input']).strip().split(' ')
        if max_tokens is not None and len(tokens) > max_tokens:
            continue
        # Skip records that lack any of the fields needed downstream.
        if not all(k in value for k in required_keys):
            continue
        question_info = {'qid': key,
                         'entity_mask': value['entity_mask'],
                         'relation_mask': value['relation_mask'],
                         'type_mask': value['type_mask'],
                         'response_entities': value['response_entities'],
                         'orig_response': value['orig_response'],
                         'entity': value['entity'],
                         'relation': value['relation'],
                         'type': value['type'],
                         'question': value['question']}
        result.append((tokens, question_info))
    if DIC_PATH is None:
        return result
    # Build the vocabulary with the three reserved tokens first.
    res = {UNKNOWN_TOKEN: 0, BEGIN_TOKEN: 1, END_TOKEN: 2}
    next_id = 3
    for w in get_vocab(DIC_PATH):
        if w not in res:
            res[w] = next_id
            next_id += 1
    return result, res
def load_dict(DIC_PATH=None):
    """Build the token->ID vocabulary from a vocab file.

    IDs 0..2 are reserved for #UNK, #BEG and #END; remaining tokens are
    numbered in file order, skipping duplicates.

    :param DIC_PATH: vocabulary file path; None yields an empty dict.
    :return: dict mapping token -> integer ID.
    """
    if DIC_PATH is None:
        return {}
    res = {UNKNOWN_TOKEN: 0, BEGIN_TOKEN: 1, END_TOKEN: 2}
    next_id = 3
    for w in get_vocab(DIC_PATH):
        if w not in res:
            res[w] = next_id
            next_id += 1
    return res
def phrase_pairs_dict(phrase_pairs, freq_set):
    """
    Return the dict of words in the dialogues mapped to their IDs.

    Only lower-cased words present in *freq_set* receive an ID; the three
    reserved tokens occupy IDs 0..2.
    :param phrase_pairs: list of (phrase, phrase) pairs
    :param freq_set: set of words frequent enough to keep
    :return: dict mapping word -> ID
    """
    vocab = {UNKNOWN_TOKEN: 0, BEGIN_TOKEN: 1, END_TOKEN: 2}
    for first, second in phrase_pairs:
        for word in map(str.lower, itertools.chain(first, second)):
            if word in vocab or word not in freq_set:
                continue
            # len(vocab) is always the next free ID (starts at 3).
            vocab[word] = len(vocab)
    return vocab
# Combine each utterance of a dialogue with the utterance that follows it into one training pair;
def dialogues_to_pairs(dialogues, max_tokens=None):
    """
    Convert dialogues to training pairs of consecutive phrases.

    :param dialogues: iterable of dialogues (each an iterable of phrases)
    :param max_tokens: limit of tokens in both question and reply
    :return: list of (phrase, phrase) pairs
    """
    pairs = []
    for dialogue in dialogues:
        previous = None
        for current in dialogue:
            # Pair each phrase with its predecessor, honoring the length cap.
            if previous is not None and (
                    max_tokens is None
                    or (len(previous) <= max_tokens and len(current) <= max_tokens)):
                pairs.append((previous, current))
            previous = current
    return pairs
def decode_words(indices, rev_emb_dict):
    """Translate token IDs back into words; IDs missing from the reverse
    vocabulary become UNKNOWN_TOKEN."""
    return [rev_emb_dict[token_id] if token_id in rev_emb_dict else UNKNOWN_TOKEN
            for token_id in indices]
def trim_tokens_seq(tokens, end_token):
    """Return the prefix of *tokens* up to and including the first
    *end_token*; the whole sequence if end_token never appears."""
    trimmed = []
    for token in tokens:
        trimmed.append(token)
        if token == end_token:
            return trimmed
    return trimmed
def split_train_test(data, train_ratio=0.90):
    """Split *data* into a (train, test) pair, putting the first
    train_ratio fraction (rounded down) into the training slice."""
    boundary = int(train_ratio * len(data))
    train, test = data[:boundary], data[boundary:]
    return train, test
def get944k(path):
    """Load and return the 944K support dictionary stored as JSON at *path*."""
    with open(path, "r", encoding='UTF-8') as handle:
        return json.load(handle)
def get_webqsp(path):
    """Load and return the WebQSP dictionary stored as JSON at *path*."""
    with open(path, "r", encoding='UTF-8') as handle:
        return json.load(handle)
def get_docID_indices(order_list):
    """Assign a dense index to every distinct docID in *order_list*.

    :param order_list: iterable of single-entry {docID: document} dicts
        (empty dicts are skipped).
    :return: (docID -> index dict, list of documents in index order).
    """
    doc_indices = {}
    documents = []
    for entry in order_list:
        if len(entry) < 1:
            continue
        doc_id, document = list(entry.items())[0]
        if doc_id not in doc_indices:
            # The next free index is exactly the number of stored documents.
            doc_indices[doc_id] = len(documents)
            documents.append(document)
    return doc_indices, documents
def get_ordered_docID_document(filepath):
    """Load the ordered docID/document list stored as JSON at *filepath*."""
    with open(filepath, 'r', encoding="UTF-8") as handle:
        return json.load(handle)
def load_json(QUESTION_PATH):
    """Read the JSON file at *QUESTION_PATH* and return the parsed object."""
    with open(QUESTION_PATH, 'r', encoding="UTF-8") as handle:
        return json.load(handle)
def get_qid_question_pairs(filepath):
    """Return a {qid: question} dict built from the ordered docID/document
    list stored at *filepath*."""
    pairs = {}
    for entry in get_ordered_docID_document(filepath):
        doc_id, document = list(entry.items())[0]
        pairs[doc_id] = document
    return pairs
def get_question_embedding(question, emb_dict, net):
    """Mean-pool the token embeddings of *question* into a single (1, dim)
    tensor on the GPU; tokens missing from emb_dict fall back to '#UNK'."""
    # Drop '?' entirely, then map the remaining punctuation to spaces in one pass.
    cleaned = question.lower().replace('?', '')
    cleaned = cleaned.translate(str.maketrans({ch: ' ' for ch in ',:()"'}))
    tokens = cleaned.strip().split()
    indices = [emb_dict[token] if token in emb_dict else emb_dict['#UNK']
               for token in tokens]
    token_embeddings = net.emb(torch.tensor(indices, requires_grad=False).cuda())
    mean_embedding = torch.mean(token_embeddings, 0).view(1, -1)
    # Re-wrap as a fresh tensor so the result is detached from the embedding graph.
    return torch.tensor(mean_embedding.tolist(), requires_grad=False).cuda()
| 21,655 | 35.705085 | 257 | py |
MRL-CQA | MRL-CQA-master/S2SRL/libbots/metalearner.py | import torch
from torch.nn.utils.convert_parameters import (vector_to_parameters,
parameters_to_vector)
from . import data, model, utils, retriever, reparam_module, adabound
import torch.optim as optim
import torch.nn.functional as F
import random
import logging
from torch.utils.data.sampler import WeightedRandomSampler
log = logging.getLogger("MetaLearner")
class MetaLearner(object):
"""Meta-learner
The meta-learner is responsible for sampling the trajectories/episodes
(before and after the one-step adaptation), compute the inner loss, compute
the updated parameters based on the inner-loss, and perform the meta-update.
[1] Chelsea Finn, Pieter Abbeel, Sergey Levine, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
[2] Richard Sutton, Andrew Barto, "Reinforcement learning: An introduction",
2018 (http://incompleteideas.net/book/the-book-2nd.html)
[3] John Schulman, Philipp Moritz, Sergey Levine, Michael Jordan,
Pieter Abbeel, "High-Dimensional Continuous Control Using Generalized
Advantage Estimation", 2016 (https://arxiv.org/abs/1506.02438)
[4] John Schulman, Sergey Levine, Philipp Moritz, Michael I. Jordan,
Pieter Abbeel, "Trust Region Policy Optimization", 2015
(https://arxiv.org/abs/1502.05477)
"""
def __init__(self, net=None, retriever_net=None, device='cpu', beg_token=None, end_token = None, adaptive=False, samples=5, train_data_support_944K=None, rev_emb_dict=None, first_order=False, fast_lr=0.001, meta_optimizer_lr=0.0001, dial_shown = False, dict=None, dict_weak=None, steps=5, weak_flag=False, query_embed=True):
    """Wire up the meta-learner.

    :param net: seq2seq policy network (provides emb/encode/decode used by the loss methods).
    :param retriever_net: network that scores support documents (optional).
    :param device: torch device string used when building tensors here.
    :param beg_token: BOS token id fed to the decoder; end_token stops decoding.
    :param adaptive: passed to the reward computation (adaptive vs 0-1 reward).
    :param samples: number of sampling rollouts per question in the inner loss.
    :param train_data_support_944K: pool from which support sets are drawn.
    :param rev_emb_dict: id -> token reverse vocabulary.
    :param first_order: use the first-order MAML approximation.
    :param fast_lr: inner-loop (adaptation) learning rate.
    :param meta_optimizer_lr: outer-loop (meta) learning rate.
    :param dial_shown: suppresses the one-time logging of decoded dialogues.
    :param dict / dict_weak: vocabularies handed to the heuristic Retriever.
    :param steps: stored but not used by the methods visible here — TODO confirm.
    :param weak_flag: use weak-supervision retrieval.
    :param query_embed: embed the question text as the retrieval query instead of its qid.
    """
    self.net = net
    self.retriever_net = retriever_net
    self.device = device
    self.beg_token = beg_token
    self.end_token = end_token
    # The training data from which the top-N samples (support set) are found.
    self.train_data_support_944K = train_data_support_944K
    self.rev_emb_dict = rev_emb_dict
    self.adaptive = adaptive
    self.samples = samples
    self.first_order = first_order
    self.fast_lr = fast_lr
    self.meta_optimizer_lr = meta_optimizer_lr
    # Optimizers are only built when the corresponding network is supplied.
    self.meta_optimizer = None if net is None else optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=meta_optimizer_lr, eps=1e-3)
    self.inner_optimizer = None if net is None else optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=fast_lr, eps=1e-3)
    self.dial_shown = dial_shown
    # The heuristic retriever needs both vocabularies; the retriever net uses AdaBound.
    self.retriever = None if (dict is None or dict_weak is None) else retriever.Retriever(dict, dict_weak)
    self.retriever_optimizer = None if retriever_net is None else adabound.AdaBound(filter(lambda p: p.requires_grad, retriever_net.parameters()), lr=1e-3, final_lr=0.1)
    self.steps = steps
    self.weak_flag = weak_flag
    self.query_embed = query_embed
    '''# note: Reparametrize it!
    self.reparam_net = reparam_module.ReparamModule(self.net)
    print(f"reparam_net has {self.reparam_net.param_numel} parameters")
    # module.parameters() is traversing the parameters by elements in the module._parameters.items().
    # Tensors like weights or biases are stored in the module._parameters, and the items in _parameters and weights/biases are the same items.
    # In class ReparamModule(net), the weights/biases of net have been deleted from each module's _parameters.
    # b = reparam_net.parameters()
    # for param in b:
    #     print (param)
    # (reparam_net.flat_param,): Creating a tuple with one element
    # parameters(): get items from module._parameters.items()
    assert tuple(self.reparam_net.parameters()) == (self.reparam_net.flat_param,)
    print(f"reparam_net now has **only one** vector parameter of shape {self.reparam_net.flat_param.shape}")'''
def reptile_meta_update(self, running_vars, old_param_dict, beta):
    """Reptile outer update: move each trainable parameter from its
    pre-adaptation value toward the mean of the task-adapted values.

    :param running_vars: name -> list of adapted parameter tensors.
    :param old_param_dict: name -> pre-adaptation parameter tensor.
    :param beta: outer-loop step size.
    """
    for name, weight in self.net.named_parameters():
        if not weight.requires_grad:
            continue
        adapted_mean = torch.stack(running_vars[name]).mean(0).clone().detach()
        previous = old_param_dict[name].clone().detach()
        weight.data = previous + beta * (adapted_mean - previous)
def first_order_meta_update(self, grads_list, old_param_dict):
    """First-order MAML outer update.

    Applies Theta <- Theta - beta * (1/k) * sum_i grad_i to every trainable
    parameter, where the per-task gradients were collected in grads_list and
    Theta is the stored pre-adaptation value in old_param_dict.

    :param grads_list: name -> list of per-task gradient tensors.
    :param old_param_dict: name -> pre-adaptation parameter tensor.
    """
    for name, weight in self.net.named_parameters():
        if not weight.requires_grad:
            continue
        # Average the per-task gradients and detach so no graph is kept.
        mean_grad = torch.stack(grads_list[name]).mean(0).clone().detach()
        weight.data = old_param_dict[name].clone().detach() - self.meta_optimizer_lr * mean_grad
def meta_update(self, loss):
    """Outer-loop optimizer step on the meta-parameters.

    Clears stale gradients, backpropagates *loss* into the policy network,
    then lets the meta optimizer (Adam) apply the update.

    :param loss: scalar loss tensor for the meta objective.
    """
    optimizer = self.meta_optimizer
    # Reset .grad before backward, otherwise gradients from earlier passes accumulate.
    optimizer.zero_grad()
    # Populate .grad on every parameter with requires_grad=True.
    loss.backward()
    # Gradient-descent step using the accumulated .grad values.
    optimizer.step()
def reparam_meta_update(self, loss):
    """Manual SGD outer-loop step on the reparametrized network's single
    flat parameter vector: theta <- theta - lr * dloss/dtheta.

    :param loss: scalar loss tensor for the meta objective.
    """
    flat = self.reparam_net.flat_param
    # Accumulate dloss/dtheta into flat.grad.
    loss.backward()
    # In-place gradient-descent step on the flat parameter data.
    flat.data = flat.data - self.meta_optimizer_lr * flat.grad.data
def get_inner_loop_parameter_dict(self, params):
    """Clone every trainable parameter onto self.device for inner-loop use.

    The clones stay attached to the autograd graph (clone, not detach), so
    gradients taken through them still flow back to the originals.

    :param params: iterable of (name, parameter) pairs.
    :return: dict mapping parameter name -> cloned tensor.
    """
    return {name: weight.to(device=self.device).clone()
            for name, weight in params if weight.requires_grad}
def get_net_named_parameter(self):
    """Snapshot the net's trainable parameters.

    Unlike get_inner_loop_parameter_dict, the copies are detached, so they
    are plain value snapshots with no autograd link to the originals.

    :return: dict mapping parameter name -> detached cloned tensor.
    """
    return {name: weight.to(device=self.device).clone().detach()
            for name, weight in self.net.named_parameters()
            if weight.requires_grad}
def get_net_parameter(self):
    """Snapshot ALL of the net's parameters (frozen ones included).

    :return: dict mapping parameter name -> detached cloned tensor.
    """
    return {name: weight.to(device=self.device).clone().detach()
            for name, weight in self.net.named_parameters()}
def establish_support_set(self, task, N=5, weak=False, train_data_support_944K=None):
    """Build a support set of up to N samples for *task* using the heuristic
    retriever; with N == 0 the task itself is its own support set.

    :param task: (token_list, question_info) training pair.
    :param N: support-set size.
    :param weak: use weak-supervision retrieval keys.
    :param train_data_support_944K: qid -> sample pool to draw from.
    :return: list of support samples (at most N).
    """
    if N == 0:
        return [task]
    support = []
    key_name, key_weak, question, qid = self.retriever.AnalyzeQuestion(task[1])
    candidates = self.retriever.RetrieveWithMaxTokens(
        N, key_name, key_weak, question, train_data_support_944K, weak, qid)
    for candidate in candidates:
        # Each candidate is a single-entry {qid: doc} dict (possibly empty).
        cand_qid = list(candidate.keys())[0] if len(candidate) > 0 else 'NONE'
        if cand_qid in train_data_support_944K:
            support.append(train_data_support_944K[cand_qid])
            if len(support) == N:
                break
    return support
# Randomly select training samples as support set.
def establish_random_support_set(self, task=None, N=5, train_data_support_944K=None):
    """Build a support set of up to N samples drawn at random (within the
    same question type) from the 944K pool; with N == 0 the task itself is
    its own support set.

    :param task: (token_list, question_info) training pair.
    :param N: support-set size.
    :param train_data_support_944K: qid -> sample pool to draw from.
    :return: list of support samples (at most N).
    """
    if N == 0:
        return [task]
    support = []
    key_name, key_weak, question, qid = self.retriever.AnalyzeQuestion(task[1])
    candidates = self.retriever.RetrieveRandomSamplesWithMaxTokens(
        N=N, key_weak=key_weak, train_data_944k=train_data_support_944K, qid=qid)
    for candidate in candidates:
        # Each candidate is a single-entry {qid: doc} dict (possibly empty).
        cand_qid = list(candidate.keys())[0] if len(candidate) > 0 else 'NONE'
        if cand_qid in train_data_support_944K:
            support.append(train_data_support_944K[cand_qid])
            if len(support) == N:
                break
    return support
# Return support samples strictly following the ranking.
def establish_support_set_by_retriever_argmax(self, task, N=5, train_data_support_944K=None, docID_dict=None, rev_docID_dict=None, emb_dict=None, qtype_docs_range=None):
    """Greedily build a support set of up to N samples from the retriever
    network's ranking over the documents of this task's question type.

    With N == 0 the task itself is returned as its own support set.  The
    top N+10 candidates are scanned so that qids missing from the 944K
    pool can be skipped while still collecting N samples.
    """
    batch = list()
    if N == 0:
        batch.append(task)
    else:
        key_name, key_weak, question, qid = self.retriever.AnalyzeQuestion(task[1])
        if not self.query_embed:
            # NOTE(review): the literal key 'qid' is looked up here, not the
            # qid variable extracted above — confirm this is intended.
            query_tensor = torch.tensor(self.retriever_net.pack_input(docID_dict['qid']).tolist(), requires_grad=False).cuda()
        else:
            query_tensor = data.get_question_embedding(question, emb_dict, self.net)
        # Restrict scoring to this question type's slice of documents, if known.
        if key_weak in qtype_docs_range:
            document_range = (qtype_docs_range[key_weak]['start'], qtype_docs_range[key_weak]['end'])
        else:
            document_range = (0, len(docID_dict))
        logsoftmax_output, _, _ = self.retriever_net(query_tensor, document_range)
        # torch.topk returns (values, indices); the indices are local to document_range.
        orders = torch.topk(logsoftmax_output, N+10)
        order_list = orders[1].tolist()
        # Shift range-local indices to global document ids, then map to qids.
        topNIndices = [k+document_range[0] for k in order_list]
        topNList = [rev_docID_dict[k] for k in topNIndices]
        for qid in topNList:
            if qid in train_data_support_944K:
                batch.append(train_data_support_944K[qid])
                if len(batch) == N:
                    break
    return batch
# Return support samples strictly following the probability distribution.
def establish_support_set_by_retriever_sampling(self, task, N=5, train_data_support_944K=None, docID_dict=None, rev_docID_dict=None, emb_dict=None, qtype_docs_range=None, number_of_supportsets=5):
    """Sample up to `number_of_supportsets` candidate support sets (each of
    up to N samples) from the retriever's softmax distribution.

    Sampling is confined to the top N+45 documents by cosine score for the
    task's question type.  Support sets whose sorted qid list duplicates an
    earlier one are dropped and counted in retriever_skip_samples.

    :return: (batch_list, logprob_list, retriever_total_samples,
        retriever_skip_samples), where logprob_list holds, per kept support
        set, the stacked log-probabilities of its drawn documents.
    """
    retriever_total_samples = 0
    retriever_skip_samples = 0
    batch_list = list()
    logprob_list = list()
    key_name, key_weak, question, qid = self.retriever.AnalyzeQuestion(task[1])
    if not self.query_embed:
        # NOTE(review): the literal key 'qid' is looked up here, not the
        # qid variable extracted above — confirm this is intended.
        query_tensor = torch.tensor(self.retriever_net.pack_input(docID_dict['qid']).tolist(),
                                    requires_grad=False).cuda()
    else:
        query_tensor = data.get_question_embedding(question, emb_dict, self.net)
    # Restrict scoring to this question type's slice of documents, if known.
    if key_weak in qtype_docs_range:
        document_range = (qtype_docs_range[key_weak]['start'], qtype_docs_range[key_weak]['end'])
    else:
        document_range = (0, len(docID_dict))
    logsoftmax_output, softmax_output, cos_output = self.retriever_net(query_tensor, document_range)
    # torch.topk returns (values, indices); indices are local to document_range.
    orders = torch.topk(cos_output, N+45)
    order_list = orders[1].tolist()
    # Probabilities restricted to the top-(N+45) candidates only.
    order_softmax_output_prob = [softmax_output[x] for x in order_list]
    qid_lists = list()
    for i in range(number_of_supportsets):
        batch = list()
        logprob_for_samples = list()
        # Draw N distinct candidates (without replacement) weighted by softmax probability.
        draw = list(WeightedRandomSampler(order_softmax_output_prob, N, replacement=False))
        draw_list = [order_list[j] for j in draw]
        # Global ids for qid lookup; range-local ids for log-prob lookup.
        topNIndices = [k + document_range[0] for k in draw_list]
        logprobs = [logsoftmax_output[k] for k in draw_list]
        topNList = [rev_docID_dict[k] for k in topNIndices]
        qids = list()
        for qid, logprob in zip(topNList, logprobs):
            if qid in train_data_support_944K:
                batch.append(train_data_support_944K[qid])
                logprob_for_samples.append(logprob)
                qids.append(qid)
                if len(batch) == N:
                    break
        # Sort so that identical support sets compare equal regardless of draw order.
        qids.sort()
        retriever_total_samples += 1
        if len(qid_lists) == 0:
            batch_list.append(batch)
            logprob_list.append(torch.stack(logprob_for_samples))
            qid_lists.append(qids)
        else:
            identical_flag = False
            for qids_temp in qid_lists:
                if qids_temp == qids:
                    retriever_skip_samples += 1
                    identical_flag = True
                    break
            if not identical_flag:
                qid_lists.append(qids)
                batch_list.append(batch)
                logprob_list.append(torch.stack(logprob_for_samples))
    return batch_list, logprob_list, retriever_total_samples, retriever_skip_samples
def reparam_update_params(self, inner_loss, theta, lr=0.1, first_order=False):
    """One gradient-descent step on the flat parameter vector *theta*.

    "One step" means the loss of one batch of samples (one task) has been
    computed.  With first_order=False, create_graph=True keeps the graph of
    the derivative so higher-order meta-gradients can flow through the
    returned vector.

    :param inner_loss: scalar loss for the adaptation step.
    :param theta: flat parameter vector of the reparametrized net.
    :param lr: inner-loop step size.
    :param first_order: drop second-order terms when True.
    :return: updated flat parameter vector.
    """
    gradient, = torch.autograd.grad(inner_loss, theta, create_graph=not first_order)
    return theta - lr * gradient
def update_params(self, inner_loss, names_weights_copy, step_size=0.1, first_order=False):
    """Apply one gradient-descent step w.r.t. *inner_loss* and return the
    adapted parameters as a name -> tensor dict.

    "One step" means the loss of one batch of samples (one task) has been
    computed.  The net's own parameters are NOT modified: the adapted values
    are returned functionally, so the outer loop can differentiate through
    them (create_graph=True unless first_order is requested).

    :param inner_loss: scalar loss for the adaptation step.
    :param names_weights_copy: unused; kept for interface compatibility.
    :param step_size: inner-loop learning rate.
    :param first_order: drop second-order terms when True.
    :return: dict mapping parameter name -> adapted tensor.
    """
    # Clear stale gradients before differentiating.
    self.net.zero_grad()
    # autograd.grad returns gradients matching self.net.parameters() order.
    grads = torch.autograd.grad(inner_loss, self.net.parameters(),
                                create_graph=not first_order)
    return {name: weight - step_size * grad
            for (name, weight), grad in zip(self.net.named_parameters(), grads)}
# The loss used to calculate theta' for each pseudo-task.
def inner_loss(self, task, weights=None, dial_shown=True):
    """REINFORCE-with-baseline inner loss for one task [2].

    Decodes the question greedily (argmax) to obtain the baseline reward,
    then draws self.samples sampled decodings; each sampled token's
    advantage is (sample_reward - argmax_reward).  Duplicate sampled action
    sequences are skipped so their probability is not reinforced twice.

    Fix: the empty-net_policies branch now returns the same 5-tuple shape
    as the normal path (it previously returned only 3 values, crashing any
    caller that unpacks five).

    :param task: (token_list, question_info) training pair.
    :param weights: optional adapted parameters to load into the net first.
    :param dial_shown: suppress the one-time logging of decoded dialogues.
    :return: (loss, total_samples, skipped_samples,
              true_reward_argmax_step, true_reward_sample_step)
    """
    total_samples = 0
    skipped_samples = 0
    batch = list()
    batch.append(task)
    true_reward_argmax_step = []
    true_reward_sample_step = []
    if weights is not None:
        self.net.insert_new_parameter(weights, True)
    # input_seq: padded, embedded batch input; input_batch: token-ID rows;
    # output_batch: per-row question_info dicts.
    input_seq, input_batch, output_batch = self.net.pack_batch_no_out(batch, self.net.emb, self.device)
    input_seq = input_seq.cuda()
    # Encoder hidden states and per-step context for attention.
    context, enc = self.net.encode_context(input_seq)
    net_policies = []
    net_actions = []
    net_advantages = []
    beg_embedding = self.net.emb(self.beg_token)
    beg_embedding = beg_embedding.cuda()
    for idx, inp_idx in enumerate(input_batch):
        qa_info = output_batch[idx]
        item_enc = self.net.get_encoded_item(enc, idx)
        # Greedy decoding: each step feeds back its own argmax token.
        r_argmax, actions = self.net.decode_chain_argmax(item_enc, beg_embedding, data.MAX_TOKENS, context[idx],
                                                         stop_at_token=self.end_token)
        action_tokens = []
        for temp_idx in actions:
            if temp_idx in self.rev_emb_dict and self.rev_emb_dict.get(temp_idx) != '#END':
                action_tokens.append(str(self.rev_emb_dict.get(temp_idx)).upper())
        # NOTE(review): the true-reward computation is stubbed out with a
        # random value — confirm whether this is intentional before training.
        # argmax_reward = utils.calc_True_Reward(action_tokens, qa_info, self.adaptive)
        argmax_reward = random.random()
        true_reward_argmax_step.append(argmax_reward)
        if not dial_shown:
            log.info("Input: %s", utils.untokenize(data.decode_words(inp_idx, self.rev_emb_dict)))
            log.info("Argmax: %s, reward=%.4f", utils.untokenize(data.decode_words(actions, self.rev_emb_dict)),
                     argmax_reward)
        action_memory = list()
        for _ in range(self.samples):
            # Stochastic decoding: tokens are sampled from the output distribution.
            r_sample, actions = self.net.decode_chain_sampling(item_enc, beg_embedding, data.MAX_TOKENS,
                                                               context[idx], stop_at_token=self.end_token)
            total_samples += 1
            # Skip duplicate action sequences so their probability is not
            # pushed up redundantly.
            duplicate_flag = False
            if len(action_memory) > 0:
                for temp_list in action_memory:
                    if utils.duplicate(temp_list, actions):
                        duplicate_flag = True
                        break
            if not duplicate_flag:
                action_memory.append(actions)
            else:
                skipped_samples += 1
                continue
            action_tokens = []
            for temp_idx in actions:
                if temp_idx in self.rev_emb_dict and self.rev_emb_dict.get(temp_idx) != '#END':
                    action_tokens.append(str(self.rev_emb_dict.get(temp_idx)).upper())
            # NOTE(review): stubbed reward, same as above.
            # sample_reward = utils.calc_True_Reward(action_tokens, qa_info, self.adaptive)
            sample_reward = random.random()
            true_reward_sample_step.append(sample_reward)
            if not dial_shown:
                log.info("Sample: %s, reward=%.4f", utils.untokenize(data.decode_words(actions, self.rev_emb_dict)),
                         sample_reward)
            net_policies.append(r_sample)
            net_actions.extend(actions)
            # Self-critic baseline: every token of this sample shares the
            # advantage (sample_reward - argmax_reward).
            net_advantages.extend([sample_reward - argmax_reward] * len(actions))
    if not net_policies:
        log.info("The net_policies is empty!")
        # FIX: return the full 5-tuple so callers can always unpack five values.
        return 0.0, total_samples, skipped_samples, true_reward_argmax_step, true_reward_sample_step
    # Logits of all sampled output tokens: N x vocab_size.
    policies_v = torch.cat(net_policies)
    policies_v = policies_v.cuda()
    # Chosen token ids (1 x N) and their advantages (1 x N).
    actions_t = torch.LongTensor(net_actions).to(self.device)
    actions_t = actions_t.cuda()
    adv_v = torch.FloatTensor(net_advantages).to(self.device)
    adv_v = adv_v.cuda()
    log_prob_v = F.log_softmax(policies_v, dim=1)
    log_prob_v = log_prob_v.cuda()
    # Policy gradient: sum_n Q_n * log p(a_n); pick each token's own log-prob.
    log_prob_actions_v = adv_v * log_prob_v[range(len(net_actions)), actions_t]
    log_prob_actions_v = log_prob_actions_v.cuda()
    # Maximizing the expected return == minimizing its negative mean (Monte Carlo).
    loss_policy_v = -log_prob_actions_v.mean()
    loss_policy_v = loss_policy_v.cuda()
    loss_v = loss_policy_v
    return loss_v, total_samples, skipped_samples, true_reward_argmax_step, true_reward_sample_step
def first_order_inner_loss(self, task, dial_shown=True, mc=False):
total_samples = 0
skipped_samples = 0
batch = list()
batch.append(task)
true_reward_argmax_step = []
true_reward_sample_step = []
# input_seq: the padded and embedded batch-sized input sequence.
# input_batch: the token ID matrix of batch-sized input sequence. Each row is corresponding to one input sentence.
# output_batch: the token ID matrix of batch-sized output sequences. Each row is corresponding to a list of several output sentences.
input_seq, input_batch, output_batch = self.net.pack_batch_no_out(batch, self.net.emb, self.device)
input_seq = input_seq.cuda()
# Get (two-layer) hidden state of encoder of samples in batch.
# enc = net.encode(input_seq)
context, enc = self.net.encode_context(input_seq)
net_policies = []
net_actions = []
net_advantages = []
net_losses = []
# Transform ID to embedding.
beg_embedding = self.net.emb(self.beg_token)
beg_embedding = beg_embedding.cuda()
for idx, inp_idx in enumerate(input_batch):
# # Test whether the input sequence is correctly transformed into indices.
# input_tokens = [rev_emb_dict[temp_idx] for temp_idx in inp_idx]
# print (input_tokens)
# Get IDs of reference sequences' tokens corresponding to idx-th input sequence in batch.
qa_info = output_batch[idx]
# print(" Support sample %s is training..." % (qa_info['qid']))
# print (qa_info['qid'])
# # Get the (two-layer) hidden state of encoder of idx-th input sequence in batch.
item_enc = self.net.get_encoded_item(enc, idx)
# # 'r_argmax' is the list of out_logits list and 'actions' is the list of output tokens.
# # The output tokens are generated greedily by using chain_argmax (using last setp's output token as current input token).
r_argmax, actions = self.net.decode_chain_argmax(item_enc, beg_embedding, data.MAX_TOKENS, context[idx],
stop_at_token=self.end_token)
# Show what the output action sequence is.
action_tokens = []
for temp_idx in actions:
if temp_idx in self.rev_emb_dict and self.rev_emb_dict.get(temp_idx) != '#END':
action_tokens.append(str(self.rev_emb_dict.get(temp_idx)).upper())
# Get the highest BLEU score as baseline used in self-critic.
# If the last parameter is false, it means that the 0-1 reward is used to calculate the accuracy.
# Otherwise the adaptive reward is used.
argmax_reward = utils.calc_True_Reward(action_tokens, qa_info, self.adaptive)
# argmax_reward = random.random()
true_reward_argmax_step.append(argmax_reward)
# # In this case, the BLEU score is so high that it is not needed to train such case with RL.
# if not args.disable_skip and argmax_reward > 0.99:
# skipped_samples += 1
# continue
# In one epoch, when model is optimized for the first time, the optimized result is displayed here.
# After that, all samples in this epoch don't display anymore.
if not dial_shown:
# data.decode_words transform IDs to tokens.
log.info("Input: %s", utils.untokenize(data.decode_words(inp_idx, self.rev_emb_dict)))
log.info("Argmax: %s, reward=%.4f", utils.untokenize(data.decode_words(actions, self.rev_emb_dict)),
argmax_reward)
action_memory = list()
sample_losses = []
for _ in range(self.samples):
# Monte-carlo: the data for each task in a batch of tasks.
inner_net_policies = []
inner_net_actions = []
inner_net_advantages = []
# 'r_sample' is the list of out_logits list and 'actions' is the list of output tokens.
# The output tokens are sampled following probabilities by using chain_sampling.
r_sample, actions = self.net.decode_chain_sampling(item_enc, beg_embedding, data.MAX_TOKENS,
context[idx], stop_at_token=self.end_token)
total_samples += 1
# Omit duplicate action sequence to decrease the computing time and to avoid the case that
# the probability of such kind of duplicate action sequences would be increased redundantly and abnormally.
duplicate_flag = False
if len(action_memory) > 0:
for temp_list in action_memory:
if utils.duplicate(temp_list, actions):
duplicate_flag = True
break
if not duplicate_flag:
action_memory.append(actions)
else:
skipped_samples += 1
continue
# Show what the output action sequence is.
action_tokens = []
for temp_idx in actions:
if temp_idx in self.rev_emb_dict and self.rev_emb_dict.get(temp_idx) != '#END':
action_tokens.append(str(self.rev_emb_dict.get(temp_idx)).upper())
# If the last parameter is false, it means that the 0-1 reward is used to calculate the accuracy.
# Otherwise the adaptive reward is used.
sample_reward = utils.calc_True_Reward(action_tokens, qa_info, self.adaptive)
# sample_reward = random.random()
true_reward_sample_step.append(sample_reward)
advantages = [sample_reward - argmax_reward] * len(actions)
if not dial_shown:
log.info("Sample: %s, reward=%.4f", utils.untokenize(data.decode_words(actions, self.rev_emb_dict)),
sample_reward)
if not mc:
net_policies.append(r_sample)
net_actions.extend(actions)
# Regard argmax_bleu calculated from decode_chain_argmax as baseline used in self-critic.
# Each token has same reward as 'sample_bleu - argmax_bleu'.
# [x] * y: stretch 'x' to [1*y] list in which each element is 'x'.
# # If the argmax_reward is 1.0, then whatever the sample_reward is,
# # the probability of actions that get reward = 1.0 could not be further updated.
# # The GAMMA is used to adjust this scenario.
# if argmax_reward == 1.0:
# net_advantages.extend([sample_reward - argmax_reward + GAMMA] * len(actions))
# else:
# net_advantages.extend([sample_reward - argmax_reward] * len(actions))
net_advantages.extend(advantages)
else:
inner_net_policies.append(r_sample)
inner_net_actions.extend(actions)
inner_net_advantages.extend(advantages)
if mc:
inner_policies_v = torch.cat(inner_net_policies).to(self.device)
# Indices of all output tokens whose size is 1 * N;
inner_actions_t = torch.LongTensor(inner_net_actions).to(self.device)
# All output tokens reward whose size is 1 *pack_batch N;
inner_adv_v = torch.FloatTensor(inner_net_advantages).to(self.device)
# Compute log(softmax(logits)) of all output tokens in size of N * output vocab size;
inner_log_prob_v = F.log_softmax(inner_policies_v, dim=1).to(self.device)
# Q_1 = Q_2 =...= Q_n = BLEU(OUT,REF);
# ▽J = Σ_n[Q▽logp(T)] = ▽Σ_n[Q*logp(T)] = ▽[Q_1*logp(T_1)+Q_2*logp(T_2)+...+Q_n*logp(T_n)];
# log_prob_v[range(len(net_actions)), actions_t]: for each output, get the output token's log(softmax(logits)).
# adv_v * log_prob_v[range(len(net_actions)), actions_t]:
# get Q * logp(T) for all tokens of all decode_chain_sampling samples in size of 1 * N;
inner_log_prob_actions_v = inner_adv_v * inner_log_prob_v[
range(len(inner_net_actions)), inner_actions_t].to(self.device)
# For the optimizer is Adam (Adaptive Moment Estimation) which is a optimizer used for gradient descent.
# Therefore, to maximize ▽J (log_prob_actions_v) is to minimize -▽J.
# .sum() is calculate the loss for a sample.
inner_sample_loss_policy_v = -inner_log_prob_actions_v.sum().to(self.device)
sample_losses.append(inner_sample_loss_policy_v)
if mc:
task_loss = torch.stack(sample_losses).to(self.device)
inner_task_loss_policy_v = task_loss.mean().to(self.device)
# Record the loss for each task in a batch.
net_losses.append(inner_task_loss_policy_v)
if not net_losses and not net_policies:
log.info("The net_policies is empty!")
# TODO the format of 0.0 should be the same as loss_v.
return 0.0, total_samples, skipped_samples
if not mc:
# Data for decode_chain_sampling samples and the number of such samples is the same as args.samples parameter.
# Logits of all output tokens whose size is N * output vocab size; N is the number of output tokens of decode_chain_sampling samples.
policies_v = torch.cat(net_policies)
policies_v = policies_v.cuda()
# Indices of all output tokens whose size is 1 * N;
actions_t = torch.LongTensor(net_actions).to(self.device)
actions_t = actions_t.cuda()
# All output tokens reward whose size is 1 * N;
adv_v = torch.FloatTensor(net_advantages).to(self.device)
adv_v = adv_v.cuda()
# Compute log(softmax(logits)) of all output tokens in size of N * output vocab size;
log_prob_v = F.log_softmax(policies_v, dim=1)
log_prob_v = log_prob_v.cuda()
# Q_1 = Q_2 =...= Q_n = BLEU(OUT,REF);
# ▽J = Σ_n[Q▽logp(T)] = ▽Σ_n[Q*logp(T)] = ▽[Q_1*logp(T_1)+Q_2*logp(T_2)+...+Q_n*logp(T_n)];
# log_prob_v[range(len(net_actions)), actions_t]: for each output, get the output token's log(softmax(logits)).
# adv_v * log_prob_v[range(len(net_actions)), actions_t]:
# get Q * logp(T) for all tokens of all decode_chain_sampling samples in size of 1 * N;
# Suppose log_prob_v is a two-dimensional tensor, value of which is [[1,2,3],[4,5,6],[7,8,9]];
# log_prob_actions_v = log_prob_v[[0,1,2], [0,1,2]]
# log_prob_actions_v is: tensor([1, 5, 9], device='cuda:0').
log_prob_actions_v = adv_v * log_prob_v[range(len(net_actions)), actions_t]
log_prob_actions_v = log_prob_actions_v.cuda()
# For the optimizer is Adam (Adaptive Moment Estimation) which is a optimizer used for gradient descent.
# Therefore, to maximize ▽J (log_prob_actions_v) is to minimize -▽J.
# .mean() is to calculate Monte Carlo sampling.
loss_policy_v = -log_prob_actions_v.mean()
loss_policy_v = loss_policy_v.cuda()
else:
batch_net_losses = torch.stack(net_losses).to(self.device)
# .mean() is utilized to calculate Mini-Batch Gradient Descent.
loss_policy_v = batch_net_losses.mean().to(self.device)
loss_v = loss_policy_v
return loss_v, total_samples, skipped_samples, true_reward_argmax_step, true_reward_sample_step
def reparam_inner_loss(self, task, weights=None, dial_shown=True):
total_samples = 0
skipped_samples = 0
batch = list()
batch.append(task)
true_reward_argmax_step = []
true_reward_sample_step = []
# Using input parameters to insert into the model.
self.reparam_net.set_parameter_buffer(weights)
# input_seq: the padded and embedded batch-sized input sequence.
# input_batch: the token ID matrix of batch-sized input sequence. Each row is corresponding to one input sentence.
# output_batch: the token ID matrix of batch-sized output sequences. Each row is corresponding to a list of several output sentences.
input_seq, input_batch, output_batch = self.net.pack_batch_no_out(batch, self.reparam_net.module.emb, self.device)
input_seq = input_seq.cuda()
# Get (two-layer) hidden state of encoder of samples in batch.
# enc = net.encode(input_seq)
context, enc = self.reparam_net.module.encode_context(input_seq)
net_policies = []
net_actions = []
net_advantages = []
# Transform ID to embedding.
beg_embedding = self.reparam_net.module.emb(self.beg_token)
beg_embedding = beg_embedding.cuda()
for idx, inp_idx in enumerate(input_batch):
# # Test whether the input sequence is correctly transformed into indices.
# input_tokens = [rev_emb_dict[temp_idx] for temp_idx in inp_idx]
# print (input_tokens)
# Get IDs of reference sequences' tokens corresponding to idx-th input sequence in batch.
qa_info = output_batch[idx]
# print(" Support sample %s is training..." % (qa_info['qid']))
# print (qa_info['qid'])
# # Get the (two-layer) hidden state of encoder of idx-th input sequence in batch.
item_enc = self.reparam_net.module.get_encoded_item(enc, idx)
# # 'r_argmax' is the list of out_logits list and 'actions' is the list of output tokens.
# # The output tokens are generated greedily by using chain_argmax (using last setp's output token as current input token).
r_argmax, actions = self.reparam_net.module.decode_chain_argmax(item_enc, beg_embedding, data.MAX_TOKENS, context[idx],
stop_at_token=self.end_token)
# Show what the output action sequence is.
action_tokens = []
for temp_idx in actions:
if temp_idx in self.rev_emb_dict and self.rev_emb_dict.get(temp_idx) != '#END':
action_tokens.append(str(self.rev_emb_dict.get(temp_idx)).upper())
# Get the highest BLEU score as baseline used in self-critic.
# If the last parameter is false, it means that the 0-1 reward is used to calculate the accuracy.
# Otherwise the adaptive reward is used.
# argmax_reward = utils.calc_True_Reward(action_tokens, qa_info, self.adaptive)
argmax_reward = random.random()
true_reward_argmax_step.append(argmax_reward)
# # In this case, the BLEU score is so high that it is not needed to train such case with RL.
# if not args.disable_skip and argmax_reward > 0.99:
# skipped_samples += 1
# continue
# In one epoch, when model is optimized for the first time, the optimized result is displayed here.
# After that, all samples in this epoch don't display anymore.
if not dial_shown:
# data.decode_words transform IDs to tokens.
log.info("Input: %s", utils.untokenize(data.decode_words(inp_idx, self.rev_emb_dict)))
log.info("Argmax: %s, reward=%.4f", utils.untokenize(data.decode_words(actions, self.rev_emb_dict)),
argmax_reward)
action_memory = list()
for _ in range(self.samples):
# 'r_sample' is the list of out_logits list and 'actions' is the list of output tokens.
# The output tokens are sampled following probabilitis by using chain_sampling.
r_sample, actions = self.reparam_net.module.decode_chain_sampling(item_enc, beg_embedding, data.MAX_TOKENS,
context[idx], stop_at_token=self.end_token)
total_samples += 1
# Omit duplicate action sequence to decrease the computing time and to avoid the case that
# the probability of such kind of duplicate action sequences would be increased redundantly and abnormally.
duplicate_flag = False
if len(action_memory) > 0:
for temp_list in action_memory:
if utils.duplicate(temp_list, actions):
duplicate_flag = True
break
if not duplicate_flag:
action_memory.append(actions)
else:
skipped_samples += 1
continue
# Show what the output action sequence is.
action_tokens = []
for temp_idx in actions:
if temp_idx in self.rev_emb_dict and self.rev_emb_dict.get(temp_idx) != '#END':
action_tokens.append(str(self.rev_emb_dict.get(temp_idx)).upper())
# If the last parameter is false, it means that the 0-1 reward is used to calculate the accuracy.
# Otherwise the adaptive reward is used.
# sample_reward = utils.calc_True_Reward(action_tokens, qa_info, self.adaptive)
sample_reward = random.random()
true_reward_sample_step.append(sample_reward)
if not dial_shown:
log.info("Sample: %s, reward=%.4f", utils.untokenize(data.decode_words(actions, self.rev_emb_dict)),
sample_reward)
net_policies.append(r_sample)
net_actions.extend(actions)
# Regard argmax_bleu calculated from decode_chain_argmax as baseline used in self-critic.
# Each token has same reward as 'sample_bleu - argmax_bleu'.
# [x] * y: stretch 'x' to [1*y] list in which each element is 'x'.
# # If the argmax_reward is 1.0, then whatever the sample_reward is,
# # the probability of actions that get reward = 1.0 could not be further updated.
# # The GAMMA is used to adjust this scenario.
# if argmax_reward == 1.0:
# net_advantages.extend([sample_reward - argmax_reward + GAMMA] * len(actions))
# else:
# net_advantages.extend([sample_reward - argmax_reward] * len(actions))
net_advantages.extend([sample_reward - argmax_reward] * len(actions))
if not net_policies:
log.info("The net_policies is empty!")
# TODO the format of 0.0 should be the same as loss_v.
return 0.0, total_samples, skipped_samples
# Data for decode_chain_sampling samples and the number of such samples is the same as args.samples parameter.
# Logits of all output tokens whose size is N * output vocab size; N is the number of output tokens of decode_chain_sampling samples.
policies_v = torch.cat(net_policies)
policies_v = policies_v.cuda()
# Indices of all output tokens whose size is 1 * N;
actions_t = torch.LongTensor(net_actions).to(self.device)
actions_t = actions_t.cuda()
# All output tokens reward whose size is 1 * N;
adv_v = torch.FloatTensor(net_advantages).to(self.device)
adv_v = adv_v.cuda()
# Compute log(softmax(logits)) of all output tokens in size of N * output vocab size;
log_prob_v = F.log_softmax(policies_v, dim=1)
log_prob_v = log_prob_v.cuda()
# Q_1 = Q_2 =...= Q_n = BLEU(OUT,REF);
# ▽J = Σ_n[Q▽logp(T)] = ▽Σ_n[Q*logp(T)] = ▽[Q_1*logp(T_1)+Q_2*logp(T_2)+...+Q_n*logp(T_n)];
# log_prob_v[range(len(net_actions)), actions_t]: for each output, get the output token's log(softmax(logits)).
# adv_v * log_prob_v[range(len(net_actions)), actions_t]:
# get Q * logp(T) for all tokens of all decode_chain_sampling samples in size of 1 * N;
log_prob_actions_v = adv_v * log_prob_v[range(len(net_actions)), actions_t]
log_prob_actions_v = log_prob_actions_v.cuda()
# For the optimizer is Adam (Adaptive Moment Estimation) which is a optimizer used for gradient descent.
# Therefore, to maximize ▽J (log_prob_actions_v) is to minimize -▽J.
# .mean() is to calculate Monte Carlo sampling.
loss_policy_v = -log_prob_actions_v.mean()
loss_policy_v = loss_policy_v.cuda()
loss_v = loss_policy_v
self.reparam_net.reset_initial_parameter_buffer()
return loss_v, total_samples, skipped_samples, true_reward_argmax_step, true_reward_sample_step
    def sample(self, tasks, first_order=False, dial_shown=True, epoch_count=0, batch_count=0):
        """Sample trajectories (before and after the update of the parameters)
        for all the tasks `tasks`.
        Here number of tasks is 8.

        For every task: run one functional inner-loop update per support sample
        (via self.update_params, which returns a new weight dict rather than
        stepping an optimizer), then evaluate the meta (query) loss on the task
        itself with the adapted weights.

        Returns (mean meta-loss over tasks, total_samples, skipped_samples,
        true_reward_argmax_batch, true_reward_sample_batch).
        """
        task_losses = []
        true_reward_argmax_batch = []
        true_reward_sample_batch = []
        total_samples = 0
        skipped_samples = 0
        self.net.zero_grad()
        # To get copied weights of the model for inner training.
        initial_names_weights_copy = self.get_inner_loop_parameter_dict(self.net.named_parameters())
        for task in tasks:
            # names_weights_copy = self.get_inner_loop_parameter_dict(self.net.named_parameters())
            # NOTE(review): this rebinds the SAME dict object for every task; it is only a
            # true per-task "copy" if update_params returns a fresh dict instead of
            # mutating its argument — confirm against update_params.
            names_weights_copy = initial_names_weights_copy
            log.info("Task %s is training..." % (str(task[1]['qid'])))
            # Establish support set.
            support_set = self.establish_support_set(task, self.steps, self.weak_flag, self.train_data_support_944K)
            for step_sample in support_set:
                inner_loss, inner_total_samples, inner_skipped_samples, true_reward_argmax_step, true_reward_sample_step = self.inner_loss(step_sample, weights=names_weights_copy, dial_shown=True)
                total_samples += inner_total_samples
                skipped_samples += inner_skipped_samples
                true_reward_argmax_batch.extend(true_reward_argmax_step)
                true_reward_sample_batch.extend(true_reward_sample_step)
                log.info("    Epoch %d, Batch %d, support sample %s is trained!" % (epoch_count, batch_count, str(step_sample[1]['qid'])))
                # Get the new parameters after a one-step gradient update
                # Each module parameter is computed as parameter = parameter - step_size * grad.
                # When being saved in the OrderedDict of self.named_parameters(), it likes:
                # OrderedDict([('sigma', Parameter containing:
                # tensor([0.6931, 0.6931], requires_grad=True)), ('0.weight', Parameter containing:
                # tensor([[1., 1.],
                #         [1., 1.]], requires_grad=True)), ('0.bias', Parameter containing:
                # tensor([0., 0.], requires_grad=True)), ('1.weight', Parameter containing:
                # tensor([[1., 1.],
                #         [1., 1.]], requires_grad=True)), ('1.bias', Parameter containing:
                # tensor([0., 0.], requires_grad=True))])
                names_weights_copy = self.update_params(inner_loss, names_weights_copy=names_weights_copy, step_size=self.fast_lr, first_order=first_order)
            # Query/meta loss evaluated with the adapted (post-inner-loop) weights.
            meta_loss, outer_total_samples, outer_skipped_samples, true_reward_argmax_step, true_reward_sample_step = self.inner_loss(task, weights=names_weights_copy, dial_shown=dial_shown)
            task_losses.append(meta_loss)
            total_samples += outer_total_samples
            skipped_samples += outer_skipped_samples
            true_reward_argmax_batch.extend(true_reward_argmax_step)
            true_reward_sample_batch.extend(true_reward_sample_step)
            log.info("Epoch %d, Batch %d, task %s is trained!" % (epoch_count, batch_count, str(task[1]['qid'])))
        # Average the per-task meta losses for the outer (meta) update.
        meta_losses = torch.mean(torch.stack(task_losses))
        return meta_losses, total_samples, skipped_samples, true_reward_argmax_batch, true_reward_sample_batch
    # Using first-order gradients to approximate the result of second-order MAML.
    def first_order_sample(self, tasks, old_param_dict = None, first_order=False, dial_shown=True, epoch_count=0, batch_count=0, mc=False):
        """
        Sample trajectories (before and after the update of the parameters) for all the tasks `tasks`.
        Here number of tasks is 1.

        For each task: reset the network to `old_param_dict`, adapt it on the
        support set with self.inner_optimizer, then compute the query loss and
        collect its gradients per parameter name into `grads_list` (one entry
        per task) for the caller to apply as the meta update.

        Returns (summed meta-loss, grads_list, total_samples, skipped_samples,
        true_reward_argmax_batch, true_reward_sample_batch).
        """
        task_losses = []
        # Maps parameter name -> list of per-task query-loss gradients.
        grads_list = {}
        true_reward_argmax_batch = []
        true_reward_sample_batch = []
        total_samples = 0
        skipped_samples = 0
        for task in tasks:
            # For each task, the initial parameters are the same, i.e., the value stored in old_param_dict.
            # temp_param_dict = self.get_net_parameter()
            if old_param_dict is not None:
                self.net.insert_new_parameter_to_layers(old_param_dict)
            # temp_param_dict = self.get_net_parameter()
            # Try to solve the bug: "UserWarning: RNN module weights are not part of single contiguous chunk of memory".
            self.net.encoder.flatten_parameters()
            self.net.decoder.flatten_parameters()
            self.net.zero_grad()
            # log.info("Task %s is training..." % (str(task[1]['qid'])))
            # Establish support set.
            support_set = self.establish_support_set(task, self.steps, self.weak_flag, self.train_data_support_944K)
            for step_sample in support_set:
                self.inner_optimizer.zero_grad()
                inner_loss, inner_total_samples, inner_skipped_samples, true_reward_argmax_step, true_reward_sample_step = self.first_order_inner_loss(step_sample, dial_shown=True, mc=mc)
                total_samples += inner_total_samples
                skipped_samples += inner_skipped_samples
                true_reward_argmax_batch.extend(true_reward_argmax_step)
                true_reward_sample_batch.extend(true_reward_sample_step)
                # log.info("    Epoch %d, Batch %d, support sample %s is trained!" % (epoch_count, batch_count, str(step_sample[1]['qid'])))
                # Inner update.
                inner_loss.backward()
                # To conduct a gradient ascent to minimize the loss (which is to maximize the reward).
                # optimizer.step updates the value of x using the gradient x.grad.
                # For example, the SGD optimizer performs:
                # x += -lr * x.grad
                self.inner_optimizer.step()
                # temp_param_dict = self.get_net_parameter()
                # optimizer.zero_grad() clears x.grad for every parameter x in the optimizer.
                # It’s important to call this before loss.backward(),
                # otherwise you’ll accumulate the gradients from multiple passes.
                self.inner_optimizer.zero_grad()
            # Query/meta loss on the task itself, evaluated after the inner adaptation.
            meta_loss, outer_total_samples, outer_skipped_samples, true_reward_argmax_step, true_reward_sample_step = self.first_order_inner_loss(task, dial_shown=dial_shown, mc=mc)
            task_losses.append(meta_loss)
            self.net.zero_grad()
            # Theta <- Theta - beta * (1/k) * ∑_(i=1:k)[grad(loss_theta_in/theta_in)]
            grads = torch.autograd.grad(meta_loss, self.net.grad_parameters())
            # NOTE(review): grads_list is created as a dict above, so this isinstance
            # guard can never fire; kept as-is since this is a documentation-only pass.
            if not isinstance(grads_list, dict):
                grads_list = {}
            if isinstance(grads_list, dict) and len(grads_list) == 0:
                # First task: create one gradient list per parameter name.
                for (name, _), grad in zip(self.net.grad_named_parameters(), grads):
                    grads_list[name] = []
                    grads_list[name].append(grad.clone().detach())
            else:
                # Subsequent tasks: append this task's gradients to each list.
                for (name, _), grad in zip(self.net.grad_named_parameters(), grads):
                    grads_list[name].append(grad.clone().detach())
            total_samples += outer_total_samples
            skipped_samples += outer_skipped_samples
            true_reward_argmax_batch.extend(true_reward_argmax_step)
            true_reward_sample_batch.extend(true_reward_sample_step)
            log.info("Epoch %d, Batch %d, task %s is trained!" % (epoch_count, batch_count, str(task[1]['qid'])))
        # Sum (not mean) of per-task query losses; reported to the caller for logging.
        meta_losses = torch.sum(torch.stack(task_losses))
        return meta_losses, grads_list, total_samples, skipped_samples, true_reward_argmax_batch, true_reward_sample_batch
    # Using the Reptile algorithm to implement meta-learning (MAML-style).
    def reptile_sample(self, tasks, old_param_dict=None, dial_shown=True, epoch_count=0,
                       batch_count=0, docID_dict=None, rev_docID_dict=None, emb_dict=None, qtype_docs_range=None, random=False, monte_carlo=False):
        """Sample trajectories (before and after the update of the parameters)
        for all the tasks `tasks`.
        Here number of tasks is 1.

        Reptile: for each task, reset the network to `old_param_dict`, run SGD
        steps on the support set AND one step on the task itself with
        self.inner_optimizer, then record the resulting parameter values into
        `running_vars` (per parameter name, one entry per task).  The caller is
        expected to combine these values into the meta update.

        Returns (summed meta-loss, running_vars, total_samples, skipped_samples,
        true_reward_argmax_batch, true_reward_sample_batch).
        """
        task_losses = []
        true_reward_argmax_batch = []
        true_reward_sample_batch = []
        total_samples = 0
        skipped_samples = 0
        # Maps parameter name -> list of post-adaptation parameter tensors, one per task.
        running_vars = {}
        for task in tasks:
            # For each task, the initial parameters are the same, i.e., the value stored in old_param_dict.
            # temp_param_dict = self.get_net_parameter()
            if old_param_dict is not None:
                self.net.insert_new_parameter_to_layers(old_param_dict)
            # temp_param_dict = self.get_net_parameter()
            # Try to solve the bug: "UserWarning: RNN module weights are not part of single contiguous chunk of memory".
            self.net.encoder.flatten_parameters()
            self.net.decoder.flatten_parameters()
            self.net.zero_grad()
            # log.info("Task %s for reptile is training..." % (str(task[1]['qid'])))
            # Establish support set.
            # If random_flag == True, randomly select support samples in the same question category.
            if random:
                support_set = self.establish_random_support_set(task=task, N=self.steps, train_data_support_944K=self.train_data_support_944K)
            else:
                support_set = self.establish_support_set(task=task, N=self.steps, weak=self.weak_flag, train_data_support_944K=self.train_data_support_944K)
            # support_set_sample, logprob_list = self.establish_support_set_by_retriever_sampling(task=task, N=self.steps, train_data_support_944K=self.train_data_support_944K,docID_dict=docID_dict, rev_docID_dict=rev_docID_dict, emb_dict=emb_dict, qtype_docs_range=qtype_docs_range)
            # support_set = self.establish_support_set_by_retriever_argmax(task=task, N=self.steps, train_data_support_944K=self.train_data_support_944K, docID_dict=docID_dict, rev_docID_dict=rev_docID_dict, emb_dict=emb_dict, qtype_docs_range=qtype_docs_range)
            for step_sample in support_set:
                self.inner_optimizer.zero_grad()
                inner_loss, inner_total_samples, inner_skipped_samples, true_reward_argmax_step, true_reward_sample_step = self.first_order_inner_loss(
                    step_sample, dial_shown=True, mc=monte_carlo)
                total_samples += inner_total_samples
                skipped_samples += inner_skipped_samples
                true_reward_argmax_batch.extend(true_reward_argmax_step)
                true_reward_sample_batch.extend(true_reward_sample_step)
                # log.info("    Epoch %d, Batch %d, support sample %s is trained!" % (epoch_count, batch_count, str(step_sample[1]['qid'])))
                # Inner update.
                inner_loss.backward()
                # To conduct a gradient ascent to minimize the loss (which is to maximize the reward).
                # optimizer.step updates the value of x using the gradient x.grad.
                # For example, the SGD optimizer performs:
                # x += -lr * x.grad
                self.inner_optimizer.step()
                # temp_param_dict = self.get_net_parameter()
                # optimizer.zero_grad() clears x.grad for every parameter x in the optimizer.
                # It’s important to call this before loss.backward(),
                # otherwise you’ll accumulate the gradients from multiple passes.
                self.inner_optimizer.zero_grad()
            self.net.zero_grad()
            # Unlike first_order_sample, Reptile also steps the optimizer on the task itself.
            meta_loss, outer_total_samples, outer_skipped_samples, true_reward_argmax_step, true_reward_sample_step = self.first_order_inner_loss(task, dial_shown=dial_shown, mc=monte_carlo)
            task_losses.append(meta_loss)
            meta_loss.backward()
            # To conduct a gradient ascent to minimize the loss (which is to maximize the reward).
            # optimizer.step updates the value of x using the gradient x.grad.
            # For example, the SGD optimizer performs:
            # x += -lr * x.grad
            self.inner_optimizer.step()
            # temp_param_dict = self.get_net_parameter()
            # Store the parameters of model for each meta gradient update step (each task).
            if running_vars == {}:
                for name, param in self.get_net_named_parameter().items():
                    running_vars[name] = []
                    running_vars[name].append(param.data)
            else:
                for name, param in self.get_net_named_parameter().items():
                    # Add up the value of each parameter of the model in each meta gradient update step.
                    running_vars[name].append(param.data)
            total_samples += outer_total_samples
            skipped_samples += outer_skipped_samples
            true_reward_argmax_batch.extend(true_reward_argmax_step)
            true_reward_sample_batch.extend(true_reward_sample_step)
            log.info("Epoch %d, Batch %d, task %s is trained!" % (epoch_count, batch_count, str(task[1]['qid'])))
        # Sum (not mean) of per-task query losses; reported to the caller for logging.
        meta_losses = torch.sum(torch.stack(task_losses))
        return meta_losses, running_vars, total_samples, skipped_samples, true_reward_argmax_batch, true_reward_sample_batch
    # Train the support-set retriever with REINFORCE, using the QA model's
    # post-adaptation reward as the training signal.
    def retriever_sample(self, tasks, old_param_dict=None, dial_shown=True, epoch_count=0,
                         batch_count=0, docID_dict=None, rev_docID_dict=None, emb_dict=None, qtype_docs_range=None, number_of_supportsets=5, mc=False, device='cpu'):
        """Sample trajectories (before and after the update of the parameters)
        for all the tasks `tasks`.
        Here number of tasks is 1.

        Self-critic scheme over support sets: the reward obtained after adapting
        on the retriever's ARGMAX support set is the baseline; each of
        `number_of_supportsets` SAMPLED support sets is scored by the reward the
        adapted QA model achieves on the task, and the advantage weights the
        retriever's log-probabilities.  The QA model is reset to `old_param_dict`
        before every adaptation so support sets are compared fairly.

        Returns (retriever loss, argmax rewards, sample rewards,
        total retriever samples, skipped retriever samples).
        """
        retriever_true_reward_argmax_batch = []
        retriever_true_reward_sample_batch = []
        retriever_net_policies = []
        retriever_net_advantages = []
        retriever_total_samples = 0
        retriever_skipped_samples = 0
        for task in tasks:
            # log.info("Task %s for retriever is training..." % (str(task[1]['qid'])))
            # Argmax as baseline.
            # For each task, the initial parameters are the same, i.e., the value stored in old_param_dict.
            # temp_param_dict = self.get_net_parameter()
            if old_param_dict is not None:
                self.net.insert_new_parameter_to_layers(old_param_dict)
            # temp_param_dict = self.get_net_parameter()
            # Try to solve the bug: "UserWarning: RNN module weights are not part of single contiguous chunk of memory".
            self.net.encoder.flatten_parameters()
            self.net.decoder.flatten_parameters()
            self.net.zero_grad()
            support_set = self.establish_support_set_by_retriever_argmax(task=task, N=self.steps, train_data_support_944K=self.train_data_support_944K, docID_dict=docID_dict, rev_docID_dict=rev_docID_dict, emb_dict=emb_dict, qtype_docs_range=qtype_docs_range)
            for step_sample in support_set:
                self.inner_optimizer.zero_grad()
                inner_loss, _, _, _, _ = self.first_order_inner_loss(
                    step_sample, dial_shown=True, mc=mc)
                # log.info("    Epoch %d, Batch %d, support sample for argmax_reward %s is trained!" % (epoch_count, batch_count, str(step_sample[1]['qid'])))
                # Inner update.
                inner_loss.backward()
                self.inner_optimizer.step()
            # temp_param_dict = self.get_net_parameter()
            input_seq = self.net.pack_input(task[0], self.net.emb)
            # enc = net.encode(input_seq)
            context, enc = self.net.encode_context(input_seq)
            # Always use the first token in input sequence, which is '#BEG', as the initial input of decoder.
            _, actions = self.net.decode_chain_argmax(enc, input_seq.data[0:1],
                                                      seq_len=data.MAX_TOKENS, context=context[0],
                                                      stop_at_token=self.end_token)
            # Reconstruct the output action sequence as upper-cased tokens (dropping '#END').
            action_tokens = []
            for temp_idx in actions:
                if temp_idx in self.rev_emb_dict and self.rev_emb_dict.get(temp_idx) != '#END':
                    action_tokens.append(str(self.rev_emb_dict.get(temp_idx)).upper())
            # Baseline reward: performance after adapting on the argmax-retrieved support set.
            # If the last parameter is false, it means that the 0-1 reward is used to calculate the accuracy.
            # Otherwise the adaptive reward is used.
            retriever_argmax_reward = utils.calc_True_Reward(action_tokens, task[1], self.adaptive)
            # retriever_argmax_reward = random.random()
            retriever_true_reward_argmax_batch.append(retriever_argmax_reward)
            # Reward for each sampling support set.
            # Establish support set.
            support_sets, logprob_lists, total_samples, skip_samples = self.establish_support_set_by_retriever_sampling(task=task, N=self.steps,
                                                                                                                        train_data_support_944K=self.train_data_support_944K,
                                                                                                                        docID_dict=docID_dict,
                                                                                                                        rev_docID_dict=rev_docID_dict,
                                                                                                                        emb_dict=emb_dict,
                                                                                                                        qtype_docs_range=qtype_docs_range,number_of_supportsets=number_of_supportsets)
            if not mc:
                retriever_net_policies.append(torch.cat(logprob_lists))
            retriever_total_samples += total_samples
            retriever_skipped_samples += skip_samples
            support_set_count = 0
            net_losses = []
            for j, support_set in enumerate(support_sets):
                # For each sampled support set, restart from the same initial parameters
                # (old_param_dict) so the resulting rewards are comparable.
                # temp_param_dict = self.get_net_parameter()
                if old_param_dict is not None:
                    self.net.insert_new_parameter_to_layers(old_param_dict)
                # temp_param_dict = self.get_net_parameter()
                # Try to solve the bug: "UserWarning: RNN module weights are not part of single contiguous chunk of memory".
                self.net.encoder.flatten_parameters()
                self.net.decoder.flatten_parameters()
                self.net.zero_grad()
                for step_sample in support_set:
                    self.inner_optimizer.zero_grad()
                    inner_loss, _, _, _, _ = self.first_order_inner_loss(
                        step_sample, dial_shown=True, mc=mc)
                    # log.info("    Epoch %d, Batch %d, support sets %d, sample for sample_reward %s is trained!" % (epoch_count, batch_count, support_set_count, str(step_sample[1]['qid'])))
                    # Inner update.
                    inner_loss.backward()
                    # To conduct a gradient ascent to minimize the loss (which is to maximize the reward).
                    # optimizer.step updates the value of x using the gradient x.grad.
                    # For example, the SGD optimizer performs:
                    # x += -lr * x.grad
                    self.inner_optimizer.step()
                    # temp_param_dict = self.get_net_parameter()
                support_set_count += 1
                input_seq = self.net.pack_input(task[0], self.net.emb)
                # enc = net.encode(input_seq)
                context, enc = self.net.encode_context(input_seq)
                # Always use the first token in input sequence, which is '#BEG', as the initial input of decoder.
                _, actions = self.net.decode_chain_argmax(enc, input_seq.data[0:1],
                                                          seq_len=data.MAX_TOKENS, context=context[0],
                                                          stop_at_token=self.end_token)
                # Reconstruct the output action sequence as upper-cased tokens (dropping '#END').
                action_tokens = []
                for temp_idx in actions:
                    if temp_idx in self.rev_emb_dict and self.rev_emb_dict.get(temp_idx) != '#END':
                        action_tokens.append(str(self.rev_emb_dict.get(temp_idx)).upper())
                # Reward after adapting on this sampled support set.
                # If the last parameter is false, it means that the 0-1 reward is used to calculate the accuracy.
                # Otherwise the adaptive reward is used.
                retriever_sample_reward = utils.calc_True_Reward(action_tokens, task[1], self.adaptive)
                # retriever_sample_reward = random.random()
                retriever_true_reward_sample_batch.append(retriever_sample_reward)
                # Every retrieved sample in this support set shares the same advantage.
                advantages = [retriever_sample_reward - retriever_argmax_reward] * len(support_set)
                if not mc:
                    retriever_net_advantages.extend(advantages)
                else:
                    inner_adv_v = torch.FloatTensor(advantages).to(device)
                    inner_log_prob_v = logprob_lists[j].to(device)
                    inner_log_prob_adv_v = inner_log_prob_v * inner_adv_v
                    inner_log_prob_adv_v = inner_log_prob_adv_v.to(device)
                    inner_loss_policy_v = -inner_log_prob_adv_v.sum()
                    inner_loss_policy_v = inner_loss_policy_v.to(device)
                    net_losses.append(inner_loss_policy_v)
            log.info("Epoch %d, Batch %d, task %s for retriever is trained!" % (epoch_count, batch_count, str(task[1]['qid'])))
        if not mc:
            # REINFORCE over all retrieved samples at once: -mean(advantage * logprob).
            log_prob_v = torch.cat(retriever_net_policies)
            log_prob_v = log_prob_v.cuda()
            adv_v = torch.FloatTensor(retriever_net_advantages)
            adv_v = adv_v.cuda()
            log_prob_actions_v = log_prob_v * adv_v
            log_prob_actions_v = log_prob_actions_v.cuda()
            loss_policy_v = -log_prob_actions_v.mean()
            loss_policy_v = loss_policy_v.cuda()
        else:
            # Monte-Carlo variant: mean of the per-support-set summed losses.
            batch_net_losses = torch.stack(net_losses).to(device)
            loss_policy_v = batch_net_losses.mean().to(device)
        loss_v = loss_policy_v
        return loss_v, retriever_true_reward_argmax_batch, retriever_true_reward_sample_batch, retriever_total_samples, retriever_skipped_samples
    # Using the reparameterized network to compute the second-order derivatives required by MAML.
def reparam_sample(self, tasks, first_order=False, dial_shown=True, epoch_count=0, batch_count=0):
"""Sample trajectories (before and after the update of the parameters)
for all the tasks `tasks`.
"""
task_losses = []
true_reward_argmax_batch = []
true_reward_sample_batch = []
total_samples = 0
skipped_samples = 0
self.net.zero_grad()
for task in tasks:
log.info("Task %s is training..." % (str(task[1]['qid'])))
# Establish support set.
support_set = self.establish_support_set(task, self.steps, self.weak_flag, self.train_data_support_944K)
theta_0 = self.reparam_net.flat_param
theta = theta_0
for step_sample in support_set:
inner_loss, inner_total_samples, inner_skipped_samples, true_reward_argmax_step, true_reward_sample_step = self.reparam_inner_loss(step_sample, weights=theta, dial_shown=True)
total_samples += inner_total_samples
skipped_samples += inner_skipped_samples
true_reward_argmax_batch.extend(true_reward_argmax_step)
true_reward_sample_batch.extend(true_reward_sample_step)
log.info(" Epoch %d, Batch %d, support sample %s is trained!" % (
epoch_count, batch_count, str(step_sample[1]['qid'])))
# Get the new parameters after a one-step gradient update
# Each module parameter is computed as parameter = parameter - step_size * grad.
# When being saved in the OrderedDict of self.named_parameters(), it likes:
# OrderedDict([('sigma', Parameter containing:
# tensor([0.6931, 0.6931], requires_grad=True)), ('0.weight', Parameter containing:
# tensor([[1., 1.],
# [1., 1.]], requires_grad=True)), ('0.bias', Parameter containing:
# tensor([0., 0.], requires_grad=True)), ('1.weight', Parameter containing:
# tensor([[1., 1.],
# [1., 1.]], requires_grad=True)), ('1.bias', Parameter containing:
# tensor([0., 0.], requires_grad=True))])
theta = self.reparam_update_params(inner_loss, theta=theta,
lr=self.fast_lr, first_order=first_order)
meta_loss, outer_total_samples, outer_skipped_samples, true_reward_argmax_step, true_reward_sample_step = self.reparam_inner_loss(task, weights=theta, dial_shown=dial_shown)
task_losses.append(meta_loss)
total_samples += outer_total_samples
skipped_samples += outer_skipped_samples
true_reward_argmax_batch.extend(true_reward_argmax_step)
true_reward_sample_batch.extend(true_reward_sample_step)
log.info("Epoch %d, Batch %d, task %s is trained!" % (epoch_count, batch_count, str(task[1]['qid'])))
meta_losses = torch.sum(torch.stack(task_losses))
return meta_losses, total_samples, skipped_samples, true_reward_argmax_batch, true_reward_sample_batch
    def sampleForTest(self, task, first_order=False, dial_shown=True, epoch_count=0, batch_count=0):
        """Sample trajectories (before and after the update of the parameters)
        for all the tasks `tasks`.
        Here number of tasks is 1.

        Adapt a *copy* of the network weights on the task's support set, splice
        the adapted weights back into the live network, then greedily decode an
        action sequence for the task question.

        :param task: (question, annotation-dict) pair; annotation carries 'qid'.
        :param first_order: if True, use the first-order approximation in the
            inner update.
        :param dial_shown: accepted for signature symmetry with the training
            samplers; not forwarded here.  # NOTE(review): confirm intended
        :param epoch_count: epoch index (logging only).
        :param batch_count: batch index (logging only).
        :return: decoded action sequence as one upper-cased, space-separated
            string (the '#END' token is dropped).
        """
        task_losses = []
        true_reward_argmax_batch = []
        true_reward_sample_batch = []
        total_samples = 0
        skipped_samples = 0
        self.net.zero_grad()
        # To get copied weights of the model for inner training.
        names_weights_copy = self.get_inner_loop_parameter_dict(self.net.named_parameters())
        log.info("Task %s is training..." % (str(task[1]['qid'])))
        # Establish support set.
        support_set = self.establish_support_set(task, self.steps, self.weak_flag, self.train_data_support_944K)
        for step_sample in support_set:
            # todo: use the similarity between the sample in support set and the task to scale the reward or loss
            # when meta optimization.
            inner_loss, inner_total_samples, inner_skipped_samples, true_reward_argmax_step, true_reward_sample_step = self.inner_loss(step_sample, weights=names_weights_copy, dial_shown=True)
            total_samples += inner_total_samples
            skipped_samples += inner_skipped_samples
            true_reward_argmax_batch.extend(true_reward_argmax_step)
            true_reward_sample_batch.extend(true_reward_sample_step)
            # One inner gradient step on the copied weights:
            # parameter = parameter - step_size * grad.
            names_weights_copy = self.update_params(inner_loss, names_weights_copy=names_weights_copy, step_size=self.fast_lr, first_order=first_order)
        # Load the adapted weights into the live network before decoding.
        if names_weights_copy is not None:
            self.net.insert_new_parameter(names_weights_copy, True)
        input_seq = self.net.pack_input(task[0], self.net.emb)
        context, enc = self.net.encode_context(input_seq)
        # Always use the first token in input sequence, which is '#BEG', as the
        # initial input of the decoder.
        _, tokens = self.net.decode_chain_argmax(enc, input_seq.data[0:1],
                                                 seq_len=data.MAX_TOKENS, context=context[0], stop_at_token=self.end_token)
        token_string = ''
        for token in tokens:
            if token in self.rev_emb_dict and self.rev_emb_dict.get(token) != '#END':
                token_string += str(self.rev_emb_dict.get(token)).upper() + ' '
        token_string = token_string.strip()
        return token_string
def first_order_sampleForTest(self, task, old_param_dict = None, first_order=False, dial_shown=True, epoch_count=0, batch_count=0,random=False, mc=False):
"""Sample trajectories (before and after the update of the parameters)
for all the tasks `tasks`.
Here number of tasks is 1.
"""
# For each task, the initial parameters are the same, i.e., the value stored in old_param_dict.
# temp_param_dict = self.get_net_parameter()
if old_param_dict is not None:
self.net.insert_new_parameter_to_layers(old_param_dict)
# temp_param_dict = self.get_net_parameter()
# Try to solve the bug: "UserWarning: RNN module weights are not part of single contiguous chunk of memory".
self.net.encoder.flatten_parameters()
self.net.decoder.flatten_parameters()
self.net.zero_grad()
log.info("Task %s is testing..." % (str(task[1]['qid'])))
true_reward_argmax_batch = []
true_reward_sample_batch = []
total_samples = 0
skipped_samples = 0
# Establish support set.
if not random:
support_set = self.establish_support_set(task, self.steps, self.weak_flag, self.train_data_support_944K)
else:
# log.info("Using random support set...")
support_set = self.establish_random_support_set(task=task, N=self.steps, train_data_support_944K=self.train_data_support_944K)
for step_sample in support_set:
self.inner_optimizer.zero_grad()
inner_loss, inner_total_samples, inner_skipped_samples, true_reward_argmax_step, true_reward_sample_step = self.first_order_inner_loss(step_sample, dial_shown=True, mc=mc)
total_samples += inner_total_samples
skipped_samples += inner_skipped_samples
true_reward_argmax_batch.extend(true_reward_argmax_step)
true_reward_sample_batch.extend(true_reward_sample_step)
# log.info(" Support sample %s is trained!" % (str(step_sample[1]['qid'])))
# Inner update.
inner_loss.backward()
self.inner_optimizer.step()
# temp_param_dict = self.get_net_parameter()
input_seq = self.net.pack_input(task[0], self.net.emb)
# enc = net.encode(input_seq)
context, enc = self.net.encode_context(input_seq)
# # Always use the first token in input sequence, which is '#BEG' as the initial input of decoder.
_, tokens = self.net.decode_chain_argmax(enc, input_seq.data[0:1],
seq_len=data.MAX_TOKENS, context=context[0], stop_at_token=self.end_token)
token_string = ''
for token in tokens:
if token in self.rev_emb_dict and self.rev_emb_dict.get(token) != '#END':
token_string += str(self.rev_emb_dict.get(token)).upper() + ' '
token_string = token_string.strip()
return token_string
def maml_retriever_sampleForTest(self, task, old_param_dict = None, docID_dict=None, rev_docID_dict=None, emb_dict=None, qtype_docs_range=None, steps=5, mc=False):
"""Sample trajectories (before and after the update of the parameters)
for all the tasks `tasks`.
Here number of tasks is 1.
"""
# For each task, the initial parameters are the same, i.e., the value stored in old_param_dict.
# temp_param_dict = self.get_net_parameter()
if old_param_dict is not None:
self.net.insert_new_parameter_to_layers(old_param_dict)
# temp_param_dict = self.get_net_parameter()
# Try to solve the bug: "UserWarning: RNN module weights are not part of single contiguous chunk of memory".
self.net.encoder.flatten_parameters()
self.net.decoder.flatten_parameters()
self.net.zero_grad()
log.info("Task %s is testing..." % (str(task[1]['qid'])))
true_reward_argmax_batch = []
true_reward_sample_batch = []
total_samples = 0
skipped_samples = 0
# Establish support set.
support_set = self.establish_support_set_by_retriever_argmax(task=task, N=steps, train_data_support_944K=self.train_data_support_944K, docID_dict=docID_dict, rev_docID_dict=rev_docID_dict, emb_dict=emb_dict, qtype_docs_range=qtype_docs_range)
for step_sample in support_set:
self.inner_optimizer.zero_grad()
inner_loss, inner_total_samples, inner_skipped_samples, true_reward_argmax_step, true_reward_sample_step = self.first_order_inner_loss(step_sample, dial_shown=True, mc=mc)
total_samples += inner_total_samples
skipped_samples += inner_skipped_samples
true_reward_argmax_batch.extend(true_reward_argmax_step)
true_reward_sample_batch.extend(true_reward_sample_step)
# Inner update.
inner_loss.backward()
self.inner_optimizer.step()
# temp_param_dict = self.get_net_parameter()
# log.info(" Support sample %s is trained!" % (str(step_sample[1]['qid'])))
input_seq = self.net.pack_input(task[0], self.net.emb)
# enc = net.encode(input_seq)
context, enc = self.net.encode_context(input_seq)
# # Always use the first token in input sequence, which is '#BEG' as the initial input of decoder.
_, tokens = self.net.decode_chain_argmax(enc, input_seq.data[0:1],
seq_len=data.MAX_TOKENS, context=context[0], stop_at_token=self.end_token)
token_string = ''
for token in tokens:
if token in self.rev_emb_dict and self.rev_emb_dict.get(token) != '#END':
token_string += str(self.rev_emb_dict.get(token)).upper() + ' '
token_string = token_string.strip()
# Show what the output action sequence is.
action_tokens = []
for temp_idx in tokens:
if temp_idx in self.rev_emb_dict and self.rev_emb_dict.get(temp_idx) != '#END':
action_tokens.append(str(self.rev_emb_dict.get(temp_idx)).upper())
return token_string, action_tokens
| 91,609 | 58.603123 | 328 | py |
MRL-CQA | MRL-CQA-master/S2SRL/libbots/attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
class Attention(nn.Module):
    r"""Luong-style dot-product attention over encoder context.

    .. math::
        \begin{array}{ll}
        x = context*output \\
        attn = exp(x_i) / sum_j exp(x_j) \\
        output = \tanh(w * (attn * context) + b * output)
        \end{array}

    Args:
        dim (int): number of expected features in the decoder output.

    Inputs: output, context
        - **output** (batch, output_len, dim) or a ``PackedSequence``: decoder
          output features.
        - **context** (input_len, dim): encoded input-sequence features
          (reshaped internally to a batch of one).

    Outputs: output, attn
        - **output**: attended decoder features, packed iff the input was packed.
        - **attn** (batch, output_len, input_len): attention weights.
    """

    def __init__(self, dim):
        super(Attention, self).__init__()
        # Projects [mix; output] (2*dim) back down to dim.
        self.linear_out = nn.Linear(dim * 2, dim)
        self.mask = None

    def set_mask(self, mask):
        """Set the boolean index tensor whose positions are masked to -inf."""
        self.mask = mask

    def forward(self, output, context):
        # Accept either a padded tensor or a PackedSequence from the decoder.
        is_packed = isinstance(output, rnn_utils.PackedSequence)
        if is_packed:
            padded, _ = rnn_utils.pad_packed_sequence(output, batch_first=True)
        else:
            padded = output

        batch_size = padded.size(0)
        hidden_size = padded.size(2)
        # View the 2-D context as a batch of one: (1, input_len, dim).
        ctx = context.view(1, -1, context.size(1))
        ctx_len = ctx.size(1)

        # Raw alignment scores: (batch, out_len, dim) x (batch, dim, ctx_len).
        scores = torch.bmm(padded, ctx.transpose(1, 2))
        if self.mask is not None:
            scores.data.masked_fill_(self.mask, -float('inf'))
        # Normalize over the context positions.
        attn = F.softmax(scores.view(-1, ctx_len), dim=1).view(batch_size, -1, ctx_len)

        # Attention-weighted context: (batch, out_len, ctx_len) x (batch, ctx_len, dim).
        mix = torch.bmm(attn, ctx)
        # Concatenate, project, squash: (batch*out_len, 2*dim) -> (batch*out_len, dim).
        combined = torch.cat((mix, padded), dim=2)
        attended = torch.tanh(
            self.linear_out(combined.view(-1, 2 * hidden_size))
        ).view(-1, hidden_size)

        # Re-pack if the decoder handed us a PackedSequence.
        if is_packed:
            result = rnn_utils.PackedSequence(attended, output.batch_sizes.detach())
        else:
            result = attended.view(1, -1, hidden_size)
        return result, attn
| 4,204 | 45.722222 | 191 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/main.py | """
Implementation of ECCV 2018 paper "Graph R-CNN for Scene Graph Generation".
Author: Jianwei Yang, Jiasen Lu, Stefan Lee, Dhruv Batra, Devi Parikh
Contact: jw2yang@gatech.edu
"""
import os
import pprint
import argparse
import numpy as np
import torch
import datetime
from lib.config import cfg
from lib.model import build_model
from lib.scene_parser.rcnn.utils.miscellaneous import mkdir, save_config, get_timestamp
from lib.scene_parser.rcnn.utils.comm import synchronize, get_rank
from lib.scene_parser.rcnn.utils.logger import setup_logger
def train(cfg, args):
    """Build the scene-graph-generation model and run its training loop.

    :param cfg: merged global configuration node.
    :param args: parsed CLI namespace (local_rank / distributed are read).
    :return: the trained model wrapper (so the caller may evaluate it).
    """
    model = build_model(cfg, {"iteration": 0}, args.local_rank, args.distributed)
    model.train()
    return model
def test(cfg, args, model=None):
    """Evaluate a scene-graph-generation model.

    :param cfg: merged global configuration node.
    :param args: parsed CLI namespace (local_rank / distributed / visualize).
    :param model: optional pre-built model; built from scratch when None.
    """
    if model is None:
        model = build_model(cfg, {"iteration": 0}, args.local_rank, args.distributed)
    model.test(visualize=args.visualize)
def _parse_args():
    # Collect the CLI flags for scene graph generation.
    parser = argparse.ArgumentParser(description="Scene Graph Generation")
    parser.add_argument("--config-file", default="configs/baseline_res101.yaml")
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--session", type=int, default=0)
    parser.add_argument("--resume", type=int, default=0)
    parser.add_argument("--batchsize", type=int, default=0)
    parser.add_argument("--inference", action='store_true')
    parser.add_argument("--instance", type=int, default=-1)
    parser.add_argument("--use_freq_prior", action='store_true')
    parser.add_argument("--visualize", action='store_true')
    parser.add_argument("--algorithm", type=str, default='sg_baseline')
    return parser.parse_args()


def main():
    ''' parse config file '''
    args = _parse_args()

    # Distributed setup: launched with torch.distributed sets WORLD_SIZE.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(
            backend="nccl", init_method="env://"
        )
        synchronize()

    # Fold CLI options into the global config.
    cfg.merge_from_file(args.config_file)
    cfg.resume = args.resume
    cfg.instance = args.instance
    cfg.inference = args.inference
    cfg.MODEL.USE_FREQ_PRIOR = args.use_freq_prior
    cfg.MODEL.ALGORITHM = args.algorithm
    if args.batchsize > 0:
        cfg.DATASET.TRAIN_BATCH_SIZE = args.batchsize
    if args.session > 0:
        cfg.MODEL.SESSION = str(args.session)
    # cfg.freeze()

    # Only rank 0 creates the log directory.
    if get_rank() == 0 and not os.path.exists("logs"):
        os.mkdir("logs")
    logger = setup_logger("scene_graph_generation", "logs", get_rank(),
                          filename="{}_{}.txt".format(args.algorithm, get_timestamp()))
    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    output_config_path = os.path.join("logs", 'config.yml')
    logger.info("Saving config into: {}".format(output_config_path))
    save_config(cfg, output_config_path)

    if args.inference:
        test(cfg, args)
    else:
        model = train(cfg, args)


if __name__ == "__main__":
    main()
| 3,200 | 33.419355 | 87 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/model.py | import os
import datetime
import logging
import time
import numpy as np
import torch
import cv2
from .data.build import build_data_loader
from .scene_parser.parser import build_scene_parser
from .scene_parser.parser import build_scene_parser_optimizer
from .scene_parser.rcnn.utils.metric_logger import MetricLogger
from .scene_parser.rcnn.utils.timer import Timer, get_time_str
from .scene_parser.rcnn.utils.comm import synchronize, all_gather, is_main_process, get_world_size
from .scene_parser.rcnn.utils.visualize import select_top_predictions, overlay_boxes, overlay_class_names
from .data.evaluation import evaluate, evaluate_sg
from .utils.box import bbox_overlaps
class SceneGraphGeneration:
    """
    Scene graph generation: wires together the data loaders, the scene-parser
    model, its optimizer/scheduler/checkpointer, and the train/test loops.
    """
    def __init__(self, cfg, arguments, local_rank, distributed):
        """
        Initialize the scene graph generation model.

        :param cfg: global config node (mutated to record class/predicate names).
        :param arguments: training-state dict (e.g. {"iteration": 0}); copied.
        :param local_rank: GPU rank for distributed training.
        :param distributed: whether training runs under DistributedDataParallel.
        """
        self.cfg = cfg
        self.arguments = arguments.copy()
        self.device = torch.device("cuda")  # CUDA is assumed to be available

        # build data loader
        self.data_loader_train = build_data_loader(cfg, split="train", is_distributed=distributed)
        self.data_loader_test = build_data_loader(cfg, split="test", is_distributed=distributed)
        # Record the label vocabularies on the config for downstream components.
        cfg.DATASET.IND_TO_OBJECT = self.data_loader_train.dataset.ind_to_classes
        cfg.DATASET.IND_TO_PREDICATE = self.data_loader_train.dataset.ind_to_predicates

        logger = logging.getLogger("scene_graph_generation.trainer")
        logger.info("Train data size: {}".format(len(self.data_loader_train.dataset)))
        logger.info("Test data size: {}".format(len(self.data_loader_test.dataset)))

        # Compute (once) and cache the object-pair -> predicate frequency prior.
        if not os.path.exists("freq_prior.npy"):
            logger.info("Computing frequency prior matrix...")
            fg_matrix, bg_matrix = self._get_freq_prior()
            prob_matrix = fg_matrix.astype(np.float32)
            # Predicate channel 0 is "no relation": fill with background counts
            # and add-one smoothing, then normalize each (subj, obj) row into a
            # distribution over predicates.
            prob_matrix[:, :, 0] = bg_matrix
            prob_matrix[:, :, 0] += 1
            prob_matrix /= np.sum(prob_matrix, 2)[:, :, None]
            np.save("freq_prior.npy", prob_matrix)

        # build scene graph generation model
        self.scene_parser = build_scene_parser(cfg)
        self.scene_parser.to(self.device)
        self.sp_optimizer, self.sp_scheduler, self.sp_checkpointer, self.extra_checkpoint_data = \
            build_scene_parser_optimizer(cfg, self.scene_parser,
                                         local_rank=local_rank, distributed=distributed)
        self.arguments.update(self.extra_checkpoint_data)

    def _get_freq_prior(self, must_overlap=False):
        """Count (subject-class, object-class, predicate) triples over the
        training set.

        :param must_overlap: restrict background pairs to overlapping boxes.
        :return: (fg_matrix, bg_matrix) — foreground counts per predicate, and
            background counts of object-class pairs with no annotated relation.
        """
        fg_matrix = np.zeros((
            self.cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES,
            self.cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES,
            self.cfg.MODEL.ROI_RELATION_HEAD.NUM_CLASSES
        ), dtype=np.int64)

        bg_matrix = np.zeros((
            self.cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES,
            self.cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES,
        ), dtype=np.int64)

        for ex_ind in range(len(self.data_loader_train.dataset)):
            gt_classes = self.data_loader_train.dataset.gt_classes[ex_ind].copy()
            gt_relations = self.data_loader_train.dataset.relationships[ex_ind].copy()
            gt_boxes = self.data_loader_train.dataset.gt_boxes[ex_ind].copy()

            # For the foreground, we'll just look at everything
            o1o2 = gt_classes[gt_relations[:, :2]]
            for (o1, o2), gtr in zip(o1o2, gt_relations[:, 2]):
                fg_matrix[o1, o2, gtr] += 1

            # For the background, get all of the things that overlap.
            o1o2_total = gt_classes[np.array(
                self._box_filter(gt_boxes, must_overlap=must_overlap), dtype=int)]
            for (o1, o2) in o1o2_total:
                bg_matrix[o1, o2] += 1

            if ex_ind % 20 == 0:
                print("processing {}/{}".format(ex_ind, len(self.data_loader_train.dataset)))

        return fg_matrix, bg_matrix

    def _box_filter(self, boxes, must_overlap=False):
        """ Only include boxes that overlap as possible relations.
        If no overlapping boxes, use all of them."""
        # FIX: np.float and np.bool were removed in NumPy >= 1.24; use
        # np.float64 / bool explicitly (same semantics as the old aliases).
        overlaps = bbox_overlaps(torch.from_numpy(boxes.astype(np.float64)),
                                 torch.from_numpy(boxes.astype(np.float64))).numpy() > 0
        np.fill_diagonal(overlaps, 0)

        all_possib = np.ones_like(overlaps, dtype=bool)
        np.fill_diagonal(all_possib, 0)

        if must_overlap:
            possible_boxes = np.column_stack(np.where(overlaps))
            if possible_boxes.size == 0:
                possible_boxes = np.column_stack(np.where(all_possib))
        else:
            possible_boxes = np.column_stack(np.where(all_possib))
        return possible_boxes

    def train(self):
        """
        main body for training scene graph generation model
        """
        start_iter = self.arguments["iteration"]
        logger = logging.getLogger("scene_graph_generation.trainer")
        logger.info("Start training")
        meters = MetricLogger(delimiter="  ")
        max_iter = len(self.data_loader_train)
        self.scene_parser.train()
        start_training_time = time.time()
        end = time.time()
        for i, data in enumerate(self.data_loader_train, start_iter):
            data_time = time.time() - end
            self.arguments["iteration"] = i
            # Scheduler stepped before the optimizer, as in maskrcnn-benchmark.
            self.sp_scheduler.step()
            imgs, targets, _ = data
            imgs = imgs.to(self.device)
            targets = [target.to(self.device) for target in targets]
            loss_dict = self.scene_parser(imgs, targets)
            losses = sum(loss for loss in loss_dict.values())

            # reduce losses over all GPUs for logging purposes
            # NOTE(review): no cross-GPU reduction actually happens here — the
            # dict is passed through unchanged; confirm whether this is intended.
            loss_dict_reduced = loss_dict
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            meters.update(loss=losses_reduced, **loss_dict_reduced)

            self.sp_optimizer.zero_grad()
            losses.backward()
            self.sp_optimizer.step()

            batch_time = time.time() - end
            end = time.time()
            meters.update(time=batch_time, data=data_time)
            eta_seconds = meters.time.global_avg * (max_iter - i)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))

            if i % 20 == 0 or i == max_iter:
                logger.info(
                    meters.delimiter.join(
                        [
                            "model: {tag}",
                            "eta: {eta}",
                            "iter: {iter}/{max_iter}",
                            "{meters}",
                            "lr: {lr:.6f}",
                            "max mem: {memory:.0f}",
                        ]
                    ).format(
                        tag="scene_parser",
                        eta=eta_string,
                        iter=i, max_iter=max_iter,
                        meters=str(meters),
                        lr=self.sp_optimizer.param_groups[0]["lr"],
                        memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
                    )
                )
            if (i + 1) % self.cfg.SOLVER.CHECKPOINT_PERIOD == 0:
                self.sp_checkpointer.save("checkpoint_{:07d}".format(i), **self.arguments)
            if (i + 1) == max_iter:
                self.sp_checkpointer.save("checkpoint_final", **self.arguments)

    def _accumulate_predictions_from_multiple_gpus(self, predictions_per_gpu):
        """Gather per-GPU prediction dicts and flatten them into one ordered
        list indexed by image id; returns None on non-main processes."""
        all_predictions = all_gather(predictions_per_gpu)
        if not is_main_process():
            return
        # merge the list of dicts
        predictions = {}
        for p in all_predictions:
            predictions.update(p)
        # convert a dict where the key is the index in a list
        image_ids = list(sorted(predictions.keys()))
        if len(image_ids) != image_ids[-1] + 1:
            logger = logging.getLogger("scene_graph_generation.inference")
            logger.warning(
                "Number of images that were gathered from multiple processes is not "
                "a contiguous set. Some images might be missing from the evaluation"
            )
        # convert to a list
        predictions = [predictions[i] for i in image_ids]
        return predictions

    def visualize_detection(self, dataset, img_ids, imgs, predictions):
        """Write per-image detection overlays to the `visualize/` directory."""
        visualize_folder = "visualize"
        if not os.path.exists(visualize_folder):
            os.mkdir(visualize_folder)
        for i, prediction in enumerate(predictions):
            top_prediction = select_top_predictions(prediction)
            # Undo mean subtraction so the saved image is viewable.
            img = imgs.tensors[i].permute(1, 2, 0).contiguous().cpu().numpy() + np.array(self.cfg.INPUT.PIXEL_MEAN).reshape(1, 1, 3)
            result = img.copy()
            result = overlay_boxes(result, top_prediction)
            result = overlay_class_names(result, top_prediction, dataset.ind_to_classes)
            cv2.imwrite(os.path.join(visualize_folder, "detection_{}.jpg".format(img_ids[i])), result)

    def test(self, timer=None, visualize=False):
        """
        main body for testing scene graph generation model

        :param timer: optional Timer used to time model forwards.
        :param visualize: dump detection overlays for each batch.
        """
        logger = logging.getLogger("scene_graph_generation.inference")
        logger.info("Start evaluating")
        self.scene_parser.eval()
        targets_dict = {}
        results_dict = {}
        if self.cfg.MODEL.RELATION_ON:
            results_pred_dict = {}
        cpu_device = torch.device("cpu")
        total_timer = Timer()
        # NOTE(review): inference_timer is reported below but never tic/toc'd
        # (only the `timer` argument is); its total is always 0 — confirm.
        inference_timer = Timer()
        total_timer.tic()
        reg_recalls = []  # accumulated for inspection; not used downstream here
        for i, data in enumerate(self.data_loader_test, 0):
            imgs, targets, image_ids = data
            imgs = imgs.to(self.device)
            targets = [target.to(self.device) for target in targets]
            if i % 10 == 0:
                logger.info("inference on batch {}/{}...".format(i, len(self.data_loader_test)))
            with torch.no_grad():
                if timer:
                    timer.tic()
                output = self.scene_parser(imgs)
                if self.cfg.MODEL.RELATION_ON:
                    output, output_pred = output
                    output_pred = [o.to(cpu_device) for o in output_pred]
                # Fraction of ground-truth boxes recalled at IoU > 0.5.
                ious = bbox_overlaps(targets[0].bbox, output[0].bbox)
                reg_recall = (ious.max(1)[0] > 0.5).sum().item() / ious.shape[0]
                reg_recalls.append(reg_recall)
                if timer:
                    torch.cuda.synchronize()
                    timer.toc()
                output = [o.to(cpu_device) for o in output]
            if visualize:
                self.visualize_detection(self.data_loader_test.dataset, image_ids, imgs, output)
            results_dict.update(
                {img_id: result for img_id, result in zip(image_ids, output)}
            )
            targets_dict.update(
                {img_id: target for img_id, target in zip(image_ids, targets)}
            )
            if self.cfg.MODEL.RELATION_ON:
                results_pred_dict.update(
                    {img_id: result for img_id, result in zip(image_ids, output_pred)}
                )
            # Early exit for debugging a bounded number of instances.
            if self.cfg.instance > 0 and i > self.cfg.instance:
                break
        synchronize()
        total_time = total_timer.toc()
        total_time_str = get_time_str(total_time)
        num_devices = get_world_size()
        logger.info(
            "Total run time: {} ({} s / img per device, on {} devices)".format(
                total_time_str, total_time * num_devices / len(self.data_loader_test.dataset), num_devices
            )
        )
        total_infer_time = get_time_str(inference_timer.total_time)
        logger.info(
            "Model inference time: {} ({} s / img per device, on {} devices)".format(
                total_infer_time,
                inference_timer.total_time * num_devices / len(self.data_loader_test.dataset),
                num_devices,
            )
        )

        predictions = self._accumulate_predictions_from_multiple_gpus(results_dict)
        if self.cfg.MODEL.RELATION_ON:
            predictions_pred = self._accumulate_predictions_from_multiple_gpus(results_pred_dict)
        if not is_main_process():
            return

        output_folder = "results"
        if output_folder:
            if not os.path.exists(output_folder):
                os.mkdir(output_folder)
            torch.save(predictions, os.path.join(output_folder, "predictions.pth"))
            if self.cfg.MODEL.RELATION_ON:
                torch.save(predictions_pred, os.path.join(output_folder, "predictions_pred.pth"))

        extra_args = dict(
            box_only=False if self.cfg.MODEL.RETINANET_ON else self.cfg.MODEL.RPN_ONLY,
            iou_types=("bbox",),
            expected_results=self.cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=self.cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
        )
        eval_det_results = evaluate(dataset=self.data_loader_test.dataset,
                                    predictions=predictions,
                                    output_folder=output_folder,
                                    **extra_args)
        if self.cfg.MODEL.RELATION_ON:
            eval_sg_results = evaluate_sg(dataset=self.data_loader_test.dataset,
                                          predictions=predictions,
                                          predictions_pred=predictions_pred,
                                          output_folder=output_folder,
                                          **extra_args)
def build_model(cfg, arguments, local_rank, distributed):
    """Factory: construct the full scene-graph-generation wrapper."""
    return SceneGraphGeneration(cfg, arguments, local_rank, distributed)
| 13,581 | 43.097403 | 132 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/parser.py | """
Main code of scene parser
"""
import os
import logging
import torch
import copy
import torch.nn as nn
from .rcnn.modeling.detector.generalized_rcnn import GeneralizedRCNN
from .rcnn.solver import make_lr_scheduler
from .rcnn.solver import make_optimizer
from .rcnn.utils.checkpoint import SceneParserCheckpointer
from .rcnn.structures.image_list import to_image_list
from .rcnn.utils.comm import synchronize, get_rank
from .rcnn.modeling.relation_heads.relation_heads import build_roi_relation_head
SCENE_PAESER_DICT = ["sg_baseline", "sg_imp", "sg_msdn", "sg_grcnn", "sg_reldn"]
class SceneParser(GeneralizedRCNN):
    """Scene Parser: a Generalized R-CNN detector plus an optional relation head."""
    def __init__(self, cfg):
        # Parent constructor builds backbone, RPN and ROI box heads.
        GeneralizedRCNN.__init__(self, cfg)
        self.cfg = cfg
        self.rel_heads = None
        # The relation head is only attached for the scene-graph algorithms.
        if cfg.MODEL.RELATION_ON and self.cfg.MODEL.ALGORITHM in SCENE_PAESER_DICT:
            self.rel_heads = build_roi_relation_head(cfg, self.backbone.out_channels)
        self._freeze_components(self.cfg)

    def _freeze_components(self, cfg):
        # Optionally stop gradients through detector components so that only
        # the (relation) head configured for training is updated.
        if cfg.MODEL.BACKBONE.FREEZE_PARAMETER:
            for param in self.backbone.parameters():
                param.requires_grad = False
        if cfg.MODEL.RPN.FREEZE_PARAMETER:
            for param in self.rpn.parameters():
                param.requires_grad = False
        if cfg.MODEL.ROI_BOX_HEAD.FREEZE_PARAMETER:
            for param in self.roi_heads.parameters():
                param.requires_grad = False

    def train(self):
        # NOTE(review): overrides nn.Module.train() without the `mode`
        # argument; callers must call train()/eval() with no arguments.
        # Frozen components stay in eval mode (keeps e.g. BatchNorm stats fixed).
        if self.cfg.MODEL.BACKBONE.FREEZE_PARAMETER:
            self.backbone.eval()
        else:
            self.backbone.train()
        if self.cfg.MODEL.RPN.FREEZE_PARAMETER:
            self.rpn.eval()
        else:
            self.rpn.train()
        if self.cfg.MODEL.ROI_BOX_HEAD.FREEZE_PARAMETER:
            self.roi_heads.eval()
        else:
            self.roi_heads.train()
        if self.rel_heads:
            self.rel_heads.train()
        self.training = True

    def eval(self):
        # Put every component in eval mode and flip the training flag.
        self.backbone.eval()
        self.rpn.eval()
        self.roi_heads.eval()
        if self.rel_heads:
            self.rel_heads.eval()
        self.training = False

    def _post_processing(self, result):
        """
        Arguments:
            result: (object_predictions, predicate_predictions)
        Returns:
            sort the object-predicate triplets, and output the top
        """
        result_obj, result_pred = result
        result_obj_new, result_pred_new = [], []
        assert len(result_obj) == len(result_pred), "object list must have equal number to predicate list"
        for result_obj_i, result_pred_i in zip(result_obj, result_pred):
            obj_scores = result_obj_i.get_field("scores")
            rel_inds = result_pred_i.get_field("idx_pairs")
            pred_scores = result_pred_i.get_field("scores")
            # Triplet score = P(subject) * P(object) * best non-background predicate.
            scores = torch.stack((
                obj_scores[rel_inds[:, 0]],
                obj_scores[rel_inds[:, 1]],
                pred_scores[:, 1:].max(1)[0]
            ), 1).prod(1)
            scores_sorted, order = scores.sort(0, descending=True)
            # Keep only the top-scoring relation triplets; the object boxes
            # themselves are passed through unchanged.
            result_pred_i = result_pred_i[order[:self.cfg.MODEL.ROI_RELATION_HEAD.TRIPLETS_PER_IMG]]
            result_obj_new.append(result_obj_i)
            result_pred_new.append(result_pred_i)
        return (result_obj_new, result_pred_new)

    def forward(self, images, targets=None):
        """
        Arguments:
            images (list[Tensor] or ImageList): images to be processed
            targets (list[BoxList]): ground-truth boxes present in the image (optional)
        Returns:
            result (list[BoxList] or dict[Tensor]): the output from the model.
                During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns list[BoxList] contains additional fields
                like `scores`, `labels` and `mask` (for Mask R-CNN models).
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        images = to_image_list(images)
        features = self.backbone(images.tensors)
        proposals, proposal_losses = self.rpn(images, features, targets)
        scene_parser_losses = {}

        if self.roi_heads:
            x, detections, roi_heads_loss = self.roi_heads(features, proposals, targets)
            result = detections
            scene_parser_losses.update(roi_heads_loss)

            if self.rel_heads:
                relation_features = features
                # optimization: during training, if we share the feature extractor between
                # the box and the relation heads, then we can reuse the features already computed
                if (
                    self.training
                    and self.cfg.MODEL.ROI_RELATION_HEAD.SHARE_BOX_FEATURE_EXTRACTOR
                ):
                    relation_features = x
                # During training, self.box() will return the unaltered proposals as "detections"
                # this makes the API consistent during training and testing
                x_pairs, detection_pairs, rel_heads_loss = self.rel_heads(relation_features, detections, targets)
                scene_parser_losses.update(rel_heads_loss)

                x = (x, x_pairs)
                result = (detections, detection_pairs)
        else:
            # RPN-only models don't have roi_heads
            x = features
            result = proposals
            scene_parser_losses = {}

        if self.training:
            losses = {}
            losses.update(scene_parser_losses)
            losses.update(proposal_losses)
            return losses

        # NOTE: if object scores are updated in rel_heads, we need to ensure detections are updated accordingly
        # result = self._post_processing(result)
        return result
def get_save_dir(cfg):
    """Build (and create on disk) the checkpoint directory for this run.

    The path encodes dataset name/mode/loader + session, backbone, the
    algorithm (with training mode and feature-update step count for
    scene-graph algorithms), the batch size and the base learning rate.

    Returns:
        str: path of the now-existing directory under "checkpoints".
    """
    # "step" training starts from a pre-trained detector; "joint" trains all at once.
    mode = "step" if cfg.MODEL.WEIGHT_DET != "" else "joint"
    update_steps = max(
        cfg.MODEL.ROI_RELATION_HEAD.IMP_FEATURE_UPDATE_STEP,
        cfg.MODEL.ROI_RELATION_HEAD.MSDN_FEATURE_UPDATE_STEP,
        cfg.MODEL.ROI_RELATION_HEAD.GRCNN_FEATURE_UPDATE_STEP,
    )
    algorithm = cfg.MODEL.ALGORITHM
    if cfg.MODEL.USE_RELPN:
        algorithm = algorithm + '_relpn'
    # Scene-graph algorithms additionally encode the mode and iteration count.
    if "sg" in cfg.MODEL.ALGORITHM:
        run_name = '{}_{}_{}'.format(algorithm, mode, update_steps)
    else:
        run_name = cfg.MODEL.ALGORITHM
    save_path = os.path.join(
        "checkpoints",
        cfg.DATASET.NAME + '_' + cfg.DATASET.MODE + '_' + cfg.DATASET.LOADER + cfg.MODEL.SESSION,
        cfg.MODEL.BACKBONE.CONV_BODY,
        run_name,
        'BatchSize_{}'.format(cfg.DATASET.TRAIN_BATCH_SIZE),
        'Base_LR_{}'.format(cfg.SOLVER.BASE_LR),
    )
    os.makedirs(save_path, exist_ok=True)
    return save_path
def build_scene_parser(cfg):
    """Factory: construct the top-level SceneParser model from a config node."""
    return SceneParser(cfg)
def build_scene_parser_optimizer(cfg, model, local_rank=0, distributed=False):
    """Create optimizer, LR scheduler and checkpointer for `model`, load
    initial weights, and optionally wrap the model in DistributedDataParallel.

    Returns:
        (optimizer, scheduler, checkpointer, extra_checkpoint_data) where
        extra_checkpoint_data is whatever the checkpoint loader recovered
        (e.g. the last iteration count).
    """
    save_to_disk = True
    save_dir = get_save_dir(cfg)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)
    # only rank 0 writes checkpoints, avoiding clobbering in distributed runs
    save_to_disk = get_rank() == 0
    checkpointer = SceneParserCheckpointer(cfg, model, optimizer, scheduler, save_dir, save_to_disk,
                                           logger=logging.getLogger("scene_graph_generation.checkpointer"))
    # prefer a detector checkpoint when configured, otherwise ImageNet weights
    model_weight = cfg.MODEL.WEIGHT_DET if cfg.MODEL.WEIGHT_DET != "" else cfg.MODEL.WEIGHT_IMG
    extra_checkpoint_data = checkpointer.load(model_weight, resume=cfg.resume)
    if cfg.MODEL.ROI_RELATION_HEAD.USE_GT_BOXES:
        # seed the relation head's box branch with the trained detector weights
        model.rel_heads.box_feature_extractor.load_state_dict(model.roi_heads.box.feature_extractor.state_dict())
        model.rel_heads.box_predictor.load_state_dict(model.roi_heads.box.predictor.state_dict())
    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], output_device=local_rank,
            # this should be removed if we update BatchNorm stats
            broadcast_buffers=False,
        )
    return optimizer, scheduler, checkpointer, extra_checkpoint_data
| 8,133 | 40.28934 | 119 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#!/usr/bin/env python
import glob
import os
import torch
from setuptools import find_packages
from setuptools import setup
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
requirements = ["torch", "torchvision"]
def get_extensions():
    """Collect the C++/CUDA sources under ``csrc`` and return the setuptools
    extension module(s) to build.

    CUDA kernels are included only when a CUDA toolkit is visible (or the
    FORCE_CUDA=1 environment variable is set); otherwise a CPU-only
    CppExtension is built.
    """
    root = os.path.dirname(os.path.abspath(__file__))
    csrc = os.path.join(root, "csrc")

    # top-level dispatch sources plus the CPU implementations
    sources = glob.glob(os.path.join(csrc, "*.cpp")) + glob.glob(
        os.path.join(csrc, "cpu", "*.cpp")
    )

    ext_cls = CppExtension
    compile_args = {"cxx": []}
    macros = []

    cuda_wanted = (torch.cuda.is_available() and CUDA_HOME is not None) \
        or os.getenv("FORCE_CUDA", "0") == "1"
    if cuda_wanted:
        ext_cls = CUDAExtension
        sources += glob.glob(os.path.join(csrc, "cuda", "*.cu"))
        macros.append(("WITH_CUDA", None))
        # disable half-precision operator shortcuts that break some nvcc builds
        compile_args["nvcc"] = [
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ]

    return [
        ext_cls(
            "_C",
            sources,
            include_dirs=[csrc],
            define_macros=macros,
            extra_compile_args=compile_args,
        )
    ]
# Package metadata and build hooks: compiles the csrc C++/CUDA sources
# (via get_extensions) into the `_C` extension used by the rcnn layers.
setup(
    name="rcnn",
    version="0.1",
    author="fmassa, jwyang",
    url="https://github.com/jwyang/graph-rcnn.pytorch",
    description="object detection in pytorch",
    packages=find_packages(exclude=("configs", "tests",)),
    # install_requires=requirements,
    ext_modules=get_extensions(),
    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| 2,027 | 27.971429 | 100 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/solver/lr_scheduler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from bisect import bisect_right
import torch
# FIXME ideally this would be achieved with a CombinedLRScheduler,
# separating MultiStepLR with WarmupLR
# but the current LRScheduler design doesn't allow it
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multi-step LR schedule with an initial warmup phase.

    During the first ``warmup_iters`` iterations the base LR is scaled by a
    warmup factor (constant, or ramping linearly from ``warmup_factor`` to 1);
    afterwards the LR is multiplied by ``gamma`` once per milestone passed.

    Raises:
        ValueError: if ``milestones`` is not increasing or ``warmup_method``
            is not "constant"/"linear".
    """
    def __init__(
        self,
        optimizer,
        milestones,
        gamma=0.1,
        warmup_factor=1.0 / 3,
        warmup_iters=500,
        warmup_method="linear",
        last_epoch=-1,
    ):
        if not list(milestones) == sorted(milestones):
            # Bug fix: previously `milestones` was passed as a second
            # positional argument to ValueError instead of being formatted
            # into the message, so the "{}" placeholder was never filled.
            raise ValueError(
                "Milestones should be a list of"
                " increasing integers. Got {}".format(milestones)
            )
        if warmup_method not in ("constant", "linear"):
            # Bug fix: add the missing separator before "got" (the two string
            # literals used to concatenate into "acceptedgot").
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted, "
                "got {}".format(warmup_method)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
    def get_lr(self):
        """Return the learning rate for each param group at the current epoch."""
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                # linear ramp: warmup_factor at iter 0 -> 1 at warmup_iters
                alpha = float(self.last_epoch) / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        # gamma is applied once for every milestone already passed
        return [
            base_lr
            * warmup_factor
            * self.gamma ** bisect_right(self.milestones, self.last_epoch)
            for base_lr in self.base_lrs
        ]
| 1,817 | 33.301887 | 80 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/solver/build.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .lr_scheduler import WarmupMultiStepLR
def make_optimizer(cfg, model):
    """Create the SGD optimizer with per-parameter LR / weight-decay groups.

    Bias parameters get ``BASE_LR * BIAS_LR_FACTOR`` and ``WEIGHT_DECAY_BIAS``;
    all other parameters get ``BASE_LR`` and ``WEIGHT_DECAY``. Parameters with
    ``requires_grad=False`` are skipped entirely.
    """
    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        # Bug fix: `lr` is reset on every iteration. Previously it was set
        # once before the loop, so after the first bias parameter every
        # following non-bias parameter silently inherited the bias LR.
        lr = cfg.SOLVER.BASE_LR
        weight_decay = cfg.SOLVER.WEIGHT_DECAY
        if "bias" in key:
            lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
            weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
        params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
    # per-group lr above overrides this default for every group
    optimizer = torch.optim.SGD(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM)
    return optimizer
def make_lr_scheduler(cfg, optimizer):
    """Wrap `optimizer` in the warmup multi-step schedule from cfg.SOLVER."""
    warmup_kwargs = {
        "warmup_factor": cfg.SOLVER.WARMUP_FACTOR,
        "warmup_iters": cfg.SOLVER.WARMUP_ITERS,
        "warmup_method": cfg.SOLVER.WARMUP_METHOD,
    }
    return WarmupMultiStepLR(
        optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, **warmup_kwargs
    )
| 972 | 29.40625 | 79 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/layers/batch_norm.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
class FrozenBatchNorm2d(nn.Module):
    """BatchNorm2d whose statistics and affine parameters are frozen buffers.

    Computes ``y = (x - running_mean) / sqrt(running_var) * weight + bias``
    with fixed, non-trainable buffers (note: no eps term is added).
    """
    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        # identity-normalization defaults: weight=1, bias=0, mean=0, var=1
        for name, init in (
            ("weight", torch.ones(n)),
            ("bias", torch.zeros(n)),
            ("running_mean", torch.zeros(n)),
            ("running_var", torch.ones(n)),
        ):
            self.register_buffer(name, init)
    def forward(self, x):
        # Mirror the input precision: for fp16 inputs, convert the fixed
        # buffers to half so the arithmetic below stays in a single dtype.
        if x.dtype == torch.float16:
            self.weight = self.weight.half()
            self.bias = self.bias.half()
            self.running_mean = self.running_mean.half()
            self.running_var = self.running_var.half()
        gain = self.weight * self.running_var.rsqrt()
        shift = self.bias - self.running_mean * gain
        return x * gain.reshape(1, -1, 1, 1) + shift.reshape(1, -1, 1, 1)
| 1,094 | 33.21875 | 71 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/layers/roi_pool.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from lib.scene_parser.rcnn import _C
# from apex import amp
class _ROIPool(Function):
    """Autograd wrapper around the custom C++/CUDA ROI max-pooling op.

    forward pools `input` features over `roi` boxes into fixed-size maps;
    backward routes gradients to the argmax locations recorded in forward.
    Only `input` is differentiable.
    """
    @staticmethod
    def forward(ctx, input, roi, output_size, spatial_scale):
        # normalize output_size to an (h, w) pair up front
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.input_shape = input.size()
        # Bug fix: index the normalized pair rather than the raw argument —
        # `output_size[0]` crashed whenever a bare int was passed.
        output, argmax = _C.roi_pool_forward(
            input, roi, spatial_scale, ctx.output_size[0], ctx.output_size[1]
        )
        ctx.save_for_backward(input, roi, argmax)
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input, rois, argmax = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        bs, ch, h, w = ctx.input_shape
        grad_input = _C.roi_pool_backward(
            grad_output,
            input,
            rois,
            argmax,
            spatial_scale,
            output_size[0],
            output_size[1],
            bs,
            ch,
            h,
            w,
        )
        # gradients: only the feature input is differentiable
        return grad_input, None, None, None
roi_pool = _ROIPool.apply
class ROIPool(nn.Module):
    """Module wrapper around `roi_pool`: max-pools per-ROI features into
    fixed-size maps at the configured spatial scale."""
    def __init__(self, output_size, spatial_scale):
        super(ROIPool, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
    # @amp.float_function
    def forward(self, input, rois):
        """Pool `input` features over `rois` into fixed-size maps."""
        return roi_pool(input, rois, self.output_size, self.spatial_scale)
    def __repr__(self):
        return "{}(output_size={}, spatial_scale={})".format(
            self.__class__.__name__, self.output_size, self.spatial_scale
        )
| 1,907 | 27.909091 | 74 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/layers/roi_align.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from lib.scene_parser.rcnn import _C
# from apex import amp
class _ROIAlign(Function):
    """Autograd wrapper around the custom C++/CUDA ROI-align op.

    forward bilinearly samples `input` features over `roi` boxes into
    fixed-size maps; backward scatters gradients through the same sampling.
    Only `input` is differentiable.
    """
    @staticmethod
    def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
        ctx.save_for_backward(roi)
        # normalize output_size to an (h, w) pair up front
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        ctx.input_shape = input.size()
        # Bug fix: index the normalized pair rather than the raw argument —
        # `output_size[0]` crashed whenever a bare int was passed.
        output = _C.roi_align_forward(
            input, roi, spatial_scale, ctx.output_size[0], ctx.output_size[1],
            sampling_ratio
        )
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        rois, = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        sampling_ratio = ctx.sampling_ratio
        bs, ch, h, w = ctx.input_shape
        grad_input = _C.roi_align_backward(
            grad_output,
            rois,
            spatial_scale,
            output_size[0],
            output_size[1],
            bs,
            ch,
            h,
            w,
            sampling_ratio,
        )
        # gradients: only the feature input is differentiable
        return grad_input, None, None, None, None
roi_align = _ROIAlign.apply
class ROIAlign(nn.Module):
    """Module wrapper around `roi_align`: bilinear per-ROI feature pooling
    with a configurable sampling ratio."""
    def __init__(self, output_size, spatial_scale, sampling_ratio):
        super(ROIAlign, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio
    # @amp.float_function
    def forward(self, input, rois):
        """Align `input` features over `rois` into fixed-size maps."""
        return roi_align(
            input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
        )
    def __repr__(self):
        return "{}(output_size={}, spatial_scale={}, sampling_ratio={})".format(
            self.__class__.__name__,
            self.output_size,
            self.spatial_scale,
            self.sampling_ratio,
        )
| 2,161 | 29.885714 | 85 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/layers/smooth_l1_loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
# TODO maybe push this to nn?
def smooth_l1_loss(input, target, beta=1. / 9, size_average=True):
    """Smooth-L1 (Huber-style) loss with a configurable `beta` threshold.

    Quadratic for ``|input - target| < beta`` and linear beyond it; matches
    PyTorch's smooth_l1_loss when ``beta == 1``. Returns the mean when
    `size_average` is True, otherwise the sum.
    """
    diff = torch.abs(input - target)
    quadratic = 0.5 * diff ** 2 / beta
    linear = diff - 0.5 * beta
    loss = torch.where(diff < beta, quadratic, linear)
    return loss.mean() if size_average else loss.sum()
| 481 | 27.352941 | 71 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/layers/sigmoid_focal_loss.py | import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from lib.scene_parser.rcnn import _C
# TODO: Use JIT to replace CUDA implementation in the future.
class _SigmoidFocalLoss(Function):
    """Autograd wrapper around the custom CUDA sigmoid focal-loss kernels.

    forward returns per-element losses; backward differentiates with respect
    to the logits only (targets, gamma and alpha receive no gradient).
    """
    @staticmethod
    def forward(ctx, logits, targets, gamma, alpha):
        ctx.save_for_backward(logits, targets)
        num_classes = logits.shape[1]
        # stash hyper-parameters for the backward kernel call
        ctx.num_classes = num_classes
        ctx.gamma = gamma
        ctx.alpha = alpha
        losses = _C.sigmoid_focalloss_forward(
            logits, targets, num_classes, gamma, alpha
        )
        return losses
    @staticmethod
    @once_differentiable
    def backward(ctx, d_loss):
        logits, targets = ctx.saved_tensors
        num_classes = ctx.num_classes
        gamma = ctx.gamma
        alpha = ctx.alpha
        # the kernel requires a contiguous gradient buffer
        d_loss = d_loss.contiguous()
        d_logits = _C.sigmoid_focalloss_backward(
            logits, targets, d_loss, num_classes, gamma, alpha
        )
        # gradients: only the logits input is differentiable
        return d_logits, None, None, None, None
sigmoid_focal_loss_cuda = _SigmoidFocalLoss.apply
def sigmoid_focal_loss_cpu(logits, targets, gamma, alpha):
    """Pure-PyTorch sigmoid focal loss (CPU fallback for the CUDA kernel).

    Args:
        logits: (N, num_classes) raw class scores.
        targets: (N,) labels in ``1..num_classes``; entries < 0 are ignored
            and other non-matching entries contribute the negative term.
        gamma, alpha: focal-loss parameters, given either as scalars or as
            one-element sequences.

    Returns:
        (N, num_classes) tensor of per-element losses.
    """
    num_classes = logits.shape[1]
    # Bug fix: `SigmoidFocalLoss.forward` passes plain floats, but this
    # function unconditionally indexed gamma[0]/alpha[0] (the CUDA kernel's
    # calling convention) and crashed on scalars. Accept both forms.
    if isinstance(gamma, (list, tuple)):
        gamma = gamma[0]
    if isinstance(alpha, (list, tuple)):
        alpha = alpha[0]
    dtype = targets.dtype
    device = targets.device
    # class_range[j] == j + 1 so it compares against the 1-based labels
    class_range = torch.arange(1, num_classes + 1, dtype=dtype, device=device).unsqueeze(0)
    t = targets.unsqueeze(1)
    p = torch.sigmoid(logits)
    term1 = (1 - p) ** gamma * torch.log(p)
    term2 = p ** gamma * torch.log(1 - p)
    return -(t == class_range).float() * term1 * alpha - ((t != class_range) * (t >= 0)).float() * term2 * (1 - alpha)
class SigmoidFocalLoss(nn.Module):
    """Sum-reduced sigmoid focal loss.

    Dispatches to the custom CUDA kernel when the logits live on GPU and
    to the pure-PyTorch CPU implementation otherwise.
    """
    def __init__(self, gamma, alpha):
        super(SigmoidFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
    def forward(self, logits, targets):
        device = logits.device  # retained from the original; unused below
        compute = sigmoid_focal_loss_cuda if logits.is_cuda else sigmoid_focal_loss_cpu
        per_element = compute(logits, targets, self.gamma, self.alpha)
        return per_element.sum()
    def __repr__(self):
        return "{}(gamma={}, alpha={})".format(
            self.__class__.__name__, self.gamma, self.alpha
        )
| 2,345 | 29.467532 | 118 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/layers/_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import glob
import os.path
import torch
try:
from torch.utils.cpp_extension import load as load_ext
from torch.utils.cpp_extension import CUDA_HOME
except ImportError:
raise ImportError("The cpp layer extensions requires PyTorch 0.4 or higher")
def _load_C_extensions():
    """JIT-compile and load the ``csrc`` C++ (and, when a CUDA toolkit is
    available, CUDA) sources as a torch extension named "torchvision"."""
    # csrc lives one directory above this module's package directory
    base = os.path.dirname(os.path.abspath(__file__))
    base = os.path.join(os.path.dirname(base), "csrc")
    sources = glob.glob(os.path.join(base, "*.cpp")) + glob.glob(
        os.path.join(base, "cpu", "*.cpp")
    )
    cflags = []
    if torch.cuda.is_available() and CUDA_HOME is not None:
        sources.extend(glob.glob(os.path.join(base, "cuda", "*.cu")))
        cflags = ["-DWITH_CUDA"]
    # glob already yields absolute paths here, so this re-join is a no-op
    sources = [os.path.join(base, s) for s in sources]
    return load_ext(
        "torchvision",
        sources,
        extra_cflags=cflags,
        extra_include_paths=[base],
    )
_C = _load_C_extensions()
| 1,165 | 28.15 | 80 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/layers/misc.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
helper class that supports empty tensors on some nn functions.
Ideally, add support directly in PyTorch to empty tensors in
those functions.
This can be removed once https://github.com/pytorch/pytorch/issues/12013
is implemented
"""
import math
import torch
from torch import nn
from torch.nn.modules.utils import _ntuple
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
class Conv2d(torch.nn.Conv2d):
    """torch.nn.Conv2d that also accepts zero-batch (empty) inputs,
    returning a correctly-shaped empty output instead of erroring."""
    def forward(self, x):
        if x.numel() > 0:
            return super(Conv2d, self).forward(x)
        # empty input: compute the spatial output size analytically
        spatial = [
            (size + 2 * pad - (dil * (k - 1) + 1)) // step + 1
            for size, pad, dil, k, step in zip(
                x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
            )
        ]
        return _NewEmptyTensorOp.apply(x, [x.shape[0], self.weight.shape[0]] + spatial)
class ConvTranspose2d(torch.nn.ConvTranspose2d):
    """torch.nn.ConvTranspose2d that also handles zero-batch (empty) inputs.

    For empty inputs the spatial output size is computed analytically and an
    empty tensor of that shape is returned via _NewEmptyTensorOp.
    """
    def forward(self, x):
        if x.numel() > 0:
            return super(ConvTranspose2d, self).forward(x)
        # empty input: compute the transposed-conv output size analytically
        output_shape = [
            (i - 1) * d - 2 * p + (di * (k - 1) + 1) + op
            for i, p, di, k, d, op in zip(
                x.shape[-2:],
                self.padding,
                self.dilation,
                self.kernel_size,
                self.stride,
                self.output_padding,
            )
        ]
        # Bug fix: the original read self.bias.shape[0], which raises
        # AttributeError when the layer is constructed with bias=False;
        # self.out_channels is always set by nn.ConvTranspose2d.
        output_shape = [x.shape[0], self.out_channels] + output_shape
        return _NewEmptyTensorOp.apply(x, output_shape)
class BatchNorm2d(torch.nn.BatchNorm2d):
    """torch.nn.BatchNorm2d that passes zero-batch inputs straight through
    as an equally-shaped empty tensor instead of erroring."""
    def forward(self, x):
        if x.numel() > 0:
            return super(BatchNorm2d, self).forward(x)
        # batch norm never changes the shape, so reuse the input's
        return _NewEmptyTensorOp.apply(x, x.shape)
def interpolate(
    input, size=None, scale_factor=None, mode="nearest", align_corners=None
):
    """torch.nn.functional.interpolate that tolerates empty inputs.

    Non-empty tensors are forwarded to F.interpolate unchanged; empty
    tensors get an empty output of the analytically computed shape.
    """
    if input.numel() > 0:
        return torch.nn.functional.interpolate(
            input, size, scale_factor, mode, align_corners
        )
    def _validate(dim):
        # exactly one of size / scale_factor must be given, and a tuple
        # scale_factor must match the number of spatial dims
        if size is None and scale_factor is None:
            raise ValueError("either size or scale_factor should be defined")
        if size is not None and scale_factor is not None:
            raise ValueError("only one of size or scale_factor should be defined")
        if (
            scale_factor is not None
            and isinstance(scale_factor, tuple)
            and len(scale_factor) != dim
        ):
            raise ValueError(
                "scale_factor shape must match input shape. "
                "Input is {}D, scale_factor size is {}".format(dim, len(scale_factor))
            )
    def _spatial_size(dim):
        _validate(dim)
        if size is not None:
            return size
        factors = _ntuple(dim)(scale_factor)
        # math.floor might return float in py2.7
        return [
            int(math.floor(input.size(i + 2) * factors[i])) for i in range(dim)
        ]
    return _NewEmptyTensorOp.apply(input, input.shape[:-2] + tuple(_spatial_size(2)))
class DFConv2d(nn.Module):
    """Deformable convolutional layer.

    Composes a plain Conv2d (`self.offset`) that predicts per-location
    sampling offsets (and, for modulated DCN, a mask) with a deformable
    convolution (`self.conv`) that consumes them.
    """
    def __init__(
        self,
        in_channels,
        out_channels,
        with_modulated_dcn=True,
        kernel_size=3,
        stride=1,
        groups=1,
        dilation=1,
        deformable_groups=1,
        bias=False
    ):
        super(DFConv2d, self).__init__()
        if isinstance(kernel_size, (list, tuple)):
            assert isinstance(stride, (list, tuple))
            assert isinstance(dilation, (list, tuple))
            assert len(kernel_size) == 2
            assert len(stride) == 2
            assert len(dilation) == 2
            # "same"-style padding per spatial dimension
            padding = (
                dilation[0] * (kernel_size[0] - 1) // 2,
                dilation[1] * (kernel_size[1] - 1) // 2
            )
            offset_base_channels = kernel_size[0] * kernel_size[1]
        else:
            padding = dilation * (kernel_size - 1) // 2
            offset_base_channels = kernel_size * kernel_size
        if with_modulated_dcn:
            # NOTE(review): this file already lives inside the `layers`
            # package, so `.layers` as an import target looks suspicious —
            # confirm it resolves in this project layout.
            from .layers import ModulatedDeformConv
            # 3 channels per kernel position: (x, y) offset + modulation mask
            offset_channels = offset_base_channels * 3 #default: 27
            conv_block = ModulatedDeformConv
        else:
            # 2 channels per kernel position: (x, y) offset
            offset_channels = offset_base_channels * 2 #default: 18
            from .layers import DeformConv
            conv_block = DeformConv
        # plain convolution that regresses the offsets (and mask, if modulated)
        self.offset = Conv2d(
            in_channels,
            deformable_groups * offset_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=1,
            dilation=dilation
        )
        for l in [self.offset,]:
            nn.init.kaiming_uniform_(l.weight, a=1)
            torch.nn.init.constant_(l.bias, 0.)
        self.conv = conv_block(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            deformable_groups=deformable_groups,
            bias=bias
        )
        self.with_modulated_dcn = with_modulated_dcn
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
    def forward(self, x):
        if x.numel() > 0:
            if not self.with_modulated_dcn:
                offset = self.offset(x)
                x = self.conv(x, offset)
            else:
                offset_mask = self.offset(x)
                # NOTE(review): the 18/9 channel split assumes the default
                # 3x3 kernel with deformable_groups=1 — confirm for other
                # kernel sizes / group counts.
                offset = offset_mask[:, :18, :, :]
                mask = offset_mask[:, -9:, :, :].sigmoid()
                x = self.conv(x, offset, mask)
            return x
        # empty batch: compute the output shape analytically.
        # NOTE(review): with a scalar kernel_size, self.padding/self.dilation
        # are ints here and this zip would fail — TODO confirm callers.
        output_shape = [
            (i + 2 * p - (di * (k - 1) + 1)) // d + 1
            for i, p, di, k, d in zip(
                x.shape[-2:],
                self.padding,
                self.dilation,
                self.kernel_size,
                self.stride
            )
        ]
        output_shape = [x.shape[0], self.conv.weight.shape[0]] + output_shape
        return _NewEmptyTensorOp.apply(x, output_shape)
| 6,625 | 31.480392 | 88 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/layers/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .batch_norm import FrozenBatchNorm2d
from .misc import Conv2d
from .misc import DFConv2d
from .misc import ConvTranspose2d
from .misc import BatchNorm2d
from .misc import interpolate
from .nms import nms
from .roi_align import ROIAlign
from .roi_align import roi_align
from .roi_pool import ROIPool
from .roi_pool import roi_pool
from .smooth_l1_loss import smooth_l1_loss
from .sigmoid_focal_loss import SigmoidFocalLoss
from .dcn.deform_conv_func import deform_conv, modulated_deform_conv
from .dcn.deform_conv_module import DeformConv, ModulatedDeformConv, ModulatedDeformConvPack
from .dcn.deform_pool_func import deform_roi_pooling
from .dcn.deform_pool_module import DeformRoIPooling, DeformRoIPoolingPack, ModulatedDeformRoIPoolingPack
# Public API of the layers package: custom C++/CUDA ops (nms, ROI pooling/
# aligning, focal loss), empty-input-safe nn wrappers, and the deformable
# convolution / pooling family.
__all__ = [
    "nms",
    "roi_align",
    "ROIAlign",
    "roi_pool",
    "ROIPool",
    "smooth_l1_loss",
    "Conv2d",
    "DFConv2d",
    "ConvTranspose2d",
    "interpolate",
    "BatchNorm2d",
    "FrozenBatchNorm2d",
    "SigmoidFocalLoss",
    'deform_conv',
    'modulated_deform_conv',
    'DeformConv',
    'ModulatedDeformConv',
    'ModulatedDeformConvPack',
    'deform_roi_pooling',
    'DeformRoIPooling',
    'DeformRoIPoolingPack',
    'ModulatedDeformRoIPoolingPack',
]
| 1,327 | 26.666667 | 105 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/layers/dcn/deform_conv_func.py | import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from lib.scene_parser.rcnn import _C
class DeformConvFunction(Function):
    """Autograd wrapper for the (non-modulated) deformable convolution op.

    CUDA-only: forward/backward raise NotImplementedError for CPU tensors.
    Only input, offset and weight are differentiable.
    """
    @staticmethod
    def forward(
        ctx,
        input,
        offset,
        weight,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1,
        im2col_step=64
    ):
        if input is not None and input.dim() != 4:
            raise ValueError(
                "Expected 4D tensor as input, got {}D tensor instead.".format(
                    input.dim()))
        # hyper-parameters are stashed on ctx for the backward pass
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.im2col_step = im2col_step
        ctx.save_for_backward(input, offset, weight)
        output = input.new_empty(
            DeformConvFunction._output_size(input, weight, ctx.padding,
                                            ctx.dilation, ctx.stride))
        ctx.bufs_ = [input.new_empty(0), input.new_empty(0)]  # columns, ones
        if not input.is_cuda:
            raise NotImplementedError
        else:
            # the kernel processes the batch in chunks of cur_im2col_step images
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] %
                    cur_im2col_step) == 0, 'im2col step must divide batchsize'
            _C.deform_conv_forward(
                input,
                weight,
                offset,
                output,
                ctx.bufs_[0],
                ctx.bufs_[1],
                weight.size(3),
                weight.size(2),
                ctx.stride[1],
                ctx.stride[0],
                ctx.padding[1],
                ctx.padding[0],
                ctx.dilation[1],
                ctx.dilation[0],
                ctx.groups,
                ctx.deformable_groups,
                cur_im2col_step
            )
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input, offset, weight = ctx.saved_tensors
        grad_input = grad_offset = grad_weight = None
        if not grad_output.is_cuda:
            raise NotImplementedError
        else:
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] %
                    cur_im2col_step) == 0, 'im2col step must divide batchsize'
            # gradient w.r.t. input and offset (computed together by one kernel)
            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                grad_input = torch.zeros_like(input)
                grad_offset = torch.zeros_like(offset)
                _C.deform_conv_backward_input(
                    input,
                    offset,
                    grad_output,
                    grad_input,
                    grad_offset,
                    weight,
                    ctx.bufs_[0],
                    weight.size(3),
                    weight.size(2),
                    ctx.stride[1],
                    ctx.stride[0],
                    ctx.padding[1],
                    ctx.padding[0],
                    ctx.dilation[1],
                    ctx.dilation[0],
                    ctx.groups,
                    ctx.deformable_groups,
                    cur_im2col_step
                )
            # gradient w.r.t. the convolution weight
            if ctx.needs_input_grad[2]:
                grad_weight = torch.zeros_like(weight)
                _C.deform_conv_backward_parameters(
                    input,
                    offset,
                    grad_output,
                    grad_weight,
                    ctx.bufs_[0],
                    ctx.bufs_[1],
                    weight.size(3),
                    weight.size(2),
                    ctx.stride[1],
                    ctx.stride[0],
                    ctx.padding[1],
                    ctx.padding[0],
                    ctx.dilation[1],
                    ctx.dilation[0],
                    ctx.groups,
                    ctx.deformable_groups,
                    1,
                    cur_im2col_step
                )
        return (grad_input, grad_offset, grad_weight, None, None, None, None, None)
    @staticmethod
    def _output_size(input, weight, padding, dilation, stride):
        """Compute the (N, C_out, H_out, W_out) output shape; raises
        ValueError if any dimension would be non-positive."""
        channels = weight.size(0)
        output_size = (input.size(0), channels)
        for d in range(input.dim() - 2):
            in_size = input.size(d + 2)
            pad = padding[d]
            kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
            stride_ = stride[d]
            output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )
        if not all(map(lambda s: s > 0, output_size)):
            raise ValueError(
                "convolution input is too small (output would be {})".format(
                    'x'.join(map(str, output_size))))
        return output_size
class ModulatedDeformConvFunction(Function):
    """Autograd wrapper for modulated deformable convolution (DCNv2): a
    deformable conv whose sampled values are additionally scaled by a
    learned per-location mask. CUDA-only.
    """
    @staticmethod
    def forward(
        ctx,
        input,
        offset,
        mask,
        weight,
        bias=None,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1
    ):
        ctx.stride = stride
        ctx.padding = padding
        ctx.dilation = dilation
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.with_bias = bias is not None
        if not ctx.with_bias:
            bias = input.new_empty(1)  # fake tensor; the kernel still expects one
        if not input.is_cuda:
            raise NotImplementedError
        # only save inputs when some gradient will actually be requested
        if weight.requires_grad or mask.requires_grad or offset.requires_grad \
                or input.requires_grad:
            ctx.save_for_backward(input, offset, mask, weight, bias)
        output = input.new_empty(
            ModulatedDeformConvFunction._infer_shape(ctx, input, weight))
        ctx._bufs = [input.new_empty(0), input.new_empty(0)]
        _C.modulated_deform_conv_forward(
            input,
            weight,
            bias,
            ctx._bufs[0],
            offset,
            mask,
            output,
            ctx._bufs[1],
            weight.shape[2],
            weight.shape[3],
            ctx.stride,
            ctx.stride,
            ctx.padding,
            ctx.padding,
            ctx.dilation,
            ctx.dilation,
            ctx.groups,
            ctx.deformable_groups,
            ctx.with_bias
        )
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError
        input, offset, mask, weight, bias = ctx.saved_tensors
        grad_input = torch.zeros_like(input)
        grad_offset = torch.zeros_like(offset)
        grad_mask = torch.zeros_like(mask)
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        # a single fused kernel fills every gradient buffer
        _C.modulated_deform_conv_backward(
            input,
            weight,
            bias,
            ctx._bufs[0],
            offset,
            mask,
            ctx._bufs[1],
            grad_input,
            grad_weight,
            grad_bias,
            grad_offset,
            grad_mask,
            grad_output,
            weight.shape[2],
            weight.shape[3],
            ctx.stride,
            ctx.stride,
            ctx.padding,
            ctx.padding,
            ctx.dilation,
            ctx.dilation,
            ctx.groups,
            ctx.deformable_groups,
            ctx.with_bias
        )
        if not ctx.with_bias:
            grad_bias = None
        return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias,
                None, None, None, None, None)
    @staticmethod
    def _infer_shape(ctx, input, weight):
        """Output shape (n, C_out, H_out, W_out) for the stored conv params."""
        n = input.size(0)
        channels_out = weight.size(0)
        height, width = input.shape[2:4]
        kernel_h, kernel_w = weight.shape[2:4]
        height_out = (height + 2 * ctx.padding -
                      (ctx.dilation * (kernel_h - 1) + 1)) // ctx.stride + 1
        width_out = (width + 2 * ctx.padding -
                     (ctx.dilation * (kernel_w - 1) + 1)) // ctx.stride + 1
        return n, channels_out, height_out, width_out
deform_conv = DeformConvFunction.apply
modulated_deform_conv = ModulatedDeformConvFunction.apply
| 8,312 | 30.608365 | 83 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/layers/dcn/deform_pool_func.py | import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from lib.scene_parser.rcnn import _C
class DeformRoIPoolingFunction(Function):
    """Autograd wrapper for deformable position-sensitive ROI pooling.

    CUDA-only; with ``no_trans=True`` the kernel performs plain PS-ROI
    pooling and the offset input is ignored. Gradients flow to `data` and
    `offset` only (ROI coordinates are treated as constants).
    """
    @staticmethod
    def forward(
        ctx,
        data,
        rois,
        offset,
        spatial_scale,
        out_size,
        out_channels,
        no_trans,
        group_size=1,
        part_size=None,
        sample_per_part=4,
        trans_std=.0
    ):
        ctx.spatial_scale = spatial_scale
        ctx.out_size = out_size
        ctx.out_channels = out_channels
        ctx.no_trans = no_trans
        ctx.group_size = group_size
        # part_size defaults to the pooled output size
        ctx.part_size = out_size if part_size is None else part_size
        ctx.sample_per_part = sample_per_part
        ctx.trans_std = trans_std
        assert 0.0 <= ctx.trans_std <= 1.0
        if not data.is_cuda:
            raise NotImplementedError
        n = rois.shape[0]
        output = data.new_empty(n, out_channels, out_size, out_size)
        # per-bin sample counts, needed by the backward kernel
        output_count = data.new_empty(n, out_channels, out_size, out_size)
        _C.deform_psroi_pooling_forward(
            data,
            rois,
            offset,
            output,
            output_count,
            ctx.no_trans,
            ctx.spatial_scale,
            ctx.out_channels,
            ctx.group_size,
            ctx.out_size,
            ctx.part_size,
            ctx.sample_per_part,
            ctx.trans_std
        )
        # only save inputs when some gradient will actually be requested
        if data.requires_grad or rois.requires_grad or offset.requires_grad:
            ctx.save_for_backward(data, rois, offset)
        ctx.output_count = output_count
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError
        data, rois, offset = ctx.saved_tensors
        output_count = ctx.output_count
        grad_input = torch.zeros_like(data)
        grad_rois = None  # ROI coordinates receive no gradient
        grad_offset = torch.zeros_like(offset)
        _C.deform_psroi_pooling_backward(
            grad_output,
            data,
            rois,
            offset,
            output_count,
            grad_input,
            grad_offset,
            ctx.no_trans,
            ctx.spatial_scale,
            ctx.out_channels,
            ctx.group_size,
            ctx.out_size,
            ctx.part_size,
            ctx.sample_per_part,
            ctx.trans_std
        )
        return (grad_input, grad_rois, grad_offset, None, None, None, None, None, None, None, None)
deform_roi_pooling = DeformRoIPoolingFunction.apply
| 2,597 | 26.347368 | 99 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/layers/dcn/deform_pool_module.py | from torch import nn
from .deform_pool_func import deform_roi_pooling
class DeformRoIPooling(nn.Module):
    """Deformable (position-sensitive) ROI pooling module.

    Thin nn.Module wrapper over `deform_roi_pooling`; when `no_trans` is
    set, the offset input is replaced by an empty tensor and the kernel
    falls back to plain PS-ROI pooling.
    """
    def __init__(self,
                 spatial_scale,
                 out_size,
                 out_channels,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0):
        super(DeformRoIPooling, self).__init__()
        self.spatial_scale = spatial_scale
        self.out_size = out_size
        self.out_channels = out_channels
        self.no_trans = no_trans
        self.group_size = group_size
        # part_size defaults to the pooled output size
        self.part_size = part_size if part_size is not None else out_size
        self.sample_per_part = sample_per_part
        self.trans_std = trans_std
    def forward(self, data, rois, offset):
        if self.no_trans:
            # no learned offsets: hand the kernel an empty placeholder
            offset = data.new_empty(0)
        return deform_roi_pooling(
            data, rois, offset, self.spatial_scale, self.out_size,
            self.out_channels, self.no_trans, self.group_size, self.part_size,
            self.sample_per_part, self.trans_std)
class DeformRoIPoolingPack(DeformRoIPooling):
    """DeformRoIPooling that predicts its own per-cell (x, y) offsets.

    A first, offset-free pooling pass produces features from which a small
    MLP (`offset_fc`) regresses 2 offsets per output cell; a second pass
    pools again using those offsets. The final FC layer is zero-initialized
    so that training starts from plain PS-ROI pooling.
    """
    def __init__(self,
                 spatial_scale,
                 out_size,
                 out_channels,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0,
                 deform_fc_channels=1024):
        super(DeformRoIPoolingPack,
              self).__init__(spatial_scale, out_size, out_channels, no_trans,
                             group_size, part_size, sample_per_part, trans_std)
        self.deform_fc_channels = deform_fc_channels
        if not no_trans:
            # regresses out_size*out_size*2 values: an (x, y) offset per cell
            self.offset_fc = nn.Sequential(
                nn.Linear(self.out_size * self.out_size * self.out_channels,
                          self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels,
                          self.out_size * self.out_size * 2))
            # zero init: initial offsets are 0, i.e. vanilla PS-ROI pooling
            self.offset_fc[-1].weight.data.zero_()
            self.offset_fc[-1].bias.data.zero_()
    def forward(self, data, rois):
        assert data.size(1) == self.out_channels
        if self.no_trans:
            # single offset-free pass
            offset = data.new_empty(0)
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std)
        else:
            n = rois.shape[0]
            offset = data.new_empty(0)
            # pass 1: pool without offsets (no_trans forced True)
            x = deform_roi_pooling(data, rois, offset, self.spatial_scale,
                                   self.out_size, self.out_channels, True,
                                   self.group_size, self.part_size,
                                   self.sample_per_part, self.trans_std)
            # pass 2: predict offsets from the pooled features, pool again
            offset = self.offset_fc(x.view(n, -1))
            offset = offset.view(n, 2, self.out_size, self.out_size)
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std)
class ModulatedDeformRoIPoolingPack(DeformRoIPooling):
    """DeformRoIPooling that predicts both per-cell offsets and a mask.

    Like DeformRoIPoolingPack, but a second head (`mask_fc`, sigmoid output)
    predicts a per-cell modulation mask that scales the second pooling pass's
    output element-wise. Both heads are zero-initialized (mask starts at 0.5
    everywhere after the sigmoid).
    """
    def __init__(self,
                 spatial_scale,
                 out_size,
                 out_channels,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0,
                 deform_fc_channels=1024):
        super(ModulatedDeformRoIPoolingPack, self).__init__(
            spatial_scale, out_size, out_channels, no_trans, group_size,
            part_size, sample_per_part, trans_std)
        self.deform_fc_channels = deform_fc_channels
        if not no_trans:
            # regresses out_size*out_size*2 values: an (x, y) offset per cell
            self.offset_fc = nn.Sequential(
                nn.Linear(self.out_size * self.out_size * self.out_channels,
                          self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels,
                          self.out_size * self.out_size * 2))
            self.offset_fc[-1].weight.data.zero_()
            self.offset_fc[-1].bias.data.zero_()
            # regresses out_size*out_size values in (0, 1): one mask per cell
            self.mask_fc = nn.Sequential(
                nn.Linear(self.out_size * self.out_size * self.out_channels,
                          self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels,
                          self.out_size * self.out_size * 1),
                nn.Sigmoid())
            # index 2 is the final Linear (before the Sigmoid)
            self.mask_fc[2].weight.data.zero_()
            self.mask_fc[2].bias.data.zero_()
    def forward(self, data, rois):
        assert data.size(1) == self.out_channels
        if self.no_trans:
            # single offset-free pass, no modulation
            offset = data.new_empty(0)
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std)
        else:
            n = rois.shape[0]
            offset = data.new_empty(0)
            # pass 1: pool without offsets (no_trans forced True)
            x = deform_roi_pooling(data, rois, offset, self.spatial_scale,
                                   self.out_size, self.out_channels, True,
                                   self.group_size, self.part_size,
                                   self.sample_per_part, self.trans_std)
            # pass 2: predict offsets and mask, pool again, modulate
            offset = self.offset_fc(x.view(n, -1))
            offset = offset.view(n, 2, self.out_size, self.out_size)
            mask = self.mask_fc(x.view(n, -1))
            mask = mask.view(n, 1, self.out_size, self.out_size)
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std) * mask
| 6,306 | 41.046667 | 79 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/layers/dcn/deform_conv_module.py | import math
import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair
from .deform_conv_func import deform_conv, modulated_deform_conv
class DeformConv(nn.Module):
    """Deformable convolution layer (no modulation): the sampling offsets
    are supplied by the caller as a second input to ``forward``.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1,
        bias=False
    ):
        # Bias is not supported by the underlying deform_conv op.
        assert not bias
        super(DeformConv, self).__init__()
        self.with_bias = bias
        assert in_channels % groups == 0, \
            'in_channels {} cannot be divisible by groups {}'.format(
                in_channels, groups)
        assert out_channels % groups == 0, \
            'out_channels {} cannot be divisible by groups {}'.format(
                out_channels, groups)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deformable_groups = deformable_groups
        # Weight layout matches a grouped conv: (out, in // groups, kH, kW).
        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels // self.groups,
                         *self.kernel_size))
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init with bound 1/sqrt(fan_in), fan_in = C_in * kH * kW."""
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, input, offset):
        """Apply deformable convolution with caller-provided offsets."""
        return deform_conv(input, offset, self.weight, self.stride,
                           self.padding, self.dilation, self.groups,
                           self.deformable_groups)

    def __repr__(self):
        return "".join([
            "{}(".format(self.__class__.__name__),
            "in_channels={}, ".format(self.in_channels),
            "out_channels={}, ".format(self.out_channels),
            "kernel_size={}, ".format(self.kernel_size),
            "stride={}, ".format(self.stride),
            "dilation={}, ".format(self.dilation),
            "padding={}, ".format(self.padding),
            "groups={}, ".format(self.groups),
            "deformable_groups={}, ".format(self.deformable_groups),
            "bias={})".format(self.with_bias),
        ])
class ModulatedDeformConv(nn.Module):
    """Modulated deformable convolution (a.k.a. DCNv2 layer): forward takes
    externally-computed sampling offsets AND a modulation mask.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1,
        bias=True
    ):
        super(ModulatedDeformConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        # NOTE: unlike DeformConv above, stride/padding/dilation are stored
        # as given (not normalized with _pair) and passed through as-is.
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.deformable_groups = deformable_groups
        self.with_bias = bias
        self.weight = nn.Parameter(torch.Tensor(
            out_channels,
            in_channels // groups,
            *self.kernel_size
        ))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            # Registering None keeps the attribute present for state_dict.
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform weight init with bound 1/sqrt(fan_in); zero bias."""
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.zero_()

    def forward(self, input, offset, mask):
        """Apply modulated deformable convolution with given offset/mask."""
        return modulated_deform_conv(
            input, offset, mask, self.weight, self.bias, self.stride,
            self.padding, self.dilation, self.groups, self.deformable_groups)

    def __repr__(self):
        return "".join([
            "{}(".format(self.__class__.__name__),
            "in_channels={}, ".format(self.in_channels),
            "out_channels={}, ".format(self.out_channels),
            "kernel_size={}, ".format(self.kernel_size),
            "stride={}, ".format(self.stride),
            "dilation={}, ".format(self.dilation),
            "padding={}, ".format(self.padding),
            "groups={}, ".format(self.groups),
            "deformable_groups={}, ".format(self.deformable_groups),
            "bias={})".format(self.with_bias),
        ])
class ModulatedDeformConvPack(ModulatedDeformConv):
    """Self-contained modulated deformable conv: a regular conv branch
    (``conv_offset_mask``) predicts the offsets and mask from the input,
    so ``forward`` takes only the input tensor.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 deformable_groups=1,
                 bias=True):
        super(ModulatedDeformConvPack, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            groups, deformable_groups, bias)
        # 3 * kH * kW channels per deformable group: x-offsets, y-offsets,
        # and the modulation mask (split apart in forward()).
        self.conv_offset_mask = nn.Conv2d(
            self.in_channels // self.groups,
            self.deformable_groups * 3 * self.kernel_size[0] *
            self.kernel_size[1],
            kernel_size=self.kernel_size,
            stride=_pair(self.stride),
            padding=_pair(self.padding),
            bias=True)
        self.init_offset()

    def init_offset(self):
        """Zero-init the offset/mask branch so the layer starts as a plain
        conv (zero offsets, sigmoid(0)=0.5 mask)."""
        self.conv_offset_mask.weight.data.zero_()
        self.conv_offset_mask.bias.data.zero_()

    def forward(self, input):
        out = self.conv_offset_mask(input)
        # Split prediction into the two offset halves and the mask.
        o1, o2, mask = torch.chunk(out, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        mask = torch.sigmoid(mask)
        return modulated_deform_conv(
            input, offset, mask, self.weight, self.bias, self.stride,
            self.padding, self.dilation, self.groups, self.deformable_groups)
| 5,802 | 31.601124 | 78 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/engine/inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import time
import os
import torch
from tqdm import tqdm
from lib.config import cfg
from lib.data.datasets.evaluation import evaluate
from ..utils.comm import is_main_process, get_world_size
from ..utils.comm import all_gather
from ..utils.comm import synchronize
from ..utils.timer import Timer, get_time_str
from .bbox_aug import im_detect_bbox_aug
def compute_on_dataset(model, data_loader, device, timer=None):
    """Run the model over a data loader and collect per-image predictions.

    Args:
        model: detector put into eval mode here.
        data_loader: yields (images, targets, image_ids) batches.
        device: device the images are moved to for inference.
        timer: optional Timer; tic/toc brackets the forward pass only.

    Returns:
        dict mapping image_id -> prediction (moved to CPU).
    """
    model.eval()
    results_dict = {}
    cpu_device = torch.device("cpu")
    for _, batch in enumerate(tqdm(data_loader)):
        images, targets, image_ids = batch
        with torch.no_grad():
            if timer:
                timer.tic()
            if cfg.TEST.BBOX_AUG.ENABLED:
                # Test-time augmentation path (multi-scale / flips).
                output = im_detect_bbox_aug(model, images, device)
            else:
                output = model(images.to(device))
            if timer:
                if not cfg.MODEL.DEVICE == 'cpu':
                    # Wait for async CUDA kernels so the timing is accurate.
                    torch.cuda.synchronize()
                timer.toc()
            output = [o.to(cpu_device) for o in output]
        results_dict.update(
            {img_id: result for img_id, result in zip(image_ids, output)}
        )
    return results_dict
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
    """Gather per-process prediction dicts onto rank 0 and flatten to a list.

    Returns the ordered list of predictions on the main process, or None on
    every other rank.
    """
    all_predictions = all_gather(predictions_per_gpu)
    if not is_main_process():
        return
    # merge the list of dicts
    predictions = {}
    for p in all_predictions:
        predictions.update(p)
    # convert a dict where the key is the index in a list
    image_ids = list(sorted(predictions.keys()))
    # NOTE(review): assumes at least one prediction and integer image ids;
    # an empty dict would raise IndexError on image_ids[-1] — confirm callers.
    if len(image_ids) != image_ids[-1] + 1:
        logger = logging.getLogger("maskrcnn_benchmark.inference")
        logger.warning(
            "Number of images that were gathered from multiple processes is not "
            "a contiguous set. Some images might be missing from the evaluation"
        )
    # convert to a list
    predictions = [predictions[i] for i in image_ids]
    return predictions
def inference(
        model,
        data_loader,
        dataset_name,
        iou_types=("bbox",),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Evaluate a model on a dataset: run inference, gather results across
    processes, optionally save them, and call the dataset evaluator.

    Returns the evaluation result on the main process, None on other ranks.
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset)))
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    predictions = compute_on_dataset(model, data_loader, device, inference_timer)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset), num_devices
        )
    )
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        )
    )
    # Merge predictions from all ranks; only rank 0 continues past here.
    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return
    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))
    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )
    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
| 4,027 | 32.289256 | 96 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/engine/bbox_aug.py | import torch
import torchvision.transforms as TT
from lib.config import cfg
from lib.data import transforms as T
from lib.scene_parser.rcnn.structures.image_list import to_image_list
from lib.scene_parser.rcnn.structures.bounding_box import BoxList
from lib.scene_parser.rcnn.modeling.roi_heads.box_head.inference import make_roi_box_post_processor
def im_detect_bbox_aug(model, images, device):
    """Test-time augmentation for box detection: run the detector on the
    original image, optional horizontal flip, and extra scales (per
    cfg.TEST.BBOX_AUG), merge all detections, then NMS-filter.

    Returns one post-processed BoxList per input image.
    """
    # Collect detections computed under different transformations
    boxlists_ts = []
    for _ in range(len(images)):
        boxlists_ts.append([])

    def add_preds_t(boxlists_t):
        for i, boxlist_t in enumerate(boxlists_t):
            if len(boxlists_ts[i]) == 0:
                # The first one is identity transform, no need to resize the boxlist
                boxlists_ts[i].append(boxlist_t)
            else:
                # Resize the boxlist as the first one
                boxlists_ts[i].append(boxlist_t.resize(boxlists_ts[i][0].size))

    # Compute detections for the original image (identity transform)
    boxlists_i = im_detect_bbox(
        model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device
    )
    add_preds_t(boxlists_i)

    # Perform detection on the horizontally flipped image
    if cfg.TEST.BBOX_AUG.H_FLIP:
        boxlists_hf = im_detect_bbox_hflip(
            model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device
        )
        add_preds_t(boxlists_hf)

    # Compute detections at different scales
    for scale in cfg.TEST.BBOX_AUG.SCALES:
        max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
        boxlists_scl = im_detect_bbox_scale(
            model, images, scale, max_size, device
        )
        add_preds_t(boxlists_scl)

        if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
            boxlists_scl_hf = im_detect_bbox_scale(
                model, images, scale, max_size, device, hflip=True
            )
            add_preds_t(boxlists_scl_hf)

    # Merge boxlists detected by different bbox aug params
    boxlists = []
    for i, boxlist_ts in enumerate(boxlists_ts):
        bbox = torch.cat([boxlist_t.bbox for boxlist_t in boxlist_ts])
        scores = torch.cat([boxlist_t.get_field('scores') for boxlist_t in boxlist_ts])
        boxlist = BoxList(bbox, boxlist_ts[0].size, boxlist_ts[0].mode)
        boxlist.add_field('scores', scores)
        boxlists.append(boxlist)

    # Apply NMS and limit the final detections
    results = []
    post_processor = make_roi_box_post_processor(cfg)
    for boxlist in boxlists:
        results.append(post_processor.filter_results(boxlist, cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES))

    return results
def im_detect_bbox(model, images, target_scale, target_max_size, device):
    """
    Performs bbox detection on the original image.

    Images are resized to target_scale/target_max_size, converted to
    tensors, and normalized with the configured pixel mean/std before the
    forward pass.
    """
    transform = TT.Compose([
        T.Resize(target_scale, target_max_size),
        TT.ToTensor(),
        T.Normalize(
            mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255
        )
    ])
    images = [transform(image) for image in images]
    images = to_image_list(images, cfg.DATALOADER.SIZE_DIVISIBILITY)
    return model(images.to(device))
def im_detect_bbox_hflip(model, images, target_scale, target_max_size, device):
    """
    Performs bbox detection on the horizontally flipped image.
    Function signature is the same as for im_detect_bbox.

    Detections are transposed back so they are expressed in the original
    (un-flipped) image coordinates.
    """
    transform = TT.Compose([
        T.Resize(target_scale, target_max_size),
        # Probability 1.0: always flip.
        TT.RandomHorizontalFlip(1.0),
        TT.ToTensor(),
        T.Normalize(
            mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255
        )
    ])
    images = [transform(image) for image in images]
    images = to_image_list(images, cfg.DATALOADER.SIZE_DIVISIBILITY)
    boxlists = model(images.to(device))

    # Invert the detections computed on the flipped image
    boxlists_inv = [boxlist.transpose(0) for boxlist in boxlists]
    return boxlists_inv
def im_detect_bbox_scale(model, images, target_scale, target_max_size, device, hflip=False):
    """
    Computes bbox detections at the given scale.
    Returns predictions in the scaled image space.
    """
    # Pick the flipped or plain detector; both share the same signature.
    detect_fn = im_detect_bbox_hflip if hflip else im_detect_bbox
    return detect_fn(model, images, target_scale, target_max_size, device)
| 4,418 | 36.449153 | 99 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/engine/trainer.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import datetime
import logging
import time
import torch
import torch.distributed as dist
from lib.scene_parser.rcnn.utils.comm import get_world_size
from lib.scene_parser.rcnn.utils.metric_logger import MetricLogger
from apex import amp
def reduce_loss_dict(loss_dict):
    """
    Reduce the loss dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    loss_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # Single process: nothing to reduce.
        return loss_dict
    with torch.no_grad():
        # Sort keys so every process stacks the losses in the same order.
        names = sorted(loss_dict.keys())
        stacked = torch.stack([loss_dict[name] for name in names], dim=0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            stacked /= world_size
        return {name: value for name, value in zip(names, stacked)}
def do_train(
    model,
    data_loader,
    optimizer,
    scheduler,
    checkpointer,
    device,
    checkpoint_period,
    arguments,
):
    """Main training loop: iterate the data loader from arguments["iteration"],
    compute losses, backprop (through apex amp loss scaling), log smoothed
    metrics every 20 iterations, and checkpoint every ``checkpoint_period``.

    ``arguments`` is mutated in place ("iteration") so checkpoints can
    resume from the right spot.
    """
    logger = logging.getLogger("maskrcnn_benchmark.trainer")
    logger.info("Start training")
    meters = MetricLogger(delimiter="  ")
    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    model.train()
    start_training_time = time.time()
    end = time.time()
    for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
        # Skip degenerate batches where some image has no annotations.
        if any(len(target) < 1 for target in targets):
            logger.error(f"Iteration={iteration + 1} || Image Ids used for training {_} || targets Length={[len(target) for target in targets]}" )
            continue
        data_time = time.time() - end
        iteration = iteration + 1
        arguments["iteration"] = iteration

        # NOTE: the LR schedule is stepped once per iteration, before the
        # optimizer step (maskrcnn-benchmark convention).
        scheduler.step()

        images = images.to(device)
        targets = [target.to(device) for target in targets]

        loss_dict = model(images, targets)

        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(loss=losses_reduced, **loss_dict_reduced)

        optimizer.zero_grad()
        # Note: If mixed precision is not used, this ends up doing nothing
        # Otherwise apply loss scaling for mixed-precision recipe
        with amp.scale_loss(losses, optimizer) as scaled_losses:
            scaled_losses.backward()
        optimizer.step()

        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time, data=data_time)

        # ETA from the running average of per-iteration time.
        eta_seconds = meters.time.global_avg * (max_iter - iteration)
        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))

        if iteration % 20 == 0 or iteration == max_iter:
            logger.info(
                meters.delimiter.join(
                    [
                        "eta: {eta}",
                        "iter: {iter}",
                        "{meters}",
                        "lr: {lr:.6f}",
                        "max mem: {memory:.0f}",
                    ]
                ).format(
                    eta=eta_string,
                    iter=iteration,
                    meters=str(meters),
                    lr=optimizer.param_groups[0]["lr"],
                    memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
                )
            )
        if iteration % checkpoint_period == 0:
            checkpointer.save("model_{:07d}".format(iteration), **arguments)
        if iteration == max_iter:
            checkpointer.save("model_final", **arguments)

    total_training_time = time.time() - start_training_time
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info(
        "Total training time: {} ({:.4f} s / it)".format(
            total_time_str, total_training_time / (max_iter)
        )
    )
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/utils/c2_model_loading.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import pickle
from collections import OrderedDict
import torch
from .model_serialization import load_state_dict
from .registry import Registry
def _rename_basic_resnet_weights(layer_keys):
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [k.replace(".w", ".weight") for k in layer_keys]
layer_keys = [k.replace(".bn", "_bn") for k in layer_keys]
layer_keys = [k.replace(".b", ".bias") for k in layer_keys]
layer_keys = [k.replace("_bn.s", "_bn.scale") for k in layer_keys]
layer_keys = [k.replace(".biasranch", ".branch") for k in layer_keys]
layer_keys = [k.replace("bbox.pred", "bbox_pred") for k in layer_keys]
layer_keys = [k.replace("cls.score", "cls_score") for k in layer_keys]
layer_keys = [k.replace("res.conv1_", "conv1_") for k in layer_keys]
# RPN / Faster RCNN
layer_keys = [k.replace(".biasbox", ".bbox") for k in layer_keys]
layer_keys = [k.replace("conv.rpn", "rpn.conv") for k in layer_keys]
layer_keys = [k.replace("rpn.bbox.pred", "rpn.bbox_pred") for k in layer_keys]
layer_keys = [k.replace("rpn.cls.logits", "rpn.cls_logits") for k in layer_keys]
# Affine-Channel -> BatchNorm enaming
layer_keys = [k.replace("_bn.scale", "_bn.weight") for k in layer_keys]
# Make torchvision-compatible
layer_keys = [k.replace("conv1_bn.", "bn1.") for k in layer_keys]
layer_keys = [k.replace("res2.", "layer1.") for k in layer_keys]
layer_keys = [k.replace("res3.", "layer2.") for k in layer_keys]
layer_keys = [k.replace("res4.", "layer3.") for k in layer_keys]
layer_keys = [k.replace("res5.", "layer4.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2a_bn.", ".bn1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2b_bn.", ".bn2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
layer_keys = [k.replace(".branch2c_bn.", ".bn3.") for k in layer_keys]
layer_keys = [k.replace(".branch1.", ".downsample.0.") for k in layer_keys]
layer_keys = [k.replace(".branch1_bn.", ".downsample.1.") for k in layer_keys]
# GroupNorm
layer_keys = [k.replace("conv1.gn.s", "bn1.weight") for k in layer_keys]
layer_keys = [k.replace("conv1.gn.bias", "bn1.bias") for k in layer_keys]
layer_keys = [k.replace("conv2.gn.s", "bn2.weight") for k in layer_keys]
layer_keys = [k.replace("conv2.gn.bias", "bn2.bias") for k in layer_keys]
layer_keys = [k.replace("conv3.gn.s", "bn3.weight") for k in layer_keys]
layer_keys = [k.replace("conv3.gn.bias", "bn3.bias") for k in layer_keys]
layer_keys = [k.replace("downsample.0.gn.s", "downsample.1.weight") \
for k in layer_keys]
layer_keys = [k.replace("downsample.0.gn.bias", "downsample.1.bias") \
for k in layer_keys]
return layer_keys
def _rename_fpn_weights(layer_keys, stage_names):
for mapped_idx, stage_name in enumerate(stage_names, 1):
suffix = ""
if mapped_idx < 4:
suffix = ".lateral"
layer_keys = [
k.replace("fpn.inner.layer{}.sum{}".format(stage_name, suffix), "fpn_inner{}".format(mapped_idx)) for k in layer_keys
]
layer_keys = [k.replace("fpn.layer{}.sum".format(stage_name), "fpn_layer{}".format(mapped_idx)) for k in layer_keys]
layer_keys = [k.replace("rpn.conv.fpn2", "rpn.conv") for k in layer_keys]
layer_keys = [k.replace("rpn.bbox_pred.fpn2", "rpn.bbox_pred") for k in layer_keys]
layer_keys = [
k.replace("rpn.cls_logits.fpn2", "rpn.cls_logits") for k in layer_keys
]
return layer_keys
def _rename_weights_for_resnet(weights, stage_names):
    """Convert a raw C2 blob dict to a PyTorch-style OrderedDict of tensors.

    Applies the basic ResNet renames, FPN renames, and Mask/Keypoint-RCNN
    renames; drops "*_momentum" blobs; converts numpy arrays with
    torch.from_numpy. Logs every key mapping.
    """
    original_keys = sorted(weights.keys())
    layer_keys = sorted(weights.keys())

    # for X-101, rename output to fc1000 to avoid conflicts afterwards
    layer_keys = [k if k != "pred_b" else "fc1000_b" for k in layer_keys]
    layer_keys = [k if k != "pred_w" else "fc1000_w" for k in layer_keys]

    # performs basic renaming: _ -> . , etc
    layer_keys = _rename_basic_resnet_weights(layer_keys)

    # FPN
    layer_keys = _rename_fpn_weights(layer_keys, stage_names)

    # Mask R-CNN
    layer_keys = [k.replace("mask.fcn.logits", "mask_fcn_logits") for k in layer_keys]
    layer_keys = [k.replace(".[mask].fcn", "mask_fcn") for k in layer_keys]
    layer_keys = [k.replace("conv5.mask", "conv5_mask") for k in layer_keys]

    # Keypoint R-CNN
    layer_keys = [k.replace("kps.score.lowres", "kps_score_lowres") for k in layer_keys]
    layer_keys = [k.replace("kps.score", "kps_score") for k in layer_keys]
    layer_keys = [k.replace("conv.fcn", "conv_fcn") for k in layer_keys]

    # Rename for our RPN structure
    layer_keys = [k.replace("rpn.", "rpn.head.") for k in layer_keys]

    # Old key -> new key, relying on sorted() giving the same order twice.
    key_map = {k: v for k, v in zip(original_keys, layer_keys)}

    logger = logging.getLogger(__name__)
    logger.info("Remapping C2 weights")
    # Column width for aligned log output.
    max_c2_key_size = max([len(k) for k in original_keys if "_momentum" not in k])

    new_weights = OrderedDict()
    for k in original_keys:
        v = weights[k]
        if "_momentum" in k:
            continue
        # if 'fc1000' in k:
        #     continue
        w = torch.from_numpy(v)
        # if "bn" in k:
        #     w = w.view(1, -1, 1, 1)
        logger.info("C2 name: {: <{}} mapped name: {}".format(k, max_c2_key_size, key_map[k]))
        new_weights[key_map[k]] = w

    return new_weights
def _load_c2_pickled_weights(file_path):
with open(file_path, "rb") as f:
if torch._six.PY3:
data = pickle.load(f, encoding="latin1")
else:
data = pickle.load(f)
if "blobs" in data:
weights = data["blobs"]
else:
weights = data
return weights
def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg):
import re
logger = logging.getLogger(__name__)
logger.info("Remapping conv weights for deformable conv weights")
layer_keys = sorted(state_dict.keys())
for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
if not stage_with_dcn:
continue
for old_key in layer_keys:
pattern = ".*layer{}.*conv2.*".format(ix)
r = re.match(pattern, old_key)
if r is None:
continue
for param in ["weight", "bias"]:
if old_key.find(param) is -1:
continue
new_key = old_key.replace(
"conv2.{}".format(param), "conv2.conv.{}".format(param)
)
logger.info("pattern: {}, old_key: {}, new_key: {}".format(
pattern, old_key, new_key
))
state_dict[new_key] = state_dict[old_key]
del state_dict[old_key]
return state_dict
# Per-architecture C2 stage-name suffixes (e.g. "3.22" for the 23-block
# stage of R-101); consumed by load_resnet_c2_format via
# _rename_weights_for_resnet -> _rename_fpn_weights.
_C2_STAGE_NAMES = {
    "R-50": ["1.2", "2.3", "3.5", "4.2"],
    "R-101": ["1.2", "2.3", "3.22", "4.2"],
    "R-152": ["1.2", "2.7", "3.35", "4.2"],
}
# Registry mapping cfg.MODEL.BACKBONE.CONV_BODY strings to loader functions.
C2_FORMAT_LOADER = Registry()
@C2_FORMAT_LOADER.register("R-50-C4")
@C2_FORMAT_LOADER.register("R-50-C5")
@C2_FORMAT_LOADER.register("R-101-C4")
@C2_FORMAT_LOADER.register("R-101-C5")
@C2_FORMAT_LOADER.register("R-50-FPN")
@C2_FORMAT_LOADER.register("R-50-FPN-X")
@C2_FORMAT_LOADER.register("R-50-FPN-RETINANET")
@C2_FORMAT_LOADER.register("R-101-FPN")
@C2_FORMAT_LOADER.register("R-101-FPN-X")
@C2_FORMAT_LOADER.register("R-101-FPN-RETINANET")
@C2_FORMAT_LOADER.register("R-152-FPN")
@C2_FORMAT_LOADER.register("R-152-FPN-X")
def load_resnet_c2_format(cfg, f):
    """Load a Detectron/Caffe2 ResNet checkpoint and remap its blob names
    to this codebase's conventions.

    Registered for every ResNet conv body; the head/FPN suffixes are
    stripped from CONV_BODY to look up the base architecture.

    Returns:
        dict with a single "model" key holding the remapped state dict.
    """
    state_dict = _load_c2_pickled_weights(f)
    conv_body = cfg.MODEL.BACKBONE.CONV_BODY
    # Reduce e.g. "R-50-FPN-X" down to the base arch key "R-50".
    arch = conv_body.replace("-C4", "").replace("-C5", "").replace("-FPN", "")
    arch = arch.replace("-X", "")
    arch = arch.replace("-RETINANET", "")
    stages = _C2_STAGE_NAMES[arch]
    state_dict = _rename_weights_for_resnet(state_dict, stages)
    # ***********************************
    # for deformable convolutional layer
    state_dict = _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg)
    # ***********************************
    return dict(model=state_dict)
def load_c2_format(cfg, f):
    """Load Caffe2-format weights via the loader registered for the
    configured conv body."""
    loader = C2_FORMAT_LOADER[cfg.MODEL.BACKBONE.CONV_BODY]
    return loader(cfg, f)
| 8,444 | 39.023697 | 129 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/utils/metric_logger.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import defaultdict
from collections import deque
import torch
class SmoothedValue(object):
    """Track a stream of scalar values and expose statistics over a sliding
    window (``median``, ``avg``) as well as over the whole series
    (``global_avg``).
    """

    def __init__(self, window_size=20):
        # Windowed view of the most recent observations.
        self.deque = deque(maxlen=window_size)
        # Full history of every observation.
        self.series = []
        self.total = 0.0
        self.count = 0

    def update(self, value):
        """Record one new observation."""
        self.total += value
        self.count += 1
        self.series.append(value)
        self.deque.append(value)

    @property
    def median(self):
        """Median of the values currently in the window."""
        window = torch.tensor(list(self.deque))
        return window.median().item()

    @property
    def avg(self):
        """Mean of the values currently in the window."""
        window = torch.tensor(list(self.deque))
        return window.mean().item()

    @property
    def global_avg(self):
        """Mean over every value ever recorded."""
        return self.total / self.count
class MetricLogger(object):
    """Collect named SmoothedValue meters and render them for log lines."""

    def __init__(self, delimiter="\t"):
        # Meters are created lazily on first update of each name.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Feed one scalar per keyword argument into the matching meter."""
        for name, value in kwargs.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            assert isinstance(value, (float, int))
            self.meters[name].update(value)

    def __getattr__(self, attr):
        # Only called when normal lookup fails; exposes meters as
        # attributes, e.g. ``logger.loss``.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        entries = [
            "{}: {:.4f} ({:.4f})".format(name, meter.median, meter.global_avg)
            for name, meter in self.meters.items()
        ]
        return self.delimiter.join(entries)
| 1,862 | 26.80597 | 82 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/utils/checkpoint.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import os
import torch
from .model_serialization import load_state_dict
from .c2_model_loading import load_c2_format
from .imports import import_file
from .model_zoo import cache_url
class Checkpointer(object):
    """Save/load model (+ optional optimizer/scheduler) state to ``save_dir``
    and track the most recent checkpoint in a ``last_checkpoint`` file.
    """

    def __init__(
        self,
        model,
        optimizer=None,
        scheduler=None,
        save_dir="",
        save_to_disk=None,
        logger=None,
        inference=False
    ):
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.save_dir = save_dir
        self.save_to_disk = save_to_disk
        if logger is None:
            logger = logging.getLogger(__name__)
        self.logger = logger
        # Flag set by callers for inference-only use (see subclasses).
        self.inference = inference

    def save(self, name, **kwargs):
        """Write "<save_dir>/<name>.pth" with model/optimizer/scheduler
        state plus any extra kwargs, then tag it as the last checkpoint.
        No-op unless both save_dir and save_to_disk are set."""
        if not self.save_dir:
            return

        if not self.save_to_disk:
            return

        data = {}
        data["model"] = self.model.state_dict()
        if self.optimizer is not None:
            data["optimizer"] = self.optimizer.state_dict()
        if self.scheduler is not None:
            data["scheduler"] = self.scheduler.state_dict()
        data.update(kwargs)

        save_file = os.path.join(self.save_dir, "{}.pth".format(name))
        self.logger.info("Saving checkpoint to {}".format(save_file))
        torch.save(data, save_file)
        self.tag_last_checkpoint(save_file)

    def load(self, f=None, resume=0, use_latest=False):
        """Load a checkpoint and restore model weights.

        With resume > 0, ``f`` is overridden by the latest tagged
        checkpoint (if use_latest) or by checkpoint_{resume:07d}.pth.
        Returns the loaded checkpoint dict, or {} when nothing was found.
        Note: optimizer/scheduler restore is intentionally disabled below.
        """
        if self.has_last_checkpoint() and use_latest and resume > 0:
            # override argument with existing checkpoint
            f = self.get_last_checkpoint_file()
        elif self.has_checkpoint(resume) and resume > 0:
            f = self.get_checkpoint_file(resume)
        if not f:
            # no checkpoint could be found
            self.logger.info("No checkpoint found. Initializing model from scratch")
            return {}
        self.logger.info("Loading checkpoint from {}".format(f))
        checkpoint = self._load_file(f)
        self._load_model(checkpoint)
        # if "optimizer" in checkpoint and self.optimizer and "sg" in f and not self.inference:
        #     self.logger.info("Loading optimizer from {}".format(f))
        #     self.optimizer.load_state_dict(checkpoint.pop("optimizer"))
        # if "scheduler" in checkpoint and self.scheduler and "sg" in f and not self.inference:
        #     self.logger.info("Loading scheduler from {}".format(f))
        #     self.scheduler.load_state_dict(checkpoint.pop("scheduler"))

        # checkpoint['iteration'] = resume # if we load detector, the we should not use its start iteration
        return checkpoint

    def has_last_checkpoint(self):
        """True when a ``last_checkpoint`` tag file exists in save_dir."""
        save_file = os.path.join(self.save_dir, "last_checkpoint")
        return os.path.exists(save_file)

    def get_last_checkpoint_file(self):
        """Return the path recorded in ``last_checkpoint`` ("" on failure)."""
        save_file = os.path.join(self.save_dir, "last_checkpoint")
        try:
            with open(save_file, "r") as f:
                last_saved = f.read()
                last_saved = last_saved.strip()
        except IOError:
            # if file doesn't exist, maybe because it has just been
            # deleted by a separate process
            last_saved = ""
        return last_saved

    def has_checkpoint(self, resume):
        """True when checkpoint_{resume:07d}.pth exists in save_dir."""
        save_file = os.path.join(self.save_dir, "checkpoint_{:07d}.pth".format(resume))
        return os.path.exists(save_file)

    def get_checkpoint_file(self, resume):
        """Path of the checkpoint file for the given resume iteration."""
        save_file = os.path.join(self.save_dir, "checkpoint_{:07d}.pth".format(resume))
        return save_file

    def tag_last_checkpoint(self, last_filename):
        """Record ``last_filename`` in the ``last_checkpoint`` tag file."""
        save_file = os.path.join(self.save_dir, "last_checkpoint")
        with open(save_file, "w") as f:
            f.write(last_filename)

    def _load_file(self, f):
        # Always map to CPU so loading works regardless of saving device.
        return torch.load(f, map_location=torch.device("cpu"))

    def _load_model(self, checkpoint):
        load_state_dict(self.model, checkpoint.pop("model"))
class DetectronCheckpointer(Checkpointer):
    """Checkpointer that can also resolve catalog:// aliases, download
    http(s) URLs to a local cache, and convert Caffe2 ``.pkl`` checkpoints.
    """

    def __init__(
        self,
        cfg,
        model,
        optimizer=None,
        scheduler=None,
        save_dir="",
        save_to_disk=None,
        logger=None,
    ):
        super(DetectronCheckpointer, self).__init__(
            model, optimizer, scheduler, save_dir, save_to_disk, logger
        )
        self.cfg = cfg.clone()

    def _load_file(self, f):
        """Resolve/convert the checkpoint source, then defer to the base
        loader; wraps bare state dicts in {"model": ...}."""
        # catalog lookup
        if f.startswith("catalog://"):
            paths_catalog = import_file(
                "lib.config.paths_catalog", self.cfg.PATHS_CATALOG, True
            )
            catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://") :])
            self.logger.info("{} points to {}".format(f, catalog_f))
            f = catalog_f
        # download url files
        if f.startswith("http"):
            # if the file is a url path, download it and cache it
            cached_f = cache_url(f)
            self.logger.info("url {} cached in {}".format(f, cached_f))
            f = cached_f
        # convert Caffe2 checkpoint from pkl
        if f.endswith(".pkl"):
            return load_c2_format(self.cfg, f)
        # load native detectron.pytorch checkpoint
        loaded = super(DetectronCheckpointer, self)._load_file(f)
        if "model" not in loaded:
            loaded = dict(model=loaded)
        return loaded
class SceneParserCheckpointer(Checkpointer):
    """Scene-parser variant of DetectronCheckpointer: identical source
    resolution (catalog/url/pkl), but forwards ``cfg.inference`` as the
    base class's ``inference`` flag.
    """

    def __init__(
        self,
        cfg,
        model,
        optimizer=None,
        scheduler=None,
        save_dir="",
        save_to_disk=None,
        logger=None,
    ):
        # cfg.inference becomes Checkpointer's `inference` argument.
        super(SceneParserCheckpointer, self).__init__(
            model, optimizer, scheduler, save_dir, save_to_disk, logger, cfg.inference
        )
        self.cfg = cfg.clone()

    def _load_file(self, f):
        """Resolve/convert the checkpoint source, then defer to the base
        loader; wraps bare state dicts in {"model": ...}."""
        # catalog lookup
        if f.startswith("catalog://"):
            paths_catalog = import_file(
                "lib.config.paths_catalog", self.cfg.PATHS_CATALOG, True
            )
            catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://") :])
            self.logger.info("{} points to {}".format(f, catalog_f))
            f = catalog_f
        # download url files
        if f.startswith("http"):
            # if the file is a url path, download it and cache it
            cached_f = cache_url(f)
            self.logger.info("url {} cached in {}".format(f, cached_f))
            f = cached_f
        # convert Caffe2 checkpoint from pkl
        if f.endswith(".pkl"):
            return load_c2_format(self.cfg, f)
        # load native detectron.pytorch checkpoint
        loaded = super(SceneParserCheckpointer, self)._load_file(f)
        if "model" not in loaded:
            loaded = dict(model=loaded)
        return loaded
| 6,815 | 34.5 | 108 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/utils/comm.py | """
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import pickle
import time
import torch
import torch.distributed as dist
def get_world_size():
    """Number of processes in the default group; 1 when not distributed."""
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size()
def get_rank():
    """Rank of this process in the default group; 0 when not distributed."""
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    return dist.get_rank()
def is_main_process():
    """True only on rank 0 — the process that should log/save artifacts."""
    rank = get_rank()
    return rank == 0
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training
    """
    # Guard clauses: a barrier only makes sense in an initialized,
    # multi-process group.
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        # single process: nothing to gather
        return [data]
    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
    if local_size != max_size:
        # pad this rank's buffer with uninitialized bytes up to the common
        # max length; the pad is sliced off again below before unpickling
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        # truncate each buffer to its true (unpadded) length before unpickling
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # not distributed (or single process): nothing to reduce
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            values /= world_size
        # NOTE: on non-zero ranks the returned values are NOT the reduced
        # results -- dist.reduce only delivers the sum to dst=0.
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
| 3,372 | 27.584746 | 84 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/utils/model_zoo.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import sys
try:
from torch.hub import _download_url_to_file
from torch.hub import urlparse
from torch.hub import HASH_REGEX
except ImportError:
from torch.utils.model_zoo import _download_url_to_file
from torch.utils.model_zoo import urlparse
from torch.utils.model_zoo import HASH_REGEX
from .comm import is_main_process
from .comm import synchronize
# very similar to https://github.com/pytorch/pytorch/blob/master/torch/utils/model_zoo.py
# but with a few improvements and modifications
def cache_url(url, model_dir=None, progress=True):
    r"""Loads the Torch serialized object at the given URL.
    If the object is already present in `model_dir`, it's deserialized and
    returned. The filename part of the URL should follow the naming convention
    ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
    digits of the SHA256 hash of the contents of the file. The hash is used to
    ensure unique names and to verify the contents of the file.
    The default value of `model_dir` is ``$TORCH_HOME/models`` where
    ``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
    overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
    Args:
        url (string): URL of the object to download
        model_dir (string, optional): directory in which to save the object
        progress (bool, optional): whether or not to display a progress bar to stderr
    Example:
        >>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
    """
    if model_dir is None:
        # resolve the cache directory from the usual torch env vars
        torch_home = os.path.expanduser(os.getenv("TORCH_HOME", "~/.torch"))
        model_dir = os.getenv("TORCH_MODEL_ZOO", os.path.join(torch_home, "models"))
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    parts = urlparse(url)
    filename = os.path.basename(parts.path)
    if filename == "model_final.pkl":
        # workaround as pre-trained Caffe2 models from Detectron have all the same filename
        # so make the full path the filename by replacing / with _
        filename = parts.path.replace("/", "_")
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file) and is_main_process():
        # only the rank-0 process downloads; the others wait at synchronize()
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        hash_prefix = HASH_REGEX.search(filename)
        if hash_prefix is not None:
            hash_prefix = hash_prefix.group(1)
            # workaround: Caffe2 models don't have a hash, but follow the R-50 convention,
            # which matches the hash PyTorch uses. So we skip the hash matching
            # if the hash_prefix is less than 6 characters
            if len(hash_prefix) < 6:
                hash_prefix = None
        _download_url_to_file(url, cached_file, hash_prefix, progress=progress)
    # barrier so non-main ranks don't read a partially downloaded file
    synchronize()
    return cached_file
| 2,997 | 47.354839 | 135 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/utils/collect_env.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import PIL
from torch.utils.collect_env import get_pretty_env_info
def get_pil_version():
    """Return a newline-prefixed line reporting the installed Pillow version."""
    version = PIL.__version__
    return "\n Pillow ({})".format(version)
def collect_env_info():
    """Return torch's pretty environment report with the Pillow version appended."""
    return get_pretty_env_info() + get_pil_version()
| 338 | 21.6 | 71 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/utils/model_serialization.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import OrderedDict
import logging
import torch
from .imports import import_file
def align_and_update_state_dicts(model_state_dict, loaded_state_dict):
    """
    Strategy: suppose that the models that we will create will have prefixes appended
    to each of its keys, for example due to an extra level of nesting that the original
    pre-trained weights from ImageNet won't contain. For example, model.state_dict()
    might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
    res2.conv1.weight. We thus want to match both parameters together.
    For that, we look for each model weight, look among all loaded keys if there is one
    that is a suffix of the current weight name, and use it if that's the case.
    If multiple matches exist, take the one with longest size
    of the corresponding name. For example, for the same model as before, the pretrained
    weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
    we want to match backbone[0].body.conv1.weight to conv1.weight, and
    backbone[0].body.res2.conv1.weight to res2.conv1.weight.

    NOTE: ``model_state_dict`` is updated in place; nothing is returned.
    """
    current_keys = sorted(list(model_state_dict.keys()))
    loaded_keys = sorted(list(loaded_state_dict.keys()))
    # get a matrix of string matches, where each (i, j) entry correspond to the size of the
    # loaded_key string, if it matches
    match_matrix = [
        len(j) if i.endswith(j) else 0 for i in current_keys for j in loaded_keys
    ]
    match_matrix = torch.as_tensor(match_matrix).view(
        len(current_keys), len(loaded_keys)
    )
    # for each model key, pick the loaded key with the longest suffix match
    max_match_size, idxs = match_matrix.max(1)
    # remove indices that correspond to no-match
    idxs[max_match_size == 0] = -1
    # used for logging
    max_size = max([len(key) for key in current_keys]) if current_keys else 1
    max_size_loaded = max([len(key) for key in loaded_keys]) if loaded_keys else 1
    log_str_template = "{: <{}} loaded from {: <{}} of shape {}"
    # logger = logging.getLogger(__name__)
    logger = logging.getLogger("scene_graph_generation.checkpointer")
    missed_current_keys = []
    for idx_new, idx_old in enumerate(idxs.tolist()):
        if idx_old == -1:
            # no checkpoint weight matched this model key; keep its init value
            missed_current_keys.append(current_keys[idx_new])
            continue
        key = current_keys[idx_new]
        key_old = loaded_keys[idx_old]
        model_state_dict[key] = loaded_state_dict[key_old]
        # logger.info(
        #     log_str_template.format(
        #         key,
        #         max_size,
        #         key_old,
        #         max_size_loaded,
        #         tuple(loaded_state_dict[key_old].shape),
        #     )
        # )
    logger.info("missed keys: {}".format(missed_current_keys))
def strip_prefix_if_present(state_dict, prefix):
    """
    Return a copy of ``state_dict`` with ``prefix`` removed from the front of
    every key. If any key does not start with ``prefix``, the input mapping
    is returned unchanged (no partial stripping).

    Args:
        state_dict (dict): mapping of parameter names to values.
        prefix (str): leading substring to strip, e.g. ``"module."``.

    Returns:
        dict: an OrderedDict with stripped keys, or the original mapping.
    """
    keys = sorted(state_dict.keys())
    if not all(key.startswith(prefix) for key in keys):
        return state_dict
    stripped_state_dict = OrderedDict()
    for key, value in state_dict.items():
        # Slice off only the leading prefix. The previous
        # key.replace(prefix, "") also removed any *later* occurrences of
        # the prefix inside the key (e.g. "module.a.module.b" -> "a.b"),
        # silently corrupting such keys.
        stripped_state_dict[key[len(prefix):]] = value
    return stripped_state_dict
def load_state_dict(model, loaded_state_dict):
    """
    Load ``loaded_state_dict`` into ``model``, tolerating the ``module.``
    prefix added by (Distributed)DataParallel serialization and mismatched
    key nesting between the checkpoint and the model.
    """
    current_state = model.state_dict()
    # checkpoints saved from a DataParallel/DistributedDataParallel wrapper
    # prefix every key with "module."; drop it before matching
    checkpoint_state = strip_prefix_if_present(loaded_state_dict, prefix="module.")
    align_and_update_state_dicts(current_state, checkpoint_state)
    # strict load: every model key must be present in the merged dict
    model.load_state_dict(current_state)
| 3,683 | 42.857143 | 91 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/utils/visualize.py | import cv2
import torch
def select_top_predictions(predictions, confidence_threshold=0.2):
    """
    Keep only the detections whose `scores` field exceeds
    ``confidence_threshold`` and return them sorted by score, highest first.

    Arguments:
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `scores`.

    Returns:
        prediction (BoxList): the detected objects. Additional information
            of the detection properties can be found in the fields of
            the BoxList via `prediction.fields()`
    """
    scores = predictions.get_field("scores")
    keep = torch.nonzero(scores > confidence_threshold).squeeze(1)
    filtered = predictions[keep]
    # re-read scores from the filtered list, then sort descending
    order = filtered.get_field("scores").sort(0, descending=True)[1]
    return filtered[order]
def compute_colors_for_labels(labels, palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])):
    """
    Map integer class labels to deterministic pseudo-random RGB colors:
    each label is scaled by a fixed palette and taken modulo 255.
    """
    scaled = labels[:, None] * palette
    return (scaled % 255).numpy().astype("uint8")
def overlay_boxes(image, predictions):
    """
    Draw one 1-pixel rectangle per predicted box, colored by class label.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `labels`.
    """
    labels = predictions.get_field("labels")
    colors = compute_colors_for_labels(labels).tolist()
    for box, color in zip(predictions.bbox, colors):
        coords = box.to(torch.int64)
        top_left = tuple(coords[:2].tolist())
        bottom_right = tuple(coords[2:].tolist())
        image = cv2.rectangle(
            image, top_left, bottom_right, tuple(color), 1
        )
    return image
def overlay_class_names(image, predictions, categories):
    """
    Write "<class>: <score>" at the top-left corner of every predicted box.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `scores` and `labels`.
        categories: mapping from label index to class name.
    """
    scores = predictions.get_field("scores").tolist()
    label_names = [categories[i] for i in predictions.get_field("labels").tolist()]
    template = "{}: {:.2f}"
    for box, score, name in zip(predictions.bbox, scores, label_names):
        x, y = box[:2]
        text = template.format(name, score)
        cv2.putText(
            image, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1
        )
    return image
def overlay_question_answers(image, qas, max_num=10):
    """
    Write question/answer pairs onto the image, stacked upwards from the
    bottom-left corner (one line per pair).

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        qas: iterable of (question, answer) pairs.
        max_num (int): unused; kept for interface compatibility.
    """
    height = image.shape[0]
    template = "Question: {} \t Answer: {}"
    for i, qa in enumerate(qas):
        # each successive pair is drawn 20px above the previous one
        position = (20, height - 20 - 20 * i)
        text = template.format(qa[0], qa[1])
        cv2.putText(
            image, text, position, cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 0), 1
        )
    return image
| 3,482 | 35.663158 | 103 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/utils/imports.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
# Provide import_file() for both Python 3 (importlib) and Python 2 (imp).
# NOTE(review): torch._six was removed in recent PyTorch releases; this
# version check only works with older torch versions -- confirm the pinned
# torch version before upgrading.
if torch._six.PY3:
    import importlib
    import importlib.util
    import sys
    # from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
    def import_file(module_name, file_path, make_importable=False):
        """Load and execute a module from an arbitrary file path.

        Args:
            module_name (str): name to register the module under.
            file_path (str): path to the .py file to load.
            make_importable (bool): if True, insert the module into
                sys.modules so it can subsequently be imported by name.
        """
        spec = importlib.util.spec_from_file_location(module_name, file_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        if make_importable:
            sys.modules[module_name] = module
        return module
else:
    import imp
    def import_file(module_name, file_path, make_importable=None):
        # Python 2 fallback; imp.load_source registers the module itself,
        # so make_importable is accepted but ignored here.
        module = imp.load_source(module_name, file_path)
        return module
| 843 | 34.166667 | 168 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/data/datasets/voc.py | import os
import torch
import torch.utils.data
from PIL import Image
import sys
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
from maskrcnn_benchmark.structures.bounding_box import BoxList
class PascalVOCDataset(torch.utils.data.Dataset):
    """Pascal VOC detection dataset.

    ``__getitem__`` yields ``(image, target, index)`` where ``target`` is a
    BoxList (xyxy, 0-based pixel coordinates) carrying "labels" and
    "difficult" fields.
    """
    # index 0 is background; note the trailing space in "__background__ "
    # is preserved as-is because class_to_ind keys on the exact string
    CLASSES = (
        "__background__ ",
        "aeroplane",
        "bicycle",
        "bird",
        "boat",
        "bottle",
        "bus",
        "car",
        "cat",
        "chair",
        "cow",
        "diningtable",
        "dog",
        "horse",
        "motorbike",
        "person",
        "pottedplant",
        "sheep",
        "sofa",
        "train",
        "tvmonitor",
    )
    def __init__(self, data_dir, split, use_difficult=False, transforms=None):
        """
        Args:
            data_dir (str): VOC root containing Annotations/, JPEGImages/
                and ImageSets/Main/.
            split (str): image-set name, e.g. "train", "val" or "test".
            use_difficult (bool): if True, keep objects marked difficult.
            transforms (callable, optional): joint transform applied to
                (image, target).
        """
        self.root = data_dir
        self.image_set = split
        self.keep_difficult = use_difficult
        self.transforms = transforms
        # the %s placeholder in each path is filled with the image id
        self._annopath = os.path.join(self.root, "Annotations", "%s.xml")
        self._imgpath = os.path.join(self.root, "JPEGImages", "%s.jpg")
        self._imgsetpath = os.path.join(self.root, "ImageSets", "Main", "%s.txt")
        with open(self._imgsetpath % self.image_set) as f:
            self.ids = f.readlines()
        self.ids = [x.strip("\n") for x in self.ids]
        self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
        cls = PascalVOCDataset.CLASSES
        # class name -> contiguous label index, and the inverse mapping
        self.class_to_ind = dict(zip(cls, range(len(cls))))
        self.categories = dict(zip(range(len(cls)), cls))
    def __getitem__(self, index):
        # Returns (PIL RGB image, BoxList target clipped to the image, index).
        img_id = self.ids[index]
        img = Image.open(self._imgpath % img_id).convert("RGB")
        target = self.get_groundtruth(index)
        target = target.clip_to_image(remove_empty=True)
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target, index
    def __len__(self):
        return len(self.ids)
    def get_groundtruth(self, index):
        """Parse the VOC XML annotation for ``index`` into a BoxList."""
        img_id = self.ids[index]
        anno = ET.parse(self._annopath % img_id).getroot()
        anno = self._preprocess_annotation(anno)
        height, width = anno["im_info"]
        target = BoxList(anno["boxes"], (width, height), mode="xyxy")
        target.add_field("labels", anno["labels"])
        target.add_field("difficult", anno["difficult"])
        return target
    def _preprocess_annotation(self, target):
        """Extract boxes, class labels and difficult flags from a parsed XML root."""
        boxes = []
        gt_classes = []
        difficult_boxes = []
        TO_REMOVE = 1
        for obj in target.iter("object"):
            difficult = int(obj.find("difficult").text) == 1
            # optionally drop objects flagged as difficult
            if not self.keep_difficult and difficult:
                continue
            name = obj.find("name").text.lower().strip()
            bb = obj.find("bndbox")
            # Make pixel indexes 0-based
            # Refer to "https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/pascal_voc.py#L208-L211"
            box = [
                bb.find("xmin").text,
                bb.find("ymin").text,
                bb.find("xmax").text,
                bb.find("ymax").text,
            ]
            bndbox = tuple(
                map(lambda x: x - TO_REMOVE, list(map(int, box)))
            )
            boxes.append(bndbox)
            gt_classes.append(self.class_to_ind[name])
            difficult_boxes.append(difficult)
        size = target.find("size")
        im_info = tuple(map(int, (size.find("height").text, size.find("width").text)))
        res = {
            "boxes": torch.tensor(boxes, dtype=torch.float32),
            "labels": torch.tensor(gt_classes),
            "difficult": torch.tensor(difficult_boxes),
            "im_info": im_info,
        }
        return res
    def get_img_info(self, index):
        # Reads only the XML size header; avoids decoding the image file.
        img_id = self.ids[index]
        anno = ET.parse(self._annopath % img_id).getroot()
        size = anno.find("size")
        im_info = tuple(map(int, (size.find("height").text, size.find("width").text)))
        return {"height": im_info[0], "width": im_info[1]}
    def map_class_id_to_class_name(self, class_id):
        """Return the VOC class name for a contiguous label index."""
        return PascalVOCDataset.CLASSES[class_id]
| 4,168 | 29.654412 | 118 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/data/datasets/concat_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
class ConcatDataset(_ConcatDataset):
    """
    Same as torch.utils.data.dataset.ConcatDataset, but exposes an extra
    method for querying the sizes of the image
    """

    def get_idxs(self, idx):
        """Map a global index to (dataset index, index within that dataset)."""
        # cumulative_sizes[d] is the total length of datasets 0..d;
        # bisect_right finds the first dataset whose cumulative size exceeds idx
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        offset = 0 if dataset_idx == 0 else self.cumulative_sizes[dataset_idx - 1]
        return dataset_idx, idx - offset

    def get_img_info(self, idx):
        """Delegate get_img_info to the constituent dataset owning ``idx``."""
        dataset_idx, sample_idx = self.get_idxs(idx)
        return self.datasets[dataset_idx].get_img_info(sample_idx)
| 766 | 30.958333 | 72 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/data/datasets/coco.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torchvision
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
min_keypoints_per_image = 10
def _count_visible_keypoints(anno):
return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def has_valid_annotation(anno):
    """Decide whether an image's annotation list is usable for training."""
    # no annotations at all -> unusable
    if len(anno) == 0:
        return False
    # every box has (close to) zero area -> unusable
    if _has_only_empty_bbox(anno):
        return False
    # plain detection/segmentation task: a non-empty box is enough
    if "keypoints" not in anno[0]:
        return True
    # keypoint task: additionally require enough visible keypoints
    return _count_visible_keypoints(anno) >= min_keypoints_per_image
class COCODataset(torchvision.datasets.coco.CocoDetection):
    """COCO detection dataset yielding (image, BoxList target, index).

    Crowd annotations are dropped and category ids are remapped to a
    contiguous range starting at 1 (0 is reserved for background).
    """
    def __init__(
        self, ann_file, root, remove_images_without_annotations, transforms=None
    ):
        """
        Args:
            ann_file (str): path to the COCO annotation json.
            root (str): image directory.
            remove_images_without_annotations (bool): drop images with no
                usable annotation (see has_valid_annotation).
            transforms (callable, optional): joint transform for (img, target).
        """
        super(COCODataset, self).__init__(root, ann_file)
        # sort indices for reproducible results
        self.ids = sorted(self.ids)
        # filter images without detection annotations
        if remove_images_without_annotations:
            ids = []
            for img_id in self.ids:
                ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)
                anno = self.coco.loadAnns(ann_ids)
                if has_valid_annotation(anno):
                    ids.append(img_id)
            self.ids = ids
        self.categories = {cat['id']: cat['name'] for cat in self.coco.cats.values()}
        # COCO category ids are sparse; remap to contiguous labels 1..N
        self.json_category_id_to_contiguous_id = {
            v: i + 1 for i, v in enumerate(self.coco.getCatIds())
        }
        self.contiguous_category_id_to_json_id = {
            v: k for k, v in self.json_category_id_to_contiguous_id.items()
        }
        self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
        self._transforms = transforms
    def __getitem__(self, idx):
        """Return (image, BoxList target, idx) for the image at ``idx``."""
        img, anno = super(COCODataset, self).__getitem__(idx)
        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]
        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)
        # attach optional per-task fields only when present in the annotation
        if anno and "segmentation" in anno[0]:
            masks = [obj["segmentation"] for obj in anno]
            masks = SegmentationMask(masks, img.size, mode='poly')
            target.add_field("masks", masks)
        if anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = PersonKeypoints(keypoints, img.size)
            target.add_field("keypoints", keypoints)
        target = target.clip_to_image(remove_empty=True)
        if self._transforms is not None:
            img, target = self._transforms(img, target)
        return img, target, idx
    def get_img_info(self, index):
        # Returns the COCO image record (contains "height" and "width").
        img_id = self.id_to_img_map[index]
        img_data = self.coco.imgs[img_id]
        return img_data
| 3,783 | 35.038095 | 85 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/data/datasets/evaluation/coco/coco_eval.py | import logging
import tempfile
import os
import torch
from collections import OrderedDict
from tqdm import tqdm
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
def do_coco_evaluation(
    dataset,
    predictions,
    box_only,
    output_folder,
    iou_types,
    expected_results,
    expected_results_sigma_tol,
):
    """Run COCO-style evaluation over per-image ``predictions``.

    Args:
        dataset: COCO-style dataset (provides ``coco``, ``id_to_img_map``
            and ``get_img_info``).
        predictions (list[BoxList]): one prediction per dataset image.
        box_only (bool): if True, evaluate proposal recall only and return None.
        output_folder (str): where to write json results and .pth summaries.
        iou_types (iterable[str]): subset of {"bbox", "segm", "keypoints"}.
        expected_results: regression-test bounds checked via
            check_expected_results.
        expected_results_sigma_tol: tolerance (in std units) for those bounds.

    Returns:
        (COCOResults, dict) for the standard path; None when ``box_only``.
    """
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    if box_only:
        # RPN-style evaluation: average recall of proposals at several
        # gt-area buckets and proposal limits
        logger.info("Evaluating bbox proposals")
        areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
        res = COCOResults("box_proposal")
        for limit in [100, 1000]:
            for area, suffix in areas.items():
                stats = evaluate_box_proposals(
                    predictions, dataset, area=area, limit=limit
                )
                key = "AR{}@{:d}".format(suffix, limit)
                res.results["box_proposal"][key] = stats["ar"].item()
        logger.info(res)
        check_expected_results(res, expected_results, expected_results_sigma_tol)
        if output_folder:
            torch.save(res, os.path.join(output_folder, "box_proposals.pth"))
        return
    logger.info("Preparing results for COCO format")
    coco_results = {}
    if "bbox" in iou_types:
        logger.info("Preparing bbox results")
        coco_results["bbox"] = prepare_for_coco_detection(predictions, dataset)
    if "segm" in iou_types:
        logger.info("Preparing segm results")
        coco_results["segm"] = prepare_for_coco_segmentation(predictions, dataset)
    if 'keypoints' in iou_types:
        logger.info('Preparing keypoints results')
        coco_results['keypoints'] = prepare_for_coco_keypoint(predictions, dataset)
    results = COCOResults(*iou_types)
    logger.info("Evaluating predictions")
    for iou_type in iou_types:
        with tempfile.NamedTemporaryFile() as f:
            file_path = f.name
            if output_folder:
                # keep the per-iou-type json next to the other outputs
                file_path = os.path.join(output_folder, iou_type + ".json")
            res = evaluate_predictions_on_coco(
                dataset.coco, coco_results[iou_type], file_path, iou_type
            )
            results.update(res)
    logger.info(results)
    check_expected_results(results, expected_results, expected_results_sigma_tol)
    if output_folder:
        torch.save(results, os.path.join(output_folder, "coco_results.pth"))
    return results, coco_results
def prepare_for_coco_detection(predictions, dataset):
    """
    Convert per-image BoxList predictions into the flat list-of-dict format
    expected by the COCO bbox evaluation API (xywh boxes, original image
    ids and original COCO category ids).
    """
    # assert isinstance(dataset, COCODataset)
    coco_results = []
    for image_id, prediction in enumerate(predictions):
        original_id = dataset.id_to_img_map[image_id]
        if len(prediction) == 0:
            continue

        # rescale from the network input size back to the original image
        img_info = dataset.get_img_info(image_id)
        prediction = prediction.resize((img_info["width"], img_info["height"]))
        prediction = prediction.convert("xywh")

        boxes = prediction.bbox.tolist()
        scores = prediction.get_field("scores").tolist()
        labels = prediction.get_field("labels").tolist()
        # map contiguous training labels back to original COCO category ids
        mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]

        for k, box in enumerate(boxes):
            coco_results.append(
                {
                    "image_id": original_id,
                    "category_id": mapped_labels[k],
                    "bbox": box,
                    "score": scores[k],
                }
            )
    return coco_results
def prepare_for_coco_segmentation(predictions, dataset):
    """
    Convert per-image mask predictions into COCO RLE-encoded segmentation
    results (list of dicts) for the official segm evaluation.
    """
    import pycocotools.mask as mask_util
    import numpy as np
    masker = Masker(threshold=0.5, padding=1)
    # assert isinstance(dataset, COCODataset)
    coco_results = []
    for image_id, prediction in tqdm(enumerate(predictions)):
        original_id = dataset.id_to_img_map[image_id]
        if len(prediction) == 0:
            continue
        # rescale boxes/masks back to the original image resolution
        img_info = dataset.get_img_info(image_id)
        image_width = img_info["width"]
        image_height = img_info["height"]
        prediction = prediction.resize((image_width, image_height))
        masks = prediction.get_field("mask")
        # t = time.time()
        # Masker is necessary only if masks haven't been already resized.
        if list(masks.shape[-2:]) != [image_height, image_width]:
            masks = masker(masks.expand(1, -1, -1, -1, -1), prediction)
            masks = masks[0]
        # logger.info('Time mask: {}'.format(time.time() - t))
        # prediction = prediction.convert('xywh')
        # boxes = prediction.bbox.tolist()
        scores = prediction.get_field("scores").tolist()
        labels = prediction.get_field("labels").tolist()
        # rles = prediction.get_field('mask')
        # encode each binary mask as COCO run-length encoding (Fortran order)
        rles = [
            mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0]
            for mask in masks
        ]
        for rle in rles:
            # json-serializable counts string
            rle["counts"] = rle["counts"].decode("utf-8")
        mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
        coco_results.extend(
            [
                {
                    "image_id": original_id,
                    "category_id": mapped_labels[k],
                    "segmentation": rle,
                    "score": scores[k],
                }
                for k, rle in enumerate(rles)
            ]
        )
    return coco_results
def prepare_for_coco_keypoint(predictions, dataset):
    """
    Convert per-image keypoint predictions into the flat list-of-dict
    format expected by the COCO keypoints evaluation.
    """
    # assert isinstance(dataset, COCODataset)
    coco_results = []
    for image_id, prediction in enumerate(predictions):
        original_id = dataset.id_to_img_map[image_id]
        if len(prediction.bbox) == 0:
            continue
        # TODO replace with get_img_info?
        image_width = dataset.coco.imgs[original_id]['width']
        image_height = dataset.coco.imgs[original_id]['height']
        # rescale boxes and keypoints back to original image resolution
        prediction = prediction.resize((image_width, image_height))
        prediction = prediction.convert('xywh')
        boxes = prediction.bbox.tolist()
        scores = prediction.get_field('scores').tolist()
        labels = prediction.get_field('labels').tolist()
        keypoints = prediction.get_field('keypoints')
        keypoints = keypoints.resize((image_width, image_height))
        # flatten per-instance keypoints to the COCO [x1,y1,v1,x2,...] layout
        keypoints = keypoints.keypoints.view(keypoints.keypoints.shape[0], -1).tolist()
        mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
        coco_results.extend([{
            'image_id': original_id,
            'category_id': mapped_labels[k],
            'keypoints': keypoint,
            'score': scores[k]} for k, keypoint in enumerate(keypoints)])
    return coco_results
# inspired from Detectron
def evaluate_box_proposals(
    predictions, dataset, thresholds=None, area="all", limit=None
):
    """Evaluate detection proposal recall metrics. This function is a much
    faster alternative to the official COCO API recall evaluation code. However,
    it produces slightly different results.

    Args:
        predictions (list[BoxList]): per-image proposals carrying an
            "objectness" field.
        dataset: COCO-style dataset providing ground-truth annotations.
        thresholds: IoU thresholds; defaults to 0.5:0.05:0.95.
        area (str): restrict ground truth to one of the area buckets below.
        limit (int, optional): evaluate at most this many top proposals
            per image.

    Returns:
        dict with "ar" (average recall), per-threshold "recalls",
        "thresholds", per-gt "gt_overlaps" and the gt count "num_pos".
    """
    # Record max overlap value for each gt box
    # Return vector of overlap values
    areas = {
        "all": 0,
        "small": 1,
        "medium": 2,
        "large": 3,
        "96-128": 4,
        "128-256": 5,
        "256-512": 6,
        "512-inf": 7,
    }
    area_ranges = [
        [0 ** 2, 1e5 ** 2],  # all
        [0 ** 2, 32 ** 2],  # small
        [32 ** 2, 96 ** 2],  # medium
        [96 ** 2, 1e5 ** 2],  # large
        [96 ** 2, 128 ** 2],  # 96-128
        [128 ** 2, 256 ** 2],  # 128-256
        [256 ** 2, 512 ** 2],  # 256-512
        [512 ** 2, 1e5 ** 2],
    ]  # 512-inf
    assert area in areas, "Unknown area range: {}".format(area)
    area_range = area_ranges[areas[area]]
    gt_overlaps = []
    num_pos = 0
    for image_id, prediction in enumerate(predictions):
        original_id = dataset.id_to_img_map[image_id]
        img_info = dataset.get_img_info(image_id)
        image_width = img_info["width"]
        image_height = img_info["height"]
        prediction = prediction.resize((image_width, image_height))
        # sort predictions in descending order
        # TODO maybe remove this and make it explicit in the documentation
        inds = prediction.get_field("objectness").sort(descending=True)[1]
        prediction = prediction[inds]
        ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
        anno = dataset.coco.loadAnns(ann_ids)
        # crowd regions are excluded from recall computation
        gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
        gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4)  # guard against no boxes
        gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert(
            "xyxy"
        )
        gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
        if len(gt_boxes) == 0:
            continue
        # keep only ground truth inside the requested area bucket
        valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
        gt_boxes = gt_boxes[valid_gt_inds]
        num_pos += len(gt_boxes)
        if len(gt_boxes) == 0:
            continue
        if len(prediction) == 0:
            continue
        if limit is not None and len(prediction) > limit:
            prediction = prediction[:limit]
        overlaps = boxlist_iou(prediction, gt_boxes)
        _gt_overlaps = torch.zeros(len(gt_boxes))
        # greedy one-to-one matching: repeatedly pick the best remaining
        # (proposal, gt) pair and remove both from further consideration
        for j in range(min(len(prediction), len(gt_boxes))):
            # find which proposal box maximally covers each gt box
            # and get the iou amount of coverage for each gt box
            max_overlaps, argmax_overlaps = overlaps.max(dim=0)
            # find which gt box is 'best' covered (i.e. 'best' = most iou)
            gt_ovr, gt_ind = max_overlaps.max(dim=0)
            assert gt_ovr >= 0
            # find the proposal box that covers the best covered gt box
            box_ind = argmax_overlaps[gt_ind]
            # record the iou coverage of this gt box
            _gt_overlaps[j] = overlaps[box_ind, gt_ind]
            assert _gt_overlaps[j] == gt_ovr
            # mark the proposal box and the gt box as used
            overlaps[box_ind, :] = -1
            overlaps[:, gt_ind] = -1
        # append recorded iou coverage level
        gt_overlaps.append(_gt_overlaps)
    gt_overlaps = torch.cat(gt_overlaps, dim=0)
    gt_overlaps, _ = torch.sort(gt_overlaps)
    if thresholds is None:
        step = 0.05
        # +1e-5 so the 0.95 endpoint is included despite float rounding
        thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
    recalls = torch.zeros_like(thresholds)
    # compute recall for each iou threshold
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
    # ar = 2 * np.trapz(recalls, thresholds)
    ar = recalls.mean()
    return {
        "ar": ar,
        "recalls": recalls,
        "thresholds": thresholds,
        "gt_overlaps": gt_overlaps,
        "num_pos": num_pos,
    }
def evaluate_predictions_on_coco(
    coco_gt, coco_results, json_result_file, iou_type="bbox"
):
    """Dump ``coco_results`` to json and run the official COCOeval.

    Args:
        coco_gt: pycocotools COCO object with ground truth.
        coco_results (list[dict]): results in COCO submission format.
        json_result_file (str): path the results json is written to.
        iou_type (str): "bbox", "segm" or "keypoints".

    Returns:
        the evaluated (and summarized) COCOeval object.
    """
    import json
    with open(json_result_file, "w") as f:
        json.dump(coco_results, f)
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval
    # loadRes fails on an empty result list, so fall back to an empty COCO
    coco_dt = coco_gt.loadRes(str(json_result_file)) if coco_results else COCO()
    # coco_dt = coco_gt.loadRes(coco_results)
    coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval
class COCOResults(object):
    """Accumulates COCO evaluation metrics, keyed by iou type then metric name."""
    METRICS = {
        "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
        "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
        "box_proposal": [
            "AR@100",
            "ARs@100",
            "ARm@100",
            "ARl@100",
            "AR@1000",
            "ARs@1000",
            "ARm@1000",
            "ARl@1000",
        ],
        "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
    }

    def __init__(self, *iou_types):
        """Initialize every metric of every requested iou type to -1."""
        allowed_types = ("box_proposal", "bbox", "segm", "keypoints")
        assert all(iou_type in allowed_types for iou_type in iou_types)
        self.results = OrderedDict(
            (
                iou_type,
                OrderedDict(
                    (metric, -1) for metric in COCOResults.METRICS[iou_type]
                ),
            )
            for iou_type in iou_types
        )

    def update(self, coco_eval):
        """Copy the summary stats of a finished COCOeval into this object."""
        if coco_eval is None:
            return
        from pycocotools.cocoeval import COCOeval

        assert isinstance(coco_eval, COCOeval)
        stats = coco_eval.stats
        iou_type = coco_eval.params.iouType
        res = self.results[iou_type]
        # COCOeval.stats is ordered exactly like METRICS[iou_type]
        for idx, metric in enumerate(COCOResults.METRICS[iou_type]):
            res[metric] = stats[idx]

    def __repr__(self):
        out = '\n'
        for task, metrics in self.results.items():
            names = ', '.join(metrics.keys())
            vals = ', '.join('{:.4f}'.format(v) for v in metrics.values())
            out += 'Task: {}\n'.format(task)
            out += names + '\n'
            out += vals + '\n'
        return out
def check_expected_results(results, expected_results, sigma_tol):
    """
    Sanity-check actual metrics against expected (mean, std) bounds.

    For each (task, metric, (mean, std)) entry, the actual value must lie
    strictly inside mean +/- sigma_tol * std; a PASS line is logged at info
    level, a FAIL line at error level. No exception is raised either way.
    """
    if not expected_results:
        return

    logger = logging.getLogger("maskrcnn_benchmark.inference")
    for task, metric, (mean, std) in expected_results:
        actual_val = results.results[task][metric]
        lo, hi = mean - sigma_tol * std, mean + sigma_tol * std
        msg = (
            "{} > {} sanity check (actual vs. expected): "
            "{:.3f} vs. mean={:.4f}, std={:.4}, range=({:.4f}, {:.4f})"
        ).format(task, metric, actual_val, mean, std, lo, hi)
        if lo < actual_val < hi:
            logger.info("PASS: " + msg)
        else:
            logger.error("FAIL: " + msg)
| 14,055 | 34.405542 | 88 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/pair_matcher.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
class PairMatcher(object):
    """
    Assigns to each predicted "element" (e.g., an object pair) a ground-truth
    element. Each predicted element will have exactly zero or one matches;
    each ground-truth element may be assigned to zero or more predicted
    elements.
    Matching is based on the MxN match_pair_quality_matrix, that characterizes
    how well each (ground-truth, predicted)-pair match (e.g. IoU overlaps).
    The matcher returns a tensor of size N containing the index of the
    ground-truth element m that matches to prediction n. If there is no match,
    a negative sentinel value is returned.
    """

    BELOW_LOW_THRESHOLD = -1
    BETWEEN_THRESHOLDS = -2

    def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):
        """
        Args:
            high_threshold (float): quality values greater than or equal to
                this value are candidate matches.
            low_threshold (float): a lower quality threshold used to stratify
                matches into three levels:
                1) matches >= high_threshold
                2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold)
                3) BELOW_LOW_THRESHOLD matches in [0, low_threshold)
            allow_low_quality_matches (bool): if True, produce additional matches
                for predictions that have only low-quality match candidates. See
                set_low_quality_matches_ for more details.
        """
        assert low_threshold <= high_threshold
        self.high_threshold = high_threshold
        self.low_threshold = low_threshold
        self.allow_low_quality_matches = allow_low_quality_matches

    def __call__(self, match_pair_quality_matrix):
        """
        Args:
            match_pair_quality_matrix (Tensor[float]): an MxN tensor, containing the
                pairwise quality between M ground-truth pairs and N predicted pairs.
        Returns:
            matches (Tensor[int64]): an N tensor where N[i] is a matched gt in
            [0, M - 1] or a negative value indicating that prediction i could not
            be matched.
        Raises:
            ValueError: when the quality matrix is empty (no gt or no proposals).
        """
        if match_pair_quality_matrix.numel() == 0:
            # empty targets or proposals not supported during training.
            # BUG FIX: this branch previously read the undefined name
            # `match_quality_matrix`, raising NameError instead of ValueError.
            if match_pair_quality_matrix.shape[0] == 0:
                raise ValueError(
                    "No ground-truth boxes available for one of the images "
                    "during training")
            else:
                raise ValueError(
                    "No proposal boxes available for one of the images "
                    "during training")

        # match_pair_quality_matrix is M (gt) x N (predicted)
        # Max over gt elements (dim 0) to find best gt candidate for each prediction
        matched_vals, matches = match_pair_quality_matrix.max(dim=0)
        if self.allow_low_quality_matches:
            # Keep the pre-threshold assignment so low-quality matches can be
            # restored later.
            all_matches = matches.clone()

        # Assign candidate matches with low quality to negative (unassigned) values
        below_low_threshold = matched_vals < self.low_threshold
        between_thresholds = (matched_vals >= self.low_threshold) & (
            matched_vals < self.high_threshold
        )
        matches[below_low_threshold] = PairMatcher.BELOW_LOW_THRESHOLD
        matches[between_thresholds] = PairMatcher.BETWEEN_THRESHOLDS

        if self.allow_low_quality_matches:
            # BUG FIX: previously passed the undefined name
            # `match_quality_matrix` -> NameError whenever
            # allow_low_quality_matches was enabled.
            self.set_low_quality_matches_(
                matches, all_matches, match_pair_quality_matrix
            )
        return matches

    def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
        """
        Produce additional matches for predictions that have only low-quality matches.
        Specifically, for each ground-truth find the set of predictions that have
        maximum overlap with it (including ties); for each prediction in that set, if
        it is unmatched, then match it to the ground-truth with which it has the highest
        quality value.
        """
        # For each gt, find the prediction with which it has highest quality
        highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
        # Find highest quality match available, even if it is low, including ties
        gt_pred_pairs_of_highest_quality = torch.nonzero(
            match_quality_matrix == highest_quality_foreach_gt[:, None]
        )
        # Each row is a (gt index, prediction index) pair; a gt can appear in
        # several rows when multiple predictions tie for its best quality.
        pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]
        # Restore the original (pre-threshold) assignment for those predictions.
        matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
| 5,155 | 44.628319 | 88 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/matcher.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
class Matcher(object):
    """
    Assigns each predicted element (e.g. a box) to at most one ground-truth
    element, based on an MxN quality matrix (e.g. IoU overlaps) between the
    M ground-truth and N predicted elements.

    The result is a length-N int64 tensor holding, for each prediction, the
    index of its matched ground-truth, or one of the negative sentinels below
    when no acceptable match exists.
    """

    BELOW_LOW_THRESHOLD = -1
    BETWEEN_THRESHOLDS = -2

    def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):
        """
        Args:
            high_threshold (float): quality >= this value is a real match.
            low_threshold (float): qualities in [low, high) get the
                BETWEEN_THRESHOLDS sentinel; below low, BELOW_LOW_THRESHOLD.
            allow_low_quality_matches (bool): if True, additionally keep each
                ground-truth matched to its best prediction even when that
                best quality falls below the thresholds.
        """
        assert low_threshold <= high_threshold
        self.high_threshold = high_threshold
        self.low_threshold = low_threshold
        self.allow_low_quality_matches = allow_low_quality_matches

    def __call__(self, match_quality_matrix):
        """
        Args:
            match_quality_matrix (Tensor[float]): MxN pairwise quality scores
                between M ground-truth and N predicted elements.
        Returns:
            Tensor[int64]: length-N vector of matched gt indices, with
            negative sentinels for unmatched predictions.
        """
        if match_quality_matrix.numel() == 0:
            # Training cannot proceed without both gt boxes and proposals.
            if match_quality_matrix.shape[0] == 0:
                raise ValueError(
                    "No ground-truth boxes available for one of the images "
                    "during training")
            raise ValueError(
                "No proposal boxes available for one of the images "
                "during training")

        # Best gt candidate for every prediction (max over the gt dimension).
        best_quality, assignment = match_quality_matrix.max(dim=0)
        saved_assignment = (
            assignment.clone() if self.allow_low_quality_matches else None
        )

        # Demote low-quality candidates to the negative sentinel values.
        too_low = best_quality < self.low_threshold
        in_between = (best_quality >= self.low_threshold) & (
            best_quality < self.high_threshold
        )
        assignment[too_low] = Matcher.BELOW_LOW_THRESHOLD
        assignment[in_between] = Matcher.BETWEEN_THRESHOLDS

        if saved_assignment is not None:
            self.set_low_quality_matches_(
                assignment, saved_assignment, match_quality_matrix
            )
        return assignment

    def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
        """
        Restore matches for predictions that are some ground-truth's best
        candidate (including ties), even when that best quality is low.
        """
        # Best achievable quality for each gt.
        highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
        # All (gt, prediction) pairs attaining that per-gt maximum; ties kept.
        best_pairs = torch.nonzero(
            match_quality_matrix == highest_quality_foreach_gt[:, None]
        )
        # Column 1 holds prediction indices; give them back their
        # pre-threshold assignment.
        pred_inds_to_update = best_pairs[:, 1]
        matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
| 5,129 | 44.39823 | 88 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/make_layers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Miscellaneous utility functions
"""
import torch
from torch import nn
from torch.nn import functional as F
from ..config import cfg
from ..layers import Conv2d
from .poolers import Pooler
def get_group_gn(dim, dim_per_gp, num_groups):
    """Return the GroupNorm group count for `dim` channels.

    Exactly one of dim_per_gp / num_groups must be -1: the caller specifies
    either the channels per group (C/G) or the group count (G), never both,
    and `dim` must divide evenly either way.
    """
    assert dim_per_gp == -1 or num_groups == -1, \
        "GroupNorm: can only specify G or C/G."
    if dim_per_gp > 0:
        assert dim % dim_per_gp == 0, \
            "dim: {}, dim_per_gp: {}".format(dim, dim_per_gp)
        return dim // dim_per_gp
    assert dim % num_groups == 0, \
        "dim: {}, num_groups: {}".format(dim, num_groups)
    return num_groups
def group_norm(out_channels, affine=True, divisor=1):
    """Build a torch.nn.GroupNorm for `out_channels` channels.

    Group sizing and epsilon come from the global config; both the channel
    count and the configured group parameters are scaled down by `divisor`.
    """
    channels = out_channels // divisor
    per_group = cfg.MODEL.GROUP_NORM.DIM_PER_GP // divisor
    groups = cfg.MODEL.GROUP_NORM.NUM_GROUPS // divisor
    return torch.nn.GroupNorm(
        get_group_gn(channels, per_group, groups),
        channels,
        cfg.MODEL.GROUP_NORM.EPSILON,  # default: 1e-5
        affine,
    )
def make_conv3x3(
    in_channels,
    out_channels,
    dilation=1,
    stride=1,
    use_gn=False,
    use_relu=False,
    kaiming_init=True
):
    """Build a 3x3 convolution optionally followed by GroupNorm and ReLU.

    The conv carries no bias when GroupNorm is used (the norm supplies the
    affine shift); padding equals the dilation so spatial size is preserved
    at stride 1. Returns the bare Conv2d when neither norm nor activation is
    requested, otherwise an nn.Sequential.
    """
    conv = Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        bias=not use_gn,
    )
    if kaiming_init:
        nn.init.kaiming_normal_(
            conv.weight, mode="fan_out", nonlinearity="relu"
        )
    else:
        torch.nn.init.normal_(conv.weight, std=0.01)
    if not use_gn:
        nn.init.constant_(conv.bias, 0)
    layers = [conv]
    if use_gn:
        layers.append(group_norm(out_channels))
    if use_relu:
        layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers) if len(layers) > 1 else conv
def make_fc(dim_in, hidden_dim, use_gn=False):
    """Build a fully-connected layer, optionally followed by GroupNorm.

    Caffe2's XavierFill corresponds to kaiming_uniform_(a=1) in PyTorch,
    which is applied to the weight in both variants. With GroupNorm the
    linear layer has no bias; otherwise the bias is zero-initialized.
    """
    fc = nn.Linear(dim_in, hidden_dim, bias=not use_gn)
    nn.init.kaiming_uniform_(fc.weight, a=1)
    if use_gn:
        return nn.Sequential(fc, group_norm(hidden_dim))
    nn.init.constant_(fc.bias, 0)
    return fc
def conv_with_kaiming_uniform(use_gn=False, use_relu=False):
    """Return a conv-builder closure with Caffe2-style initialization.

    The returned callable builds a Conv2d (with "same"-style padding for odd
    kernels), initialized with kaiming_uniform_(a=1) — the PyTorch equivalent
    of Caffe2's XavierFill — optionally followed by GroupNorm and ReLU.
    """
    def make_conv(
        in_channels, out_channels, kernel_size, stride=1, dilation=1
    ):
        conv = Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=dilation * (kernel_size - 1) // 2,
            dilation=dilation,
            bias=not use_gn,
        )
        nn.init.kaiming_uniform_(conv.weight, a=1)
        if not use_gn:
            nn.init.constant_(conv.bias, 0)
        layers = [conv]
        if use_gn:
            layers.append(group_norm(out_channels))
        if use_relu:
            layers.append(nn.ReLU(inplace=True))
        return nn.Sequential(*layers) if len(layers) > 1 else conv
    return make_conv
| 3,496 | 27.430894 | 78 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Miscellaneous utility functions
"""
import torch
def cat(tensors, dim=0):
    """Concatenate a list/tuple of tensors along `dim`.

    When the sequence holds a single tensor it is returned as-is, avoiding
    the copy torch.cat would make.
    """
    assert isinstance(tensors, (list, tuple))
    return tensors[0] if len(tensors) == 1 else torch.cat(tensors, dim)
| 400 | 22.588235 | 97 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/balanced_positive_negative_pair_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# object pair sampler, implemented by Jianwei Yang
import torch
class BalancedPositiveNegativePairSampler(object):
    """
    Samples a fixed-size batch of elements per image, targeting a fixed
    fraction of positives; negatives fill whatever the positives leave.
    """

    def __init__(self, batch_size_per_image, positive_fraction):
        """
        Arguments:
            batch_size_per_image (int): number of elements selected per image
            positive_fraction (float): target fraction of positives per batch
        """
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction

    def __call__(self, matched_idxs):
        """
        Arguments:
            matched_idxs: list of per-image tensors holding -1 (ignored),
                0 (negative) or >= 1 (positive) labels.
        Returns:
            Two lists (positives, negatives) of per-image uint8 masks
            marking which elements were sampled.
        """
        pos_masks, neg_masks = [], []
        for labels in matched_idxs:
            pos_candidates = torch.nonzero(labels >= 1).squeeze(1)
            neg_candidates = torch.nonzero(labels == 0).squeeze(1)

            # Cap positives by both the target fraction and availability;
            # negatives then top the batch up, also capped by availability.
            n_pos = int(self.batch_size_per_image * self.positive_fraction)
            n_pos = min(pos_candidates.numel(), n_pos)
            n_neg = min(
                neg_candidates.numel(), self.batch_size_per_image - n_pos
            )

            # Sample without replacement via random permutations.
            keep_pos = pos_candidates[
                torch.randperm(
                    pos_candidates.numel(), device=pos_candidates.device
                )[:n_pos]
            ]
            keep_neg = neg_candidates[
                torch.randperm(
                    neg_candidates.numel(), device=neg_candidates.device
                )[:n_neg]
            ]

            # Materialize index selections as binary masks.
            pos_mask = torch.zeros_like(labels, dtype=torch.uint8)
            neg_mask = torch.zeros_like(labels, dtype=torch.uint8)
            pos_mask[keep_pos] = 1
            neg_mask[keep_neg] = 1
            pos_masks.append(pos_mask)
            neg_masks.append(neg_mask)
        return pos_masks, neg_masks
| 2,773 | 38.628571 | 90 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/poolers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from ..layers import ROIAlign
from .utils import cat
class LevelMapper(object):
    """Map each RoI to an FPN pyramid level via the FPN-paper heuristic."""

    def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-6):
        """
        Arguments:
            k_min (int): lowest usable pyramid level index
            k_max (int): highest usable pyramid level index
            canonical_scale (int): box scale assigned to canonical_level (s0)
            canonical_level (int): level of canonical-scale boxes (lvl0)
            eps (float): guards log2 against zero-area boxes
        """
        self.k_min = k_min
        self.k_max = k_max
        self.s0 = canonical_scale
        self.lvl0 = canonical_level
        self.eps = eps

    def __call__(self, boxlists):
        """
        Arguments:
            boxlists (list[BoxList])
        Returns:
            Tensor[int64]: per-RoI level index, offset so k_min maps to 0.
        """
        box_scales = torch.sqrt(cat([boxlist.area() for boxlist in boxlists]))
        # Eqn.(1) in FPN paper, clamped to the available levels.
        levels = torch.floor(
            self.lvl0 + torch.log2(box_scales / self.s0 + self.eps)
        )
        levels = torch.clamp(levels, min=self.k_min, max=self.k_max)
        return levels.to(torch.int64) - self.k_min
class Pooler(nn.Module):
    """
    Pooler for Detection with or without FPN.
    It currently hard-code ROIAlign in the implementation,
    but that can be made more generic later on.
    Also, the requirement of passing the scales is not strictly necessary, as they
    can be inferred from the size of the feature map / size of original image,
    which is available thanks to the BoxList.
    """
    def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        # One ROIAlign op per pyramid level, differing only in spatial scale.
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max)
    def convert_to_roi_format(self, boxes):
        # Pack per-image BoxLists into the (batch_idx, x1, y1, x2, y2) layout
        # expected by ROIAlign; the first column identifies the source image.
        concat_boxes = cat([b.bbox for b in boxes], dim=0)
        device, dtype = concat_boxes.device, concat_boxes.dtype
        ids = cat(
            [
                torch.full((len(b), 1), i, dtype=dtype, device=device)
                for i, b in enumerate(boxes)
            ],
            dim=0,
        )
        rois = torch.cat([ids, concat_boxes], dim=1)
        return rois
    def forward(self, x, boxes):
        """
        Arguments:
            x (list[Tensor]): feature maps for each level
            boxes (list[BoxList]): boxes to be used to perform the pooling operation.
        Returns:
            result (Tensor)
        """
        num_levels = len(self.poolers)
        rois = self.convert_to_roi_format(boxes)
        # Single-level (non-FPN) case: no level assignment needed.
        if num_levels == 1:
            return self.poolers[0](x[0], rois)
        # FPN case: route each RoI to its assigned level and scatter the
        # pooled outputs back into one tensor, preserving RoI order.
        levels = self.map_levels(boxes)
        num_rois = len(rois)
        num_channels = x[0].shape[1]
        output_size = self.output_size[0]
        dtype, device = x[0].dtype, x[0].device
        result = torch.zeros(
            (num_rois, num_channels, output_size, output_size),
            dtype=dtype,
            device=device,
        )
        for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)):
            idx_in_level = torch.nonzero(levels == level).squeeze(1)
            rois_per_level = rois[idx_in_level]
            # .to(dtype) keeps the fill consistent with the feature dtype
            # (e.g. under mixed precision).
            result[idx_in_level] = pooler(per_level_feature, rois_per_level).to(dtype)
        return result
def make_pooler(cfg, head_name):
    """Construct a Pooler from the config section named `head_name`
    (reads POOLER_RESOLUTION, POOLER_SCALES, POOLER_SAMPLING_RATIO)."""
    head_cfg = cfg.MODEL[head_name]
    side = head_cfg.POOLER_RESOLUTION
    return Pooler(
        output_size=(side, side),
        scales=head_cfg.POOLER_SCALES,
        sampling_ratio=head_cfg.POOLER_SAMPLING_RATIO,
    )
| 4,544 | 32.91791 | 90 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/balanced_positive_negative_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
class BalancedPositiveNegativeSampler(object):
    """
    Samples a fixed-size batch of elements per image, targeting a fixed
    fraction of positives; negatives fill whatever the positives leave.
    """

    def __init__(self, batch_size_per_image, positive_fraction):
        """
        Arguments:
            batch_size_per_image (int): number of elements selected per image
            positive_fraction (float): target fraction of positives per batch
        """
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction

    def __call__(self, matched_idxs):
        """
        Arguments:
            matched_idxs: list of per-image tensors holding -1 (ignored),
                0 (negative) or >= 1 (positive) labels.
        Returns:
            Two lists (positives, negatives) of per-image uint8 masks
            marking which elements were sampled.
        """
        pos_masks, neg_masks = [], []
        for labels in matched_idxs:
            pos_candidates = torch.nonzero(labels >= 1).squeeze(1)
            neg_candidates = torch.nonzero(labels == 0).squeeze(1)

            # Cap positives by both the target fraction and availability;
            # negatives then top the batch up, also capped by availability.
            n_pos = int(self.batch_size_per_image * self.positive_fraction)
            n_pos = min(pos_candidates.numel(), n_pos)
            n_neg = min(
                neg_candidates.numel(), self.batch_size_per_image - n_pos
            )

            # Sample without replacement via random permutations.
            keep_pos = pos_candidates[
                torch.randperm(
                    pos_candidates.numel(), device=pos_candidates.device
                )[:n_pos]
            ]
            keep_neg = neg_candidates[
                torch.randperm(
                    neg_candidates.numel(), device=neg_candidates.device
                )[:n_neg]
            ]

            # Materialize index selections as binary masks.
            pos_mask = torch.zeros_like(labels, dtype=torch.uint8)
            neg_mask = torch.zeros_like(labels, dtype=torch.uint8)
            pos_mask[keep_pos] = 1
            neg_mask[keep_neg] = 1
            pos_masks.append(pos_mask)
            neg_masks.append(neg_mask)
        return pos_masks, neg_masks
| 2,718 | 38.405797 | 90 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/box_coder.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import torch
class BoxCoder(object):
    """
    Encodes/decodes boxes to/from the (dx, dy, dw, dh) regression
    parameterization used to train box regressors.
    """

    def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):
        """
        Arguments:
            weights (4-element tuple): per-component scaling for
                (dx, dy, dw, dh).
            bbox_xform_clip (float): upper clamp applied to dw/dh before
                exponentiation during decoding.
        """
        self.weights = weights
        self.bbox_xform_clip = bbox_xform_clip

    def encode(self, reference_boxes, proposals):
        """
        Encode `reference_boxes` (e.g. ground truth) relative to `proposals`.
        Arguments:
            reference_boxes (Tensor): target boxes, (N, 4) xyxy
            proposals (Tensor): anchor/proposal boxes, (N, 4) xyxy
        Returns:
            Tensor: (N, 4) regression targets (dx, dy, dw, dh).
        """
        TO_REMOVE = 1  # TODO remove
        wx, wy, ww, wh = self.weights

        # Proposal widths/heights use the legacy inclusive "+1" convention.
        prop_w = proposals[:, 2] - proposals[:, 0] + TO_REMOVE
        prop_h = proposals[:, 3] - proposals[:, 1] + TO_REMOVE
        prop_cx = proposals[:, 0] + 0.5 * prop_w
        prop_cy = proposals[:, 1] + 0.5 * prop_h

        ref_w = reference_boxes[:, 2] - reference_boxes[:, 0] + TO_REMOVE
        ref_h = reference_boxes[:, 3] - reference_boxes[:, 1] + TO_REMOVE
        ref_cx = reference_boxes[:, 0] + 0.5 * ref_w
        ref_cy = reference_boxes[:, 1] + 0.5 * ref_h

        # Center deltas are normalized by the proposal size; sizes are
        # encoded as log ratios.
        return torch.stack(
            (
                wx * (ref_cx - prop_cx) / prop_w,
                wy * (ref_cy - prop_cy) / prop_h,
                ww * torch.log(ref_w / prop_w),
                wh * torch.log(ref_h / prop_h),
            ),
            dim=1,
        )

    def decode(self, rel_codes, boxes):
        """
        Apply encoded offsets `rel_codes` to reference `boxes`.
        Arguments:
            rel_codes (Tensor): encoded (dx, dy, dw, dh) offsets, possibly
                several sets of 4 per row (class-specific regression)
            boxes (Tensor): reference boxes, (N, 4) xyxy
        Returns:
            Tensor: decoded boxes, same shape as rel_codes.
        """
        boxes = boxes.to(rel_codes.dtype)

        TO_REMOVE = 1  # TODO remove
        widths = boxes[:, 2] - boxes[:, 0] + TO_REMOVE
        heights = boxes[:, 3] - boxes[:, 1] + TO_REMOVE
        ctr_x = boxes[:, 0] + 0.5 * widths
        ctr_y = boxes[:, 1] + 0.5 * heights

        wx, wy, ww, wh = self.weights
        dx = rel_codes[:, 0::4] / wx
        dy = rel_codes[:, 1::4] / wy
        # Clamp dw/dh so torch.exp() cannot overflow.
        dw = torch.clamp(rel_codes[:, 2::4] / ww, max=self.bbox_xform_clip)
        dh = torch.clamp(rel_codes[:, 3::4] / wh, max=self.bbox_xform_clip)

        pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
        pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
        pred_w = torch.exp(dw) * widths[:, None]
        pred_h = torch.exp(dh) * heights[:, None]

        pred_boxes = torch.zeros_like(rel_codes)
        pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w  # x1
        pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h  # y1
        # "- 1" is correct here: far edges are stored inclusively, matching
        # the "+1" width convention above (don't be fooled by the asymmetry).
        pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1  # x2
        pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1  # y2
        return pred_boxes
| 3,367 | 34.083333 | 86 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/backbone/resnet.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Variant of the resnet module that takes cfg as an argument.
Example usage. Strings may be specified in the config file.
model = ResNet(
"StemWithFixedBatchNorm",
"BottleneckWithFixedBatchNorm",
"ResNet50StagesTo4",
)
OR:
model = ResNet(
"StemWithGN",
"BottleneckWithGN",
"ResNet50StagesTo4",
)
Custom implementations may be written in user code and hooked in via the
`register_*` functions.
"""
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import nn
from lib.scene_parser.rcnn.layers import FrozenBatchNorm2d
from lib.scene_parser.rcnn.layers import Conv2d
from lib.scene_parser.rcnn.layers import DFConv2d
from lib.scene_parser.rcnn.modeling.make_layers import group_norm
from lib.scene_parser.rcnn.utils.registry import Registry
# ResNet stage specification
StageSpec = namedtuple(
    "StageSpec",
    [
        "index",  # Index of the stage, eg 1, 2, ..,. 5
        "block_count",  # Number of residual blocks in the stage
        "return_features",  # True => return the last feature map from this stage
    ],
)


def _build_stage_specs(triples):
    """Turn (index, block_count, return_features) triples into StageSpecs."""
    return tuple(
        StageSpec(index=i, block_count=c, return_features=r)
        for (i, c, r) in triples
    )


# -----------------------------------------------------------------------------
# Standard ResNet models
# -----------------------------------------------------------------------------
# ResNet-50 (including all stages)
ResNet50StagesTo5 = _build_stage_specs(
    ((1, 3, False), (2, 4, False), (3, 6, False), (4, 3, True))
)
# ResNet-50 up to stage 4 (excludes stage 5)
ResNet50StagesTo4 = _build_stage_specs(
    ((1, 3, False), (2, 4, False), (3, 6, True))
)
# ResNet-101 (including all stages)
ResNet101StagesTo5 = _build_stage_specs(
    ((1, 3, False), (2, 4, False), (3, 23, False), (4, 3, True))
)
# ResNet-101 up to stage 4 (excludes stage 5)
ResNet101StagesTo4 = _build_stage_specs(
    ((1, 3, False), (2, 4, False), (3, 23, True))
)
# ResNet-50-FPN (including all stages)
ResNet50FPNStagesTo5 = _build_stage_specs(
    ((1, 3, True), (2, 4, True), (3, 6, True), (4, 3, True))
)
# ResNet-101-FPN (including all stages)
ResNet101FPNStagesTo5 = _build_stage_specs(
    ((1, 3, True), (2, 4, True), (3, 23, True), (4, 3, True))
)
# ResNet-152-FPN (including all stages)
ResNet152FPNStagesTo5 = _build_stage_specs(
    ((1, 3, True), (2, 8, True), (3, 36, True), (4, 3, True))
)
class ResNet(nn.Module):
    """Configurable ResNet backbone: a stem followed by the residual stages
    named in cfg; forward() returns the outputs of the stages whose
    return_features flag is set."""
    def __init__(self, cfg):
        super(ResNet, self).__init__()
        # If we want to use the cfg in forward(), then we should make a copy
        # of it and store it for later use:
        # self.cfg = cfg.clone()
        # Translate string names to implementations
        stem_module = _STEM_MODULES[cfg.MODEL.RESNETS.STEM_FUNC]
        stage_specs = _STAGE_SPECS[cfg.MODEL.BACKBONE.CONV_BODY]
        transformation_module = _TRANSFORMATION_MODULES[cfg.MODEL.RESNETS.TRANS_FUNC]
        # Construct the stem module
        self.stem = stem_module(cfg)
        # Constuct the specified ResNet stages
        num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
        width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
        in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
        stage2_bottleneck_channels = num_groups * width_per_group
        stage2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
        self.stages = []
        self.return_features = {}
        for stage_spec in stage_specs:
            name = "layer" + str(stage_spec.index)
            # Channel counts double at every stage after stage 2.
            stage2_relative_factor = 2 ** (stage_spec.index - 1)
            bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor
            out_channels = stage2_out_channels * stage2_relative_factor
            stage_with_dcn = cfg.MODEL.RESNETS.STAGE_WITH_DCN[stage_spec.index -1]
            module = _make_stage(
                transformation_module,
                in_channels,
                bottleneck_channels,
                out_channels,
                stage_spec.block_count,
                num_groups,
                cfg.MODEL.RESNETS.STRIDE_IN_1X1,
                # Stages after the first downsample by 2 in their first block.
                first_stride=int(stage_spec.index > 1) + 1,
                dcn_config={
                    "stage_with_dcn": stage_with_dcn,
                    "with_modulated_dcn": cfg.MODEL.RESNETS.WITH_MODULATED_DCN,
                    "deformable_groups": cfg.MODEL.RESNETS.DEFORMABLE_GROUPS,
                }
            )
            in_channels = out_channels
            self.add_module(name, module)
            self.stages.append(name)
            self.return_features[name] = stage_spec.return_features
        # Optionally freeze (requires_grad=False) parts of the backbone
        self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT)
    def _freeze_backbone(self, freeze_at):
        # Disable gradients for the stem and the first `freeze_at - 1` stages;
        # a negative value leaves everything trainable.
        if freeze_at < 0:
            return
        for stage_index in range(freeze_at):
            if stage_index == 0:
                m = self.stem  # stage 0 is the stem
            else:
                m = getattr(self, "layer" + str(stage_index))
            for p in m.parameters():
                p.requires_grad = False
    def forward(self, x):
        # Run stem + stages sequentially, collecting the requested outputs.
        outputs = []
        x = self.stem(x)
        for stage_name in self.stages:
            x = getattr(self, stage_name)(x)
            if self.return_features[stage_name]:
                outputs.append(x)
        return outputs
class ResNetHead(nn.Module):
    """A stand-alone sequence of ResNet stages (e.g. the conv5/C5 head run on
    top of RoI features in C4-style detectors)."""
    def __init__(
        self,
        block_module,
        stages,
        num_groups=1,
        width_per_group=64,
        stride_in_1x1=True,
        stride_init=None,
        res2_out_channels=256,
        dilation=1,
        dcn_config={}
    ):
        super(ResNetHead, self).__init__()
        # Derive channel counts from the index of the first requested stage;
        # the head's input has half the channels of its output.
        stage2_relative_factor = 2 ** (stages[0].index - 1)
        stage2_bottleneck_channels = num_groups * width_per_group
        out_channels = res2_out_channels * stage2_relative_factor
        in_channels = out_channels // 2
        bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor
        # block_module arrives as a config string; look up the class.
        block_module = _TRANSFORMATION_MODULES[block_module]
        self.stages = []
        stride = stride_init
        for stage in stages:
            name = "layer" + str(stage.index)
            if not stride:
                stride = int(stage.index > 1) + 1
            module = _make_stage(
                block_module,
                in_channels,
                bottleneck_channels,
                out_channels,
                stage.block_count,
                num_groups,
                stride_in_1x1,
                first_stride=stride,
                dilation=dilation,
                dcn_config=dcn_config
            )
            # stride_init only applies to the first stage; later stages fall
            # back to the default rule above.
            stride = None
            self.add_module(name, module)
            self.stages.append(name)
        self.out_channels = out_channels
    def forward(self, x):
        # Run the stages sequentially.
        for stage in self.stages:
            x = getattr(self, stage)(x)
        return x
def _make_stage(
transformation_module,
in_channels,
bottleneck_channels,
out_channels,
block_count,
num_groups,
stride_in_1x1,
first_stride,
dilation=1,
dcn_config={}
):
blocks = []
stride = first_stride
for _ in range(block_count):
blocks.append(
transformation_module(
in_channels,
bottleneck_channels,
out_channels,
num_groups,
stride_in_1x1,
stride,
dilation=dilation,
dcn_config=dcn_config
)
)
stride = 1
in_channels = out_channels
return nn.Sequential(*blocks)
class Bottleneck(nn.Module):
    """Generic ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1 convs plus
    a skip connection), parameterized by the normalization constructor and an
    optional deformable 3x3 convolution."""
    def __init__(
        self,
        in_channels,
        bottleneck_channels,
        out_channels,
        num_groups,
        stride_in_1x1,
        stride,
        dilation,
        norm_func,
        dcn_config
    ):
        super(Bottleneck, self).__init__()
        # Projection shortcut, built only when the residual cannot be added
        # to the input directly (channel mismatch).
        self.downsample = None
        if in_channels != out_channels:
            down_stride = stride if dilation == 1 else 1
            self.downsample = nn.Sequential(
                Conv2d(
                    in_channels, out_channels,
                    kernel_size=1, stride=down_stride, bias=False
                ),
                norm_func(out_channels),
            )
            for modules in [self.downsample,]:
                for l in modules.modules():
                    if isinstance(l, Conv2d):
                        nn.init.kaiming_uniform_(l.weight, a=1)
        if dilation > 1:
            stride = 1 # reset to be 1
        # The original MSRA ResNet models have stride in the first 1x1 conv
        # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
        # stride in the 3x3 conv
        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
        self.conv1 = Conv2d(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
        )
        self.bn1 = norm_func(bottleneck_channels)
        # TODO: specify init for the above
        with_dcn = dcn_config.get("stage_with_dcn", False)
        if with_dcn:
            # Deformable 3x3 convolution for stages configured with DCN.
            deformable_groups = dcn_config.get("deformable_groups", 1)
            with_modulated_dcn = dcn_config.get("with_modulated_dcn", False)
            self.conv2 = DFConv2d(
                bottleneck_channels,
                bottleneck_channels,
                with_modulated_dcn=with_modulated_dcn,
                kernel_size=3,
                stride=stride_3x3,
                groups=num_groups,
                dilation=dilation,
                deformable_groups=deformable_groups,
                bias=False
            )
        else:
            self.conv2 = Conv2d(
                bottleneck_channels,
                bottleneck_channels,
                kernel_size=3,
                stride=stride_3x3,
                padding=dilation,
                bias=False,
                groups=num_groups,
                dilation=dilation
            )
            nn.init.kaiming_uniform_(self.conv2.weight, a=1)
        self.bn2 = norm_func(bottleneck_channels)
        self.conv3 = Conv2d(
            bottleneck_channels, out_channels, kernel_size=1, bias=False
        )
        self.bn3 = norm_func(out_channels)
        for l in [self.conv1, self.conv3,]:
            nn.init.kaiming_uniform_(l.weight, a=1)
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = F.relu_(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = F.relu_(out)
        out = self.conv3(out)
        out = self.bn3(out)
        # Project the shortcut if shapes differ, then residual add + ReLU.
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = F.relu_(out)
        return out
class BaseStem(nn.Module):
    """ResNet stem: 7x7 stride-2 conv -> norm -> ReLU -> 3x3 stride-2
    max-pool, taking a 3-channel image to STEM_OUT_CHANNELS channels at
    1/4 resolution."""

    def __init__(self, cfg, norm_func):
        super(BaseStem, self).__init__()
        out_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
        self.conv1 = Conv2d(
            3, out_channels, kernel_size=7, stride=2, padding=3, bias=False
        )
        self.bn1 = norm_func(out_channels)
        # Caffe2-style (XavierFill-equivalent) weight initialization.
        nn.init.kaiming_uniform_(self.conv1.weight, a=1)

    def forward(self, x):
        return F.max_pool2d(
            F.relu_(self.bn1(self.conv1(x))),
            kernel_size=3,
            stride=2,
            padding=1,
        )
class BottleneckWithFixedBatchNorm(Bottleneck):
    """Bottleneck specialization that pins the norm layer to FrozenBatchNorm2d.

    All other arguments are forwarded unchanged to ``Bottleneck``.
    """

    def __init__(
        self,
        in_channels,
        bottleneck_channels,
        out_channels,
        num_groups=1,
        stride_in_1x1=True,
        stride=1,
        dilation=1,
        # fixed: the default used to be a mutable ``{}`` shared by every call
        dcn_config=None,
    ):
        super(BottleneckWithFixedBatchNorm, self).__init__(
            in_channels=in_channels,
            bottleneck_channels=bottleneck_channels,
            out_channels=out_channels,
            num_groups=num_groups,
            stride_in_1x1=stride_in_1x1,
            stride=stride,
            dilation=dilation,
            norm_func=FrozenBatchNorm2d,
            dcn_config=dcn_config if dcn_config is not None else {},
        )
class StemWithFixedBatchNorm(BaseStem):
    """BaseStem whose normalization layer is FrozenBatchNorm2d."""

    def __init__(self, cfg):
        super(StemWithFixedBatchNorm, self).__init__(cfg, norm_func=FrozenBatchNorm2d)
class BottleneckWithGN(Bottleneck):
    """Bottleneck specialization that pins the norm layer to GroupNorm.

    All other arguments are forwarded unchanged to ``Bottleneck``.
    """

    def __init__(
        self,
        in_channels,
        bottleneck_channels,
        out_channels,
        num_groups=1,
        stride_in_1x1=True,
        stride=1,
        dilation=1,
        # fixed: the default used to be a mutable ``{}`` shared by every call
        dcn_config=None,
    ):
        super(BottleneckWithGN, self).__init__(
            in_channels=in_channels,
            bottleneck_channels=bottleneck_channels,
            out_channels=out_channels,
            num_groups=num_groups,
            stride_in_1x1=stride_in_1x1,
            stride=stride,
            dilation=dilation,
            norm_func=group_norm,
            dcn_config=dcn_config if dcn_config is not None else {},
        )
class StemWithGN(BaseStem):
    """BaseStem whose normalization layer is GroupNorm."""

    def __init__(self, cfg):
        super(StemWithGN, self).__init__(cfg, norm_func=group_norm)
# Maps config strings naming a residual-block implementation to its class.
_TRANSFORMATION_MODULES = Registry({
    "BottleneckWithFixedBatchNorm": BottleneckWithFixedBatchNorm,
    "BottleneckWithGN": BottleneckWithGN,
})
# Maps config strings naming a stem implementation to its class.
_STEM_MODULES = Registry({
    "StemWithFixedBatchNorm": StemWithFixedBatchNorm,
    "StemWithGN": StemWithGN,
})
# Maps backbone names (cfg.MODEL.BACKBONE.CONV_BODY) to stage specifications.
# Several names intentionally share one spec; the suffix (-X / -RETINANET)
# is presumably consumed elsewhere when assembling the full backbone.
_STAGE_SPECS = Registry({
    "R-50-C4": ResNet50StagesTo4,
    "R-50-C5": ResNet50StagesTo5,
    "R-101-C4": ResNet101StagesTo4,
    "R-101-C5": ResNet101StagesTo5,
    "R-50-FPN": ResNet50FPNStagesTo5,
    "R-50-FPN-X": ResNet50FPNStagesTo5,
    "R-50-FPN-RETINANET": ResNet50FPNStagesTo5,
    "R-101-FPN": ResNet101FPNStagesTo5,
    "R-101-FPN-X": ResNet101FPNStagesTo5,
    "R-101-FPN-RETINANET": ResNet101FPNStagesTo5,
    "R-152-FPN": ResNet152FPNStagesTo5,
    "R-152-FPN-X": ResNet152FPNStagesTo5,
})
| 14,321 | 30.476923 | 85 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/backbone/fbnet_builder.py | """
FBNet model builder
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import logging
import math
from collections import OrderedDict
import torch
import torch.nn as nn
from lib.scene_parser.rcnn.layers import (
BatchNorm2d,
Conv2d,
FrozenBatchNorm2d,
interpolate,
)
from lib.scene_parser.rcnn.layers.misc import _NewEmptyTensorOp
logger = logging.getLogger(__name__)
def _py2_round(x):
return math.floor(x + 0.5) if x >= 0.0 else math.ceil(x - 0.5)
def _get_divisible_by(num, divisible_by, min_val):
ret = int(num)
if divisible_by > 0 and num % divisible_by != 0:
ret = int((_py2_round(num / divisible_by) or min_val) * divisible_by)
return ret
# Registry of candidate micro-architecture blocks ("primitives").
# Each entry maps an op name (the "block_op_type" used in arch definitions)
# to a factory (C_in, C_out, expansion, stride, **kwargs) -> nn.Module.
# Naming scheme visible below: ir_k{K} = inverted residual with a KxK
# depthwise conv; _e{E} = expansion hard-coded to E; _s{G} = mid channel
# shuffle with pw_group=G; _se = squeeze-and-excitation; cdw=True entries
# use two cascaded depthwise convs.
PRIMITIVES = {
    "skip": lambda C_in, C_out, expansion, stride, **kwargs: Identity(
        C_in, C_out, stride
    ),
    "ir_k3": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, expansion, stride, **kwargs
    ),
    "ir_k5": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, expansion, stride, kernel=5, **kwargs
    ),
    "ir_k7": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, expansion, stride, kernel=7, **kwargs
    ),
    "ir_k1": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, expansion, stride, kernel=1, **kwargs
    ),
    "shuffle": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, expansion, stride, shuffle_type="mid", pw_group=4, **kwargs
    ),
    "basic_block": lambda C_in, C_out, expansion, stride, **kwargs: CascadeConv3x3(
        C_in, C_out, stride
    ),
    "shift_5x5": lambda C_in, C_out, expansion, stride, **kwargs: ShiftBlock5x5(
        C_in, C_out, expansion, stride
    ),
    # layer search 2
    "ir_k3_e1": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 1, stride, kernel=3, **kwargs
    ),
    "ir_k3_e3": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 3, stride, kernel=3, **kwargs
    ),
    "ir_k3_e6": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 6, stride, kernel=3, **kwargs
    ),
    "ir_k3_s4": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 4, stride, kernel=3, shuffle_type="mid", pw_group=4, **kwargs
    ),
    "ir_k5_e1": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 1, stride, kernel=5, **kwargs
    ),
    "ir_k5_e3": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 3, stride, kernel=5, **kwargs
    ),
    "ir_k5_e6": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 6, stride, kernel=5, **kwargs
    ),
    "ir_k5_s4": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 4, stride, kernel=5, shuffle_type="mid", pw_group=4, **kwargs
    ),
    # layer search se
    "ir_k3_e1_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 1, stride, kernel=3, se=True, **kwargs
    ),
    "ir_k3_e3_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 3, stride, kernel=3, se=True, **kwargs
    ),
    "ir_k3_e6_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 6, stride, kernel=3, se=True, **kwargs
    ),
    "ir_k3_s4_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in,
        C_out,
        4,
        stride,
        kernel=3,
        shuffle_type="mid",
        pw_group=4,
        se=True,
        **kwargs
    ),
    "ir_k5_e1_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 1, stride, kernel=5, se=True, **kwargs
    ),
    "ir_k5_e3_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 3, stride, kernel=5, se=True, **kwargs
    ),
    "ir_k5_e6_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 6, stride, kernel=5, se=True, **kwargs
    ),
    "ir_k5_s4_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in,
        C_out,
        4,
        stride,
        kernel=5,
        shuffle_type="mid",
        pw_group=4,
        se=True,
        **kwargs
    ),
    # layer search 3 (in addition to layer search 2)
    "ir_k3_s2": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 1, stride, kernel=3, shuffle_type="mid", pw_group=2, **kwargs
    ),
    "ir_k5_s2": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 1, stride, kernel=5, shuffle_type="mid", pw_group=2, **kwargs
    ),
    "ir_k3_s2_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in,
        C_out,
        1,
        stride,
        kernel=3,
        shuffle_type="mid",
        pw_group=2,
        se=True,
        **kwargs
    ),
    "ir_k5_s2_se": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in,
        C_out,
        1,
        stride,
        kernel=5,
        shuffle_type="mid",
        pw_group=2,
        se=True,
        **kwargs
    ),
    # layer search 4 (in addition to layer search 3)
    "ir_k3_sep": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, expansion, stride, kernel=3, cdw=True, **kwargs
    ),
    "ir_k33_e1": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 1, stride, kernel=3, cdw=True, **kwargs
    ),
    "ir_k33_e3": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 3, stride, kernel=3, cdw=True, **kwargs
    ),
    "ir_k33_e6": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 6, stride, kernel=3, cdw=True, **kwargs
    ),
    # layer search 5 (in addition to layer search 4)
    "ir_k7_e1": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 1, stride, kernel=7, **kwargs
    ),
    "ir_k7_e3": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 3, stride, kernel=7, **kwargs
    ),
    "ir_k7_e6": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 6, stride, kernel=7, **kwargs
    ),
    "ir_k7_sep": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, expansion, stride, kernel=7, cdw=True, **kwargs
    ),
    "ir_k7_sep_e1": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 1, stride, kernel=7, cdw=True, **kwargs
    ),
    "ir_k7_sep_e3": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 3, stride, kernel=7, cdw=True, **kwargs
    ),
    "ir_k7_sep_e6": lambda C_in, C_out, expansion, stride, **kwargs: IRFBlock(
        C_in, C_out, 6, stride, kernel=7, cdw=True, **kwargs
    ),
}
class Identity(nn.Module):
    """Skip connection.

    Applies a 1x1 ConvBNRelu only when the channel count or the stride has
    to change; otherwise the input passes through untouched.
    """

    def __init__(self, C_in, C_out, stride):
        super(Identity, self).__init__()
        if C_in != C_out or stride != 1:
            self.conv = ConvBNRelu(
                C_in,
                C_out,
                kernel=1,
                stride=stride,
                pad=0,
                no_bias=1,
                use_relu="relu",
                bn_type="bn",
            )
        else:
            self.conv = None

    def forward(self, x):
        return self.conv(x) if self.conv else x
class CascadeConv3x3(nn.Sequential):
    """Two stacked 3x3 convs (the first may stride), with a residual
    connection when the shape is preserved (stride 1 and C_in == C_out)."""

    def __init__(self, C_in, C_out, stride):
        assert stride in [1, 2]
        layers = (
            Conv2d(C_in, C_in, 3, stride, 1, bias=False),
            BatchNorm2d(C_in),
            nn.ReLU(inplace=True),
            Conv2d(C_in, C_out, 3, 1, 1, bias=False),
            BatchNorm2d(C_out),
        )
        super(CascadeConv3x3, self).__init__(*layers)
        self.res_connect = (stride == 1) and (C_in == C_out)

    def forward(self, x):
        out = super(CascadeConv3x3, self).forward(x)
        if self.res_connect:
            out += x
        return out
class Shift(nn.Module):
    """Zero-parameter "shift" operation, implemented as a fixed (non-trainable)
    depthwise convolution whose one-hot kernels translate each group of
    channels by a different spatial offset inside a kernel_size x kernel_size
    window.

    Channels are split as evenly as possible over the kernel_size**2 offsets;
    the remainder is assigned to the centre (identity) offset.
    """

    def __init__(self, C, kernel_size, stride, padding):
        super(Shift, self).__init__()
        self.C = C
        kernel = torch.zeros((C, 1, kernel_size, kernel_size), dtype=torch.float32)
        ch_idx = 0

        assert stride in [1, 2]
        self.stride = stride
        self.padding = padding
        self.kernel_size = kernel_size
        self.dilation = 1

        hks = kernel_size // 2
        ksq = kernel_size ** 2

        for i in range(kernel_size):
            for j in range(kernel_size):
                if i == hks and j == hks:
                    # the centre (identity) offset also absorbs the remainder
                    num_ch = C // ksq + C % ksq
                else:
                    num_ch = C // ksq
                kernel[ch_idx : ch_idx + num_ch, 0, i, j] = 1
                ch_idx += num_ch

        self.register_parameter("bias", None)
        # fixed weights: the shift pattern is not learned
        self.kernel = nn.Parameter(kernel, requires_grad=False)

    def forward(self, x):
        if x.numel() > 0:
            # depthwise conv (groups == C) with the fixed one-hot kernels
            return nn.functional.conv2d(
                x,
                self.kernel,
                self.bias,
                (self.stride, self.stride),
                (self.padding, self.padding),
                self.dilation,
                self.C,  # groups
            )

        # Empty input: compute the output shape analytically.
        # Fixed: the width entry of the padding pair previously passed
        # ``self.dilation`` instead of ``self.padding``, giving a wrong
        # width whenever padding != dilation.
        output_shape = [
            (i + 2 * p - (di * (k - 1) + 1)) // d + 1
            for i, p, di, k, d in zip(
                x.shape[-2:],
                (self.padding, self.padding),
                (self.dilation, self.dilation),
                (self.kernel_size, self.kernel_size),
                (self.stride, self.stride),
            )
        ]
        output_shape = [x.shape[0], self.C] + output_shape
        return _NewEmptyTensorOp.apply(x, output_shape)
class ShiftBlock5x5(nn.Sequential):
    """Inverted-residual block that uses a 5x5 Shift in place of a depthwise
    conv: pointwise expand -> shift -> pointwise project, plus a residual
    connection when the shape is preserved."""

    def __init__(self, C_in, C_out, expansion, stride):
        assert stride in [1, 2]
        self.res_connect = (stride == 1) and (C_in == C_out)
        C_mid = _get_divisible_by(C_in * expansion, 8, 8)
        super(ShiftBlock5x5, self).__init__(
            # pw expand
            Conv2d(C_in, C_mid, 1, 1, 0, bias=False),
            BatchNorm2d(C_mid),
            nn.ReLU(inplace=True),
            # 5x5 shift
            Shift(C_mid, 5, stride, 2),
            # pw-linear projection
            Conv2d(C_mid, C_out, 1, 1, 0, bias=False),
            BatchNorm2d(C_out),
        )

    def forward(self, x):
        out = super(ShiftBlock5x5, self).forward(x)
        if self.res_connect:
            out += x
        return out
class ChannelShuffle(nn.Module):
    """Interleave channels across ``groups`` groups:
    [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]."""

    def __init__(self, groups):
        super(ChannelShuffle, self).__init__()
        self.groups = groups

    def forward(self, x):
        batch, channels, height, width = x.size()
        groups = self.groups
        assert channels % groups == 0, "Incompatible group size {} for input channel {}".format(
            groups, channels
        )
        grouped = x.view(batch, groups, int(channels / groups), height, width)
        shuffled = grouped.permute(0, 2, 1, 3, 4).contiguous()
        return shuffled.view(batch, channels, height, width)
class ConvBNRelu(nn.Sequential):
    """Conv2d optionally followed by a norm layer and a ReLU.

    ``bn_type`` is "bn", "af" (frozen BN), "gn", or None.  For group norm it
    must be passed as the tuple ``("gn", num_groups)``.
    NOTE(review): a bare "gn" string passes the assert but would then fail on
    the undefined group count — confirm all callers pass the tuple form.
    """
    def __init__(
        self,
        input_depth,
        output_depth,
        kernel,
        stride,
        pad,
        no_bias,
        use_relu,
        bn_type,
        group=1,
        *args,
        **kwargs
    ):
        super(ConvBNRelu, self).__init__()
        assert use_relu in ["relu", None]
        # normalize ("gn", num_groups) into bn_type="gn" + gn_group
        if isinstance(bn_type, (list, tuple)):
            assert len(bn_type) == 2
            assert bn_type[0] == "gn"
            gn_group = bn_type[1]
            bn_type = bn_type[0]
        assert bn_type in ["bn", "af", "gn", None]
        assert stride in [1, 2, 4]
        op = Conv2d(
            input_depth,
            output_depth,
            kernel_size=kernel,
            stride=stride,
            padding=pad,
            bias=not no_bias,
            groups=group,
            *args,
            **kwargs
        )
        nn.init.kaiming_normal_(op.weight, mode="fan_out", nonlinearity="relu")
        if op.bias is not None:
            nn.init.constant_(op.bias, 0.0)
        # submodules are registered in execution order: conv -> bn -> relu
        self.add_module("conv", op)
        if bn_type == "bn":
            bn_op = BatchNorm2d(output_depth)
        elif bn_type == "gn":
            bn_op = nn.GroupNorm(num_groups=gn_group, num_channels=output_depth)
        elif bn_type == "af":
            bn_op = FrozenBatchNorm2d(output_depth)
        if bn_type is not None:
            self.add_module("bn", bn_op)
        if use_relu == "relu":
            self.add_module("relu", nn.ReLU(inplace=True))
class SEModule(nn.Module):
    """Squeeze-and-Excitation gate: global average pool -> 1x1 bottleneck
    (reduced by ``reduction``, at least 8 channels) -> ReLU -> 1x1 expand ->
    sigmoid, multiplied back onto the input."""

    reduction = 4

    def __init__(self, C):
        super(SEModule, self).__init__()
        squeezed = max(C // self.reduction, 8)
        self.op = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            Conv2d(C, squeezed, 1, 1, 0),
            nn.ReLU(inplace=True),
            Conv2d(squeezed, C, 1, 1, 0),
            nn.Sigmoid(),
        )

    def forward(self, x):
        return x * self.op(x)
class Upsample(nn.Module):
    """nn.Module wrapper around :func:`interpolate` with a fixed scale
    factor, mode, and align_corners setting."""

    def __init__(self, scale_factor, mode, align_corners=None):
        super(Upsample, self).__init__()
        self.scale = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        return interpolate(
            x,
            scale_factor=self.scale,
            mode=self.mode,
            align_corners=self.align_corners,
        )
def _get_upsample_op(stride):
assert (
stride in [1, 2, 4]
or stride in [-1, -2, -4]
or (isinstance(stride, tuple) and all(x in [-1, -2, -4] for x in stride))
)
scales = stride
ret = None
if isinstance(stride, tuple) or stride < 0:
scales = [-x for x in stride] if isinstance(stride, tuple) else -stride
stride = 1
ret = Upsample(scale_factor=scales, mode="nearest", align_corners=None)
return ret, stride
class IRFBlock(nn.Module):
    """Inverted residual block (MobileNetV2-style) with FBNet options.

    Pipeline: pointwise expand -> optional channel shuffle -> optional
    upsample (negative stride) -> depthwise conv (possibly cascaded when
    ``cdw``) -> pointwise linear -> optional squeeze-excite, with a residual
    connection when stride == 1 and input_depth == output_depth.
    """
    def __init__(
        self,
        input_depth,
        output_depth,
        expansion,
        stride,
        bn_type="bn",
        kernel=3,
        width_divisor=1,
        shuffle_type=None,
        pw_group=1,
        se=False,
        cdw=False,
        dw_skip_bn=False,
        dw_skip_relu=False,
    ):
        super(IRFBlock, self).__init__()
        assert kernel in [1, 3, 5, 7], kernel
        self.use_res_connect = stride == 1 and input_depth == output_depth
        self.output_depth = output_depth
        # expanded (middle) width, snapped to the width divisor
        mid_depth = int(input_depth * expansion)
        mid_depth = _get_divisible_by(mid_depth, width_divisor, width_divisor)
        # pw
        self.pw = ConvBNRelu(
            input_depth,
            mid_depth,
            kernel=1,
            stride=1,
            pad=0,
            no_bias=1,
            use_relu="relu",
            bn_type=bn_type,
            group=pw_group,
        )
        # negative stride to do upsampling
        self.upscale, stride = _get_upsample_op(stride)
        # dw
        if kernel == 1:
            self.dw = nn.Sequential()
        elif cdw:
            # cascaded depthwise: strided conv followed by a stride-1 conv
            dw1 = ConvBNRelu(
                mid_depth,
                mid_depth,
                kernel=kernel,
                stride=stride,
                pad=(kernel // 2),
                group=mid_depth,
                no_bias=1,
                use_relu="relu",
                bn_type=bn_type,
            )
            dw2 = ConvBNRelu(
                mid_depth,
                mid_depth,
                kernel=kernel,
                stride=1,
                pad=(kernel // 2),
                group=mid_depth,
                no_bias=1,
                use_relu="relu" if not dw_skip_relu else None,
                bn_type=bn_type if not dw_skip_bn else None,
            )
            self.dw = nn.Sequential(OrderedDict([("dw1", dw1), ("dw2", dw2)]))
        else:
            self.dw = ConvBNRelu(
                mid_depth,
                mid_depth,
                kernel=kernel,
                stride=stride,
                pad=(kernel // 2),
                group=mid_depth,
                no_bias=1,
                use_relu="relu" if not dw_skip_relu else None,
                bn_type=bn_type if not dw_skip_bn else None,
            )
        # pw-linear
        self.pwl = ConvBNRelu(
            mid_depth,
            output_depth,
            kernel=1,
            stride=1,
            pad=0,
            no_bias=1,
            use_relu=None,
            bn_type=bn_type,
            group=pw_group,
        )
        self.shuffle_type = shuffle_type
        if shuffle_type is not None:
            self.shuffle = ChannelShuffle(pw_group)
        self.se4 = SEModule(output_depth) if se else nn.Sequential()
        # NOTE: duplicate of the assignment near the top; kept as-is
        self.output_depth = output_depth
    def forward(self, x):
        y = self.pw(x)
        # only the "mid" shuffle placement is implemented
        if self.shuffle_type == "mid":
            y = self.shuffle(y)
        if self.upscale is not None:
            y = self.upscale(y)
        y = self.dw(y)
        y = self.pwl(y)
        if self.use_res_connect:
            y += x
        y = self.se4(y)
        return y
def _expand_block_cfg(block_cfg):
assert isinstance(block_cfg, list)
ret = []
for idx in range(block_cfg[2]):
cur = copy.deepcopy(block_cfg)
cur[2] = 1
cur[3] = 1 if idx >= 1 else cur[3]
ret.append(cur)
return ret
def expand_stage_cfg(stage_cfg):
    """Expand every block config in a single stage (see _expand_block_cfg)."""
    assert isinstance(stage_cfg, list)
    expanded = []
    for block_cfg in stage_cfg:
        expanded.extend(_expand_block_cfg(block_cfg))
    return expanded
def expand_stages_cfg(stage_cfgs):
    """Expand a list of stages (see expand_stage_cfg)."""
    assert isinstance(stage_cfgs, list)
    return [expand_stage_cfg(stage) for stage in stage_cfgs]
def _block_cfgs_to_list(block_cfgs):
    """Flatten per-stage block configs into a list of
    {"stage_idx", "block_idx", "block"} dicts, expanding repeats first."""
    assert isinstance(block_cfgs, list)
    flat = []
    for stage_idx, stage in enumerate(block_cfgs):
        for block_idx, block in enumerate(expand_stage_cfg(stage)):
            flat.append(
                {"stage_idx": stage_idx, "block_idx": block_idx, "block": block}
            )
    return flat
def _add_to_arch(arch, info, name):
""" arch = [{block_0}, {block_1}, ...]
info = [
# stage 0
[
block0_info,
block1_info,
...
], ...
]
convert to:
arch = [
{
block_0,
name: block0_info,
},
{
block_1,
name: block1_info,
}, ...
]
"""
assert isinstance(arch, list) and all(isinstance(x, dict) for x in arch)
assert isinstance(info, list) and all(isinstance(x, list) for x in info)
idx = 0
for stage_idx, stage in enumerate(info):
for block_idx, block in enumerate(stage):
assert (
arch[idx]["stage_idx"] == stage_idx
and arch[idx]["block_idx"] == block_idx
), "Index ({}, {}) does not match for block {}".format(
stage_idx, block_idx, arch[idx]
)
assert name not in arch[idx]
arch[idx][name] = block
idx += 1
def unify_arch_def(arch_def):
    """Normalize an arch definition into the flat format:

    { ..., "stages": [{"stage_idx": i, "block_idx": j, "block": [...],
                       "block_op_type": ...}, ...] }

    The "block_cfg" entry (whose "first"/"last"/etc. sub-entries are hoisted
    to the top level) and "block_op_type" are consumed and removed from the
    returned copy; the input is left untouched.
    """
    assert "block_cfg" in arch_def and "stages" in arch_def["block_cfg"]
    ret = copy.deepcopy(arch_def)
    assert "stages" not in ret
    # hoist everything under block_cfg ('first', 'last', ...) to the top level
    for key in arch_def["block_cfg"]:
        ret[key] = arch_def["block_cfg"][key]
    ret["stages"] = _block_cfgs_to_list(arch_def["block_cfg"]["stages"])
    del ret["block_cfg"]
    assert "block_op_type" in arch_def
    _add_to_arch(ret["stages"], arch_def["block_op_type"], "block_op_type")
    del ret["block_op_type"]
    return ret
def get_num_stages(arch_def):
    """Return 1 + the largest "stage_idx" (never less than 1)."""
    return max([0] + [block["stage_idx"] for block in arch_def["stages"]]) + 1
def get_blocks(arch_def, stage_indices=None, block_indices=None):
    """Return a copy of ``arch_def`` keeping only blocks whose stage / block
    index appears in the corresponding filter (None or [] keeps everything).

    The returned "stages" entries alias the input's block dicts, matching the
    original behaviour.
    """
    filtered = copy.deepcopy(arch_def)
    keep_all_stages = stage_indices in (None, [])
    keep_all_blocks = block_indices in (None, [])
    filtered["stages"] = [
        block
        for block in arch_def["stages"]
        if (keep_all_stages or block["stage_idx"] in stage_indices)
        and (keep_all_blocks or block["block_idx"] in block_indices)
    ]
    return filtered
class FBNetBuilder(object):
    """Stateful builder that turns unified arch definitions into nn.Modules.

    ``last_depth`` tracks the output channel count of the most recently built
    module and becomes the input depth of the next one, so the add_* methods
    must be called in network order (first -> blocks -> last).
    """
    def __init__(
        self,
        width_ratio,
        bn_type="bn",
        width_divisor=1,
        dw_skip_bn=False,
        dw_skip_relu=False,
    ):
        # global channel multiplier applied to every stage width
        self.width_ratio = width_ratio
        # -1 marks "no module built yet"
        self.last_depth = -1
        self.bn_type = bn_type
        self.width_divisor = width_divisor
        self.dw_skip_bn = dw_skip_bn
        self.dw_skip_relu = dw_skip_relu
    def add_first(self, stage_info, dim_in=3, pad=True):
        """Build the stem conv; stage_info is [c, s] or [c, s, kernel]."""
        # stage_info: [c, s, kernel]
        assert len(stage_info) >= 2
        channel = stage_info[0]
        stride = stage_info[1]
        out_depth = self._get_divisible_width(int(channel * self.width_ratio))
        kernel = 3
        if len(stage_info) > 2:
            kernel = stage_info[2]
        out = ConvBNRelu(
            dim_in,
            out_depth,
            kernel=kernel,
            stride=stride,
            pad=kernel // 2 if pad else 0,
            no_bias=1,
            use_relu="relu",
            bn_type=self.bn_type,
        )
        self.last_depth = out_depth
        return out
    def add_blocks(self, blocks):
        """Build an nn.Sequential from flattened block dicts.

        blocks: [{}, {}, ...] — already expanded to single repeats (n == 1),
        each with "stage_idx", "block_idx", "block" and "block_op_type".
        """
        assert isinstance(blocks, list) and all(
            isinstance(x, dict) for x in blocks
        ), blocks
        modules = OrderedDict()
        for block in blocks:
            stage_idx = block["stage_idx"]
            block_idx = block["block_idx"]
            block_op_type = block["block_op_type"]
            tcns = block["block"]
            n = tcns[2]
            assert n == 1
            nnblock = self.add_ir_block(tcns, [block_op_type])
            # unique name per (stage, block) position
            nn_name = "xif{}_{}".format(stage_idx, block_idx)
            assert nn_name not in modules
            modules[nn_name] = nnblock
        ret = nn.Sequential(modules)
        return ret
    def add_last(self, stage_info):
        """ skip last layer if channel_scale == 0
            use the same output channel if channel_scale < 0
        """
        assert len(stage_info) == 2
        channels = stage_info[0]
        channel_scale = stage_info[1]
        if channel_scale == 0.0:
            return nn.Sequential()
        if channel_scale > 0:
            # width_ratio only widens, never narrows, the last layer
            last_channel = (
                int(channels * self.width_ratio) if self.width_ratio > 1.0 else channels
            )
            last_channel = int(last_channel * channel_scale)
        else:
            # negative scale: derive the width from the previous layer
            last_channel = int(self.last_depth * (-channel_scale))
        last_channel = self._get_divisible_width(last_channel)
        if last_channel == 0:
            return nn.Sequential()
        dim_in = self.last_depth
        ret = ConvBNRelu(
            dim_in,
            last_channel,
            kernel=1,
            stride=1,
            pad=0,
            no_bias=1,
            use_relu="relu",
            bn_type=self.bn_type,
        )
        self.last_depth = last_channel
        return ret
    # def add_final_pool(self, model, blob_in, kernel_size):
    #     ret = model.AveragePool(blob_in, "final_avg", kernel=kernel_size, stride=1)
    #     return ret
    def _add_ir_block(
        self, dim_in, dim_out, stride, expand_ratio, block_op_type, **kwargs
    ):
        # instantiate the primitive and report its actual output depth
        ret = PRIMITIVES[block_op_type](
            dim_in,
            dim_out,
            expansion=expand_ratio,
            stride=stride,
            bn_type=self.bn_type,
            width_divisor=self.width_divisor,
            dw_skip_bn=self.dw_skip_bn,
            dw_skip_relu=self.dw_skip_relu,
            **kwargs
        )
        return ret, ret.output_depth
    def add_ir_block(self, tcns, block_op_types, **kwargs):
        """Build one block from a [t, c, n, s] config (n must be 1)."""
        t, c, n, s = tcns
        assert n == 1
        out_depth = self._get_divisible_width(int(c * self.width_ratio))
        dim_in = self.last_depth
        op, ret_depth = self._add_ir_block(
            dim_in,
            out_depth,
            stride=s,
            expand_ratio=t,
            block_op_type=block_op_types[0],
            **kwargs
        )
        self.last_depth = ret_depth
        return op
    def _get_divisible_width(self, width):
        # snap widths to the configured divisor
        ret = _get_divisible_by(int(width), self.width_divisor, self.width_divisor)
        return ret
| 24,970 | 29.085542 | 88 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/backbone/fbnet.py | from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
from collections import OrderedDict
from . import (
fbnet_builder as mbuilder,
fbnet_modeldef as modeldef,
)
import torch.nn as nn
from lib.scene_parser.rcnn.modeling import registry
from lib.scene_parser.rcnn.modeling.rpn import rpn
from lib.scene_parser.rcnn.modeling import poolers
logger = logging.getLogger(__name__)
def create_builder(cfg):
    """Create an FBNetBuilder and the unified arch definition from ``cfg``.

    The architecture comes either from a named preset (cfg.MODEL.FBNET.ARCH,
    looked up in ``modeldef.MODEL_ARCH``) or from an inline JSON definition
    (cfg.MODEL.FBNET.ARCH_DEF); when both are given they must agree.
    Returns (builder, arch_def).
    """
    bn_type = cfg.MODEL.FBNET.BN_TYPE
    # group norm travels as the tuple ("gn", num_groups)
    if bn_type == "gn":
        bn_type = (bn_type, cfg.GROUP_NORM.NUM_GROUPS)
    factor = cfg.MODEL.FBNET.SCALE_FACTOR
    arch = cfg.MODEL.FBNET.ARCH
    arch_def = cfg.MODEL.FBNET.ARCH_DEF
    if len(arch_def) > 0:
        arch_def = json.loads(arch_def)
    if arch in modeldef.MODEL_ARCH:
        if len(arch_def) > 0:
            assert (
                arch_def == modeldef.MODEL_ARCH[arch]
            ), "Two architectures with the same name {},\n{},\n{}".format(
                arch, arch_def, modeldef.MODEL_ARCH[arch]
            )
        arch_def = modeldef.MODEL_ARCH[arch]
    else:
        assert arch_def is not None and len(arch_def) > 0
    arch_def = mbuilder.unify_arch_def(arch_def)
    # an arch may pin the RPN anchor stride; the config must then match it
    rpn_stride = arch_def.get("rpn_stride", None)
    if rpn_stride is not None:
        assert (
            cfg.MODEL.RPN.ANCHOR_STRIDE[0] == rpn_stride
        ), "Needs to set cfg.MODEL.RPN.ANCHOR_STRIDE to {}, got {}".format(
            rpn_stride, cfg.MODEL.RPN.ANCHOR_STRIDE
        )
    width_divisor = cfg.MODEL.FBNET.WIDTH_DIVISOR
    dw_skip_bn = cfg.MODEL.FBNET.DW_CONV_SKIP_BN
    dw_skip_relu = cfg.MODEL.FBNET.DW_CONV_SKIP_RELU
    logger.info(
        "Building fbnet model with arch {} (without scaling):\n{}".format(
            arch, arch_def
        )
    )
    builder = mbuilder.FBNetBuilder(
        width_ratio=factor,
        bn_type=bn_type,
        width_divisor=width_divisor,
        dw_skip_bn=dw_skip_bn,
        dw_skip_relu=dw_skip_relu,
    )
    return builder, arch_def
def _get_trunk_cfg(arch_def):
    """Blocks for the backbone trunk: the stages listed under "backbone",
    defaulting to every stage except the last one."""
    num_stages = mbuilder.get_num_stages(arch_def)
    trunk_stages = arch_def.get("backbone", range(num_stages - 1))
    return mbuilder.get_blocks(arch_def, stage_indices=trunk_stages)
class FBNetTrunk(nn.Module):
    """FBNet backbone trunk: the "first" stem conv followed by all trunk
    stages; forward returns a single-element feature-map list."""

    def __init__(self, builder, arch_def, dim_in):
        super(FBNetTrunk, self).__init__()
        self.first = builder.add_first(arch_def["first"], dim_in=dim_in)
        trunk_cfg = _get_trunk_cfg(arch_def)
        self.stages = builder.add_blocks(trunk_cfg["stages"])

    def forward(self, x):
        # single-element list so the output matches the multi-map convention
        return [self.stages(self.first(x))]
@registry.BACKBONES.register("FBNet")
def add_conv_body(cfg, dim_in=3):
builder, arch_def = create_builder(cfg)
body = FBNetTrunk(builder, arch_def, dim_in)
model = nn.Sequential(OrderedDict([("body", body)]))
model.out_channels = builder.last_depth
return model
def _get_rpn_stage(arch_def, num_blocks):
    """Blocks used by the RPN head: the "rpn" stage, optionally trimmed to
    its last ``num_blocks`` blocks (num_blocks <= 0 keeps them all)."""
    ret = mbuilder.get_blocks(arch_def, stage_indices=arch_def.get("rpn"))
    if num_blocks > 0:
        logger.warn('Use last {} blocks in {} as rpn'.format(num_blocks, ret))
        block_count = len(ret["stages"])
        assert num_blocks <= block_count, "use block {}, block count {}".format(
            num_blocks, block_count
        )
        keep = range(block_count - num_blocks, block_count)
        ret = mbuilder.get_blocks(ret, block_indices=keep)
    return ret["stages"]
class FBNetRPNHead(nn.Module):
    """RPN feature head built from FBNet blocks, applied independently to
    each input feature map."""

    def __init__(self, cfg, in_channels, builder, arch_def):
        super(FBNetRPNHead, self).__init__()
        assert in_channels == builder.last_depth
        rpn_bn_type = cfg.MODEL.FBNET.RPN_BN_TYPE
        if len(rpn_bn_type) > 0:
            # the RPN head may use a different norm than the trunk
            builder.bn_type = rpn_bn_type
        stages = _get_rpn_stage(arch_def, cfg.MODEL.FBNET.RPN_HEAD_BLOCKS)
        self.head = builder.add_blocks(stages)
        self.out_channels = builder.last_depth

    def forward(self, x):
        return [self.head(feature) for feature in x]
@registry.RPN_HEADS.register("FBNet.rpn_head")
def add_rpn_head(cfg, in_channels, num_anchors):
builder, model_arch = create_builder(cfg)
builder.last_depth = in_channels
assert in_channels == builder.last_depth
# builder.name_prefix = "[rpn]"
rpn_feature = FBNetRPNHead(cfg, in_channels, builder, model_arch)
rpn_regressor = rpn.RPNHeadConvRegressor(
cfg, rpn_feature.out_channels, num_anchors)
return nn.Sequential(rpn_feature, rpn_regressor)
def _get_head_stage(arch, head_name, blocks):
    """Blocks for a head stage; falls back to the generic "head" entry when
    the specific ``head_name`` is absent from the arch definition."""
    stage_key = head_name if head_name in arch else "head"
    selected = mbuilder.get_blocks(
        arch, stage_indices=arch.get(stage_key), block_indices=blocks
    )
    return selected["stages"]
# name mapping for head names in arch def and cfg
# (arch-definition head name -> cfg section used e.g. to build its pooler)
ARCH_CFG_NAME_MAPPING = {
    "bbox": "ROI_BOX_HEAD",
    "kpts": "ROI_KEYPOINT_HEAD",
    "mask": "ROI_MASK_HEAD",
}
class FBNetROIHead(nn.Module):
    """ROI feature head: pooler followed by FBNet blocks and a final "last"
    conv.

    ``stride_init`` (0, 1 or 2) optionally overrides the stride of the first
    head block; ``last_layer_scale`` rescales the channel count of the final
    conv (see FBNetBuilder.add_last).
    """

    def __init__(
        self, cfg, in_channels, builder, arch_def,
        head_name, use_blocks, stride_init, last_layer_scale,
    ):
        super(FBNetROIHead, self).__init__()
        assert in_channels == builder.last_depth
        assert isinstance(use_blocks, list)

        self.pooler = poolers.make_pooler(cfg, ARCH_CFG_NAME_MAPPING[head_name])

        stage = _get_head_stage(arch_def, head_name, use_blocks)
        assert stride_init in [0, 1, 2]
        if stride_init != 0:
            # override the stride slot (index 3 of the [t, c, n, s] config)
            stage[0]["block"][3] = stride_init

        last_info = copy.deepcopy(arch_def["last"])
        last_info[1] = last_layer_scale

        # add_blocks must run before add_last (both advance builder.last_depth)
        self.head = nn.Sequential(OrderedDict([
            ("blocks", builder.add_blocks(stage)),
            ("last", builder.add_last(last_info)),
        ]))
        self.out_channels = builder.last_depth

    def forward(self, x, proposals):
        pooled = self.pooler(x, proposals)
        return self.head(pooled)
@registry.ROI_BOX_FEATURE_EXTRACTORS.register("FBNet.roi_head")
def add_roi_head(cfg, in_channels):
builder, model_arch = create_builder(cfg)
builder.last_depth = in_channels
# builder.name_prefix = "_[bbox]_"
return FBNetROIHead(
cfg, in_channels, builder, model_arch,
head_name="bbox",
use_blocks=cfg.MODEL.FBNET.DET_HEAD_BLOCKS,
stride_init=cfg.MODEL.FBNET.DET_HEAD_STRIDE,
last_layer_scale=cfg.MODEL.FBNET.DET_HEAD_LAST_SCALE,
)
@registry.ROI_KEYPOINT_FEATURE_EXTRACTORS.register("FBNet.roi_head_keypoints")
def add_roi_head_keypoints(cfg, in_channels):
builder, model_arch = create_builder(cfg)
builder.last_depth = in_channels
# builder.name_prefix = "_[kpts]_"
return FBNetROIHead(
cfg, in_channels, builder, model_arch,
head_name="kpts",
use_blocks=cfg.MODEL.FBNET.KPTS_HEAD_BLOCKS,
stride_init=cfg.MODEL.FBNET.KPTS_HEAD_STRIDE,
last_layer_scale=cfg.MODEL.FBNET.KPTS_HEAD_LAST_SCALE,
)
@registry.ROI_MASK_FEATURE_EXTRACTORS.register("FBNet.roi_head_mask")
def add_roi_head_mask(cfg, in_channels):
builder, model_arch = create_builder(cfg)
builder.last_depth = in_channels
# builder.name_prefix = "_[mask]_"
return FBNetROIHead(
cfg, in_channels, builder, model_arch,
head_name="mask",
use_blocks=cfg.MODEL.FBNET.MASK_HEAD_BLOCKS,
stride_init=cfg.MODEL.FBNET.MASK_HEAD_STRIDE,
last_layer_scale=cfg.MODEL.FBNET.MASK_HEAD_LAST_SCALE,
)
| 7,854 | 30.047431 | 83 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/backbone/backbone.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import OrderedDict
from torch import nn
from .. import registry
from lib.scene_parser.rcnn.modeling.make_layers import conv_with_kaiming_uniform
from . import fpn as fpn_module
from . import resnet
@registry.BACKBONES.register("R-50-C4")
@registry.BACKBONES.register("R-50-C5")
@registry.BACKBONES.register("R-101-C4")
@registry.BACKBONES.register("R-101-C5")
def build_resnet_backbone(cfg):
body = resnet.ResNet(cfg)
model = nn.Sequential(OrderedDict([("body", body)]))
model.out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
return model
@registry.BACKBONES.register("R-50-FPN")
@registry.BACKBONES.register("R-101-FPN")
@registry.BACKBONES.register("R-152-FPN")
@registry.BACKBONES.register("R-50-FPN-X")
@registry.BACKBONES.register("R-101-FPN-X")
@registry.BACKBONES.register("R-152-FPN-X")
def build_resnet_fpn_backbone(cfg):
body = resnet.ResNet(cfg)
in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
fpn = fpn_module.FPN(
in_channels_list=[
in_channels_stage2,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
],
out_channels=out_channels,
conv_block=conv_with_kaiming_uniform(
cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU
),
top_blocks=fpn_module.LastLevelMaxPool(),
)
model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)]))
model.out_channels = out_channels
return model
@registry.BACKBONES.register("R-50-FPN-RETINANET")
@registry.BACKBONES.register("R-101-FPN-RETINANET")
def build_resnet_fpn_p3p7_backbone(cfg):
body = resnet.ResNet(cfg)
in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
in_channels_p6p7 = in_channels_stage2 * 8 if cfg.MODEL.RETINANET.USE_C5 \
else out_channels
fpn = fpn_module.FPN(
in_channels_list=[
0,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
],
out_channels=out_channels,
conv_block=conv_with_kaiming_uniform(
cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU
),
top_blocks=fpn_module.LastLevelP6P7(in_channels_p6p7, out_channels),
)
model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)]))
model.out_channels = out_channels
return model
def build_backbone(cfg):
    """Look up and build the backbone named by cfg.MODEL.BACKBONE.CONV_BODY."""
    conv_body = cfg.MODEL.BACKBONE.CONV_BODY
    assert conv_body in registry.BACKBONES, \
        "cfg.MODEL.BACKBONE.CONV_BODY: {} are not registered in registry".format(
            conv_body
        )
    return registry.BACKBONES[conv_body](cfg)
| 2,868 | 33.566265 | 81 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/backbone/fpn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
class FPN(nn.Module):
    """
    Module that adds FPN on top of a list of feature maps.
    The feature maps are currently supposed to be in increasing depth
    order, and must be consecutive
    """
    def __init__(
        self, in_channels_list, out_channels, conv_block, top_blocks=None
    ):
        """
        Arguments:
            in_channels_list (list[int]): number of channels for each feature map that
                will be fed
            out_channels (int): number of channels of the FPN representation
            top_blocks (nn.Module or None): if provided, an extra operation will
                be performed on the output of the last (smallest resolution)
                FPN output, and the result will extend the result list
        """
        super(FPN, self).__init__()
        self.inner_blocks = []
        self.layer_blocks = []
        for idx, in_channels in enumerate(in_channels_list, 1):
            inner_block = "fpn_inner{}".format(idx)
            layer_block = "fpn_layer{}".format(idx)
            # in_channels == 0 marks a skipped level (e.g. the shallowest map
            # for RetinaNet); its name is left out of both lists
            if in_channels == 0:
                continue
            # 1x1 lateral conv and 3x3 output conv for this level
            inner_block_module = conv_block(in_channels, out_channels, 1)
            layer_block_module = conv_block(out_channels, out_channels, 3, 1)
            self.add_module(inner_block, inner_block_module)
            self.add_module(layer_block, layer_block_module)
            self.inner_blocks.append(inner_block)
            self.layer_blocks.append(layer_block)
        self.top_blocks = top_blocks
    def forward(self, x):
        """
        Arguments:
            x (list[Tensor]): feature maps for each feature level.
        Returns:
            results (tuple[Tensor]): feature maps after FPN layers.
                They are ordered from highest resolution first.
        """
        # start from the deepest (lowest-resolution) map and merge upward
        last_inner = getattr(self, self.inner_blocks[-1])(x[-1])
        results = []
        results.append(getattr(self, self.layer_blocks[-1])(last_inner))
        for feature, inner_block, layer_block in zip(
            x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1]
        ):
            if not inner_block:
                continue
            inner_top_down = F.interpolate(last_inner, scale_factor=2, mode="nearest")
            inner_lateral = getattr(self, inner_block)(feature)
            # TODO use size instead of scale to make it robust to different sizes
            # inner_top_down = F.upsample(last_inner, size=inner_lateral.shape[-2:],
            # mode='bilinear', align_corners=False)
            last_inner = inner_lateral + inner_top_down
            results.insert(0, getattr(self, layer_block)(last_inner))
        # optionally append extra coarse levels (max-pool or P6/P7)
        if isinstance(self.top_blocks, LastLevelP6P7):
            last_results = self.top_blocks(x[-1], results[-1])
            results.extend(last_results)
        elif isinstance(self.top_blocks, LastLevelMaxPool):
            last_results = self.top_blocks(results[-1])
            results.extend(last_results)
        return tuple(results)
class LastLevelMaxPool(nn.Module):
    """Generate one extra coarse FPN level by stride-2 max pooling of the input."""
    def forward(self, x):
        # kernel_size=1 with stride=2 simply subsamples every other pixel.
        pooled = F.max_pool2d(x, 1, 2, 0)
        return [pooled]
class LastLevelP6P7(nn.Module):
    """Produce the two extra RetinaNet pyramid levels P6 and P7.

    P6 is a stride-2 3x3 conv over P5 (or C5 when channel counts differ),
    and P7 is a stride-2 3x3 conv over ReLU(P6).
    """
    def __init__(self, in_channels, out_channels):
        super(LastLevelP6P7, self).__init__()
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for conv in (self.p6, self.p7):
            nn.init.kaiming_uniform_(conv.weight, a=1)
            nn.init.constant_(conv.bias, 0)
        # If channel counts already match, feed P5 instead of C5 in forward().
        self.use_P5 = in_channels == out_channels
    def forward(self, c5, p5):
        if self.use_P5:
            source = p5
        else:
            source = c5
        p6 = self.p6(source)
        p7 = self.p7(F.relu(p6))
        return [p6, p7]
| 3,939 | 38.4 | 86 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/detector/generalized_rcnn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Implements the Generalized R-CNN framework
"""
import torch
from torch import nn
from lib.scene_parser.rcnn.structures.image_list import to_image_list
from ..backbone import build_backbone
from ..rpn.rpn import build_rpn
from ..roi_heads.roi_heads import build_roi_heads
class GeneralizedRCNN(nn.Module):
    """
    Main class for Generalized R-CNN. Currently supports boxes and masks.
    It consists of three main parts:
    - backbone
    - rpn
    - heads: takes the features + the proposals from the RPN and computes
        detections / masks from it.
    """
    def __init__(self, cfg):
        super(GeneralizedRCNN, self).__init__()
        self.backbone = build_backbone(cfg)
        self.rpn = build_rpn(cfg, self.backbone.out_channels)
        self.roi_heads = build_roi_heads(cfg, self.backbone.out_channels)
    def forward(self, images, targets=None):
        """
        Arguments:
            images (list[Tensor] or ImageList): images to be processed
            targets (list[BoxList]): ground-truth boxes present in the image (optional)
        Returns:
            result (list[BoxList] or dict[Tensor]): the output from the model.
                During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns list[BoxList] contains additional fields
                like `scores`, `labels` and `mask` (for Mask R-CNN models).
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        image_list = to_image_list(images)
        feature_maps = self.backbone(image_list.tensors)
        proposals, proposal_losses = self.rpn(image_list, feature_maps, targets)
        if self.roi_heads:
            x, result, detector_losses = self.roi_heads(feature_maps, proposals, targets)
        else:
            # RPN-only models don't have roi_heads
            x = feature_maps
            result = proposals
            detector_losses = {}
        if not self.training:
            return result
        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)
        return losses
| 2,233 | 33.369231 | 87 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from lib.scene_parser.rcnn.structures.bounding_box import BoxList
from lib.scene_parser.rcnn.structures.boxlist_ops import boxlist_nms
from lib.scene_parser.rcnn.structures.boxlist_ops import cat_boxlist
from lib.scene_parser.rcnn.modeling.box_coder import BoxCoder
from lib.scene_parser.rcnn.structures.bounding_box_pair import BoxPairList
class PostProcessor(nn.Module):
    """
    From a set of classification scores, box regression and proposals,
    computes the post-processed boxes, and applies NMS to obtain the
    final results
    """
    def __init__(
        self,
        score_thresh=0.05,
        nms=0.5,
        detections_per_img=100,
        box_coder=None,
        cls_agnostic_bbox_reg=False,
        bbox_aug_enabled=False
    ):
        """
        Arguments:
            score_thresh (float): minimum class score kept by filter_results
            nms (float): IoU threshold used for per-class NMS
            detections_per_img (int): cap on detections kept per image
            box_coder (BoxCoder): defaults to BoxCoder(weights=(10., 10., 5., 5.))
                when None
            cls_agnostic_bbox_reg (bool): stored; unused in this class's visible code
            bbox_aug_enabled (bool): stored; unused in this class's visible code
        """
        super(PostProcessor, self).__init__()
        self.score_thresh = score_thresh
        self.nms = nms
        self.detections_per_img = detections_per_img
        if box_coder is None:
            box_coder = BoxCoder(weights=(10., 10., 5., 5.))
        self.box_coder = box_coder
        self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
        self.bbox_aug_enabled = bbox_aug_enabled
    def forward(self, x, boxes, use_freq_prior=False):
        """
        Arguments:
            x (tensor): the class logits from the model (when use_freq_prior is
                True, x already holds probabilities and softmax is skipped).
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image
        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra fields labels and scores
        """
        class_logits = x
        class_prob = class_logits if use_freq_prior else F.softmax(class_logits, -1)
        # TODO think about a representation of batch of boxes
        image_shapes = [box.size for box in boxes]
        boxes_per_image = [len(box) for box in boxes]
        num_classes = class_prob.shape[1]
        proposals = boxes
        # Split the flat (sum(boxes_per_image), num_classes) scores back per image.
        class_prob = class_prob.split(boxes_per_image, dim=0)
        results = []
        # NOTE(review): image_shapes and num_classes are computed but unused here;
        # the loop only attaches the per-image scores to the incoming boxes.
        for prob, boxes_per_img, image_shape in zip(
            class_prob, proposals, image_shapes
        ):
            boxes_per_img.add_field("scores", prob)
            results.append(boxes_per_img)
        return results
    def prepare_boxpairlist(self, boxes, scores, image_shape):
        """
        Returns BoxPairList from `boxes` and adds probability scores information
        as an extra field
        `boxes` has shape (#detections, 4 * #classes), where each row represents
        a list of predicted bounding boxes for each of the object classes in the
        dataset (including the background class). The detections in each row
        originate from the same object proposal.
        `scores` has shape (#detection, #classes), where each row represents a list
        of object detection confidence scores for each of the object classes in the
        dataset (including the background class). `scores[i, j]`` corresponds to the
        box at `boxes[i, j * 4:(j + 1) * 4]`.
        """
        # Each pair row carries 8 coordinates: subject box followed by object box.
        boxes = boxes.reshape(-1, 8)
        scores = scores.reshape(-1)
        boxlist = BoxPairList(boxes, image_shape, mode="xyxy")
        boxlist.add_field("scores", scores)
        return boxlist
    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)
        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            # Class j's box for each surviving detection.
            boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(
                boxlist_for_class, self.nms
            )
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
            )
            result.append(boxlist_for_class)
        result = cat_boxlist(result)
        number_of_detections = len(result)
        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            # kthvalue finds the score of the detections_per_img-th best box;
            # everything at or above it is kept.
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
            )
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result
def make_roi_relation_post_processor(cfg):
    """Construct the relation-head PostProcessor from config.

    Reads the score threshold, NMS threshold, detection cap and box-coder
    regression weights from cfg.MODEL.ROI_HEADS, plus the class-agnostic
    regression and bbox-augmentation flags.

    Arguments:
        cfg: global config node.
    Returns:
        PostProcessor configured from cfg.
    """
    # NOTE: the previously-read cfg.MODEL.ROI_HEADS.USE_FPN was never used; the
    # unused local has been removed.
    bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
    box_coder = BoxCoder(weights=bbox_reg_weights)
    score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH
    nms_thresh = cfg.MODEL.ROI_HEADS.NMS
    detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG
    cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG
    bbox_aug_enabled = cfg.TEST.BBOX_AUG.ENABLED
    postprocessor = PostProcessor(
        score_thresh,
        nms_thresh,
        detections_per_img,
        box_coder,
        cls_agnostic_bbox_reg,
        bbox_aug_enabled
    )
    return postprocessor
| 6,176 | 37.12963 | 88 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/roi_relation_feature_extractors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.nn import functional as F
from lib.scene_parser.rcnn.modeling import registry
from lib.scene_parser.rcnn.modeling.backbone import resnet
from lib.scene_parser.rcnn.modeling.poolers import Pooler
from lib.scene_parser.rcnn.modeling.make_layers import group_norm
from lib.scene_parser.rcnn.modeling.make_layers import make_fc
from .sparse_targets import _get_tensor_from_boxlist, _get_rel_inds
@registry.ROI_RELATION_FEATURE_EXTRACTORS.register("ResNet50Conv5ROIRelationFeatureExtractor")
class ResNet50Conv5ROIFeatureExtractor(nn.Module):
    """Relation feature extractor: pools each pair's union box and runs a ResNet conv5 head."""
    def __init__(self, config, in_channels):
        super(ResNet50Conv5ROIFeatureExtractor, self).__init__()
        pool_resolution = config.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pool_scales = config.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        pool_sampling_ratio = config.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        self.pooler = Pooler(
            output_size=(pool_resolution, pool_resolution),
            scales=pool_scales,
            sampling_ratio=pool_sampling_ratio,
        )
        # Conv5 (stage index 4) head applied on top of the pooled features.
        stage = resnet.StageSpec(index=4, block_count=3, return_features=False)
        self.head = resnet.ResNetHead(
            block_module=config.MODEL.RESNETS.TRANS_FUNC,
            stages=(stage,),
            num_groups=config.MODEL.RESNETS.NUM_GROUPS,
            width_per_group=config.MODEL.RESNETS.WIDTH_PER_GROUP,
            stride_in_1x1=config.MODEL.RESNETS.STRIDE_IN_1X1,
            stride_init=None,
            res2_out_channels=config.MODEL.RESNETS.RES2_OUT_CHANNELS,
            dilation=config.MODEL.RESNETS.RES5_DILATION
        )
        self.out_channels = self.head.out_channels
    def _union_box_feats(self, x, proposal_pairs):
        # Pool features over the union box of each subject-object pair.
        union_proposals = [pair.copy_with_union() for pair in proposal_pairs]
        pooled = self.pooler(x, union_proposals)
        return self.head(pooled)
    def forward(self, x, proposals, proposal_pairs):
        # Flatten per-image box lists into batch tensors plus image indices.
        obj_box_priors, obj_labels, im_inds = _get_tensor_from_boxlist(
            proposals, 'labels')
        # Index pairs for each relation candidate, with their image indices.
        _, proposal_idx_pairs, im_inds_pairs = _get_tensor_from_boxlist(
            proposal_pairs, 'idx_pairs')
        rel_inds = _get_rel_inds(im_inds, im_inds_pairs, proposal_idx_pairs)
        pair_feats = self._union_box_feats(x, proposal_pairs)
        return pair_feats, rel_inds
@registry.ROI_RELATION_FEATURE_EXTRACTORS.register("FPN2MLPRelationFeatureExtractor")
class FPN2MLPFeatureExtractor(nn.Module):
    """Two-fc MLP head over FPN-pooled relation features."""
    def __init__(self, cfg, in_channels):
        super(FPN2MLPFeatureExtractor, self).__init__()
        head_cfg = cfg.MODEL.ROI_RELATION_HEAD
        resolution = head_cfg.POOLER_RESOLUTION
        self.pooler = Pooler(
            output_size=(resolution, resolution),
            scales=head_cfg.POOLER_SCALES,
            sampling_ratio=head_cfg.POOLER_SAMPLING_RATIO,
        )
        representation_size = head_cfg.MLP_HEAD_DIM
        use_gn = head_cfg.USE_GN
        # Flattened size of the pooled feature map fed into fc6.
        flattened_size = in_channels * resolution ** 2
        self.fc6 = make_fc(flattened_size, representation_size, use_gn)
        self.fc7 = make_fc(representation_size, representation_size, use_gn)
        self.out_channels = representation_size
    def forward(self, x, proposals):
        pooled = self.pooler(x, proposals)
        flat = pooled.view(pooled.size(0), -1)
        hidden = F.relu(self.fc6(flat))
        return F.relu(self.fc7(hidden))
@registry.ROI_RELATION_FEATURE_EXTRACTORS.register("FPNXconv1fcRelationFeatureExtractor")
class FPNXconv1fcFeatureExtractor(nn.Module):
    """Stacked 3x3 convs followed by one fc, over FPN-pooled relation features."""
    def __init__(self, cfg, in_channels):
        super(FPNXconv1fcFeatureExtractor, self).__init__()
        head_cfg = cfg.MODEL.ROI_RELATION_HEAD
        resolution = head_cfg.POOLER_RESOLUTION
        self.pooler = Pooler(
            output_size=(resolution, resolution),
            scales=head_cfg.POOLER_SCALES,
            sampling_ratio=head_cfg.POOLER_SAMPLING_RATIO,
        )
        use_gn = head_cfg.USE_GN
        conv_head_dim = head_cfg.CONV_HEAD_DIM
        num_stacked_convs = head_cfg.NUM_STACKED_CONVS
        dilation = head_cfg.DILATION
        stack = []
        channels = in_channels
        for _ in range(num_stacked_convs):
            # Bias is dropped when group norm supplies the affine shift.
            stack.append(
                nn.Conv2d(
                    channels,
                    conv_head_dim,
                    kernel_size=3,
                    stride=1,
                    padding=dilation,
                    dilation=dilation,
                    bias=not use_gn,
                )
            )
            channels = conv_head_dim
            if use_gn:
                stack.append(group_norm(channels))
            stack.append(nn.ReLU(inplace=True))
        self.add_module("xconvs", nn.Sequential(*stack))
        # Initialize every conv in the stack; GN convs have no bias to zero.
        for layer in self.xconvs.modules():
            if isinstance(layer, nn.Conv2d):
                torch.nn.init.normal_(layer.weight, std=0.01)
                if not use_gn:
                    torch.nn.init.constant_(layer.bias, 0)
        representation_size = head_cfg.MLP_HEAD_DIM
        self.fc6 = make_fc(conv_head_dim * resolution ** 2, representation_size, use_gn=False)
        self.out_channels = representation_size
    def forward(self, x, proposals):
        pooled = self.pooler(x, proposals)
        conv_feats = self.xconvs(pooled)
        flat = conv_feats.view(conv_feats.size(0), -1)
        return F.relu(self.fc6(flat))
def make_roi_relation_feature_extractor(cfg, in_channels):
    """Instantiate the extractor named by cfg.MODEL.ROI_RELATION_HEAD.FEATURE_EXTRACTOR."""
    extractor_cls = registry.ROI_RELATION_FEATURE_EXTRACTORS[
        cfg.MODEL.ROI_RELATION_HEAD.FEATURE_EXTRACTOR
    ]
    return extractor_cls(cfg, in_channels)
| 6,349 | 36.797619 | 95 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/relation_heads.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Relation head for predicting relationship between object pairs.
# Written by Jianwei Yang (jw2yang@gatech.edu).
import numpy as np
import torch
from torch import nn
from lib.scene_parser.rcnn.structures.bounding_box_pair import BoxPairList
from lib.scene_parser.rcnn.structures.boxlist_ops import boxlist_iou, cat_boxlist
from ..roi_heads.box_head.roi_box_feature_extractors import make_roi_box_feature_extractor
from ..roi_heads.box_head.roi_box_predictors import make_roi_box_predictor
from ..roi_heads.box_head.inference import make_roi_box_post_processor
from .inference import make_roi_relation_post_processor
from .loss import make_roi_relation_loss_evaluator
from .sparse_targets import FrequencyBias, _get_tensor_from_boxlist, _get_rel_inds
from .relpn.relpn import make_relation_proposal_network
from .baseline.baseline import build_baseline_model
from .imp.imp import build_imp_model
from .msdn.msdn import build_msdn_model
from .grcnn.grcnn import build_grcnn_model
from .reldn.reldn import build_reldn_model
class ROIRelationHead(torch.nn.Module):
    """
    Generic Relation Head class.

    Wraps one of several relation predictors (baseline/imp/msdn/grcnn/reldn,
    chosen by cfg.MODEL.ALGORITHM), an optional relation proposal network
    (relpn), a frozen detector box branch, and a frequency-bias table loaded
    from disk.
    """
    def __init__(self, cfg, in_channels):
        super(ROIRelationHead, self).__init__()
        self.cfg = cfg
        # Select the relation predictor by algorithm name. NOTE(review): an
        # unrecognized cfg.MODEL.ALGORITHM leaves rel_predictor unset and fails
        # later with AttributeError rather than a clear error here.
        if cfg.MODEL.ALGORITHM == "sg_baseline":
            self.rel_predictor = build_baseline_model(cfg, in_channels)
        elif cfg.MODEL.ALGORITHM == "sg_imp":
            self.rel_predictor = build_imp_model(cfg, in_channels)
        elif cfg.MODEL.ALGORITHM == "sg_msdn":
            self.rel_predictor = build_msdn_model(cfg, in_channels)
        elif cfg.MODEL.ALGORITHM == "sg_grcnn":
            self.rel_predictor = build_grcnn_model(cfg, in_channels)
        elif cfg.MODEL.ALGORITHM == "sg_reldn":
            self.rel_predictor = build_reldn_model(cfg, in_channels)
        self.post_processor = make_roi_relation_post_processor(cfg)
        self.loss_evaluator = make_roi_relation_loss_evaluator(cfg)
        if self.cfg.MODEL.USE_RELPN:
            self.relpn = make_relation_proposal_network(cfg)
        self.freq_dist = None
        self.use_bias = self.cfg.MODEL.ROI_RELATION_HEAD.USE_BIAS
        self.use_gt_boxes = self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_BOXES
        if self.use_gt_boxes:
            self.box_avgpool = nn.AdaptiveAvgPool2d(1)
        # The detector's box branch is rebuilt here and immediately frozen; it
        # is only used to score/augment proposals, not trained.
        self.box_feature_extractor = make_roi_box_feature_extractor(cfg, in_channels)
        self.box_predictor = make_roi_box_predictor(cfg, self.box_feature_extractor.out_channels)
        self.box_post_processor = make_roi_box_post_processor(cfg)
        self._freeze_components(cfg)
        # if self.cfg.MODEL.USE_FREQ_PRIOR or self.cfg.MODEL.ROI_RELATION_HEAD.USE_BIAS:
        # print("Using frequency bias: ", cfg.MODEL.FREQ_PRIOR)
        # self.freq_dist_file = op.join(cfg.DATA_DIR, cfg.MODEL.FREQ_PRIOR)
        # NOTE(review): path is hardcoded relative to the working directory; the
        # commented-out lines above suggest it was meant to come from config.
        self.freq_dist_file = "freq_prior.npy"
        self.freq_dist = np.load(self.freq_dist_file)
        if self.cfg.MODEL.USE_FREQ_PRIOR:
            # never predict __no_relation__ for frequency prior
            self.freq_dist[:, :, 0] = 0
            # we use probability directly
            self.freq_bias = FrequencyBias(self.freq_dist)
        else:
            # Zero out the no-relation slot, then keep log-probabilities as a
            # tensor used for the relpn classification loss in forward().
            self.freq_dist[:, :, 0] = 0
            self.freq_dist = np.log(self.freq_dist + 1e-3)
            # self.freq_bias = FrequencyBias(self.freq_dist)
            self.freq_dist = torch.from_numpy(self.freq_dist)
        # if self.cfg.MODEL.USE_FREQ_PRIOR:
        #     self.freq_dist = torch.from_numpy(np.load("freq_prior.npy"))
        #     self.freq_dist[:, :, 0] = 0
    def _freeze_components(self, cfg):
        # Freeze the detector box branch so only relation components train.
        for param in self.box_feature_extractor.parameters():
            param.requires_grad = False
        for param in self.box_predictor.parameters():
            param.requires_grad = False
    def _get_proposal_pairs(self, proposals):
        """Build all ordered subject-object pairs per image (self-pairs removed)."""
        proposal_pairs = []
        for i, proposals_per_image in enumerate(proposals):
            box_subj = proposals_per_image.bbox
            box_obj = proposals_per_image.bbox
            # Broadcast N boxes into an N x N grid of (subject, object) pairs.
            box_subj = box_subj.unsqueeze(1).repeat(1, box_subj.shape[0], 1)
            box_obj = box_obj.unsqueeze(0).repeat(box_obj.shape[0], 1, 1)
            proposal_box_pairs = torch.cat((box_subj.view(-1, 4), box_obj.view(-1, 4)), 1)
            idx_subj = torch.arange(box_subj.shape[0]).view(-1, 1, 1).repeat(1, box_obj.shape[0], 1).to(proposals_per_image.bbox.device)
            idx_obj = torch.arange(box_obj.shape[0]).view(1, -1, 1).repeat(box_subj.shape[0], 1, 1).to(proposals_per_image.bbox.device)
            proposal_idx_pairs = torch.cat((idx_subj.view(-1, 1), idx_obj.view(-1, 1)), 1)
            # Drop pairs that pair a box with itself.
            keep_idx = (proposal_idx_pairs[:, 0] != proposal_idx_pairs[:, 1]).nonzero().view(-1)
            # if we filter non overlap bounding boxes
            if self.cfg.MODEL.ROI_RELATION_HEAD.FILTER_NON_OVERLAP:
                ious = boxlist_iou(proposals_per_image, proposals_per_image).view(-1)
                ious = ious[keep_idx]
                keep_idx = keep_idx[(ious > 0).nonzero().view(-1)]
            proposal_idx_pairs = proposal_idx_pairs[keep_idx]
            proposal_box_pairs = proposal_box_pairs[keep_idx]
            proposal_pairs_per_image = BoxPairList(proposal_box_pairs, proposals_per_image.size, proposals_per_image.mode)
            proposal_pairs_per_image.add_field("idx_pairs", proposal_idx_pairs)
            proposal_pairs.append(proposal_pairs_per_image)
        return proposal_pairs
    def forward(self, features, proposals, targets=None):
        """
        Arguments:
            features (list[Tensor]): feature-maps from possibly several levels
            proposals (list[BoxList]): proposal boxes
            targets (list[BoxList], optional): the ground-truth targets.
        Returns:
            x (Tensor): the result of the feature extractor
            proposals (list[BoxList]): during training, the subsampled proposals
                are returned. During testing, the predicted boxlists are returned
            losses (dict[Tensor]): During training, returns the losses for the
                head. During testing, returns an empty dict.
        """
        if self.training and self.use_gt_boxes:
            # augment proposals with ground-truth boxes
            targets_cp = [target.copy_with_fields(target.fields()) for target in targets]
            with torch.no_grad():
                x = self.box_feature_extractor(features, targets_cp)
                class_logits, box_regression = self.box_predictor(x)
            boxes_per_image = [len(proposal) for proposal in targets_cp]
            target_features = x.split(boxes_per_image, dim=0)
            for proposal, target_feature in zip(targets_cp, target_features):
                proposal.add_field("features", self.box_avgpool(target_feature))
            proposals_gt = self.box_post_processor((class_logits, box_regression), targets_cp, skip_nms=True)
            proposals = [cat_boxlist([proposal, proposal_gt]) for (proposal, proposal_gt) in zip(proposals, proposals_gt)]
        if self.training:
            # Faster R-CNN subsamples during training the proposals with a fixed
            # positive / negative ratio
            if self.cfg.MODEL.USE_RELPN:
                proposal_pairs, loss_relpn = self.relpn(proposals, targets)
            else:
                proposal_pairs = self.loss_evaluator.subsample(proposals, targets)
        else:
            with torch.no_grad():
                if self.cfg.MODEL.USE_RELPN:
                    proposal_pairs, relnesses = self.relpn(proposals)
                else:
                    proposal_pairs = self.loss_evaluator.subsample(proposals)
        if self.cfg.MODEL.USE_FREQ_PRIOR:
            """
            if use frequency prior, we directly use the statistics
            """
            x = None
            obj_class_logits = None
            _, obj_labels, im_inds = _get_tensor_from_boxlist(proposals, 'labels')
            _, proposal_idx_pairs, im_inds_pairs = _get_tensor_from_boxlist(proposal_pairs, 'idx_pairs')
            rel_inds = _get_rel_inds(im_inds, im_inds_pairs, proposal_idx_pairs)
            pred_class_logits = self.freq_bias.index_with_labels(
                torch.stack((obj_labels[rel_inds[:, 0]],obj_labels[rel_inds[:, 1]],), 1))
        else:
            # extract features that will be fed to the final classifier. The
            # feature_extractor generally corresponds to the pooler + heads
            x, obj_class_logits, pred_class_logits, obj_class_labels, rel_inds = \
                self.rel_predictor(features, proposals, proposal_pairs)
            if self.use_bias:
                # Add the (subject-label, object-label) frequency bias to the logits.
                pred_class_logits = pred_class_logits + self.freq_bias.index_with_labels(
                    torch.stack((
                        obj_class_labels[rel_inds[:, 0]],
                        obj_class_labels[rel_inds[:, 1]],
                    ), 1))
        if not self.training:
            # NOTE: if we have updated object class logits, then we need to update proposals as well!!!
            # if obj_class_logits is not None:
            #     boxes_per_image = [len(proposal) for proposal in proposals]
            #     obj_logits = obj_class_logits
            #     obj_scores, obj_labels = obj_class_logits[:, 1:].max(1)
            #     obj_labels = obj_labels + 1
            #     obj_logits = obj_logits.split(boxes_per_image, dim=0)
            #     obj_scores = obj_scores.split(boxes_per_image, dim=0)
            #     obj_labels = obj_labels.split(boxes_per_image, dim=0)
            #     for proposal, obj_logit, obj_score, obj_label in \
            #         zip(proposals, obj_logits, obj_scores, obj_labels):
            #         proposal.add_field("logits", obj_logit)
            #         proposal.add_field("scores", obj_score)
            #         proposal.add_field("labels", obj_label)
            result = self.post_processor((pred_class_logits), proposal_pairs, use_freq_prior=self.cfg.MODEL.USE_FREQ_PRIOR)
            # if self.cfg.MODEL.USE_RELPN:
            #     for res, relness in zip(result, relnesses):
            #         res.add_field("scores", res.get_field("scores") * relness.view(-1, 1))
            return x, result, {}
        loss_obj_classifier = 0
        if obj_class_logits is not None:
            loss_obj_classifier = self.loss_evaluator.obj_classification_loss(proposals, [obj_class_logits])
        if self.cfg.MODEL.USE_RELPN:
            # NOTE(review): 151 and 51 look like Visual Genome object / predicate
            # class counts hardcoded here — confirm against the dataset config.
            idx = obj_class_labels[rel_inds[:, 0]] * 151 + obj_class_labels[rel_inds[:, 1]]
            freq_prior = self.freq_dist.view(-1, 51)[idx].cuda()
            loss_pred_classifier = self.relpn.pred_classification_loss([pred_class_logits], freq_prior=freq_prior)
            return (
                x,
                proposal_pairs,
                dict(loss_obj_classifier=loss_obj_classifier,
                     loss_relpn = loss_relpn,
                     loss_pred_classifier=loss_pred_classifier),
            )
        else:
            loss_pred_classifier = self.loss_evaluator([pred_class_logits])
            return (
                x,
                proposal_pairs,
                dict(loss_obj_classifier=loss_obj_classifier,
                     loss_pred_classifier=loss_pred_classifier),
            )
def build_roi_relation_head(cfg, in_channels):
    """Factory for the relation head.

    Currently always returns a ROIRelationHead; register a new class and make
    it a config parameter if an alternative implementation is needed.
    """
    return ROIRelationHead(cfg, in_channels)
| 11,698 | 47.745833 | 136 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/roi_relation_box_predictors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from lib.scene_parser.rcnn.modeling import registry
from torch import nn
@registry.ROI_RELATION_BOX_PREDICTOR.register("FastRCNNPredictor")
class FastRCNNPredictor(nn.Module):
    """Classification-only predictor: global-average-pools conv features, then scores them."""
    def __init__(self, config, in_channels):
        super(FastRCNNPredictor, self).__init__()
        assert in_channels is not None
        num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.cls_score = nn.Linear(in_channels, num_classes)
        # Small Gaussian weights, zero bias — standard detector-head init.
        nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
        nn.init.constant_(self.cls_score.bias, 0)
    def forward(self, x):
        pooled = self.avgpool(x)
        flat = pooled.view(pooled.size(0), -1)
        return self.cls_score(flat)
@registry.ROI_RELATION_BOX_PREDICTOR.register("FPNPredictor")
class FPNPredictor(nn.Module):
    """Score and box-delta predictor over flattened FPN ROI features."""
    def __init__(self, cfg, in_channels):
        super(FPNPredictor, self).__init__()
        num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        # Class-agnostic regression shares one box per fg/bg pair.
        if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
            num_bbox_reg_classes = 2
        else:
            num_bbox_reg_classes = num_classes
        self.cls_score = nn.Linear(in_channels, num_classes)
        self.bbox_pred = nn.Linear(in_channels, num_bbox_reg_classes * 4)
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        nn.init.constant_(self.cls_score.bias, 0)
        nn.init.constant_(self.bbox_pred.bias, 0)
    def forward(self, x):
        if x.ndimension() == 4:
            # Only 1x1 spatial maps are accepted; flatten to (N, C).
            assert list(x.shape[2:]) == [1, 1]
            x = x.view(x.size(0), -1)
        return self.cls_score(x), self.bbox_pred(x)
def make_roi_relation_box_predictor(cfg, in_channels):
    """Build the box predictor registered under cfg.MODEL.ROI_BOX_HEAD.PREDICTOR."""
    predictor_cls = registry.ROI_RELATION_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]
    return predictor_cls(cfg, in_channels)
| 2,371 | 36.0625 | 89 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
from lib.scene_parser.rcnn.layers import smooth_l1_loss
from lib.scene_parser.rcnn.modeling.box_coder import BoxCoder
from lib.scene_parser.rcnn.modeling.matcher import Matcher
from lib.scene_parser.rcnn.modeling.pair_matcher import PairMatcher
from lib.scene_parser.rcnn.structures.boxlist_ops import boxlist_iou
from lib.scene_parser.rcnn.structures.bounding_box_pair import BoxPairList
from lib.scene_parser.rcnn.modeling.balanced_positive_negative_pair_sampler import (
BalancedPositiveNegativePairSampler
)
from lib.scene_parser.rcnn.modeling.utils import cat
class FastRCNNLossComputation(object):
"""
Computes the loss for Faster R-CNN.
Also supports FPN
"""
    def __init__(
        self,
        cfg,
        proposal_matcher,
        fg_bg_pair_sampler,
        box_coder,
        cls_agnostic_bbox_reg=False,
        use_matched_pairs_only=False,
        minimal_matched_pairs=0,
    ):
        """
        Arguments:
            cfg: global config node (read for relation-head options).
            proposal_matcher (PairMatcher): assigns each proposal pair to a
                ground-truth pair or background by pair match quality.
            fg_bg_pair_sampler (BalancedPositiveNegativePairSampler): samples a
                balanced set of positive/negative pairs for training.
            box_coder (BoxCoder): encodes/decodes box regression targets.
            cls_agnostic_bbox_reg (bool): stored; class-agnostic regression flag.
            use_matched_pairs_only (bool): sample only among the pairs that have
                large iou with ground-truth pairs.
            minimal_matched_pairs (int): minimum matched-pair count required
                before the matched-only filtering kicks in.
        """
        self.cfg = cfg
        self.proposal_pair_matcher = proposal_matcher
        self.fg_bg_pair_sampler = fg_bg_pair_sampler
        self.box_coder = box_coder
        self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
        self.use_matched_pairs_only = use_matched_pairs_only
        self.minimal_matched_pairs = minimal_matched_pairs
    def match_targets_to_proposals(self, proposal, target):
        """Match every proposal pair against every ground-truth pair.

        Builds an all-pairs quality matrix by averaging the subject and object
        IoUs, constructs the ground-truth pair list and the proposal pair list
        (self-pairs removed), and runs the pair matcher.
        Returns (matched_targets, proposal_pairs).
        """
        match_quality_matrix = boxlist_iou(target, proposal)
        temp = []
        target_box_pairs = []
        # NOTE(review): this double loop is O(G^2) in the number of ground-truth
        # boxes, each iteration touching all proposal pairs.
        for i in range(match_quality_matrix.shape[0]):
            for j in range(match_quality_matrix.shape[0]):
                match_i = match_quality_matrix[i].view(-1, 1)
                match_j = match_quality_matrix[j].view(1, -1)
                # Pair quality = mean of subject IoU and object IoU.
                match_ij = ((match_i + match_j) / 2)
                # remove duplicate index (proposal paired with itself)
                non_duplicate_idx = (torch.eye(match_ij.shape[0]).view(-1) == 0).nonzero().view(-1).to(match_ij.device)
                match_ij = match_ij.view(-1) # [::match_quality_matrix.shape[1]] = 0
                match_ij = match_ij[non_duplicate_idx]
                temp.append(match_ij)
                boxi = target.bbox[i]; boxj = target.bbox[j]
                box_pair = torch.cat((boxi, boxj), 0)
                target_box_pairs.append(box_pair)
        match_pair_quality_matrix = torch.stack(temp, 0).view(len(temp), -1)
        target_box_pairs = torch.stack(target_box_pairs, 0)
        target_pair = BoxPairList(target_box_pairs, target.size, target.mode)
        target_pair.add_field("labels", target.get_field("pred_labels").view(-1))
        # Build all ordered proposal pairs (same construction as the relation head).
        box_subj = proposal.bbox
        box_obj = proposal.bbox
        box_subj = box_subj.unsqueeze(1).repeat(1, box_subj.shape[0], 1)
        box_obj = box_obj.unsqueeze(0).repeat(box_obj.shape[0], 1, 1)
        proposal_box_pairs = torch.cat((box_subj.view(-1, 4), box_obj.view(-1, 4)), 1)
        idx_subj = torch.arange(box_subj.shape[0]).view(-1, 1, 1).repeat(1, box_obj.shape[0], 1).to(proposal.bbox.device)
        idx_obj = torch.arange(box_obj.shape[0]).view(1, -1, 1).repeat(box_subj.shape[0], 1, 1).to(proposal.bbox.device)
        proposal_idx_pairs = torch.cat((idx_subj.view(-1, 1), idx_obj.view(-1, 1)), 1)
        non_duplicate_idx = (proposal_idx_pairs[:, 0] != proposal_idx_pairs[:, 1]).nonzero()
        proposal_box_pairs = proposal_box_pairs[non_duplicate_idx.view(-1)]
        proposal_idx_pairs = proposal_idx_pairs[non_duplicate_idx.view(-1)]
        proposal_pairs = BoxPairList(proposal_box_pairs, proposal.size, proposal.mode)
        proposal_pairs.add_field("idx_pairs", proposal_idx_pairs)
        # matched_idxs = self.proposal_matcher(match_quality_matrix)
        matched_idxs = self.proposal_pair_matcher(match_pair_quality_matrix)
        # Fast RCNN only need "labels" field for selecting the targets
        # target = target.copy_with_fields("pred_labels")
        # get the targets corresponding GT for each proposal
        # NB: need to clamp the indices because we can have a single
        # GT in the image, and matched_idxs can be -2, which goes
        # out of bounds
        if self.use_matched_pairs_only and \
            (matched_idxs >= 0).sum() > self.minimal_matched_pairs:
            # filter all matched_idxs < 0
            proposal_pairs = proposal_pairs[matched_idxs >= 0]
            matched_idxs = matched_idxs[matched_idxs >= 0]
        matched_targets = target_pair[matched_idxs.clamp(min=0)]
        matched_targets.add_field("matched_idxs", matched_idxs)
        return matched_targets, proposal_pairs
    def prepare_targets(self, proposals, targets):
        """Produce per-image pair labels and proposal pairs for sampling.

        For each image, matches proposal pairs to ground-truth pairs, then
        labels them: matched label, 0 for background (below the low matcher
        threshold), -1 for ignore (between thresholds).
        Returns (labels, proposal_pairs), both lists parallel to the images.
        """
        labels = []
        proposal_pairs = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            matched_targets, proposal_pairs_per_image = self.match_targets_to_proposals(
                proposals_per_image, targets_per_image
            )
            matched_idxs = matched_targets.get_field("matched_idxs")
            labels_per_image = matched_targets.get_field("labels")
            labels_per_image = labels_per_image.to(dtype=torch.int64)
            # Label background (below the low threshold)
            bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
            labels_per_image[bg_inds] = 0
            # Label ignore proposals (between low and high thresholds)
            ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
            labels_per_image[ignore_inds] = -1 # -1 is ignored by sampler
            # compute regression targets
            # regression_targets_per_image = self.box_coder.encode(
            #     matched_targets.bbox, proposals_per_image.bbox
            # )
            labels.append(labels_per_image)
            proposal_pairs.append(proposal_pairs_per_image)
            # regression_targets.append(regression_targets_per_image)
        return labels, proposal_pairs
    def _randomsample_train(self, proposals, targets):
        """
        Randomly sample positive/negative proposal pairs for training.

        Labels each candidate pair via :meth:`prepare_targets`, then keeps only
        the pairs selected by ``self.fg_bg_pair_sampler``. Keeps state: the
        sampled pairs are cached on ``self._proposal_pairs`` for the loss.

        Arguments:
            proposals (list[BoxList])
            targets (list[BoxList])

        Returns:
            list[BoxPairList]: sampled, labeled pairs, one element per image.
        """
        labels, proposal_pairs = self.prepare_targets(proposals, targets)
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_pair_sampler(labels)
        proposal_pairs = list(proposal_pairs)
        # add corresponding label and regression_targets information to the bounding boxes
        for labels_per_image, proposal_pairs_per_image in zip(
            labels, proposal_pairs
        ):
            proposal_pairs_per_image.add_field("labels", labels_per_image)
            # proposals_per_image.add_field(
            #     "regression_targets", regression_targets_per_image
            # )
        # distributed sampled proposals, that were obtained on all feature maps
        # concatenated via the fg_bg_sampler, into individual feature map levels
        for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
            zip(sampled_pos_inds, sampled_neg_inds)
        ):
            # keep only indices the sampler marked positive OR negative
            img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
            proposal_pairs_per_image = proposal_pairs[img_idx][img_sampled_inds]
            proposal_pairs[img_idx] = proposal_pairs_per_image
        self._proposal_pairs = proposal_pairs
        return proposal_pairs
    def _fullsample_test(self, proposals):
        """
        Enumerate all ordered subject-object proposal pairs for testing.

        Self-pairs (subject == object) are always dropped; optionally,
        non-overlapping pairs are dropped as well when
        ``MODEL.ROI_RELATION_HEAD.FILTER_NON_OVERLAP`` is set.

        Arguments:
            proposals (list[BoxList])

        Returns:
            list[BoxPairList]: all kept pairs, one element per image, with an
            "idx_pairs" field holding per-image (subject, object) indices.
        """
        proposal_pairs = []
        for i, proposals_per_image in enumerate(proposals):
            box_subj = proposals_per_image.bbox
            box_obj = proposals_per_image.bbox
            # Cartesian product of the N boxes with themselves: (N, N, 4) each
            box_subj = box_subj.unsqueeze(1).repeat(1, box_subj.shape[0], 1)
            box_obj = box_obj.unsqueeze(0).repeat(box_obj.shape[0], 1, 1)
            proposal_box_pairs = torch.cat((box_subj.view(-1, 4), box_obj.view(-1, 4)), 1)
            # matching (subject_idx, object_idx) grid for every pair
            idx_subj = torch.arange(box_subj.shape[0]).view(-1, 1, 1).repeat(1, box_obj.shape[0], 1).to(proposals_per_image.bbox.device)
            idx_obj = torch.arange(box_obj.shape[0]).view(1, -1, 1).repeat(box_subj.shape[0], 1, 1).to(proposals_per_image.bbox.device)
            proposal_idx_pairs = torch.cat((idx_subj.view(-1, 1), idx_obj.view(-1, 1)), 1)
            # drop self-pairs (a box related to itself)
            keep_idx = (proposal_idx_pairs[:, 0] != proposal_idx_pairs[:, 1]).nonzero().view(-1)
            # if we filter non overlap bounding boxes
            if self.cfg.MODEL.ROI_RELATION_HEAD.FILTER_NON_OVERLAP:
                ious = boxlist_iou(proposals_per_image, proposals_per_image).view(-1)
                ious = ious[keep_idx]
                keep_idx = keep_idx[(ious > 0).nonzero().view(-1)]
            proposal_idx_pairs = proposal_idx_pairs[keep_idx]
            proposal_box_pairs = proposal_box_pairs[keep_idx]
            proposal_pairs_per_image = BoxPairList(proposal_box_pairs, proposals_per_image.size, proposals_per_image.mode)
            proposal_pairs_per_image.add_field("idx_pairs", proposal_idx_pairs)
            proposal_pairs.append(proposal_pairs_per_image)
        return proposal_pairs
def subsample(self, proposals, targets=None):
"""
This method performs the random positive/negative sampling, and return
the sampled proposals.
Note: this function keeps a state.
Arguments:
proposals (list[BoxList])
targets (list[BoxList])
"""
if targets is not None:
proposal_pairs = self._randomsample_train(proposals, targets)
else:
proposal_pairs = self._fullsample_test(proposals)
return proposal_pairs
# labels, proposal_pairs = self.prepare_targets(proposals, targets)
# sampled_pos_inds, sampled_neg_inds = self.fg_bg_pair_sampler(labels)
#
# proposal_pairs = list(proposal_pairs)
# # add corresponding label and regression_targets information to the bounding boxes
# for labels_per_image, proposal_pairs_per_image in zip(
# labels, proposal_pairs
# ):
# proposal_pairs_per_image.add_field("labels", labels_per_image)
# # proposals_per_image.add_field(
# # "regression_targets", regression_targets_per_image
# # )
#
# # distributed sampled proposals, that were obtained on all feature maps
# # concatenated via the fg_bg_sampler, into individual feature map levels
# for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
# zip(sampled_pos_inds, sampled_neg_inds)
# ):
# img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
# proposal_pairs_per_image = proposal_pairs[img_idx][img_sampled_inds]
# proposal_pairs[img_idx] = proposal_pairs_per_image
#
# self._proposal_pairs = proposal_pairs
# return proposal_pairs
def __call__(self, class_logits):
"""
Computes the loss for Faster R-CNN.
This requires that the subsample method has been called beforehand.
Arguments:
class_logits (list[Tensor])
Returns:
classification_loss (Tensor)
"""
class_logits = cat(class_logits, dim=0)
device = class_logits.device
if not hasattr(self, "_proposal_pairs"):
raise RuntimeError("subsample needs to be called before")
proposals = self._proposal_pairs
labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
rel_fg_cnt = len(labels.nonzero())
rel_bg_cnt = labels.shape[0] - rel_fg_cnt
ce_weights = labels.new(class_logits.size(1)).fill_(1).float()
ce_weights[0] = float(rel_fg_cnt) / (rel_bg_cnt + 1e-5)
classification_loss = F.cross_entropy(class_logits, labels, weight=ce_weights)
return classification_loss
def obj_classification_loss(self, proposals, class_logits):
class_logits = cat(class_logits, dim=0)
device = class_logits.device
labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
classification_loss = F.cross_entropy(class_logits, labels)
return classification_loss
def make_roi_relation_loss_evaluator(cfg):
    """Build the relation-head loss evaluator from the config.

    Wires together the pair matcher, the balanced pos/neg pair sampler and
    the box coder, then hands them to FastRCNNLossComputation.
    """
    head_cfg = cfg.MODEL.ROI_HEADS
    rel_cfg = cfg.MODEL.ROI_RELATION_HEAD
    matcher = PairMatcher(
        head_cfg.FG_IOU_THRESHOLD,
        head_cfg.BG_IOU_THRESHOLD,
        allow_low_quality_matches=False,
    )
    fg_bg_sampler = BalancedPositiveNegativePairSampler(
        rel_cfg.BATCH_SIZE_PER_IMAGE, rel_cfg.POSITIVE_FRACTION
    )
    box_coder = BoxCoder(weights=head_cfg.BBOX_REG_WEIGHTS)
    return FastRCNNLossComputation(
        cfg,
        matcher,
        fg_bg_sampler,
        box_coder,
        cfg.MODEL.CLS_AGNOSTIC_BBOX_REG,
    )
| 13,391 | 41.514286 | 136 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/roi_relation_predictors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from lib.scene_parser.rcnn.modeling import registry
import torch
from torch import nn
@registry.ROI_RELATION_PREDICTOR.register("FastRCNNRelationPredictor")
class FastRCNNPredictor(nn.Module):
    """Relation classifier: global average pooling followed by one linear layer."""

    def __init__(self, config, in_channels):
        super(FastRCNNPredictor, self).__init__()
        assert in_channels is not None
        n_classes = config.MODEL.ROI_RELATION_HEAD.NUM_CLASSES
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.cls_score = nn.Linear(in_channels, n_classes)
        # small-std gaussian init for the classifier, zero bias
        nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
        nn.init.constant_(self.cls_score.bias, 0)

    def forward(self, x):
        pooled = self.avgpool(x)
        flattened = pooled.view(pooled.size(0), -1)
        return self.cls_score(flattened)
@registry.ROI_RELATION_PREDICTOR.register("FPNRelationPredictor")
class FPNPredictor(nn.Module):
    """FPN relation head: linear classifier plus a (class-aware) box regressor."""

    def __init__(self, cfg, in_channels):
        super(FPNPredictor, self).__init__()
        n_classes = cfg.MODEL.ROI_RELATION_HEAD.NUM_CLASSES
        # class-agnostic regression only needs fg/bg (2) sets of deltas
        n_reg_classes = 2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else n_classes
        self.cls_score = nn.Linear(in_channels, n_classes)
        self.bbox_pred = nn.Linear(in_channels, n_reg_classes * 4)
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        nn.init.constant_(self.cls_score.bias, 0)
        nn.init.constant_(self.bbox_pred.bias, 0)

    def forward(self, x):
        if x.ndimension() == 4:
            # only 1x1 spatial maps are accepted before flattening
            assert list(x.shape[2:]) == [1, 1]
            x = x.view(x.size(0), -1)
        return self.cls_score(x), self.bbox_pred(x)
def make_roi_relation_predictor(cfg, in_channels):
    """Instantiate the relation predictor named by the config registry key."""
    predictor_name = cfg.MODEL.ROI_RELATION_HEAD.PREDICTOR
    return registry.ROI_RELATION_PREDICTOR[predictor_name](cfg, in_channels)
| 2,129 | 35.724138 | 94 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/roi_relation_box_feature_extractors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.nn import functional as F
from lib.scene_parser.rcnn.modeling import registry
from lib.scene_parser.rcnn.modeling.backbone import resnet
from lib.scene_parser.rcnn.modeling.poolers import Pooler
from lib.scene_parser.rcnn.modeling.make_layers import group_norm
from lib.scene_parser.rcnn.modeling.make_layers import make_fc
@registry.ROI_RELATION_BOX_FEATURE_EXTRACTORS.register("ResNet50Conv5ROIFeatureExtractor")
class ResNet50Conv5ROIFeatureExtractor(nn.Module):
    """ROI feature extractor: pool box regions, then run a ResNet stage-4
    (conv5) head over the pooled maps."""

    def __init__(self, config, in_channels):
        super(ResNet50Conv5ROIFeatureExtractor, self).__init__()
        resolution = config.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        scales = config.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        sampling_ratio = config.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler = Pooler(
            output_size=(resolution, resolution),
            scales=scales,
            sampling_ratio=sampling_ratio,
        )
        # single ResNet stage (index 4, i.e. conv5 / res5) used as the head
        stage = resnet.StageSpec(index=4, block_count=3, return_features=False)
        head = resnet.ResNetHead(
            block_module=config.MODEL.RESNETS.TRANS_FUNC,
            stages=(stage,),
            num_groups=config.MODEL.RESNETS.NUM_GROUPS,
            width_per_group=config.MODEL.RESNETS.WIDTH_PER_GROUP,
            stride_in_1x1=config.MODEL.RESNETS.STRIDE_IN_1X1,
            stride_init=None,
            res2_out_channels=config.MODEL.RESNETS.RES2_OUT_CHANNELS,
            dilation=config.MODEL.RESNETS.RES5_DILATION
        )
        self.pooler = pooler
        self.head = head
        self.out_channels = head.out_channels

    def forward(self, x, proposals):
        """Pool per-proposal features from `x` and run them through the head."""
        x = self.pooler(x, proposals)
        x = self.head(x)
        return x
@registry.ROI_RELATION_BOX_FEATURE_EXTRACTORS.register("FPN2MLPFeatureExtractor")
class FPN2MLPFeatureExtractor(nn.Module):
    """
    Heads for FPN for classification
    """

    def __init__(self, cfg, in_channels):
        super(FPN2MLPFeatureExtractor, self).__init__()
        box_cfg = cfg.MODEL.ROI_BOX_HEAD
        resolution = box_cfg.POOLER_RESOLUTION
        self.pooler = Pooler(
            output_size=(resolution, resolution),
            scales=box_cfg.POOLER_SCALES,
            sampling_ratio=box_cfg.POOLER_SAMPLING_RATIO,
        )
        hidden_dim = box_cfg.MLP_HEAD_DIM
        use_gn = box_cfg.USE_GN
        # flattened pooled map -> fc6 -> fc7
        self.fc6 = make_fc(in_channels * resolution ** 2, hidden_dim, use_gn)
        self.fc7 = make_fc(hidden_dim, hidden_dim, use_gn)
        self.out_channels = hidden_dim

    def forward(self, x, proposals):
        pooled = self.pooler(x, proposals)
        flat = pooled.view(pooled.size(0), -1)
        hidden = F.relu(self.fc6(flat))
        return F.relu(self.fc7(hidden))
@registry.ROI_RELATION_BOX_FEATURE_EXTRACTORS.register("FPNXconv1fcFeatureExtractor")
class FPNXconv1fcFeatureExtractor(nn.Module):
    """
    Heads for FPN for classification: a stack of 3x3 convolutions
    (optionally with GroupNorm) followed by a single fully-connected layer.
    """

    def __init__(self, cfg, in_channels):
        super(FPNXconv1fcFeatureExtractor, self).__init__()
        resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        scales = cfg.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler = Pooler(
            output_size=(resolution, resolution),
            scales=scales,
            sampling_ratio=sampling_ratio,
        )
        self.pooler = pooler
        use_gn = cfg.MODEL.ROI_BOX_HEAD.USE_GN
        conv_head_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_HEAD_DIM
        num_stacked_convs = cfg.MODEL.ROI_BOX_HEAD.NUM_STACKED_CONVS
        dilation = cfg.MODEL.ROI_BOX_HEAD.DILATION
        # build the conv tower: each step is Conv2d [+ GroupNorm] + ReLU
        xconvs = []
        for ix in range(num_stacked_convs):
            xconvs.append(
                nn.Conv2d(
                    in_channels,
                    conv_head_dim,
                    kernel_size=3,
                    stride=1,
                    padding=dilation,
                    dilation=dilation,
                    bias=False if use_gn else True  # GN supplies the affine bias
                )
            )
            in_channels = conv_head_dim
            if use_gn:
                xconvs.append(group_norm(in_channels))
            xconvs.append(nn.ReLU(inplace=True))
        self.add_module("xconvs", nn.Sequential(*xconvs))
        # gaussian init for conv weights; zero bias only when it exists (no GN)
        for modules in [self.xconvs,]:
            for l in modules.modules():
                if isinstance(l, nn.Conv2d):
                    torch.nn.init.normal_(l.weight, std=0.01)
                    if not use_gn:
                        torch.nn.init.constant_(l.bias, 0)
        input_size = conv_head_dim * resolution ** 2
        representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
        self.fc6 = make_fc(input_size, representation_size, use_gn=False)
        self.out_channels = representation_size

    def forward(self, x, proposals):
        """Pool per-proposal maps, run the conv tower, then flatten into fc6."""
        x = self.pooler(x, proposals)
        x = self.xconvs(x)
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc6(x))
        return x
def make_roi_relation_box_feature_extractor(cfg, in_channels):
    """Instantiate the box feature extractor named by the config registry key."""
    extractor_name = cfg.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR
    return registry.ROI_RELATION_BOX_FEATURE_EXTRACTORS[extractor_name](cfg, in_channels)
| 5,464 | 34.953947 | 90 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/sparse_targets.py | import torch
import torch.nn as nn
class FrequencyBias(nn.Module):
    """
    The goal of this is to provide a simplified way of computing
    P(predicate | obj1, obj2, img): predicate statistics are stored in an
    embedding table keyed by the flattened (subject class, object class) pair.
    """

    def __init__(self, pred_dist):
        # pred_dist: [num_classes, num_classes, num_preds] numpy array
        super(FrequencyBias, self).__init__()
        self.num_objs = pred_dist.shape[0]
        flat_dist = torch.FloatTensor(pred_dist).view(-1, pred_dist.shape[2])
        self.obj_baseline = nn.Embedding(flat_dist.size(0), flat_dist.size(1))
        self.obj_baseline.weight.data = flat_dist

    def index_with_labels(self, labels):
        """
        :param labels: [batch_size, 2] (subject class, object class) pairs
        :return: [batch_size, num_preds] predicate potentials
        """
        flat_index = labels[:, 0] * self.num_objs + labels[:, 1]
        return self.obj_baseline(flat_index)

    def forward(self, obj_cands0, obj_cands1):
        """
        :param obj_cands0: [batch_size, 151] prob distibution over cands.
        :param obj_cands1: [batch_size, 151] prob distibution over cands.
        :return: [batch_size, #predicates] expected predicate potentials under
            the joint distribution of the two class candidates.
        """
        # outer product -> [batch_size, 151, 151] joint distribution
        joint = obj_cands0[:, :, None] * obj_cands1[:, None]
        # weight every table row by its joint probability
        return joint.view(joint.size(0), -1) @ self.obj_baseline.weight
def _get_tensor_from_boxlist(proposals, field='labels'):
    """Concatenate boxes, a chosen extra field, and image indices across a
    batch of BoxList structures.

    Returns (bbox_batch [N,4], output_batch [N,...], im_inds_batch [N,1] long
    CUDA tensor mapping each row to its image index).
    """
    assert proposals[0].extra_fields[field] is not None
    for im_ind, prop_per_im in enumerate(proposals):
        if im_ind == 0:
            # first image: initialise the accumulators
            num_proposals_im = prop_per_im.bbox.size(0)
            # get data
            bbox_batch = prop_per_im.bbox
            output_batch = prop_per_im.extra_fields[field]
            # im_inds
            im_inds = im_ind * torch.ones(num_proposals_im, 1)
        else:
            # subsequent images: append along dim 0
            num_proposals_im = prop_per_im.bbox.size(0)
            bbox_batch = torch.cat((bbox_batch, prop_per_im.bbox),
                                   dim=0)  # N by 4
            output_batch = torch.cat(
                (output_batch, prop_per_im.extra_fields[field]), dim=0)
            im_inds = torch.cat(
                (im_inds, im_ind * torch.ones(num_proposals_im, 1)), dim=0)
    # TODO: support both cpu and gpu
    im_inds_batch = torch.Tensor(im_inds).long().cuda()
    return bbox_batch, output_batch, im_inds_batch
def _get_rel_inds(im_inds, im_inds_pairs, proposal_idx_pairs):
rel_ind_sub = proposal_idx_pairs[:, 0]
rel_ind_obj = proposal_idx_pairs[:, 1]
# idxs in the rel_ind_sub, rel_ind_obj are based on per image index
# we need to add those inds by a offset [0,0,0... 64, 64, 64...]
# per image number objects
# num_obj_im = torch.unique(im_inds, return_counts=True)[1]
num_obj_im = torch.unique(im_inds)
# cum sum torch.cumsum. this is the raw value for offsets
num_obj_im = torch.cumsum(num_obj_im, dim=0)
# im_inds -1 for offset value
# then set 0-th image has offset 0
rel_ind_offset_im = num_obj_im[im_inds_pairs - 1]
# num_rels_im = torch.unique(im_inds_pairs, return_counts=True)[1]
num_rels_im = torch.unique(im_inds_pairs)
rel_ind_offset_im[:num_rels_im[0]] = 0 # first image needs no offset
rel_ind_offset_im = torch.squeeze(rel_ind_offset_im)
rel_ind_sub += rel_ind_offset_im
rel_ind_obj += rel_ind_offset_im
return torch.cat((rel_ind_sub[:, None], rel_ind_obj[:, None]), 1)
| 3,594 | 35.313131 | 87 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/imp/imp.py | # Scene Graph Generation by Iterative Message Passing
# Reimplemented by Jianwei Yang (jw2yang@gatech.edu)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from ..roi_relation_feature_extractors import make_roi_relation_feature_extractor
from ..roi_relation_box_feature_extractors import make_roi_relation_box_feature_extractor
from ..roi_relation_box_predictors import make_roi_relation_box_predictor
from ..roi_relation_predictors import make_roi_relation_predictor
class IMP(nn.Module):
    """Iterative Message Passing relation head.

    Object (node) and predicate (edge) hidden states are alternately refined
    for ``update_step`` iterations with gated GRU updates, then classified
    into object classes and predicate classes.
    """
    # def __init__(self, fea_size, dropout=False, gate_width=1, use_kernel_function=False):
    def __init__(self, cfg, in_channels):
        super(IMP, self).__init__()
        self.cfg = cfg
        self.dim = 512  # hidden size shared by node and edge states
        self.update_step = cfg.MODEL.ROI_RELATION_HEAD.IMP_FEATURE_UPDATE_STEP
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # self.obj_feature_extractor = make_roi_relation_box_feature_extractor(cfg, in_channels)
        self.pred_feature_extractor = make_roi_relation_feature_extractor(cfg, in_channels)
        # project pooled object / predicate features into the hidden space
        self.obj_embedding = nn.Sequential(
            nn.Linear(self.pred_feature_extractor.out_channels, self.dim),
            nn.ReLU(True),
            nn.Linear(self.dim, self.dim),
        )
        self.pred_embedding = nn.Sequential(
            nn.Linear(self.pred_feature_extractor.out_channels, self.dim),
            nn.ReLU(True),
            nn.Linear(self.dim, self.dim),
        )
        if self.update_step > 0:
            # GRU cells plus sigmoid gates used to weigh incoming messages
            self.edge_gru = nn.GRUCell(input_size=self.dim, hidden_size=self.dim)
            self.node_gru = nn.GRUCell(input_size=self.dim, hidden_size=self.dim)
            self.subj_node_gate = nn.Sequential(nn.Linear(self.dim * 2, 1), nn.Sigmoid())
            self.obj_node_gate = nn.Sequential(nn.Linear(self.dim * 2, 1), nn.Sigmoid())
            self.subj_edge_gate = nn.Sequential(nn.Linear(self.dim * 2, 1), nn.Sigmoid())
            self.obj_edge_gate = nn.Sequential(nn.Linear(self.dim * 2, 1), nn.Sigmoid())
        self.obj_predictor = make_roi_relation_box_predictor(cfg, 512)
        self.pred_predictor = make_roi_relation_predictor(cfg, 512)

    def _get_map_idxs(self, proposals, proposal_pairs):
        """Build batch-global pair indices and subject/object incidence maps.

        Returns:
            rel_inds: [num_rels, 2] global (subject, object) object indices.
            subj_pred_map / obj_pred_map: [num_objs, num_rels] binary maps
            marking which relation each object participates in as subject /
            object.
        """
        rel_inds = []
        offset = 0
        for proposal, proposal_pair in zip(proposals, proposal_pairs):
            rel_ind_i = proposal_pair.get_field("idx_pairs").detach()
            rel_ind_i += offset  # shift per-image indices to batch-global
            offset += len(proposal)
            rel_inds.append(rel_ind_i)
        rel_inds = torch.cat(rel_inds, 0)
        subj_pred_map = rel_inds.new(sum([len(proposal) for proposal in proposals]), rel_inds.shape[0]).fill_(0).float().detach()
        obj_pred_map = rel_inds.new(sum([len(proposal) for proposal in proposals]), rel_inds.shape[0]).fill_(0).float().detach()
        # one-hot scatter: column r gets a 1 in the row of its subject/object
        subj_pred_map.scatter_(0, (rel_inds[:, 0].contiguous().view(1, -1)), 1)
        obj_pred_map.scatter_(0, (rel_inds[:, 1].contiguous().view(1, -1)), 1)
        return rel_inds, subj_pred_map, obj_pred_map

    def forward(self, features, proposals, proposal_pairs):
        """Run message passing and classify objects and predicates.

        Returns ((node_states, edge_states), obj_class_logits,
        pred_class_logits, obj_class_labels, rel_inds).
        """
        rel_inds, subj_pred_map, obj_pred_map = self._get_map_idxs(proposals, proposal_pairs)
        x_obj = torch.cat([proposal.get_field("features") for proposal in proposals], 0)
        # x_obj = self.avgpool(self.obj_feature_extractor(features, proposals))
        x_pred, _ = self.pred_feature_extractor(features, proposals, proposal_pairs)
        x_pred = self.avgpool(x_pred)
        x_obj = x_obj.view(x_obj.size(0), -1); x_pred = x_pred.view(x_pred.size(0), -1)
        x_obj = self.obj_embedding(x_obj); x_pred = self.pred_embedding(x_pred)
        # hx_obj = x_obj.clone().fill_(0).detach()
        # hx_pred = x_pred.clone().fill_(0).detach()
        # hx_obj = [self.node_gru(x_obj, hx_obj)]
        # hx_edge = [self.edge_gru(x_pred, hx_pred)]
        hx_obj = [x_obj]
        hx_edge = [x_pred]
        for t in range(self.update_step):
            sub_vert = hx_obj[t][rel_inds[:, 0]]  # subject state per relation
            obj_vert = hx_obj[t][rel_inds[:, 1]]  # object state per relation
            '''update object features'''
            # gated edge->node messages, averaged over incident relations
            message_pred_to_subj = self.subj_node_gate(torch.cat([sub_vert, hx_edge[t]], 1)) * hx_edge[t] # nrel x d
            message_pred_to_obj = self.obj_node_gate(torch.cat([obj_vert, hx_edge[t]], 1)) * hx_edge[t] # nrel x d
            node_message = (torch.mm(subj_pred_map, message_pred_to_subj) / (subj_pred_map.sum(1, keepdim=True) + 1e-5) \
                + torch.mm(obj_pred_map, message_pred_to_obj) / (obj_pred_map.sum(1, keepdim=True) + 1e-5)) / 2.
            hx_obj.append(self.node_gru(node_message, hx_obj[t]))
            # hx_obj.append(F.relu(node_message + hx_obj[t]))
            '''update predicat features'''
            # gated node->edge messages from subject and object endpoints
            message_subj_to_pred = self.subj_edge_gate(torch.cat([sub_vert, hx_edge[t]], 1)) * sub_vert # nrel x d
            message_obj_to_pred = self.obj_edge_gate(torch.cat([obj_vert, hx_edge[t]], 1)) * obj_vert # nrel x d
            edge_message = (message_subj_to_pred + message_obj_to_pred) / 2.
            hx_edge.append(self.edge_gru(edge_message, hx_edge[t]))
            # hx_edge.append(F.relu(edge_message + hx_edge[t]))
        '''compute results and losses'''
        # final classifier that converts the features into predictions
        # for object prediction, we do not do bbox regression again
        obj_class_logits = self.obj_predictor(hx_obj[-1].unsqueeze(2).unsqueeze(3))
        pred_class_logits = self.pred_predictor(hx_edge[-1].unsqueeze(2).unsqueeze(3))
        if obj_class_logits is None:
            # fall back to the detector's logits (skip the background column)
            logits = torch.cat([proposal.get_field("logits") for proposal in proposals], 0)
            obj_class_labels = logits[:, 1:].max(1)[1] + 1
        else:
            obj_class_labels = obj_class_logits[:, 1:].max(1)[1] + 1
        return (hx_obj[-1], hx_edge[-1]), obj_class_logits, pred_class_logits, obj_class_labels, rel_inds
def build_imp_model(cfg, in_channels):
    """Factory for the iterative-message-passing relation head."""
    model = IMP(cfg, in_channels)
    return model
| 6,009 | 49.083333 | 129 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/baseline/baseline.py | # Scene Graph Generation with baseline (vanilla) model
# Reimnplemetned by Jianwei Yang (jw2yang@gatech.edu)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import Parameter
from ..roi_relation_feature_extractors import make_roi_relation_feature_extractor
from ..roi_relation_predictors import make_roi_relation_predictor
class Baseline(nn.Module):
    """Vanilla relation head: extract pair features and classify predicates
    directly, reusing the detector's object logits for object labels."""

    def __init__(self, cfg, in_channels):
        super(Baseline, self).__init__()
        self.cfg = cfg
        self.pred_feature_extractor = make_roi_relation_feature_extractor(cfg, in_channels)
        self.predictor = make_roi_relation_predictor(cfg, self.pred_feature_extractor.out_channels)

    def forward(self, features, proposals, proposal_pairs):
        """Return (pair_features, obj_class_logits (always None here),
        rel_class_logits, obj_class_labels, rel_inds)."""
        obj_class_logits = None # no need to predict object class again
        if self.training:
            x, rel_inds = self.pred_feature_extractor(features, proposals, proposal_pairs)
            rel_class_logits = self.predictor(x)
        else:
            # inference: identical computation but without autograd tracking
            with torch.no_grad():
                x, rel_inds = self.pred_feature_extractor(features, proposals, proposal_pairs)
                rel_class_logits = self.predictor(x)
        if obj_class_logits is None:
            # derive object labels from the detector logits, skipping the
            # background column (index 0)
            logits = torch.cat([proposal.get_field("logits") for proposal in proposals], 0)
            obj_class_labels = logits[:, 1:].max(1)[1] + 1
        else:
            obj_class_labels = obj_class_logits[:, 1:].max(1)[1] + 1
        return x, obj_class_logits, rel_class_logits, obj_class_labels, rel_inds
def build_baseline_model(cfg, in_channels):
    """Factory for the vanilla (baseline) relation head."""
    model = Baseline(cfg, in_channels)
    return model
| 1,668 | 41.794872 | 99 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/grcnn/grcnn.py | # Graph R-CNN for scene graph generation
# Reimnplemetned by Jianwei Yang (jw2yang@gatech.edu)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from ..roi_relation_feature_extractors import make_roi_relation_feature_extractor
from ..roi_relation_box_feature_extractors import make_roi_relation_box_feature_extractor
from ..roi_relation_box_predictors import make_roi_relation_box_predictor
from ..roi_relation_predictors import make_roi_relation_predictor
from .agcn.agcn import _GraphConvolutionLayer_Collect, _GraphConvolutionLayer_Update
class GRCNN(nn.Module):
    """Graph R-CNN relation head.

    Runs attentional graph convolutions (aGCN) twice: first on object /
    predicate *features* for ``feat_update_step`` iterations, then on the
    resulting class *scores* for ``score_update_step`` iterations.
    """
    # def __init__(self, fea_size, dropout=False, gate_width=1, use_kernel_function=False):
    def __init__(self, cfg, in_channels):
        super(GRCNN, self).__init__()
        self.cfg = cfg
        self.dim = 1024  # hidden size of node/edge feature embeddings
        self.feat_update_step = cfg.MODEL.ROI_RELATION_HEAD.GRCNN_FEATURE_UPDATE_STEP
        self.score_update_step = cfg.MODEL.ROI_RELATION_HEAD.GRCNN_SCORE_UPDATE_STEP
        num_classes_obj = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        num_classes_pred = cfg.MODEL.ROI_RELATION_HEAD.NUM_CLASSES
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # self.obj_feature_extractor = make_roi_relation_box_feature_extractor(cfg, in_channels)
        self.pred_feature_extractor = make_roi_relation_feature_extractor(cfg, in_channels)
        # project pooled object / relation features into the hidden space
        self.obj_embedding = nn.Sequential(
            nn.Linear(self.pred_feature_extractor.out_channels, self.dim),
            nn.ReLU(True),
            nn.Linear(self.dim, self.dim),
        )
        self.rel_embedding = nn.Sequential(
            nn.Linear(self.pred_feature_extractor.out_channels, self.dim),
            nn.ReLU(True),
            nn.Linear(self.dim, self.dim),
        )
        if self.feat_update_step > 0:
            self.gcn_collect_feat = _GraphConvolutionLayer_Collect(self.dim, self.dim)
            self.gcn_update_feat = _GraphConvolutionLayer_Update(self.dim, self.dim)
        if self.score_update_step > 0:
            self.gcn_collect_score = _GraphConvolutionLayer_Collect(num_classes_obj, num_classes_pred)
            self.gcn_update_score = _GraphConvolutionLayer_Update(num_classes_obj, num_classes_pred)
        self.obj_predictor = make_roi_relation_box_predictor(cfg, self.dim)
        self.pred_predictor = make_roi_relation_predictor(cfg, self.dim)

    def _get_map_idxs(self, proposals, proposal_pairs):
        """Build global pair indices and the three adjacency maps used by the
        aGCN: object-object, subject-relation and object-relation incidence.
        """
        rel_inds = []
        offset = 0
        obj_num = sum([len(proposal) for proposal in proposals])
        obj_obj_map = torch.FloatTensor(obj_num, obj_num).fill_(0)
        for proposal, proposal_pair in zip(proposals, proposal_pairs):
            rel_ind_i = proposal_pair.get_field("idx_pairs").detach()
            # fully-connected (minus self-loops) block for this image's objects
            obj_obj_map_i = (1 - torch.eye(len(proposal))).float()
            obj_obj_map[offset:offset + len(proposal), offset:offset + len(proposal)] = obj_obj_map_i
            rel_ind_i += offset  # shift per-image indices to batch-global
            offset += len(proposal)
            rel_inds.append(rel_ind_i)
        rel_inds = torch.cat(rel_inds, 0)
        subj_pred_map = rel_inds.new(obj_num, rel_inds.shape[0]).fill_(0).float().detach()
        obj_pred_map = rel_inds.new(obj_num, rel_inds.shape[0]).fill_(0).float().detach()
        # one-hot scatter: column r gets a 1 in the row of its subject/object
        subj_pred_map.scatter_(0, (rel_inds[:, 0].contiguous().view(1, -1)), 1)
        obj_pred_map.scatter_(0, (rel_inds[:, 1].contiguous().view(1, -1)), 1)
        obj_obj_map = obj_obj_map.type_as(obj_pred_map)
        return rel_inds, obj_obj_map, subj_pred_map, obj_pred_map

    def forward(self, features, proposals, proposal_pairs):
        """Return (pair_features, obj_class_logits, pred_class_logits,
        obj_class_labels, rel_inds)."""
        rel_inds, obj_obj_map, subj_pred_map, obj_pred_map = self._get_map_idxs(proposals, proposal_pairs)
        x_obj = torch.cat([proposal.get_field("features").detach() for proposal in proposals], 0)
        obj_class_logits = torch.cat([proposal.get_field("logits").detach() for proposal in proposals], 0)
        # x_obj = self.avgpool(self.obj_feature_extractor(features, proposals))
        x_pred, _ = self.pred_feature_extractor(features, proposals, proposal_pairs)
        x_pred = self.avgpool(x_pred)
        x_obj = x_obj.view(x_obj.size(0), -1); x_obj = self.obj_embedding(x_obj)
        x_pred = x_pred.view(x_pred.size(0), -1); x_pred = self.rel_embedding(x_pred)
        '''feature level agcn'''
        obj_feats = [x_obj]
        pred_feats = [x_pred]
        for t in range(self.feat_update_step):
            # message from other objects
            source_obj = self.gcn_collect_feat(obj_feats[t], obj_feats[t], obj_obj_map, 4)
            # messages from relations this object participates in
            source_rel_sub = self.gcn_collect_feat(obj_feats[t], pred_feats[t], subj_pred_map, 0)
            source_rel_obj = self.gcn_collect_feat(obj_feats[t], pred_feats[t], obj_pred_map, 1)
            source2obj_all = (source_obj + source_rel_sub + source_rel_obj) / 3
            obj_feats.append(self.gcn_update_feat(obj_feats[t], source2obj_all, 0))
            '''update predicate logits'''
            source_obj_sub = self.gcn_collect_feat(pred_feats[t], obj_feats[t], subj_pred_map.t(), 2)
            source_obj_obj = self.gcn_collect_feat(pred_feats[t], obj_feats[t], obj_pred_map.t(), 3)
            source2rel_all = (source_obj_sub + source_obj_obj) / 2
            pred_feats.append(self.gcn_update_feat(pred_feats[t], source2rel_all, 1))
        obj_class_logits = self.obj_predictor(obj_feats[-1].unsqueeze(2).unsqueeze(3))
        pred_class_logits = self.pred_predictor(pred_feats[-1].unsqueeze(2).unsqueeze(3))
        '''score level agcn'''
        obj_scores = [obj_class_logits]
        pred_scores = [pred_class_logits]
        for t in range(self.score_update_step):
            '''update object logits'''
            # message from other objects
            source_obj = self.gcn_collect_score(obj_scores[t], obj_scores[t], obj_obj_map, 4)
            # message from predicate
            source_rel_sub = self.gcn_collect_score(obj_scores[t], pred_scores[t], subj_pred_map, 0)
            source_rel_obj = self.gcn_collect_score(obj_scores[t], pred_scores[t], obj_pred_map, 1)
            source2obj_all = (source_obj + source_rel_sub + source_rel_obj) / 3
            obj_scores.append(self.gcn_update_score(obj_scores[t], source2obj_all, 0))
            '''update predicate logits'''
            source_obj_sub = self.gcn_collect_score(pred_scores[t], obj_scores[t], subj_pred_map.t(), 2)
            source_obj_obj = self.gcn_collect_score(pred_scores[t], obj_scores[t], obj_pred_map.t(), 3)
            source2rel_all = (source_obj_sub + source_obj_obj) / 2
            pred_scores.append(self.gcn_update_score(pred_scores[t], source2rel_all, 1))
        obj_class_logits = obj_scores[-1]
        pred_class_logits = pred_scores[-1]
        if obj_class_logits is None:
            # fall back to the detector's logits (skip the background column)
            logits = torch.cat([proposal.get_field("logits") for proposal in proposals], 0)
            obj_class_labels = logits[:, 1:].max(1)[1] + 1
        else:
            obj_class_labels = obj_class_logits[:, 1:].max(1)[1] + 1
        return (x_pred), obj_class_logits, pred_class_logits, obj_class_labels, rel_inds
def build_grcnn_model(cfg, in_channels):
    """Factory for the Graph R-CNN relation head."""
    model = GRCNN(cfg, in_channels)
    return model
| 7,173 | 49.521127 | 106 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/grcnn/agcn/agcn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
import time
def normal_init(m, mean, stddev, truncated=False):
    """Gaussian-initialise a module's weight and zero its bias.

    With ``truncated=True`` an approximate truncated normal is used:
    samples are folded into (-2, 2) std devs before scaling/shifting.
    """
    weight = m.weight.data
    if not truncated:
        weight.normal_(mean, stddev)
    else:
        weight.normal_().fmod_(2).mul_(stddev).add_(mean)  # not a perfect approximation
    m.bias.data.zero_()
class _Collection_Unit(nn.Module):
    """Aggregate (collect) messages from source nodes into target nodes.

    Source features are projected by a linear layer + ReLU, then averaged
    over each target's incident sources as given by ``attention_base``
    (a [num_targets, num_sources] weight/incidence matrix).

    Note: a commented-out low-rank (two-linear) variant was removed from
    this class body; see version control history if it is ever needed.
    """

    def __init__(self, dim_in, dim_out):
        super(_Collection_Unit, self).__init__()
        self.fc = nn.Linear(dim_in, dim_out, bias=True)
        normal_init(self.fc, 0, 0.01)

    def forward(self, target, source, attention_base):
        # assert attention_base.size(0) == source.size(0), "source number must be equal to attention number"
        fc_out = F.relu(self.fc(source))
        collect = torch.mm(attention_base, fc_out)  # [Ntarget, Nsource] @ [Nsource, dim_out]
        # normalise by each target's total incoming weight (eps avoids /0)
        collect_avg = collect / (attention_base.sum(1).view(collect.size(0), 1) + 1e-7)
        return collect_avg
class _Update_Unit(nn.Module):
def __init__(self, dim):
super(_Update_Unit, self).__init__()
def forward(self, target, source):
assert target.size() == source.size(), "source dimension must be equal to target dimension"
update = target + source
return update
class _GraphConvolutionLayer_Collect(nn.Module):
    """Graph convolutional layer: collect information from neighbors.

    Holds five collection units, selected by ``unit_id``:
    0/1 object (as subject/object) from relation, 2/3 relation from object
    (subject/object endpoint), 4 object from object.
    """

    def __init__(self, dim_obj, dim_rel):
        super(_GraphConvolutionLayer_Collect, self).__init__()
        units = [
            _Collection_Unit(dim_rel, dim_obj),  # 0: obj (subject) from rel
            _Collection_Unit(dim_rel, dim_obj),  # 1: obj (object) from rel
            _Collection_Unit(dim_obj, dim_rel),  # 2: rel from obj (subject)
            _Collection_Unit(dim_obj, dim_rel),  # 3: rel from obj (object)
            _Collection_Unit(dim_obj, dim_obj),  # 4: obj from obj
        ]
        self.collect_units = nn.ModuleList(units)

    def forward(self, target, source, attention, unit_id):
        return self.collect_units[unit_id](target, source, attention)
class _GraphConvolutionLayer_Update(nn.Module):
    """Graph convolutional layer: apply residual updates to target nodes.

    ``unit_id`` selects the update unit: 0 for object nodes, 1 for relation
    nodes.
    """

    def __init__(self, dim_obj, dim_rel):
        super(_GraphConvolutionLayer_Update, self).__init__()
        self.update_units = nn.ModuleList([
            _Update_Unit(dim_obj),  # 0: update object nodes
            _Update_Unit(dim_rel),  # 1: update relation nodes
        ])

    def forward(self, target, source, unit_id):
        return self.update_units[unit_id](target, source)
| 3,579 | 43.75 | 110 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/reldn/reldn.py | # Scene Graph Generation by Iterative Message Passing
# Reimnplemetned by Jianwei Yang (jw2yang@gatech.edu)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from ..roi_relation_feature_extractors import make_roi_relation_feature_extractor
from ..roi_relation_box_feature_extractors import make_roi_relation_box_feature_extractor
from ..roi_relation_box_predictors import make_roi_relation_box_predictor
from ..roi_relation_predictors import make_roi_relation_predictor
from .spatial import build_spatial_feature
# from .semantic import build_semantic_feature
# from .visual import build_visual_feature
class RelDN(nn.Module):
    """Relationship Detection Network head.

    Combines three per-pair score sources into relation class logits:
    visual (subject/object/union-region embeddings), spatial (pairwise box
    geometry), and semantic (a frequency prior loaded from "freq_prior.npy").
    Object classification is not redone here; object labels are taken from
    the proposals' stored logits.
    """
    # def __init__(self, fea_size, dropout=False, gate_width=1, use_kernel_function=False):
    def __init__(self, cfg, in_channels, eps=1e-10):
        # cfg: project config node; in_channels: backbone feature channels.
        # `eps` is currently unused (the log-prior variant below is commented out).
        super(RelDN, self).__init__()
        self.cfg = cfg
        self.dim = 512
        self.update_step = cfg.MODEL.ROI_RELATION_HEAD.IMP_FEATURE_UPDATE_STEP
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # self.obj_feature_extractor = make_roi_relation_box_feature_extractor(cfg, in_channels)
        self.pred_feature_extractor = make_roi_relation_feature_extractor(cfg, in_channels)
        num_classes = cfg.MODEL.ROI_RELATION_HEAD.NUM_CLASSES
        # Two-layer MLP embeddings mapping extractor output into a shared 512-d space.
        self.obj_embedding = nn.Sequential(
            nn.Linear(self.pred_feature_extractor.out_channels, self.dim),
            nn.ReLU(True),
            nn.Linear(self.dim, self.dim),
        )
        self.pred_embedding = nn.Sequential(
            nn.Linear(self.pred_feature_extractor.out_channels, self.dim),
            nn.ReLU(True),
            nn.Linear(self.dim, self.dim),
        )
        # Fuses (subject, object, predicate) embeddings: input is 3 * 512.
        self.rel_embedding = nn.Sequential(
            nn.Linear(3 * self.dim, self.dim),
            nn.ReLU(True),
            nn.Linear(self.dim, self.dim),
            nn.ReLU(True)
        )
        self.rel_spatial_feat = build_spatial_feature(cfg, self.dim)
        self.rel_subj_predictor = make_roi_relation_predictor(cfg, 512)
        self.rel_obj_predictor = make_roi_relation_predictor(cfg, 512)
        self.rel_pred_predictor = make_roi_relation_predictor(cfg, 512)
        # Spatial branch: 64-d geometry embedding -> relation logits.
        self.rel_spt_predictor = nn.Linear(64, num_classes)
        # Frequency prior over (subject class, object class) -> predicate class,
        # loaded from the working directory. Scaled by 10 instead of log-transformed.
        self.freq_dist = torch.from_numpy(np.load("freq_prior.npy"))
        self.pred_dist = 10 * self.freq_dist # np.log(self.freq_dist + eps)
        self.num_objs = self.pred_dist.shape[0]
        # Flatten to (num_objs * num_objs, num_predicates) for fused-index lookup.
        self.pred_dist = torch.FloatTensor(self.pred_dist).view(-1, self.pred_dist.shape[2]).cuda()
        # self.rel_sem_predictor = nn.Embedding(self.pred_dist.size(0), self.pred_dist.size(1))
        # self.rel_sem_predictor.weight.data = self.pred_dist
    def _get_map_idxs(self, proposals, proposal_pairs):
        """Concatenate per-image pair indices into global indices, and build
        0/1 maps from objects to the pairs where they act as subject/object."""
        rel_inds = []
        offset = 0
        for proposal, proposal_pair in zip(proposals, proposal_pairs):
            # clone() so the in-place += below cannot mutate the stored field
            rel_ind_i = proposal_pair.get_field("idx_pairs").detach().clone()
            rel_ind_i += offset
            offset += len(proposal)
            rel_inds.append(rel_ind_i)
        rel_inds = torch.cat(rel_inds, 0)
        subj_pred_map = rel_inds.new(sum([len(proposal) for proposal in proposals]), rel_inds.shape[0]).fill_(0).float().detach()
        obj_pred_map = rel_inds.new(sum([len(proposal) for proposal in proposals]), rel_inds.shape[0]).fill_(0).float().detach()
        subj_pred_map.scatter_(0, (rel_inds[:, 0].contiguous().view(1, -1)), 1)
        obj_pred_map.scatter_(0, (rel_inds[:, 1].contiguous().view(1, -1)), 1)
        return rel_inds, subj_pred_map, obj_pred_map
    def forward(self, features, proposals, proposal_pairs):
        """Return ((obj_feats, pred_feats), obj_logits, rel_logits, obj_labels, rel_inds).

        `obj_class_logits` is always None here (objects are not re-classified),
        so labels come from the proposals' stored "logits" field.
        """
        obj_class_logits = None
        rel_inds, subj_pred_map, obj_pred_map = self._get_map_idxs(proposals, proposal_pairs)
        # Object features were cached on the proposals by the box head.
        x_obj = torch.cat([proposal.get_field("features").detach() for proposal in proposals], 0)
        # features = [feature.detach() for feature in features]
        # x_obj = self.avgpool(self.obj_feature_extractor(features, proposals))
        x_pred, _ = self.pred_feature_extractor(features, proposals, proposal_pairs)
        x_pred = self.avgpool(x_pred)
        x_obj = x_obj.view(x_obj.size(0), -1); x_pred = x_pred.view(x_pred.size(0), -1)
        x_obj = self.obj_embedding(x_obj); x_pred = self.pred_embedding(x_pred)
        sub_vert = x_obj[rel_inds[:, 0]] #
        obj_vert = x_obj[rel_inds[:, 1]]
        '''compute visual scores'''
        # predictors expect NCHW; unsqueeze to (N, C, 1, 1)
        rel_subj_class_logits = self.rel_subj_predictor(sub_vert.unsqueeze(2).unsqueeze(3))
        rel_obj_class_logits = self.rel_obj_predictor(obj_vert.unsqueeze(2).unsqueeze(3))
        x_rel = torch.cat([sub_vert, obj_vert, x_pred], 1)
        x_rel = self.rel_embedding(x_rel)
        rel_pred_class_logits = self.rel_pred_predictor(x_rel.unsqueeze(2).unsqueeze(3))
        rel_vis_class_logits = rel_pred_class_logits + rel_subj_class_logits + rel_obj_class_logits
        # rel_vis_class_logits = rel_pred_class_logits # + rel_subj_class_logits + rel_obj_class_logits
        '''compute spatial scores'''
        edge_spt_feats = self.rel_spatial_feat(proposal_pairs)
        rel_spt_class_logits = self.rel_spt_predictor(edge_spt_feats)
        '''compute semantic scores'''
        rel_sem_class_logits = []
        for proposal_per_image, proposal_pairs_per_image in zip(proposals, proposal_pairs):
            obj_labels = proposal_per_image.get_field("labels").detach()
            rel_ind_i = proposal_pairs_per_image.get_field("idx_pairs").detach()
            subj_vert_labels = obj_labels[rel_ind_i[:, 0]]
            obj_vert_labels = obj_labels[rel_ind_i[:, 1]]
            # class_logits_per_image = self.freq_dist[subj_vert_labels, :][:, obj_vert_labels].view(-1, self.freq_dist.size(-1))
            # class_logits_per_image = self.rel_sem_predictor(subj_vert_labels * self.num_objs + obj_vert_labels)
            # fused-index lookup into the flattened (num_objs^2, num_preds) prior
            class_logits_per_image = self.pred_dist[subj_vert_labels * self.num_objs + obj_vert_labels]
            # remove duplicate index
            # non_duplicate_idx = (torch.eye(obj_labels.shape[0]).view(-1) == 0).nonzero().view(-1).to(class_logits_per_image.device)
            # class_logits_per_image = class_logits_per_image[non_duplicate_idx]
            rel_sem_class_logits.append(class_logits_per_image)
        rel_sem_class_logits = torch.cat(rel_sem_class_logits, 0)
        # final relation logits: sum of the three branches
        rel_class_logits = rel_vis_class_logits + rel_sem_class_logits + rel_spt_class_logits #
        if obj_class_logits is None:
            logits = torch.cat([proposal.get_field("logits") for proposal in proposals], 0)
            # argmax over foreground classes only (skip background column 0)
            obj_class_labels = logits[:, 1:].max(1)[1] + 1
        else:
            obj_class_labels = obj_class_logits[:, 1:].max(1)[1] + 1
        return (x_obj, x_pred), obj_class_logits, rel_class_logits, obj_class_labels, rel_inds
def build_reldn_model(cfg, in_channels):
    """Factory: construct a RelDN relation head from the config and input channels."""
    model = RelDN(cfg, in_channels)
    return model
| 6,871 | 47.055944 | 133 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/reldn/spatial.py | import torch
import torch.nn as nn
import numpy as np
from lib.scene_parser.rcnn.utils.boxes import bbox_transform_inv, boxes_union
class SpatialFeature(nn.Module):
    """Embed pairwise box geometry into a 64-d spatial feature.

    The raw geometry vector has 28 dims: three 6-d pair deltas
    (subj->obj, subj->union, union->obj) plus two 5-d single-box
    descriptors (subject, object).
    """

    def __init__(self, cfg, dim):
        # NOTE: `dim` is unused; the MLP sizes are hard-coded (28 -> 64 -> 64).
        super(SpatialFeature, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(28, 64), nn.LeakyReLU(0.1),
            nn.Linear(64, 64), nn.LeakyReLU(0.1))

    def _get_pair_feature(self, boxes1, boxes2):
        """6-d delta between two box sets: forward transform + first two of the reverse."""
        fwd = bbox_transform_inv(boxes1, boxes2)
        bwd = bbox_transform_inv(boxes2, boxes1)
        return np.hstack((fwd, bwd[:, :2]))

    def _get_box_feature(self, boxes, width, height):
        """5-d descriptor: normalized corners plus relative area."""
        x1 = boxes[:, 0] / width
        y1 = boxes[:, 1] / height
        x2 = boxes[:, 2] / width
        y2 = boxes[:, 3] / height
        area = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1) / (width * height)
        return np.stack((x1, y1, x2, y2, area), axis=1)

    def _get_spt_features(self, boxes1, boxes2, width, height):
        """Concatenate all pairwise geometry features into a (N, 28) array."""
        union = boxes_union(boxes1, boxes2)
        parts = (
            self._get_pair_feature(boxes1, boxes2),
            self._get_pair_feature(boxes1, union),
            self._get_pair_feature(union, boxes2),
            self._get_box_feature(boxes1, width, height),
            self._get_box_feature(boxes2, width, height),
        )
        return np.hstack(parts)

    def forward(self, proposal_pairs):
        """Compute the 64-d spatial embedding for every proposal pair in the batch."""
        per_image = []
        for pair in proposal_pairs:
            # bbox stores subject and object boxes side by side: [x1 y1 x2 y2 | x1 y1 x2 y2]
            subj = pair.bbox[:, :4]
            obj = pair.bbox[:, 4:]
            feat = self._get_spt_features(
                subj.cpu().numpy(), obj.cpu().numpy(), pair.size[0], pair.size[1])
            per_image.append(torch.from_numpy(feat).to(subj.device))
        stacked = torch.cat(per_image, 0).float()
        return self.model(stacked)
def build_spatial_feature(cfg, dim=0):
    """Factory: construct the pairwise spatial-feature embedder."""
    module = SpatialFeature(cfg, dim)
    return module
| 2,141 | 41.84 | 142 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/reldn/visual.py | import torch
import torch.nn as nn
class VisualFeature(nn.Module):
    """Visual-feature branch for relation scoring (unfinished stub).

    Bug fixes over the original: ``super().__init__()`` was never called, so
    assigning a submodule raised ``AttributeError``; and ``nn.Linear()`` was
    constructed with no arguments, which raises ``TypeError``. As a result the
    class could never be instantiated. The broken branch is now a placeholder
    until the layer sizes are decided.
    """

    def __init__(self, dim, in_channels=None):
        # `in_channels` is accepted (and currently unused) so that the
        # module-level factory call build_visual_feature(cfg, in_channels)
        # can construct this class; the original one-arg signature still works.
        super(VisualFeature, self).__init__()
        # TODO: define subject/object/relation branches once feature
        # dimensions are fixed; nn.Linear() with no sizes is invalid.
        self.subj_branch = None

    def forward(self, subj_feat, obj_feat, rel_feat):
        # Stub: returns None, matching the original placeholder behavior.
        pass
def build_visual_feature(cfg, in_channels):
    # Factory for the (stub) visual-feature branch.
    # NOTE(review): verify that VisualFeature's constructor accepts this
    # two-argument (cfg, in_channels) call — the stub in this file declares
    # __init__(self, dim) with a single parameter.
    return VisualFeature(cfg, in_channels)
| 307 | 22.692308 | 53 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/msdn/msdn_base.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import Parameter
import pdb
class Message_Passing_Unit_v2(nn.Module):
    """Kernel-style message gate: projects both terms with a shared linear map,
    multiplies the projections, and sigmoids their sum into a per-pair scalar
    gate that scales the incoming message (``pair_term``)."""

    def __init__(self, fea_size, filter_size = 128):
        super(Message_Passing_Unit_v2, self).__init__()
        self.w = nn.Linear(fea_size, filter_size, bias=True)
        self.fea_size = fea_size
        self.filter_size = filter_size

    def forward(self, unary_term, pair_term):
        """Return ``pair_term`` scaled row-wise by a learned gate in (0, 1)."""
        # Broadcast a singleton batch on either side to the other side's size.
        if unary_term.size()[0] == 1 and pair_term.size()[0] > 1:
            unary_term = unary_term.expand(pair_term.size()[0], unary_term.size()[1])
        if unary_term.size()[0] > 1 and pair_term.size()[0] == 1:
            pair_term = pair_term.expand(unary_term.size()[0], pair_term.size()[1])
        gate = self.w(F.relu(unary_term)) * self.w(F.relu(pair_term))
        gate = torch.sigmoid(gate.sum(1))  # (N,) — one scalar gate per pair
        # BUGFIX: `gate` is 1-D with shape (N,); the original
        # `gate.expand(N, F)` violates broadcasting rules and raises for
        # N > 1 unless F == N. view(-1, 1) makes the expand valid, matching
        # Message_Passing_Unit_v1.
        output = pair_term * gate.view(-1, 1).expand(gate.size()[0], pair_term.size()[1])
        return output
class Message_Passing_Unit_v1(nn.Module):
    """Concatenation-style message gate: concatenates the two terms, projects
    them to `filter_size` logits, and averages their sigmoids into one scalar
    gate per pair that scales the incoming message (``pair_term``)."""

    def __init__(self, fea_size, filter_size = 128):
        super(Message_Passing_Unit_v1, self).__init__()
        self.w = nn.Linear(fea_size * 2, filter_size, bias=True)
        self.fea_size = fea_size
        self.filter_size = filter_size

    def forward(self, unary_term, pair_term):
        """Return ``pair_term`` scaled row-wise by a learned gate in (0, 1)."""
        n_unary = unary_term.size()[0]
        n_pair = pair_term.size()[0]
        # Broadcast a singleton batch on either side to the other side's size.
        if n_unary == 1 and n_pair > 1:
            unary_term = unary_term.expand(n_pair, unary_term.size()[1])
        elif n_unary > 1 and n_pair == 1:
            pair_term = pair_term.expand(n_unary, pair_term.size()[1])
        joint = F.relu(torch.cat([unary_term, pair_term], 1))
        gate = torch.sigmoid(self.w(joint)).mean(1)  # (N,)
        return pair_term * gate.view(-1, 1).expand(gate.size()[0], pair_term.size()[1])
class Gated_Recurrent_Unit(nn.Module):
    """Simplified GRU-like update: sum of two linear maps over the ReLU'd
    input and hidden state, with optional dropout during training."""

    def __init__(self, fea_size, dropout):
        super(Gated_Recurrent_Unit, self).__init__()
        self.wih = nn.Linear(fea_size, fea_size, bias=True)
        self.whh = nn.Linear(fea_size, fea_size, bias=True)
        self.dropout = dropout

    def forward(self, input, hidden):
        """Combine `input` and `hidden` into the next hidden state."""
        out = self.wih(F.relu(input)) + self.whh(F.relu(hidden))
        if not self.dropout:
            return out
        return F.dropout(out, training=self.training)
class MSDN_BASE(nn.Module):
    """Shared base for MSDN-style heads: owns the four directional message
    gates (sub<->pred, obj<->pred) and the two GRU-style update units.
    Subclasses must implement `forward`.
    """
    def __init__(self, fea_size, dropout=False, gate_width=128, use_region=False, use_kernel_function=False):
        # use_kernel_function selects the v2 (kernel-style) gate over the
        # default v1 (concatenation-style) gate. `use_region` is unused here.
        super(MSDN_BASE, self).__init__()
        #self.w_object = Parameter()
        if use_kernel_function:
            Message_Passing_Unit = Message_Passing_Unit_v2
        else:
            Message_Passing_Unit = Message_Passing_Unit_v1
        self.gate_sub2pred = Message_Passing_Unit(fea_size, gate_width)
        self.gate_obj2pred = Message_Passing_Unit(fea_size, gate_width)
        self.gate_pred2sub = Message_Passing_Unit(fea_size, gate_width)
        self.gate_pred2obj = Message_Passing_Unit(fea_size, gate_width)
        self.GRU_object = Gated_Recurrent_Unit(fea_size, dropout) # nn.GRUCell(fea_size, fea_size) #
        self.GRU_pred = Gated_Recurrent_Unit(fea_size, dropout)
    def forward(self, feature_obj, feature_phrase, feature_region, mps_object, mps_phrase, mps_region):
        # Abstract: subclasses (e.g. MSDN) implement the actual message passing.
        raise Exception('Please implement the forward function')
    # Here, we do all the operations outof loop, the loop is just to combine the features
    # Less kernel evoke frequency improve the speed of the model
    def prepare_message(self, target_features, source_features, select_mat, gate_module):
        """Gather gated messages from `source_features` into each row of
        `target_features`, as selected by the 0/1 matrix `select_mat`
        (shape: n_targets x n_sources). Targets with no incoming edge get a
        zero message. Returns a (n_targets, fea_size) tensor.
        """
        feature_data = []
        # transfer_list = np.where(select_mat > 0)
        if select_mat.data.sum() == 0:
            # no edges at all: single zero row (matches original behavior)
            temp = Variable(torch.zeros(target_features.size()[1:]), requires_grad=True).type_as(target_features)
            feature_data.append(temp)
        else:
            # all (target, source) edge pairs, gated in one batched call
            transfer_list = (select_mat.data > 0).nonzero()
            source_indices = Variable(transfer_list[:, 1])
            target_indices = Variable(transfer_list[:, 0])
            source_f = torch.index_select(source_features, 0, source_indices)
            target_f = torch.index_select(target_features, 0, target_indices)
            transferred_features = gate_module(target_f, source_f)
            for f_id in range(target_features.size()[0]):
                if select_mat[f_id, :].data.sum() > 0:
                    # NOTE(review): nonzero()[0] takes only the FIRST matching
                    # edge row for this target, so mean(0) averages a single
                    # message; `[:, 0]` may have been intended — verify.
                    feature_indices = (transfer_list[:, 0] == f_id).nonzero()[0]
                    indices = Variable(feature_indices)
                    features = torch.index_select(transferred_features, 0, indices).mean(0).view(-1)
                    feature_data.append(features)
                else:
                    # target with no incoming message: zero vector
                    temp = Variable(torch.zeros(target_features.size()[1:]), requires_grad=True).type_as(target_features)
                    feature_data.append(temp)
        return torch.stack(feature_data, 0)
| 4,594 | 37.291667 | 106 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/msdn/msdn.py | # MSDN for scene graph generation
# Reimnplemetned by Jianwei Yang (jw2yang@gatech.edu)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import Parameter
from .msdn_base import MSDN_BASE
from ..roi_relation_feature_extractors import make_roi_relation_feature_extractor
from ..roi_relation_box_predictors import make_roi_relation_box_predictor
from ..roi_relation_predictors import make_roi_relation_predictor
class MSDN(MSDN_BASE):
    """Multi-level Scene Description Network head: iteratively refines object
    and predicate features by passing gated messages between them, then
    classifies objects and predicates from the refined features.
    """
    def __init__(self, cfg, in_channels, dim=1024, dropout=False, gate_width=128, use_kernel_function=False):
        super(MSDN, self).__init__(dim, dropout, gate_width, use_region=True, use_kernel_function=use_kernel_function)
        self.cfg = cfg
        self.dim = dim
        self.update_step = cfg.MODEL.ROI_RELATION_HEAD.MSDN_FEATURE_UPDATE_STEP
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.pred_feature_extractor = make_roi_relation_feature_extractor(cfg, in_channels)
        # Two-layer MLPs mapping extractor output into the shared `dim`-d space.
        self.obj_embedding = nn.Sequential(
            nn.Linear(self.pred_feature_extractor.out_channels, self.dim),
            nn.ReLU(True),
            nn.Linear(self.dim, self.dim),
        )
        self.rel_embedding = nn.Sequential(
            nn.Linear(self.pred_feature_extractor.out_channels, self.dim),
            nn.ReLU(True),
            nn.Linear(self.dim, self.dim),
        )
        self.obj_predictor = make_roi_relation_box_predictor(cfg, dim)
        self.pred_predictor = make_roi_relation_predictor(cfg, dim)

    def _get_map_idxs(self, proposals, proposal_pairs):
        """Concatenate per-image pair indices into global indices, and build
        0/1 maps from objects to the pairs where they act as subject/object."""
        rel_inds = []
        offset = 0
        for proposal, proposal_pair in zip(proposals, proposal_pairs):
            # BUGFIX: clone() after detach() — detach() shares storage, so the
            # in-place `+= offset` below would otherwise corrupt the stored
            # "idx_pairs" field. Matches RelDN._get_map_idxs.
            rel_ind_i = proposal_pair.get_field("idx_pairs").detach().clone()
            rel_ind_i += offset
            offset += len(proposal)
            rel_inds.append(rel_ind_i)
        rel_inds = torch.cat(rel_inds, 0)
        subj_pred_map = rel_inds.new(sum([len(proposal) for proposal in proposals]), rel_inds.shape[0]).fill_(0).float().detach()
        obj_pred_map = rel_inds.new(sum([len(proposal) for proposal in proposals]), rel_inds.shape[0]).fill_(0).float().detach()
        subj_pred_map.scatter_(0, (rel_inds[:, 0].contiguous().view(1, -1)), 1)
        obj_pred_map.scatter_(0, (rel_inds[:, 1].contiguous().view(1, -1)), 1)
        return rel_inds, subj_pred_map, obj_pred_map

    def forward(self, features, proposals, proposal_pairs):
        """Return ((obj_feats, pred_feats), obj_logits, pred_logits, obj_labels, rel_inds)."""
        rel_inds, subj_pred_map, obj_pred_map = self._get_map_idxs(proposals, proposal_pairs)
        # Object features were cached on the proposals by the box head.
        x_obj = torch.cat([proposal.get_field("features").detach() for proposal in proposals], 0)
        x_pred, _ = self.pred_feature_extractor(features, proposals, proposal_pairs)
        x_pred = self.avgpool(x_pred)
        x_obj = x_obj.view(x_obj.size(0), -1); x_pred = x_pred.view(x_pred.size(0), -1)
        x_obj = self.obj_embedding(x_obj); x_pred = self.rel_embedding(x_pred)
        # Keep per-step feature snapshots; index t is the input to step t.
        x_obj = [x_obj]
        x_pred = [x_pred]
        for t in range(self.update_step):
            '''update object features'''
            # messages from predicates into their subject/object endpoints
            object_sub = self.prepare_message(x_obj[t], x_pred[t], subj_pred_map, self.gate_pred2sub)
            object_obj = self.prepare_message(x_obj[t], x_pred[t], obj_pred_map, self.gate_pred2obj)
            GRU_input_feature_object = (object_sub + object_obj) / 2.
            # residual GRU-style update
            x_obj.append(x_obj[t] + self.GRU_object(GRU_input_feature_object, x_obj[t]))
            '''update predicate features'''
            indices_sub = rel_inds[:, 0]
            indices_obj = rel_inds[:, 1]
            feat_sub2pred = torch.index_select(x_obj[t], 0, indices_sub)
            feat_obj2pred = torch.index_select(x_obj[t], 0, indices_obj)
            phrase_sub = self.gate_sub2pred(x_pred[t], feat_sub2pred)
            phrase_obj = self.gate_obj2pred(x_pred[t], feat_obj2pred)
            GRU_input_feature_phrase = phrase_sub / 2. + phrase_obj / 2.
            x_pred.append(x_pred[t] + self.GRU_pred(GRU_input_feature_phrase, x_pred[t]))
        '''compute results and losses'''
        # final classifier that converts the features into predictions
        # for object prediction, we do not do bbox regression again
        obj_class_logits = self.obj_predictor(x_obj[-1].unsqueeze(2).unsqueeze(3))
        pred_class_logits = self.pred_predictor(x_pred[-1].unsqueeze(2).unsqueeze(3))
        if obj_class_logits is None:
            # fall back to the labels implied by the proposals' stored logits
            logits = torch.cat([proposal.get_field("logits") for proposal in proposals], 0)
            obj_class_labels = logits[:, 1:].max(1)[1] + 1
        else:
            obj_class_labels = obj_class_logits[:, 1:].max(1)[1] + 1
        return (x_obj[-1], x_pred[-1]), obj_class_logits, pred_class_logits, obj_class_labels, rel_inds
def build_msdn_model(cfg, in_channels):
    """Factory: construct an MSDN relation head from the config and input channels."""
    model = MSDN(cfg, in_channels)
    return model
| 4,365 | 42.227723 | 123 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/relpn/utils.py | import torch
def box_pos_encoder(bboxes, width, height):
    """Encode (x1, y1, x2, y2) boxes into a (N, 6) tensor of spatial features:
    the four image-normalized corners, the relative area, and the
    height/width aspect ratio (+1 in the denominator avoids div-by-zero).
    """
    boxes = bboxes.clone()
    x1, y1 = boxes[:, 0], boxes[:, 1]
    x2, y2 = boxes[:, 2], boxes[:, 3]
    rel_x1 = x1 / width
    rel_y1 = y1 / height
    rel_x2 = x2 / width
    rel_y2 = y2 / height
    rel_area = (x2 - x1) * (y2 - y1) / height / width
    aspect = (y2 - y1) / (x2 - x1 + 1)
    return torch.stack((rel_x1, rel_y1, rel_x2, rel_y2, rel_area, aspect), 1)
| 538 | 30.705882 | 105 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/relpn/relpn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.scene_parser.rcnn.modeling.box_coder import BoxCoder
from lib.scene_parser.rcnn.modeling.matcher import Matcher
from lib.scene_parser.rcnn.modeling.pair_matcher import PairMatcher
from lib.scene_parser.rcnn.structures.boxlist_ops import boxlist_iou
from lib.scene_parser.rcnn.structures.bounding_box_pair import BoxPairList
from lib.scene_parser.rcnn.modeling.balanced_positive_negative_pair_sampler import (
BalancedPositiveNegativePairSampler
)
from lib.scene_parser.rcnn.modeling.utils import cat
from .relationshipness import Relationshipness
from .relationshipness import Relationshipnessv2
class RelPN(nn.Module):
    """Relation Proposal Network.

    Forms all subject-object proposal pairs, scores each pair with a learned
    relationshipness module, and keeps the top-scoring pairs. During training
    it also matches pairs against ground-truth relation pairs and returns a
    binary cross-entropy loss on the relationshipness scores.
    """
    def __init__(
        self,
        cfg,
        proposal_matcher,
        fg_bg_pair_sampler,
        box_coder,
        cls_agnostic_bbox_reg=False,
        use_matched_pairs_only=False,
        minimal_matched_pairs=0,
    ):
        super(RelPN, self).__init__()
        self.cfg = cfg
        self.proposal_pair_matcher = proposal_matcher
        self.fg_bg_pair_sampler = fg_bg_pair_sampler
        self.box_coder = box_coder
        self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
        # if True, restrict training pairs to those matched to GT (when enough exist)
        self.use_matched_pairs_only = use_matched_pairs_only
        self.minimal_matched_pairs = minimal_matched_pairs
        # scores pairs from per-box class logits plus box position encoding
        self.relationshipness = Relationshipness(self.cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES, pos_encoding=True)
    def match_targets_to_proposals(self, proposal, target):
        """Build all GT box pairs and all proposal box pairs, match them by a
        pairwise IoU score (mean of subject and object IoU), and return the
        matched GT pairs plus the full proposal-pair list."""
        match_quality_matrix = boxlist_iou(target, proposal)
        temp = []
        target_box_pairs = []
        # pairwise match quality: average of subject-IoU and object-IoU
        for i in range(match_quality_matrix.shape[0]):
            for j in range(match_quality_matrix.shape[0]):
                match_i = match_quality_matrix[i].view(-1, 1)
                match_j = match_quality_matrix[j].view(1, -1)
                match_ij = ((match_i + match_j) / 2)
                # rmeove duplicate index
                match_ij = match_ij.view(-1) # [::match_quality_matrix.shape[1]] = 0
                # non_duplicate_idx = (torch.eye(match_ij.shape[0]).view(-1) == 0).nonzero().view(-1).to(match_ij.device)
                # match_ij = match_ij[non_duplicate_idx]
                temp.append(match_ij)
                boxi = target.bbox[i]; boxj = target.bbox[j]
                box_pair = torch.cat((boxi, boxj), 0)
                target_box_pairs.append(box_pair)
        match_pair_quality_matrix = torch.stack(temp, 0).view(len(temp), -1)
        target_box_pairs = torch.stack(target_box_pairs, 0)
        target_pair = BoxPairList(target_box_pairs, target.size, target.mode)
        target_pair.add_field("labels", target.get_field("pred_labels").view(-1))
        # all N*N (subject, object) proposal pairs with their source indices
        box_subj = proposal.bbox
        box_obj = proposal.bbox
        box_subj = box_subj.unsqueeze(1).repeat(1, box_subj.shape[0], 1)
        box_obj = box_obj.unsqueeze(0).repeat(box_obj.shape[0], 1, 1)
        proposal_box_pairs = torch.cat((box_subj.view(-1, 4), box_obj.view(-1, 4)), 1)
        idx_subj = torch.arange(box_subj.shape[0]).view(-1, 1, 1).repeat(1, box_obj.shape[0], 1).to(proposal.bbox.device)
        idx_obj = torch.arange(box_obj.shape[0]).view(1, -1, 1).repeat(box_subj.shape[0], 1, 1).to(proposal.bbox.device)
        proposal_idx_pairs = torch.cat((idx_subj.view(-1, 1), idx_obj.view(-1, 1)), 1)
        # non_duplicate_idx = (proposal_idx_pairs[:, 0] != proposal_idx_pairs[:, 1]).nonzero()
        # proposal_box_pairs = proposal_box_pairs[non_duplicate_idx.view(-1)]
        # proposal_idx_pairs = proposal_idx_pairs[non_duplicate_idx.view(-1)]
        proposal_pairs = BoxPairList(proposal_box_pairs, proposal.size, proposal.mode)
        proposal_pairs.add_field("idx_pairs", proposal_idx_pairs)
        # matched_idxs = self.proposal_matcher(match_quality_matrix)
        matched_idxs = self.proposal_pair_matcher(match_pair_quality_matrix)
        # Fast RCNN only need "labels" field for selecting the targets
        # target = target.copy_with_fields("pred_labels")
        # get the targets corresponding GT for each proposal
        # NB: need to clamp the indices because we can have a single
        # GT in the image, and matched_idxs can be -2, which goes
        # out of bounds
        if self.use_matched_pairs_only and \
            (matched_idxs >= 0).sum() > self.minimal_matched_pairs:
            # filter all matched_idxs < 0
            proposal_pairs = proposal_pairs[matched_idxs >= 0]
            matched_idxs = matched_idxs[matched_idxs >= 0]
        matched_targets = target_pair[matched_idxs.clamp(min=0)]
        matched_targets.add_field("matched_idxs", matched_idxs)
        return matched_targets, proposal_pairs
    def prepare_targets(self, proposals, targets):
        """Per image: match proposal pairs to GT pairs and derive per-pair
        predicate labels (0 = background, -1 = ignore)."""
        labels = []
        proposal_pairs = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            matched_targets, proposal_pairs_per_image = self.match_targets_to_proposals(
                proposals_per_image, targets_per_image
            )
            matched_idxs = matched_targets.get_field("matched_idxs")
            labels_per_image = matched_targets.get_field("labels")
            labels_per_image = labels_per_image.to(dtype=torch.int64)
            # Label background (below the low threshold)
            bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
            labels_per_image[bg_inds] = 0
            # Label ignore proposals (between low and high thresholds)
            ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
            labels_per_image[ignore_inds] = -1  # -1 is ignored by sampler
            # compute regression targets
            # regression_targets_per_image = self.box_coder.encode(
            #     matched_targets.bbox, proposals_per_image.bbox
            # )
            labels.append(labels_per_image)
            proposal_pairs.append(proposal_pairs_per_image)
            # regression_targets.append(regression_targets_per_image)
        return labels, proposal_pairs
    def _relpnsample_train(self, proposals, targets):
        """
        perform relpn based sampling during training
        """
        labels, proposal_pairs = self.prepare_targets(proposals, targets)
        proposal_pairs = list(proposal_pairs)
        # add corresponding label and regression_targets information to the bounding boxes
        for labels_per_image, proposal_pairs_per_image in zip(
            labels, proposal_pairs
        ):
            proposal_pairs_per_image.add_field("labels", labels_per_image)
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_pair_sampler(labels)
        losses = 0
        for img_idx, (proposals_per_image, pos_inds_img, neg_inds_img) in \
                enumerate(zip(proposals, sampled_pos_inds, sampled_neg_inds)):
            obj_logits = proposals_per_image.get_field('logits')
            obj_bboxes = proposals_per_image.bbox
            # score all pairs, then keep the top BATCH_SIZE_PER_IMAGE
            relness = self.relationshipness(obj_logits, obj_bboxes, proposals_per_image.size)
            # nondiag = (1 - torch.eye(obj_logits.shape[0]).to(relness.device)).view(-1)
            # relness = relness.view(-1)[nondiag.nonzero()]
            relness_sorted, order = torch.sort(relness.view(-1), descending=True)
            # ious = boxlist_iou(proposals_per_image, proposals_per_image)
            # subj_ids = []; obj_ids = []
            # sample_ids = []; id = 0
            # while len(sample_ids) < self.cfg.MODEL.ROI_RELATION_HEAD.BATCH_SIZE_PER_IMAGE and id < len(order):
            #     subj_id = order[id] / len(proposals_per_image)
            #     obj_id = order[id] % len(proposals_per_image)
            #
            #     if len(subj_ids) == 0 and len(obj_ids) == 0 and subj_id != obj_id:
            #         subj_ids.append(subj_id.item())
            #         obj_ids.append(obj_id.item())
            #         sample_ids.append(id)
            #     else:
            #         subj_ious = ious[subj_id, subj_ids]
            #         obj_ious = ious[obj_id, obj_ids]
            #         if (subj_ious.max() < 0.9 or obj_ious.max() < 0.9) and subj_id != obj_id:
            #             subj_ids.append(subj_id.item())
            #             obj_ids.append(obj_id.item())
            #             sample_ids.append(id)
            #     id += 1
            # img_sampled_inds = order[sample_ids]
            img_sampled_inds = order[:self.cfg.MODEL.ROI_RELATION_HEAD.BATCH_SIZE_PER_IMAGE].view(-1)
            proposal_pairs_per_image = proposal_pairs[img_idx][img_sampled_inds]
            proposal_pairs[img_idx] = proposal_pairs_per_image
            # import pdb; pdb.set_trace()
            # img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
            # relness = relness[img_sampled_inds]
            # pos_labels = torch.ones(len(pos_inds_img.nonzero()))
            # neg_labels = torch.zeros(len(neg_inds_img.nonzero()))
            # rellabels = torch.cat((pos_labels, neg_labels), 0).view(-1, 1)
            # losses += F.binary_cross_entropy(relness, rellabels.to(relness.device))
            # BCE on all pairs: target 1 where a GT predicate label exists, else 0
            losses += F.binary_cross_entropy(relness.view(-1, 1), (labels[img_idx] > 0).view(-1, 1).float())
        # distributed sampled proposals, that were obtained on all feature maps
        # concatenated via the fg_bg_sampler, into individual feature map levels
        # for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
        #     zip(sampled_pos_inds, sampled_neg_inds)
        # ):
        #     img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
        #     proposal_pairs_per_image = proposal_pairs[img_idx][img_sampled_inds]
        #     proposal_pairs[img_idx] = proposal_pairs_per_image
        self._proposal_pairs = proposal_pairs
        return proposal_pairs, losses
    def _fullsample_test(self, proposals):
        """
        This method get all subject-object pairs, and return the proposals.
        Note: this function keeps a state.
        Arguments:
            proposals (list[BoxList])
        """
        proposal_pairs = []
        for i, proposals_per_image in enumerate(proposals):
            box_subj = proposals_per_image.bbox
            box_obj = proposals_per_image.bbox
            # all N*N ordered (subject, object) combinations
            box_subj = box_subj.unsqueeze(1).repeat(1, box_subj.shape[0], 1)
            box_obj = box_obj.unsqueeze(0).repeat(box_obj.shape[0], 1, 1)
            proposal_box_pairs = torch.cat((box_subj.view(-1, 4), box_obj.view(-1, 4)), 1)
            idx_subj = torch.arange(box_subj.shape[0]).view(-1, 1, 1).repeat(1, box_obj.shape[0], 1).to(proposals_per_image.bbox.device)
            idx_obj = torch.arange(box_obj.shape[0]).view(1, -1, 1).repeat(box_subj.shape[0], 1, 1).to(proposals_per_image.bbox.device)
            proposal_idx_pairs = torch.cat((idx_subj.view(-1, 1), idx_obj.view(-1, 1)), 1)
            # drop self-pairs (same box as subject and object)
            keep_idx = (proposal_idx_pairs[:, 0] != proposal_idx_pairs[:, 1]).nonzero().view(-1)
            # if we filter non overlap bounding boxes
            if self.cfg.MODEL.ROI_RELATION_HEAD.FILTER_NON_OVERLAP:
                ious = boxlist_iou(proposals_per_image, proposals_per_image).view(-1)
                ious = ious[keep_idx]
                keep_idx = keep_idx[(ious > 0).nonzero().view(-1)]
            proposal_idx_pairs = proposal_idx_pairs[keep_idx]
            proposal_box_pairs = proposal_box_pairs[keep_idx]
            proposal_pairs_per_image = BoxPairList(proposal_box_pairs, proposals_per_image.size, proposals_per_image.mode)
            proposal_pairs_per_image.add_field("idx_pairs", proposal_idx_pairs)
            proposal_pairs.append(proposal_pairs_per_image)
        return proposal_pairs
    def _relpnsample_test(self, proposals):
        """
        perform relpn based sampling during testing
        """
        # no-op kept from the original code
        proposals[0] = proposals[0]
        proposal_pairs = self._fullsample_test(proposals)
        proposal_pairs = list(proposal_pairs)
        relnesses = []
        for img_idx, proposals_per_image in enumerate(proposals):
            obj_logits = proposals_per_image.get_field('logits')
            obj_bboxes = proposals_per_image.bbox
            relness = self.relationshipness(obj_logits, obj_bboxes, proposals_per_image.size)
            # off-diagonal entries only (exclude self-pairs), mirroring _fullsample_test
            keep_idx = (1 - torch.eye(obj_logits.shape[0]).to(relness.device)).view(-1).nonzero().view(-1)
            if self.cfg.MODEL.ROI_RELATION_HEAD.FILTER_NON_OVERLAP:
                ious = boxlist_iou(proposals_per_image, proposals_per_image).view(-1)
                ious = ious[keep_idx]
                keep_idx = keep_idx[(ious > 0).nonzero().view(-1)]
            relness = relness.view(-1)[keep_idx]
            relness_sorted, order = torch.sort(relness.view(-1), descending=True)
            # perform co-nms to filter duplicate bounding boxes
            # ious = boxlist_iou(proposals_per_image, proposals_per_image)
            # subj_ids = []; obj_ids = []
            # sample_ids = []; id = 0
            # while len(sample_ids) < self.cfg.MODEL.ROI_RELATION_HEAD.BATCH_SIZE_PER_IMAGE and id < len(order):
            #     subj_id = order[id] / len(proposals_per_image)
            #     obj_id = order[id] % len(proposals_per_image)
            #
            #     if len(subj_ids) == 0 and len(obj_ids) == 0 and subj_id != obj_id:
            #         subj_ids.append(subj_id.item())
            #         obj_ids.append(obj_id.item())
            #         sample_ids.append(id)
            #     else:
            #         subj_ious = ious[subj_id, subj_ids]
            #         obj_ious = ious[obj_id, obj_ids]
            #         if (subj_ious.max() < 0.9 or obj_ious.max() < 0.9) and subj_id != obj_id:
            #             subj_ids.append(subj_id.item())
            #             obj_ids.append(obj_id.item())
            #             sample_ids.append(id)
            #     id += 1
            # img_sampled_inds = order[sample_ids]
            # relness = relness_sorted[sample_ids]
            img_sampled_inds = order[:self.cfg.MODEL.ROI_RELATION_HEAD.BATCH_SIZE_PER_IMAGE].view(-1)
            relness = relness_sorted[:self.cfg.MODEL.ROI_RELATION_HEAD.BATCH_SIZE_PER_IMAGE].view(-1)
            proposal_pairs_per_image = proposal_pairs[img_idx][img_sampled_inds]
            proposal_pairs[img_idx] = proposal_pairs_per_image
            relnesses.append(relness)
        self._proposal_pairs = proposal_pairs
        return proposal_pairs, relnesses
    def forward(self, proposals, targets=None):
        """
        This method performs the positive/negative sampling, and return
        the sampled proposals.
        Note: this function keeps a state.
        Arguments:
            proposals (list[BoxList])
            targets (list[BoxList])
        """
        if self.training:
            return self._relpnsample_train(proposals, targets)
        else:
            return self._relpnsample_test(proposals)
    def pred_classification_loss(self, class_logits, freq_prior=None):
        """
        Computes the loss for Faster R-CNN.
        This requires that the subsample method has been called beforehand.
        Arguments:
            class_logits (list[Tensor])
        Returns:
            classification_loss (Tensor)
        """
        class_logits = cat(class_logits, dim=0)
        device = class_logits.device
        if not hasattr(self, "_proposal_pairs"):
            raise RuntimeError("subsample needs to be called before")
        proposals = self._proposal_pairs
        labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
        # re-weight the background class by the fg/bg ratio of this batch
        rel_fg_cnt = len(labels.nonzero())
        rel_bg_cnt = labels.shape[0] - rel_fg_cnt
        ce_weights = labels.new(class_logits.size(1)).fill_(1).float()
        ce_weights[0] = float(rel_fg_cnt) / (rel_bg_cnt + 1e-5)
        classification_loss = F.cross_entropy(class_logits, labels, weight=ce_weights)
        # add an auxilary loss to mine some positive relationship pairs
        # class_probs = torch.log_softmax(class_logits[:, 1:], dim=-1)
        # freq_probs = torch.softmax(freq_prior[:, 1:], dim=-1)
        # klloss = F.kl_div(class_probs, freq_probs, reduction='batchmean')
        #
        # classification_loss += klloss
        # class_probs = torch.softmax(class_logits, dim=-1).detach()
        # freq_labels = freq_prior.argmax(1)
        # pred_labels = class_probs[:, 1:].argmax(1) + 1
        # match_idx = (freq_labels == pred_labels).nonzero().view(-1)
        # keep_idx = (labels[match_idx] == 0).nonzero().view(-1)
        # match_idx = match_idx[keep_idx]
        # if match_idx.numel() > 0:
        #     labels_mined = freq_labels[match_idx]
        #     class_logits_mined = class_logits[match_idx]
        #     # weights = labels.new(class_logits.size(0)).fill_(1).float()
        #     weights = class_probs.max(1)[0][match_idx].detach()
        #     classification_loss += (weights * F.cross_entropy(class_logits_mined, labels_mined, weight=ce_weights, reduction='none')).mean()
        return classification_loss
def make_relation_proposal_network(cfg):
    """
    Build a relation proposal network (RelPN) from the configuration.

    Arguments:
        cfg: global configuration node.

    Returns:
        RelPN: the assembled relation proposal network.
    """
    # Pair matcher decides fg/bg assignment of proposal pairs by IoU.
    matcher = PairMatcher(
        cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
        cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
        allow_low_quality_matches=False,
    )
    # Sampler keeps the fg/bg pair ratio fixed per image.
    fg_bg_sampler = BalancedPositiveNegativePairSampler(
        cfg.MODEL.ROI_RELATION_HEAD.BATCH_SIZE_PER_IMAGE,
        cfg.MODEL.ROI_RELATION_HEAD.POSITIVE_FRACTION,
    )
    box_coder = BoxCoder(weights=cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS)
    return RelPN(
        cfg,
        matcher,
        fg_bg_sampler,
        box_coder,
        cfg.MODEL.CLS_AGNOSTIC_BBOX_REG,
    )
| 17,740 | 45.080519 | 142 | py |
graph-rcnn.pytorch | graph-rcnn.pytorch-master/lib/scene_parser/rcnn/modeling/relation_heads/relpn/relationshipness.py | import torch
import torch.nn as nn
from .utils import box_pos_encoder
from ..auxilary.multi_head_att import MultiHeadAttention
class Relationshipness(nn.Module):
    """
    Score how likely each (subject, object) pair of boxes is to be related.

    Given k per-box feature vectors, produces a k x k matrix of sigmoid
    relatedness scores; optionally adds a score term computed from a
    normalized box-position encoding.
    """

    def __init__(self, dim, pos_encoding=False):
        super(Relationshipness, self).__init__()

        def _mlp(in_dim):
            # Two-layer projection into a 64-d embedding space.
            return nn.Sequential(
                nn.Linear(in_dim, 64),
                nn.ReLU(True),
                nn.Linear(64, 64),
            )

        self.subj_proj = _mlp(dim)
        # NOTE(review): attribute spelled "obj_prof" (not "obj_proj") in the
        # original; kept as-is so existing checkpoints still load.
        self.obj_prof = _mlp(dim)

        self.pos_encoding = False
        if pos_encoding:
            self.pos_encoding = True
            self.sub_pos_encoder = _mlp(6)
            self.obj_pos_encoder = _mlp(6)

    def forward(self, x, bbox=None, imsize=None):
        # Subject/object embeddings: k x 64 each.
        subj_emb = self.subj_proj(x)
        obj_emb = self.obj_prof(x)
        # Pairwise dot products give the k x k score matrix.
        scores = torch.mm(subj_emb, obj_emb.t())
        if self.pos_encoding:
            pos = box_pos_encoder(bbox, imsize[0], imsize[1])
            pos_subj = self.sub_pos_encoder(pos)
            pos_obj = self.obj_pos_encoder(pos)
            scores = scores + torch.mm(pos_subj, pos_obj.t())
        return torch.sigmoid(scores)
class Relationshipnessv2(nn.Module):
    """
    Score relatedness between (subject, object) box pairs.

    Same pairwise scoring as ``Relationshipness``, but each per-box
    embedding is first modulated by multi-head self-attention over all
    boxes, so scores are context-aware.
    """

    def __init__(self, dim, pos_encoding=False):
        super(Relationshipnessv2, self).__init__()

        def _mlp(in_dim):
            # Two-layer projection into a 64-d embedding space.
            return nn.Sequential(
                nn.Linear(in_dim, 64),
                nn.ReLU(True),
                nn.Linear(64, 64),
            )

        self.subj_proj = _mlp(dim)
        self.obj_proj = _mlp(dim)

        self.pos_encoding = False
        if pos_encoding:
            self.pos_encoding = True
            self.sub_pos_encoder = _mlp(6)
            self.obj_pos_encoder = _mlp(6)

        # Self-attention modules (8 heads, 64-d) that let every box's
        # embedding attend over the full set of boxes. The pos variants are
        # created unconditionally (matching the original) even though they
        # are only used when pos_encoding is enabled.
        self.self_att_subj = MultiHeadAttention(8, 64)
        self.self_att_obj = MultiHeadAttention(8, 64)
        self.self_att_pos_subj = MultiHeadAttention(8, 64)
        self.self_att_pos_obj = MultiHeadAttention(8, 64)

    def forward(self, x, bbox=None, imsize=None):
        # Context-modulated subject/object embeddings: k x 64 each.
        subj_emb = self.subj_proj(x)
        subj_emb = self.self_att_subj(subj_emb, subj_emb, subj_emb).squeeze(1)
        obj_emb = self.obj_proj(x)
        obj_emb = self.self_att_obj(obj_emb, obj_emb, obj_emb).squeeze(1)

        # Pairwise dot products give the k x k score matrix.
        scores = torch.mm(subj_emb, obj_emb.t())

        if self.pos_encoding:
            pos = box_pos_encoder(bbox, imsize[0], imsize[1])
            pos_subj = self.sub_pos_encoder(pos)
            pos_subj = self.self_att_pos_subj(pos_subj, pos_subj, pos_subj).squeeze(1)
            pos_obj = self.obj_pos_encoder(pos)
            pos_obj = self.self_att_pos_obj(pos_obj, pos_obj, pos_obj).squeeze(1)
            scores = scores + torch.mm(pos_subj, pos_obj.t())

        return torch.sigmoid(scores)
| 3,681 | 31.298246 | 86 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.