# stylegan3-main/train.py
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Train a GAN using the techniques described in the paper
"Alias-Free Generative Adversarial Networks"."""
import os
import click
import re
import json
import tempfile
import torch
import dnnlib
from training import training_loop
from metrics import metric_main
from torch_utils import training_stats
from torch_utils import custom_ops
#----------------------------------------------------------------------------
def subprocess_fn(rank, c, temp_dir):
dnnlib.util.Logger(file_name=os.path.join(c.run_dir, 'log.txt'), file_mode='a', should_flush=True)
# Init torch.distributed.
if c.num_gpus > 1:
init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
if os.name == 'nt':
init_method = 'file:///' + init_file.replace('\\', '/')
torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=c.num_gpus)
else:
init_method = f'file://{init_file}'
torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=c.num_gpus)
# Init torch_utils.
sync_device = torch.device('cuda', rank) if c.num_gpus > 1 else None
training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
if rank != 0:
custom_ops.verbosity = 'none'
# Execute training loop.
training_loop.training_loop(rank=rank, **c)
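# A minimal sketch (hypothetical helpers, not part of train.py) of the
# file-based rendezvous used above: every worker opens the same init file to
# join the process group. The demo uses the CPU-friendly 'gloo' backend;
# train.py itself picks 'gloo' on Windows and 'nccl' elsewhere.
def _demo_worker(rank, world_size, init_file):
    torch.distributed.init_process_group(backend='gloo', init_method=f'file://{init_file}', rank=rank, world_size=world_size)
    t = torch.ones([1]) * rank
    torch.distributed.all_reduce(t) # Sums across ranks: 0 + 1 = 1.
    torch.distributed.destroy_process_group()
def _demo_init(world_size=2):
    with tempfile.TemporaryDirectory() as temp_dir:
        init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
        torch.multiprocessing.spawn(fn=_demo_worker, args=(world_size, init_file), nprocs=world_size)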
#----------------------------------------------------------------------------
def launch_training(c, desc, outdir, dry_run):
dnnlib.util.Logger(should_flush=True)
# Pick output directory.
prev_run_dirs = []
if os.path.isdir(outdir):
prev_run_dirs = [x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))]
prev_run_ids = [re.match(r'^\d+', x) for x in prev_run_dirs]
prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None]
cur_run_id = max(prev_run_ids, default=-1) + 1
c.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{desc}')
assert not os.path.exists(c.run_dir)
# Print options.
print()
print('Training options:')
print(json.dumps(c, indent=2))
print()
print(f'Output directory: {c.run_dir}')
print(f'Number of GPUs: {c.num_gpus}')
print(f'Batch size: {c.batch_size} images')
print(f'Training duration: {c.total_kimg} kimg')
print(f'Dataset path: {c.training_set_kwargs.path}')
print(f'Dataset size: {c.training_set_kwargs.max_size} images')
print(f'Dataset resolution: {c.training_set_kwargs.resolution}')
print(f'Dataset labels: {c.training_set_kwargs.use_labels}')
print(f'Dataset x-flips: {c.training_set_kwargs.xflip}')
print()
# Dry run?
if dry_run:
print('Dry run; exiting.')
return
# Create output directory.
print('Creating output directory...')
os.makedirs(c.run_dir)
with open(os.path.join(c.run_dir, 'training_options.json'), 'wt') as f:
json.dump(c, f, indent=2)
# Launch processes.
print('Launching processes...')
torch.multiprocessing.set_start_method('spawn')
with tempfile.TemporaryDirectory() as temp_dir:
if c.num_gpus == 1:
subprocess_fn(rank=0, c=c, temp_dir=temp_dir)
else:
torch.multiprocessing.spawn(fn=subprocess_fn, args=(c, temp_dir), nprocs=c.num_gpus)
#----------------------------------------------------------------------------
def init_dataset_kwargs(data):
try:
dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data, use_labels=True, max_size=None, xflip=False)
dataset_obj = dnnlib.util.construct_class_by_name(**dataset_kwargs) # Subclass of training.dataset.Dataset.
dataset_kwargs.resolution = dataset_obj.resolution # Be explicit about resolution.
dataset_kwargs.use_labels = dataset_obj.has_labels # Be explicit about labels.
dataset_kwargs.max_size = len(dataset_obj) # Be explicit about dataset size.
return dataset_kwargs, dataset_obj.name
except IOError as err:
raise click.ClickException(f'--data: {err}')
#----------------------------------------------------------------------------
def parse_comma_separated_list(s):
if isinstance(s, list):
return s
if s is None or s.lower() == 'none' or s == '':
return []
return s.split(',')
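# Hypothetical self-test (not part of train.py) showing the three input forms
# the parser above accepts: a comma-separated string, the literal 'none' or an
# empty string (both meaning "no metrics"), and an already-parsed list.
def _check_parse_comma_separated_list():
    assert parse_comma_separated_list('fid50k_full,kid50k_full') == ['fid50k_full', 'kid50k_full']
    assert parse_comma_separated_list('none') == []
    assert parse_comma_separated_list('') == []
    assert parse_comma_separated_list(['a', 'b']) == ['a', 'b'] # Lists pass through unchanged.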
#----------------------------------------------------------------------------
@click.command()
# Required.
@click.option('--outdir', help='Where to save the results', metavar='DIR', required=True)
@click.option('--cfg', help='Base configuration', type=click.Choice(['stylegan3-t', 'stylegan3-r', 'stylegan2']), required=True)
@click.option('--data', help='Training data', metavar='[ZIP|DIR]', type=str, required=True)
@click.option('--gpus', help='Number of GPUs to use', metavar='INT', type=click.IntRange(min=1), required=True)
@click.option('--batch', help='Total batch size', metavar='INT', type=click.IntRange(min=1), required=True)
@click.option('--gamma', help='R1 regularization weight', metavar='FLOAT', type=click.FloatRange(min=0), required=True)
# Optional features.
@click.option('--cond', help='Train conditional model', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--mirror', help='Enable dataset x-flips', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--aug', help='Augmentation mode', type=click.Choice(['noaug', 'ada', 'fixed']), default='ada', show_default=True)
@click.option('--resume', help='Resume from given network pickle', metavar='[PATH|URL]', type=str)
@click.option('--freezed', help='Freeze first layers of D', metavar='INT', type=click.IntRange(min=0), default=0, show_default=True)
# Misc hyperparameters.
@click.option('--p', help='Probability for --aug=fixed', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0.2, show_default=True)
@click.option('--target', help='Target value for --aug=ada', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0.6, show_default=True)
@click.option('--batch-gpu', help='Limit batch size per GPU', metavar='INT', type=click.IntRange(min=1))
@click.option('--cbase', help='Capacity multiplier', metavar='INT', type=click.IntRange(min=1), default=32768, show_default=True)
@click.option('--cmax', help='Max. feature maps', metavar='INT', type=click.IntRange(min=1), default=512, show_default=True)
@click.option('--glr', help='G learning rate [default: varies]', metavar='FLOAT', type=click.FloatRange(min=0))
@click.option('--dlr', help='D learning rate', metavar='FLOAT', type=click.FloatRange(min=0), default=0.002, show_default=True)
@click.option('--map-depth', help='Mapping network depth [default: varies]', metavar='INT', type=click.IntRange(min=1))
@click.option('--mbstd-group', help='Minibatch std group size', metavar='INT', type=click.IntRange(min=1), default=4, show_default=True)
# Misc settings.
@click.option('--desc', help='String to include in result dir name', metavar='STR', type=str)
@click.option('--metrics', help='Quality metrics', metavar='[NAME|A,B,C|none]', type=parse_comma_separated_list, default='fid50k_full', show_default=True)
@click.option('--kimg', help='Total training duration', metavar='KIMG', type=click.IntRange(min=1), default=25000, show_default=True)
@click.option('--tick', help='How often to print progress', metavar='KIMG', type=click.IntRange(min=1), default=4, show_default=True)
@click.option('--snap', help='How often to save snapshots', metavar='TICKS', type=click.IntRange(min=1), default=50, show_default=True)
@click.option('--seed', help='Random seed', metavar='INT', type=click.IntRange(min=0), default=0, show_default=True)
@click.option('--fp32', help='Disable mixed-precision', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--nobench', help='Disable cuDNN benchmarking', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--workers', help='DataLoader worker processes', metavar='INT', type=click.IntRange(min=1), default=3, show_default=True)
@click.option('-n','--dry-run', help='Print training options and exit', is_flag=True)
def main(**kwargs):
"""Train a GAN using the techniques described in the paper
"Alias-Free Generative Adversarial Networks".
Examples:
\b
# Train StyleGAN3-T for AFHQv2 using 8 GPUs.
python train.py --outdir=~/training-runs --cfg=stylegan3-t --data=~/datasets/afhqv2-512x512.zip \\
--gpus=8 --batch=32 --gamma=8.2 --mirror=1
\b
# Fine-tune StyleGAN3-R for MetFaces-U using 8 GPUs, starting from the pre-trained FFHQ-U pickle.
python train.py --outdir=~/training-runs --cfg=stylegan3-r --data=~/datasets/metfacesu-1024x1024.zip \\
--gpus=8 --batch=32 --gamma=6.6 --mirror=1 --kimg=5000 --snap=5 \\
--resume=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-ffhqu-1024x1024.pkl
\b
# Train StyleGAN2 for FFHQ at 1024x1024 resolution using 8 GPUs.
python train.py --outdir=~/training-runs --cfg=stylegan2 --data=~/datasets/ffhq-1024x1024.zip \\
--gpus=8 --batch=32 --gamma=10 --mirror=1 --aug=noaug
"""
# Initialize config.
opts = dnnlib.EasyDict(kwargs) # Command line arguments.
c = dnnlib.EasyDict() # Main config dict.
c.G_kwargs = dnnlib.EasyDict(class_name=None, z_dim=512, w_dim=512, mapping_kwargs=dnnlib.EasyDict())
c.D_kwargs = dnnlib.EasyDict(class_name='training.networks_stylegan2.Discriminator', block_kwargs=dnnlib.EasyDict(), mapping_kwargs=dnnlib.EasyDict(), epilogue_kwargs=dnnlib.EasyDict())
c.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', betas=[0,0.99], eps=1e-8)
c.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', betas=[0,0.99], eps=1e-8)
c.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.StyleGAN2Loss')
c.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, prefetch_factor=2)
# Training set.
c.training_set_kwargs, dataset_name = init_dataset_kwargs(data=opts.data)
if opts.cond and not c.training_set_kwargs.use_labels:
raise click.ClickException('--cond=True requires labels specified in dataset.json')
c.training_set_kwargs.use_labels = opts.cond
c.training_set_kwargs.xflip = opts.mirror
# Hyperparameters & settings.
c.num_gpus = opts.gpus
c.batch_size = opts.batch
c.batch_gpu = opts.batch_gpu or opts.batch // opts.gpus
c.G_kwargs.channel_base = c.D_kwargs.channel_base = opts.cbase
c.G_kwargs.channel_max = c.D_kwargs.channel_max = opts.cmax
c.G_kwargs.mapping_kwargs.num_layers = (8 if opts.cfg == 'stylegan2' else 2) if opts.map_depth is None else opts.map_depth
c.D_kwargs.block_kwargs.freeze_layers = opts.freezed
c.D_kwargs.epilogue_kwargs.mbstd_group_size = opts.mbstd_group
c.loss_kwargs.r1_gamma = opts.gamma
c.G_opt_kwargs.lr = (0.002 if opts.cfg == 'stylegan2' else 0.0025) if opts.glr is None else opts.glr
c.D_opt_kwargs.lr = opts.dlr
c.metrics = opts.metrics
c.total_kimg = opts.kimg
c.kimg_per_tick = opts.tick
c.image_snapshot_ticks = c.network_snapshot_ticks = opts.snap
c.random_seed = c.training_set_kwargs.random_seed = opts.seed
c.data_loader_kwargs.num_workers = opts.workers
# Sanity checks.
if c.batch_size % c.num_gpus != 0:
raise click.ClickException('--batch must be a multiple of --gpus')
if c.batch_size % (c.num_gpus * c.batch_gpu) != 0:
raise click.ClickException('--batch must be a multiple of --gpus times --batch-gpu')
if c.batch_gpu < c.D_kwargs.epilogue_kwargs.mbstd_group_size:
raise click.ClickException('--batch-gpu cannot be smaller than --mbstd-group')
if any(not metric_main.is_valid_metric(metric) for metric in c.metrics):
raise click.ClickException('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
# Base configuration.
c.ema_kimg = c.batch_size * 10 / 32
if opts.cfg == 'stylegan2':
c.G_kwargs.class_name = 'training.networks_stylegan2.Generator'
c.loss_kwargs.style_mixing_prob = 0.9 # Enable style mixing regularization.
c.loss_kwargs.pl_weight = 2 # Enable path length regularization.
c.G_reg_interval = 4 # Enable lazy regularization for G.
c.G_kwargs.fused_modconv_default = 'inference_only' # Speed up training by using regular convolutions instead of grouped convolutions.
c.loss_kwargs.pl_no_weight_grad = True # Speed up path length regularization by skipping gradient computation wrt. conv2d weights.
else:
c.G_kwargs.class_name = 'training.networks_stylegan3.Generator'
c.G_kwargs.magnitude_ema_beta = 0.5 ** (c.batch_size / (20 * 1e3))
if opts.cfg == 'stylegan3-r':
c.G_kwargs.conv_kernel = 1 # Use 1x1 convolutions.
c.G_kwargs.channel_base *= 2 # Double the number of feature maps.
c.G_kwargs.channel_max *= 2
c.G_kwargs.use_radial_filters = True # Use radially symmetric downsampling filters.
c.loss_kwargs.blur_init_sigma = 10 # Blur the images seen by the discriminator.
c.loss_kwargs.blur_fade_kimg = c.batch_size * 200 / 32 # Fade out the blur during the first N kimg.
# Augmentation.
if opts.aug != 'noaug':
c.augment_kwargs = dnnlib.EasyDict(class_name='training.augment.AugmentPipe', xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1)
if opts.aug == 'ada':
c.ada_target = opts.target
if opts.aug == 'fixed':
c.augment_p = opts.p
# Resume.
if opts.resume is not None:
c.resume_pkl = opts.resume
c.ada_kimg = 100 # Make ADA react faster at the beginning.
c.ema_rampup = None # Disable EMA rampup.
c.loss_kwargs.blur_init_sigma = 0 # Disable blur rampup.
# Performance-related toggles.
if opts.fp32:
c.G_kwargs.num_fp16_res = c.D_kwargs.num_fp16_res = 0
c.G_kwargs.conv_clamp = c.D_kwargs.conv_clamp = None
if opts.nobench:
c.cudnn_benchmark = False
# Description string.
desc = f'{opts.cfg:s}-{dataset_name:s}-gpus{c.num_gpus:d}-batch{c.batch_size:d}-gamma{c.loss_kwargs.r1_gamma:g}'
if opts.desc is not None:
desc += f'-{opts.desc}'
# Launch.
launch_training(c=c, desc=desc, outdir=opts.outdir, dry_run=opts.dry_run)
#----------------------------------------------------------------------------
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
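# A sketch (hypothetical helper, not part of train.py) collecting the
# batch-size-dependent defaults that main() derives above. The reference
# values are anchored at batch_size=32, the configuration used in the paper.
def _derived_hyperparams(batch_size, cfg='stylegan3-t'):
    out = dict(ema_kimg=batch_size * 10 / 32) # Half-life of G's weight EMA, in kimg.
    if cfg != 'stylegan2':
        out['magnitude_ema_beta'] = 0.5 ** (batch_size / (20 * 1e3)) # Decay of magnitude tracking.
    if cfg == 'stylegan3-r':
        out['blur_fade_kimg'] = batch_size * 200 / 32 # Discriminator blur fades out over this many kimg.
    return out
#----------------------------------------------------------------------------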
# stylegan3-main/calc_metrics.py
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Calculate quality metrics for previous training run or pretrained network pickle."""
import os
import click
import json
import tempfile
import copy
import torch
import dnnlib
import legacy
from metrics import metric_main
from metrics import metric_utils
from torch_utils import training_stats
from torch_utils import custom_ops
from torch_utils import misc
from torch_utils.ops import conv2d_gradfix
#----------------------------------------------------------------------------
def subprocess_fn(rank, args, temp_dir):
dnnlib.util.Logger(should_flush=True)
# Init torch.distributed.
if args.num_gpus > 1:
init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
if os.name == 'nt':
init_method = 'file:///' + init_file.replace('\\', '/')
torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)
else:
init_method = f'file://{init_file}'
torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)
# Init torch_utils.
sync_device = torch.device('cuda', rank) if args.num_gpus > 1 else None
training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
if rank != 0 or not args.verbose:
custom_ops.verbosity = 'none'
# Configure torch.
device = torch.device('cuda', rank)
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
conv2d_gradfix.enabled = True
# Print network summary.
G = copy.deepcopy(args.G).eval().requires_grad_(False).to(device)
if rank == 0 and args.verbose:
z = torch.empty([1, G.z_dim], device=device)
c = torch.empty([1, G.c_dim], device=device)
misc.print_module_summary(G, [z, c])
# Calculate each metric.
for metric in args.metrics:
if rank == 0 and args.verbose:
print(f'Calculating {metric}...')
progress = metric_utils.ProgressMonitor(verbose=args.verbose)
result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs,
num_gpus=args.num_gpus, rank=rank, device=device, progress=progress)
if rank == 0:
metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)
if rank == 0 and args.verbose:
print()
# Done.
if rank == 0 and args.verbose:
print('Exiting...')
#----------------------------------------------------------------------------
def parse_comma_separated_list(s):
if isinstance(s, list):
return s
if s is None or s.lower() == 'none' or s == '':
return []
return s.split(',')
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('network_pkl', '--network', help='Network pickle filename or URL', metavar='PATH', required=True)
@click.option('--metrics', help='Quality metrics', metavar='[NAME|A,B,C|none]', type=parse_comma_separated_list, default='fid50k_full', show_default=True)
@click.option('--data', help='Dataset to evaluate against [default: look up]', metavar='[ZIP|DIR]')
@click.option('--mirror', help='Enable dataset x-flips [default: look up]', type=bool, metavar='BOOL')
@click.option('--gpus', help='Number of GPUs to use', type=int, default=1, metavar='INT', show_default=True)
@click.option('--verbose', help='Print optional information', type=bool, default=True, metavar='BOOL', show_default=True)
def calc_metrics(ctx, network_pkl, metrics, data, mirror, gpus, verbose):
"""Calculate quality metrics for previous training run or pretrained network pickle.
Examples:
\b
# Previous training run: look up options automatically, save result to JSONL file.
python calc_metrics.py --metrics=eqt50k_int,eqr50k \\
--network=~/training-runs/00000-stylegan3-r-mydataset/network-snapshot-000000.pkl
\b
# Pre-trained network pickle: specify dataset explicitly, print result to stdout.
python calc_metrics.py --metrics=fid50k_full --data=~/datasets/ffhq-1024x1024.zip --mirror=1 \\
--network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhq-1024x1024.pkl
\b
Recommended metrics:
fid50k_full Frechet inception distance against the full dataset.
kid50k_full Kernel inception distance against the full dataset.
pr50k3_full Precision and recall against the full dataset.
ppl2_wend Perceptual path length in W, endpoints, full image.
eqt50k_int Equivariance w.r.t. integer translation (EQ-T).
eqt50k_frac Equivariance w.r.t. fractional translation (EQ-T_frac).
eqr50k Equivariance w.r.t. rotation (EQ-R).
\b
Legacy metrics:
fid50k Frechet inception distance against 50k real images.
kid50k Kernel inception distance against 50k real images.
pr50k3 Precision and recall against 50k real images.
is50k Inception score for CIFAR-10.
"""
dnnlib.util.Logger(should_flush=True)
# Validate arguments.
args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, network_pkl=network_pkl, verbose=verbose)
if not all(metric_main.is_valid_metric(metric) for metric in args.metrics):
ctx.fail('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
if not args.num_gpus >= 1:
ctx.fail('--gpus must be at least 1')
# Load network.
if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
ctx.fail('--network must point to a file or URL')
if args.verbose:
print(f'Loading network from "{network_pkl}"...')
with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
network_dict = legacy.load_network_pkl(f)
args.G = network_dict['G_ema'] # subclass of torch.nn.Module
# Initialize dataset options.
if data is not None:
args.dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data)
elif network_dict['training_set_kwargs'] is not None:
args.dataset_kwargs = dnnlib.EasyDict(network_dict['training_set_kwargs'])
else:
ctx.fail('Could not look up dataset options; please specify --data')
# Finalize dataset options.
args.dataset_kwargs.resolution = args.G.img_resolution
args.dataset_kwargs.use_labels = (args.G.c_dim != 0)
if mirror is not None:
args.dataset_kwargs.xflip = mirror
# Print dataset options.
if args.verbose:
print('Dataset options:')
print(json.dumps(args.dataset_kwargs, indent=2))
# Locate run dir.
args.run_dir = None
if os.path.isfile(network_pkl):
pkl_dir = os.path.dirname(network_pkl)
if os.path.isfile(os.path.join(pkl_dir, 'training_options.json')):
args.run_dir = pkl_dir
# Launch processes.
if args.verbose:
print('Launching processes...')
torch.multiprocessing.set_start_method('spawn')
with tempfile.TemporaryDirectory() as temp_dir:
if args.num_gpus == 1:
subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
else:
torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
#----------------------------------------------------------------------------
if __name__ == "__main__":
calc_metrics() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
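# A sketch (hypothetical helper, not part of calc_metrics.py) of the loading
# path used above: dnnlib.util.open_url accepts both local paths and URLs, and
# legacy.load_network_pkl converts older TensorFlow-based pickles on the fly.
def _demo_load_generator(network_pkl):
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'] # Subclass of torch.nn.Module.
    z = torch.randn([1, G.z_dim]) # Latent code.
    c = torch.zeros([1, G.c_dim]) # Class label; empty for unconditional models.
    img = G(z, c) # [1, 3, H, W], dynamic range roughly [-1, 1].
    return img
#----------------------------------------------------------------------------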
# stylegan3-main/training/loss.py
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Loss functions."""
import numpy as np
import torch
from torch_utils import training_stats
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import upfirdn2d
#----------------------------------------------------------------------------
class Loss:
def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, gain, cur_nimg): # to be overridden by subclass
raise NotImplementedError()
#----------------------------------------------------------------------------
class StyleGAN2Loss(Loss):
def __init__(self, device, G, D, augment_pipe=None, r1_gamma=10, style_mixing_prob=0, pl_weight=0, pl_batch_shrink=2, pl_decay=0.01, pl_no_weight_grad=False, blur_init_sigma=0, blur_fade_kimg=0):
super().__init__()
self.device = device
self.G = G
self.D = D
self.augment_pipe = augment_pipe
self.r1_gamma = r1_gamma
self.style_mixing_prob = style_mixing_prob
self.pl_weight = pl_weight
self.pl_batch_shrink = pl_batch_shrink
self.pl_decay = pl_decay
self.pl_no_weight_grad = pl_no_weight_grad
self.pl_mean = torch.zeros([], device=device)
self.blur_init_sigma = blur_init_sigma
self.blur_fade_kimg = blur_fade_kimg
def run_G(self, z, c, update_emas=False):
ws = self.G.mapping(z, c, update_emas=update_emas)
if self.style_mixing_prob > 0:
with torch.autograd.profiler.record_function('style_mixing'):
cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
ws[:, cutoff:] = self.G.mapping(torch.randn_like(z), c, update_emas=False)[:, cutoff:]
img = self.G.synthesis(ws, update_emas=update_emas)
return img, ws
def run_D(self, img, c, blur_sigma=0, update_emas=False):
blur_size = np.floor(blur_sigma * 3)
if blur_size > 0:
with torch.autograd.profiler.record_function('blur'):
f = torch.arange(-blur_size, blur_size + 1, device=img.device).div(blur_sigma).square().neg().exp2()
img = upfirdn2d.filter2d(img, f / f.sum())
if self.augment_pipe is not None:
img = self.augment_pipe(img)
logits = self.D(img, c, update_emas=update_emas)
return logits
def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, gain, cur_nimg):
assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth']
if self.pl_weight == 0:
phase = {'Greg': 'none', 'Gboth': 'Gmain'}.get(phase, phase)
if self.r1_gamma == 0:
phase = {'Dreg': 'none', 'Dboth': 'Dmain'}.get(phase, phase)
blur_sigma = max(1 - cur_nimg / (self.blur_fade_kimg * 1e3), 0) * self.blur_init_sigma if self.blur_fade_kimg > 0 else 0
# Gmain: Maximize logits for generated images.
if phase in ['Gmain', 'Gboth']:
with torch.autograd.profiler.record_function('Gmain_forward'):
gen_img, _gen_ws = self.run_G(gen_z, gen_c)
gen_logits = self.run_D(gen_img, gen_c, blur_sigma=blur_sigma)
training_stats.report('Loss/scores/fake', gen_logits)
training_stats.report('Loss/signs/fake', gen_logits.sign())
loss_Gmain = torch.nn.functional.softplus(-gen_logits) # -log(sigmoid(gen_logits))
training_stats.report('Loss/G/loss', loss_Gmain)
with torch.autograd.profiler.record_function('Gmain_backward'):
loss_Gmain.mean().mul(gain).backward()
# Gpl: Apply path length regularization.
if phase in ['Greg', 'Gboth']:
with torch.autograd.profiler.record_function('Gpl_forward'):
batch_size = gen_z.shape[0] // self.pl_batch_shrink
gen_img, gen_ws = self.run_G(gen_z[:batch_size], gen_c[:batch_size])
pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients(self.pl_no_weight_grad):
pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws], create_graph=True, only_inputs=True)[0]
pl_lengths = pl_grads.square().sum(2).mean(1).sqrt()
pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay)
self.pl_mean.copy_(pl_mean.detach())
pl_penalty = (pl_lengths - pl_mean).square()
training_stats.report('Loss/pl_penalty', pl_penalty)
loss_Gpl = pl_penalty * self.pl_weight
training_stats.report('Loss/G/reg', loss_Gpl)
with torch.autograd.profiler.record_function('Gpl_backward'):
loss_Gpl.mean().mul(gain).backward()
# Dmain: Minimize logits for generated images.
loss_Dgen = 0
if phase in ['Dmain', 'Dboth']:
with torch.autograd.profiler.record_function('Dgen_forward'):
gen_img, _gen_ws = self.run_G(gen_z, gen_c, update_emas=True)
gen_logits = self.run_D(gen_img, gen_c, blur_sigma=blur_sigma, update_emas=True)
training_stats.report('Loss/scores/fake', gen_logits)
training_stats.report('Loss/signs/fake', gen_logits.sign())
loss_Dgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits))
with torch.autograd.profiler.record_function('Dgen_backward'):
loss_Dgen.mean().mul(gain).backward()
# Dmain: Maximize logits for real images.
# Dr1: Apply R1 regularization.
if phase in ['Dmain', 'Dreg', 'Dboth']:
name = 'Dreal' if phase == 'Dmain' else 'Dr1' if phase == 'Dreg' else 'Dreal_Dr1'
with torch.autograd.profiler.record_function(name + '_forward'):
real_img_tmp = real_img.detach().requires_grad_(phase in ['Dreg', 'Dboth'])
real_logits = self.run_D(real_img_tmp, real_c, blur_sigma=blur_sigma)
training_stats.report('Loss/scores/real', real_logits)
training_stats.report('Loss/signs/real', real_logits.sign())
loss_Dreal = 0
if phase in ['Dmain', 'Dboth']:
loss_Dreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits))
training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)
loss_Dr1 = 0
if phase in ['Dreg', 'Dboth']:
with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0]
r1_penalty = r1_grads.square().sum([1,2,3])
loss_Dr1 = r1_penalty * (self.r1_gamma / 2)
training_stats.report('Loss/r1_penalty', r1_penalty)
training_stats.report('Loss/D/reg', loss_Dr1)
with torch.autograd.profiler.record_function(name + '_backward'):
(loss_Dreal + loss_Dr1).mean().mul(gain).backward()
#----------------------------------------------------------------------------
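# Two small sketches (hypothetical helpers, not part of loss.py) of the math
# used above. _demo_blur_kernel mirrors run_D's filter: exp2(-(x/sigma)^2)
# sampled on an integer grid and normalized to sum to 1. _check_softplus
# verifies the identity behind the non-saturating loss,
# softplus(-x) == -log(sigmoid(x)), which stays stable for large |x|.
def _demo_blur_kernel(blur_sigma=2.0):
    blur_size = np.floor(blur_sigma * 3)
    f = torch.arange(-blur_size, blur_size + 1).div(blur_sigma).square().neg().exp2()
    return f / f.sum() # 1D kernel; upfirdn2d.filter2d applies it separably.
def _check_softplus(x=None):
    x = torch.linspace(-5, 5, steps=11) if x is None else x
    assert torch.allclose(torch.nn.functional.softplus(-x), -torch.log(torch.sigmoid(x)), atol=1e-6)
#----------------------------------------------------------------------------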
# stylegan3-main/training/augment.py
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Augmentation pipeline from the paper
"Training Generative Adversarial Networks with Limited Data".
Matches the original implementation by Karras et al. at
https://github.com/NVlabs/stylegan2-ada/blob/main/training/augment.py"""
import numpy as np
import scipy.signal
import torch
from torch_utils import persistence
from torch_utils import misc
from torch_utils.ops import upfirdn2d
from torch_utils.ops import grid_sample_gradfix
from torch_utils.ops import conv2d_gradfix
#----------------------------------------------------------------------------
# Coefficients of various wavelet decomposition low-pass filters.
wavelets = {
'haar': [0.7071067811865476, 0.7071067811865476],
'db1': [0.7071067811865476, 0.7071067811865476],
'db2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
'db3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
'db4': [-0.010597401784997278, 0.032883011666982945, 0.030841381835986965, -0.18703481171888114, -0.02798376941698385, 0.6308807679295904, 0.7148465705525415, 0.23037781330885523],
'db5': [0.003335725285001549, -0.012580751999015526, -0.006241490213011705, 0.07757149384006515, -0.03224486958502952, -0.24229488706619015, 0.13842814590110342, 0.7243085284385744, 0.6038292697974729, 0.160102397974125],
'db6': [-0.00107730108499558, 0.004777257511010651, 0.0005538422009938016, -0.031582039318031156, 0.02752286553001629, 0.09750160558707936, -0.12976686756709563, -0.22626469396516913, 0.3152503517092432, 0.7511339080215775, 0.4946238903983854, 0.11154074335008017],
'db7': [0.0003537138000010399, -0.0018016407039998328, 0.00042957797300470274, 0.012550998556013784, -0.01657454163101562, -0.03802993693503463, 0.0806126091510659, 0.07130921926705004, -0.22403618499416572, -0.14390600392910627, 0.4697822874053586, 0.7291320908465551, 0.39653931948230575, 0.07785205408506236],
'db8': [-0.00011747678400228192, 0.0006754494059985568, -0.0003917403729959771, -0.00487035299301066, 0.008746094047015655, 0.013981027917015516, -0.04408825393106472, -0.01736930100202211, 0.128747426620186, 0.00047248457399797254, -0.2840155429624281, -0.015829105256023893, 0.5853546836548691, 0.6756307362980128, 0.3128715909144659, 0.05441584224308161],
'sym2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
'sym3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
'sym4': [-0.07576571478927333, -0.02963552764599851, 0.49761866763201545, 0.8037387518059161, 0.29785779560527736, -0.09921954357684722, -0.012603967262037833, 0.0322231006040427],
'sym5': [0.027333068345077982, 0.029519490925774643, -0.039134249302383094, 0.1993975339773936, 0.7234076904024206, 0.6339789634582119, 0.01660210576452232, -0.17532808990845047, -0.021101834024758855, 0.019538882735286728],
'sym6': [0.015404109327027373, 0.0034907120842174702, -0.11799011114819057, -0.048311742585633, 0.4910559419267466, 0.787641141030194, 0.3379294217276218, -0.07263752278646252, -0.021060292512300564, 0.04472490177066578, 0.0017677118642428036, -0.007800708325034148],
'sym7': [0.002681814568257878, -0.0010473848886829163, -0.01263630340325193, 0.03051551316596357, 0.0678926935013727, -0.049552834937127255, 0.017441255086855827, 0.5361019170917628, 0.767764317003164, 0.2886296317515146, -0.14004724044296152, -0.10780823770381774, 0.004010244871533663, 0.010268176708511255],
'sym8': [-0.0033824159510061256, -0.0005421323317911481, 0.03169508781149298, 0.007607487324917605, -0.1432942383508097, -0.061273359067658524, 0.4813596512583722, 0.7771857517005235, 0.3644418948353314, -0.05194583810770904, -0.027219029917056003, 0.049137179673607506, 0.003808752013890615, -0.01495225833704823, -0.0003029205147213668, 0.0018899503327594609],
}
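# Sanity check (hypothetical helper, not part of augment.py): each entry above
# is an orthonormal wavelet's low-pass (scaling) filter, so its coefficients
# should sum to sqrt(2); upfirdn2d.setup_filter re-normalizes them before use.
def _check_wavelet_dc_gain():
    for name, coefs in wavelets.items():
        assert abs(sum(coefs) - np.sqrt(2)) < 1e-6, name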
#----------------------------------------------------------------------------
# Helpers for constructing transformation matrices.
def matrix(*rows, device=None):
assert all(len(row) == len(rows[0]) for row in rows)
elems = [x for row in rows for x in row]
ref = [x for x in elems if isinstance(x, torch.Tensor)]
if len(ref) == 0:
return misc.constant(np.asarray(rows), device=device)
assert device is None or device == ref[0].device
elems = [x if isinstance(x, torch.Tensor) else misc.constant(x, shape=ref[0].shape, device=ref[0].device) for x in elems]
return torch.stack(elems, dim=-1).reshape(ref[0].shape + (len(rows), -1))
def translate2d(tx, ty, **kwargs):
return matrix(
[1, 0, tx],
[0, 1, ty],
[0, 0, 1],
**kwargs)
def translate3d(tx, ty, tz, **kwargs):
return matrix(
[1, 0, 0, tx],
[0, 1, 0, ty],
[0, 0, 1, tz],
[0, 0, 0, 1],
**kwargs)
def scale2d(sx, sy, **kwargs):
return matrix(
[sx, 0, 0],
[0, sy, 0],
[0, 0, 1],
**kwargs)
def scale3d(sx, sy, sz, **kwargs):
return matrix(
[sx, 0, 0, 0],
[0, sy, 0, 0],
[0, 0, sz, 0],
[0, 0, 0, 1],
**kwargs)
def rotate2d(theta, **kwargs):
return matrix(
[torch.cos(theta), torch.sin(-theta), 0],
[torch.sin(theta), torch.cos(theta), 0],
[0, 0, 1],
**kwargs)
def rotate3d(v, theta, **kwargs):
vx = v[..., 0]; vy = v[..., 1]; vz = v[..., 2]
s = torch.sin(theta); c = torch.cos(theta); cc = 1 - c
return matrix(
[vx*vx*cc+c, vx*vy*cc-vz*s, vx*vz*cc+vy*s, 0],
[vy*vx*cc+vz*s, vy*vy*cc+c, vy*vz*cc-vx*s, 0],
[vz*vx*cc-vy*s, vz*vy*cc+vx*s, vz*vz*cc+c, 0],
[0, 0, 0, 1],
**kwargs)
def translate2d_inv(tx, ty, **kwargs):
return translate2d(-tx, -ty, **kwargs)
def scale2d_inv(sx, sy, **kwargs):
return scale2d(1 / sx, 1 / sy, **kwargs)
def rotate2d_inv(theta, **kwargs):
return rotate2d(-theta, **kwargs)
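# A small sketch (hypothetical helper, not part of augment.py) of how these
# helpers compose. Transforms are homogeneous 3x3 matrices, so chaining a
# forward transform with its *_inv counterparts in reverse order yields the
# identity.
def _check_transform_inverses():
    theta = torch.tensor(np.pi / 4, dtype=torch.float32)
    m = translate2d(2.0, 3.0) @ rotate2d(theta) @ scale2d(2.0, 0.5)
    m_inv = scale2d_inv(2.0, 0.5) @ rotate2d_inv(theta) @ translate2d_inv(2.0, 3.0)
    assert torch.allclose(m @ m_inv, torch.eye(3), atol=1e-5)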
#----------------------------------------------------------------------------
# Versatile image augmentation pipeline from the paper
# "Training Generative Adversarial Networks with Limited Data".
#
# All augmentations are disabled by default; individual augmentations can
# be enabled by setting their probability multipliers to 1.
@persistence.persistent_class
class AugmentPipe(torch.nn.Module):
def __init__(self,
xflip=0, rotate90=0, xint=0, xint_max=0.125,
scale=0, rotate=0, aniso=0, xfrac=0, scale_std=0.2, rotate_max=1, aniso_std=0.2, xfrac_std=0.125,
brightness=0, contrast=0, lumaflip=0, hue=0, saturation=0, brightness_std=0.2, contrast_std=0.5, hue_max=1, saturation_std=1,
imgfilter=0, imgfilter_bands=[1,1,1,1], imgfilter_std=1,
noise=0, cutout=0, noise_std=0.1, cutout_size=0.5,
):
super().__init__()
self.register_buffer('p', torch.ones([])) # Overall multiplier for augmentation probability.
# Pixel blitting.
self.xflip = float(xflip) # Probability multiplier for x-flip.
self.rotate90 = float(rotate90) # Probability multiplier for 90 degree rotations.
self.xint = float(xint) # Probability multiplier for integer translation.
self.xint_max = float(xint_max) # Range of integer translation, relative to image dimensions.
# General geometric transformations.
self.scale = float(scale) # Probability multiplier for isotropic scaling.
self.rotate = float(rotate) # Probability multiplier for arbitrary rotation.
self.aniso = float(aniso) # Probability multiplier for anisotropic scaling.
self.xfrac = float(xfrac) # Probability multiplier for fractional translation.
self.scale_std = float(scale_std) # Log2 standard deviation of isotropic scaling.
self.rotate_max = float(rotate_max) # Range of arbitrary rotation, 1 = full circle.
self.aniso_std = float(aniso_std) # Log2 standard deviation of anisotropic scaling.
self.xfrac_std = float(xfrac_std) # Standard deviation of fractional translation, relative to image dimensions.
# Color transformations.
self.brightness = float(brightness) # Probability multiplier for brightness.
self.contrast = float(contrast) # Probability multiplier for contrast.
self.lumaflip = float(lumaflip) # Probability multiplier for luma flip.
self.hue = float(hue) # Probability multiplier for hue rotation.
self.saturation = float(saturation) # Probability multiplier for saturation.
self.brightness_std = float(brightness_std) # Standard deviation of brightness.
self.contrast_std = float(contrast_std) # Log2 standard deviation of contrast.
self.hue_max = float(hue_max) # Range of hue rotation, 1 = full circle.
self.saturation_std = float(saturation_std) # Log2 standard deviation of saturation.
# Image-space filtering.
self.imgfilter = float(imgfilter) # Probability multiplier for image-space filtering.
self.imgfilter_bands = list(imgfilter_bands) # Probability multipliers for individual frequency bands.
self.imgfilter_std = float(imgfilter_std) # Log2 standard deviation of image-space filter amplification.
# Image-space corruptions.
self.noise = float(noise) # Probability multiplier for additive RGB noise.
self.cutout = float(cutout) # Probability multiplier for cutout.
self.noise_std = float(noise_std) # Standard deviation of additive RGB noise.
self.cutout_size = float(cutout_size) # Size of the cutout rectangle, relative to image dimensions.
# Setup orthogonal lowpass filter for geometric augmentations.
self.register_buffer('Hz_geom', upfirdn2d.setup_filter(wavelets['sym6']))
# Construct filter bank for image-space filtering.
Hz_lo = np.asarray(wavelets['sym2']) # H(z)
Hz_hi = Hz_lo * ((-1) ** np.arange(Hz_lo.size)) # H(-z)
Hz_lo2 = np.convolve(Hz_lo, Hz_lo[::-1]) / 2 # H(z) * H(z^-1) / 2
Hz_hi2 = np.convolve(Hz_hi, Hz_hi[::-1]) / 2 # H(-z) * H(-z^-1) / 2
Hz_fbank = np.eye(4, 1) # Bandpass(H(z), b_i)
for i in range(1, Hz_fbank.shape[0]):
Hz_fbank = np.dstack([Hz_fbank, np.zeros_like(Hz_fbank)]).reshape(Hz_fbank.shape[0], -1)[:, :-1]
Hz_fbank = scipy.signal.convolve(Hz_fbank, [Hz_lo2])
Hz_fbank[i, (Hz_fbank.shape[1] - Hz_hi2.size) // 2 : (Hz_fbank.shape[1] + Hz_hi2.size) // 2] += Hz_hi2
self.register_buffer('Hz_fbank', torch.as_tensor(Hz_fbank, dtype=torch.float32))
def forward(self, images, debug_percentile=None):
assert isinstance(images, torch.Tensor) and images.ndim == 4
batch_size, num_channels, height, width = images.shape
device = images.device
if debug_percentile is not None:
debug_percentile = torch.as_tensor(debug_percentile, dtype=torch.float32, device=device)
# -------------------------------------
# Select parameters for pixel blitting.
# -------------------------------------
# Initialize inverse homogeneous 2D transform: G_inv @ pixel_out ==> pixel_in
I_3 = torch.eye(3, device=device)
G_inv = I_3
# Apply x-flip with probability (xflip * strength).
if self.xflip > 0:
i = torch.floor(torch.rand([batch_size], device=device) * 2)
i = torch.where(torch.rand([batch_size], device=device) < self.xflip * self.p, i, torch.zeros_like(i))
if debug_percentile is not None:
i = torch.full_like(i, torch.floor(debug_percentile * 2))
G_inv = G_inv @ scale2d_inv(1 - 2 * i, 1)
# Apply 90 degree rotations with probability (rotate90 * strength).
if self.rotate90 > 0:
i = torch.floor(torch.rand([batch_size], device=device) * 4)
i = torch.where(torch.rand([batch_size], device=device) < self.rotate90 * self.p, i, torch.zeros_like(i))
if debug_percentile is not None:
i = torch.full_like(i, torch.floor(debug_percentile * 4))
G_inv = G_inv @ rotate2d_inv(-np.pi / 2 * i)
# Apply integer translation with probability (xint * strength).
if self.xint > 0:
t = (torch.rand([batch_size, 2], device=device) * 2 - 1) * self.xint_max
t = torch.where(torch.rand([batch_size, 1], device=device) < self.xint * self.p, t, torch.zeros_like(t))
if debug_percentile is not None:
t = torch.full_like(t, (debug_percentile * 2 - 1) * self.xint_max)
G_inv = G_inv @ translate2d_inv(torch.round(t[:,0] * width), torch.round(t[:,1] * height))
# --------------------------------------------------------
# Select parameters for general geometric transformations.
# --------------------------------------------------------
# Apply isotropic scaling with probability (scale * strength).
if self.scale > 0:
s = torch.exp2(torch.randn([batch_size], device=device) * self.scale_std)
s = torch.where(torch.rand([batch_size], device=device) < self.scale * self.p, s, torch.ones_like(s))
if debug_percentile is not None:
s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.scale_std))
G_inv = G_inv @ scale2d_inv(s, s)
# Apply pre-rotation with probability p_rot.
p_rot = 1 - torch.sqrt((1 - self.rotate * self.p).clamp(0, 1)) # P(pre OR post) = p
if self.rotate > 0:
theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max
theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
if debug_percentile is not None:
theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.rotate_max)
G_inv = G_inv @ rotate2d_inv(-theta) # Before anisotropic scaling.
# Apply anisotropic scaling with probability (aniso * strength).
if self.aniso > 0:
s = torch.exp2(torch.randn([batch_size], device=device) * self.aniso_std)
s = torch.where(torch.rand([batch_size], device=device) < self.aniso * self.p, s, torch.ones_like(s))
if debug_percentile is not None:
s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.aniso_std))
G_inv = G_inv @ scale2d_inv(s, 1 / s)
# Apply post-rotation with probability p_rot.
if self.rotate > 0:
theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max
theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
if debug_percentile is not None:
theta = torch.zeros_like(theta)
G_inv = G_inv @ rotate2d_inv(-theta) # After anisotropic scaling.
# Apply fractional translation with probability (xfrac * strength).
if self.xfrac > 0:
t = torch.randn([batch_size, 2], device=device) * self.xfrac_std
t = torch.where(torch.rand([batch_size, 1], device=device) < self.xfrac * self.p, t, torch.zeros_like(t))
if debug_percentile is not None:
t = torch.full_like(t, torch.erfinv(debug_percentile * 2 - 1) * self.xfrac_std)
G_inv = G_inv @ translate2d_inv(t[:,0] * width, t[:,1] * height)
# ----------------------------------
# Execute geometric transformations.
# ----------------------------------
# Execute if the transform is not identity.
if G_inv is not I_3:
# Calculate padding.
cx = (width - 1) / 2
cy = (height - 1) / 2
cp = matrix([-cx, -cy, 1], [cx, -cy, 1], [cx, cy, 1], [-cx, cy, 1], device=device) # [idx, xyz]
cp = G_inv @ cp.t() # [batch, xyz, idx]
Hz_pad = self.Hz_geom.shape[0] // 4
margin = cp[:, :2, :].permute(1, 0, 2).flatten(1) # [xy, batch * idx]
margin = torch.cat([-margin, margin]).max(dim=1).values # [x0, y0, x1, y1]
margin = margin + misc.constant([Hz_pad * 2 - cx, Hz_pad * 2 - cy] * 2, device=device)
margin = margin.max(misc.constant([0, 0] * 2, device=device))
margin = margin.min(misc.constant([width-1, height-1] * 2, device=device))
mx0, my0, mx1, my1 = margin.ceil().to(torch.int32)
# Pad image and adjust origin.
images = torch.nn.functional.pad(input=images, pad=[mx0,mx1,my0,my1], mode='reflect')
G_inv = translate2d((mx0 - mx1) / 2, (my0 - my1) / 2) @ G_inv
# Upsample.
images = upfirdn2d.upsample2d(x=images, f=self.Hz_geom, up=2)
G_inv = scale2d(2, 2, device=device) @ G_inv @ scale2d_inv(2, 2, device=device)
G_inv = translate2d(-0.5, -0.5, device=device) @ G_inv @ translate2d_inv(-0.5, -0.5, device=device)
# Execute transformation.
shape = [batch_size, num_channels, (height + Hz_pad * 2) * 2, (width + Hz_pad * 2) * 2]
G_inv = scale2d(2 / images.shape[3], 2 / images.shape[2], device=device) @ G_inv @ scale2d_inv(2 / shape[3], 2 / shape[2], device=device)
grid = torch.nn.functional.affine_grid(theta=G_inv[:,:2,:], size=shape, align_corners=False)
images = grid_sample_gradfix.grid_sample(images, grid)
# Downsample and crop.
images = upfirdn2d.downsample2d(x=images, f=self.Hz_geom, down=2, padding=-Hz_pad*2, flip_filter=True)
# --------------------------------------------
# Select parameters for color transformations.
# --------------------------------------------
# Initialize homogeneous 3D transformation matrix: C @ color_in ==> color_out
I_4 = torch.eye(4, device=device)
C = I_4
# Apply brightness with probability (brightness * strength).
if self.brightness > 0:
b = torch.randn([batch_size], device=device) * self.brightness_std
b = torch.where(torch.rand([batch_size], device=device) < self.brightness * self.p, b, torch.zeros_like(b))
if debug_percentile is not None:
b = torch.full_like(b, torch.erfinv(debug_percentile * 2 - 1) * self.brightness_std)
C = translate3d(b, b, b) @ C
# Apply contrast with probability (contrast * strength).
if self.contrast > 0:
c = torch.exp2(torch.randn([batch_size], device=device) * self.contrast_std)
c = torch.where(torch.rand([batch_size], device=device) < self.contrast * self.p, c, torch.ones_like(c))
if debug_percentile is not None:
c = torch.full_like(c, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.contrast_std))
C = scale3d(c, c, c) @ C
# Apply luma flip with probability (lumaflip * strength).
v = misc.constant(np.asarray([1, 1, 1, 0]) / np.sqrt(3), device=device) # Luma axis.
if self.lumaflip > 0:
i = torch.floor(torch.rand([batch_size, 1, 1], device=device) * 2)
i = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.lumaflip * self.p, i, torch.zeros_like(i))
if debug_percentile is not None:
i = torch.full_like(i, torch.floor(debug_percentile * 2))
C = (I_4 - 2 * v.ger(v) * i) @ C # Householder reflection.
# Apply hue rotation with probability (hue * strength).
if self.hue > 0 and num_channels > 1:
theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.hue_max
theta = torch.where(torch.rand([batch_size], device=device) < self.hue * self.p, theta, torch.zeros_like(theta))
if debug_percentile is not None:
theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.hue_max)
C = rotate3d(v, theta) @ C # Rotate around v.
# Apply saturation with probability (saturation * strength).
if self.saturation > 0 and num_channels > 1:
s = torch.exp2(torch.randn([batch_size, 1, 1], device=device) * self.saturation_std)
s = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.saturation * self.p, s, torch.ones_like(s))
if debug_percentile is not None:
s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.saturation_std))
C = (v.ger(v) + (I_4 - v.ger(v)) * s) @ C
# ------------------------------
# Execute color transformations.
# ------------------------------
# Execute if the transform is not identity.
if C is not I_4:
images = images.reshape([batch_size, num_channels, height * width])
if num_channels == 3:
images = C[:, :3, :3] @ images + C[:, :3, 3:]
elif num_channels == 1:
C = C[:, :3, :].mean(dim=1, keepdims=True)
images = images * C[:, :, :3].sum(dim=2, keepdims=True) + C[:, :, 3:]
else:
raise ValueError('Image must be RGB (3 channels) or L (1 channel)')
images = images.reshape([batch_size, num_channels, height, width])
# ----------------------
# Image-space filtering.
# ----------------------
if self.imgfilter > 0:
num_bands = self.Hz_fbank.shape[0]
assert len(self.imgfilter_bands) == num_bands
expected_power = misc.constant(np.array([10, 1, 1, 1]) / 13, device=device) # Expected power spectrum (1/f).
# Apply amplification for each band with probability (imgfilter * strength * band_strength).
g = torch.ones([batch_size, num_bands], device=device) # Global gain vector (identity).
for i, band_strength in enumerate(self.imgfilter_bands):
t_i = torch.exp2(torch.randn([batch_size], device=device) * self.imgfilter_std)
t_i = torch.where(torch.rand([batch_size], device=device) < self.imgfilter * self.p * band_strength, t_i, torch.ones_like(t_i))
if debug_percentile is not None:
t_i = torch.full_like(t_i, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.imgfilter_std)) if band_strength > 0 else torch.ones_like(t_i)
t = torch.ones([batch_size, num_bands], device=device) # Temporary gain vector.
t[:, i] = t_i # Replace i'th element.
t = t / (expected_power * t.square()).sum(dim=-1, keepdims=True).sqrt() # Normalize power.
g = g * t # Accumulate into global gain.
# Construct combined amplification filter.
Hz_prime = g @ self.Hz_fbank # [batch, tap]
Hz_prime = Hz_prime.unsqueeze(1).repeat([1, num_channels, 1]) # [batch, channels, tap]
Hz_prime = Hz_prime.reshape([batch_size * num_channels, 1, -1]) # [batch * channels, 1, tap]
# Apply filter.
p = self.Hz_fbank.shape[1] // 2
images = images.reshape([1, batch_size * num_channels, height, width])
images = torch.nn.functional.pad(input=images, pad=[p,p,p,p], mode='reflect')
images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size*num_channels)
images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size*num_channels)
images = images.reshape([batch_size, num_channels, height, width])
# ------------------------
# Image-space corruptions.
# ------------------------
# Apply additive RGB noise with probability (noise * strength).
if self.noise > 0:
sigma = torch.randn([batch_size, 1, 1, 1], device=device).abs() * self.noise_std
sigma = torch.where(torch.rand([batch_size, 1, 1, 1], device=device) < self.noise * self.p, sigma, torch.zeros_like(sigma))
if debug_percentile is not None:
sigma = torch.full_like(sigma, torch.erfinv(debug_percentile) * self.noise_std)
images = images + torch.randn([batch_size, num_channels, height, width], device=device) * sigma
# Apply cutout with probability (cutout * strength).
if self.cutout > 0:
size = torch.full([batch_size, 2, 1, 1, 1], self.cutout_size, device=device)
size = torch.where(torch.rand([batch_size, 1, 1, 1, 1], device=device) < self.cutout * self.p, size, torch.zeros_like(size))
center = torch.rand([batch_size, 2, 1, 1, 1], device=device)
if debug_percentile is not None:
size = torch.full_like(size, self.cutout_size)
center = torch.full_like(center, debug_percentile)
coord_x = torch.arange(width, device=device).reshape([1, 1, 1, -1])
coord_y = torch.arange(height, device=device).reshape([1, 1, -1, 1])
mask_x = (((coord_x + 0.5) / width - center[:, 0]).abs() >= size[:, 0] / 2)
mask_y = (((coord_y + 0.5) / height - center[:, 1]).abs() >= size[:, 1] / 2)
mask = torch.logical_or(mask_x, mask_y).to(torch.float32)
images = images * mask
return images
#----------------------------------------------------------------------------
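# Minimal usage sketch (hypothetical helper, not part of augment.py): enable a
# few augmentations, set the overall probability p, and push a batch through.
# train.py builds the pipe with all multipliers set to 1 and lets ADA adjust p
# during training.
def _demo_augment_pipe():
    pipe = AugmentPipe(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, brightness=1, contrast=1)
    pipe.p.copy_(torch.as_tensor(0.6)) # Overall augmentation probability.
    images = torch.randn([8, 3, 64, 64]) # Fake RGB batch, NCHW, roughly [-1, 1].
    out = pipe(images)
    assert out.shape == images.shape # Geometric ops pad, warp, and crop back to size.
    return out
#----------------------------------------------------------------------------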
# stylegan3-main/training/dataset.py
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Streaming images and labels from datasets created with dataset_tool.py."""
import os
import numpy as np
import zipfile
import PIL.Image
import json
import torch
import dnnlib
try:
import pyspng
except ImportError:
pyspng = None
#----------------------------------------------------------------------------
class Dataset(torch.utils.data.Dataset):
def __init__(self,
name, # Name of the dataset.
raw_shape, # Shape of the raw image data (NCHW).
max_size = None, # Artificially limit the size of the dataset. None = no limit. Applied before xflip.
use_labels = False, # Enable conditioning labels? False = label dimension is zero.
xflip = False, # Artificially double the size of the dataset via x-flips. Applied after max_size.
random_seed = 0, # Random seed to use when applying max_size.
):
self._name = name
self._raw_shape = list(raw_shape)
self._use_labels = use_labels
self._raw_labels = None
self._label_shape = None
# Apply max_size.
self._raw_idx = np.arange(self._raw_shape[0], dtype=np.int64)
if (max_size is not None) and (self._raw_idx.size > max_size):
np.random.RandomState(random_seed).shuffle(self._raw_idx)
self._raw_idx = np.sort(self._raw_idx[:max_size])
# Apply xflip.
self._xflip = np.zeros(self._raw_idx.size, dtype=np.uint8)
if xflip:
self._raw_idx = np.tile(self._raw_idx, 2)
self._xflip = np.concatenate([self._xflip, np.ones_like(self._xflip)])
def _get_raw_labels(self):
if self._raw_labels is None:
self._raw_labels = self._load_raw_labels() if self._use_labels else None
if self._raw_labels is None:
self._raw_labels = np.zeros([self._raw_shape[0], 0], dtype=np.float32)
assert isinstance(self._raw_labels, np.ndarray)
assert self._raw_labels.shape[0] == self._raw_shape[0]
assert self._raw_labels.dtype in [np.float32, np.int64]
if self._raw_labels.dtype == np.int64:
assert self._raw_labels.ndim == 1
assert np.all(self._raw_labels >= 0)
return self._raw_labels
def close(self): # to be overridden by subclass
pass
def _load_raw_image(self, raw_idx): # to be overridden by subclass
raise NotImplementedError
def _load_raw_labels(self): # to be overridden by subclass
raise NotImplementedError
def __getstate__(self):
return dict(self.__dict__, _raw_labels=None)
def __del__(self):
try:
self.close()
except:
pass
def __len__(self):
return self._raw_idx.size
def __getitem__(self, idx):
image = self._load_raw_image(self._raw_idx[idx])
assert isinstance(image, np.ndarray)
assert list(image.shape) == self.image_shape
assert image.dtype == np.uint8
if self._xflip[idx]:
assert image.ndim == 3 # CHW
image = image[:, :, ::-1]
return image.copy(), self.get_label(idx)
def get_label(self, idx):
label = self._get_raw_labels()[self._raw_idx[idx]]
if label.dtype == np.int64:
onehot = np.zeros(self.label_shape, dtype=np.float32)
onehot[label] = 1
label = onehot
return label.copy()
def get_details(self, idx):
d = dnnlib.EasyDict()
d.raw_idx = int(self._raw_idx[idx])
d.xflip = (int(self._xflip[idx]) != 0)
d.raw_label = self._get_raw_labels()[d.raw_idx].copy()
return d
@property
def name(self):
return self._name
@property
def image_shape(self):
return list(self._raw_shape[1:])
@property
def num_channels(self):
assert len(self.image_shape) == 3 # CHW
return self.image_shape[0]
@property
def resolution(self):
assert len(self.image_shape) == 3 # CHW
assert self.image_shape[1] == self.image_shape[2]
return self.image_shape[1]
@property
def label_shape(self):
if self._label_shape is None:
raw_labels = self._get_raw_labels()
if raw_labels.dtype == np.int64:
self._label_shape = [int(np.max(raw_labels)) + 1]
else:
self._label_shape = raw_labels.shape[1:]
return list(self._label_shape)
@property
def label_dim(self):
assert len(self.label_shape) == 1
return self.label_shape[0]
@property
def has_labels(self):
return any(x != 0 for x in self.label_shape)
@property
def has_onehot_labels(self):
return self._get_raw_labels().dtype == np.int64
#----------------------------------------------------------------------------
class ImageFolderDataset(Dataset):
def __init__(self,
path, # Path to directory or zip.
resolution = None, # Ensure specific resolution, None = highest available.
**super_kwargs, # Additional arguments for the Dataset base class.
):
self._path = path
self._zipfile = None
if os.path.isdir(self._path):
self._type = 'dir'
self._all_fnames = {os.path.relpath(os.path.join(root, fname), start=self._path) for root, _dirs, files in os.walk(self._path) for fname in files}
elif self._file_ext(self._path) == '.zip':
self._type = 'zip'
self._all_fnames = set(self._get_zipfile().namelist())
else:
raise IOError('Path must point to a directory or zip')
PIL.Image.init()
self._image_fnames = sorted(fname for fname in self._all_fnames if self._file_ext(fname) in PIL.Image.EXTENSION)
if len(self._image_fnames) == 0:
raise IOError('No image files found in the specified path')
name = os.path.splitext(os.path.basename(self._path))[0]
raw_shape = [len(self._image_fnames)] + list(self._load_raw_image(0).shape)
if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution):
raise IOError('Image files do not match the specified resolution')
super().__init__(name=name, raw_shape=raw_shape, **super_kwargs)
@staticmethod
def _file_ext(fname):
return os.path.splitext(fname)[1].lower()
def _get_zipfile(self):
assert self._type == 'zip'
if self._zipfile is None:
self._zipfile = zipfile.ZipFile(self._path)
return self._zipfile
def _open_file(self, fname):
if self._type == 'dir':
return open(os.path.join(self._path, fname), 'rb')
if self._type == 'zip':
return self._get_zipfile().open(fname, 'r')
return None
def close(self):
try:
if self._zipfile is not None:
self._zipfile.close()
finally:
self._zipfile = None
def __getstate__(self):
return dict(super().__getstate__(), _zipfile=None)
def _load_raw_image(self, raw_idx):
fname = self._image_fnames[raw_idx]
with self._open_file(fname) as f:
if pyspng is not None and self._file_ext(fname) == '.png':
image = pyspng.load(f.read())
else:
image = np.array(PIL.Image.open(f))
if image.ndim == 2:
image = image[:, :, np.newaxis] # HW => HWC
image = image.transpose(2, 0, 1) # HWC => CHW
return image
def _load_raw_labels(self):
fname = 'dataset.json'
if fname not in self._all_fnames:
return None
with self._open_file(fname) as f:
labels = json.load(f)['labels']
if labels is None:
return None
labels = dict(labels)
labels = [labels[fname.replace('\\', '/')] for fname in self._image_fnames]
labels = np.array(labels)
labels = labels.astype({1: np.int64, 2: np.float32}[labels.ndim])
return labels
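# The dataset.json layout implied by _load_raw_labels() is a list of
# [filename, label] pairs (values below are hypothetical):
#   {"labels": [["00000/img00000000.png", 6], ["00000/img00000001.png", 3]]}
# Scalar entries yield an int64 class-index array (ndim == 1); per-image lists
# yield a float32 matrix (ndim == 2). Minimal usage sketch, assuming a prepared
# archive and the base-class use_labels option (the path is illustrative):
#   dataset = ImageFolderDataset(path='./data/afhq.zip', use_labels=True)
#   image, label = dataset[0]  # uint8 CHW array, float32 label vector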
#----------------------------------------------------------------------------
| 8,642 | 35.16318 | 158 | py |
stylegan3 | stylegan3-main/training/training_loop.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Main training loop."""
import os
import time
import copy
import json
import pickle
import psutil
import PIL.Image
import numpy as np
import torch
import dnnlib
from torch_utils import misc
from torch_utils import training_stats
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import grid_sample_gradfix
import legacy
from metrics import metric_main
#----------------------------------------------------------------------------
def setup_snapshot_image_grid(training_set, random_seed=0):
rnd = np.random.RandomState(random_seed)
gw = np.clip(7680 // training_set.image_shape[2], 7, 32)
gh = np.clip(4320 // training_set.image_shape[1], 4, 32)
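# E.g. for 256x256 training images this yields a 30x16 grid
# (7680 // 256 = 30, 4320 // 256 = 16), i.e. the grid targets an 8K
# (7680x4320) canvas regardless of dataset resolution, clipped to
# between 7x4 and 32x32 cells.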
# No labels => show random subset of training samples.
if not training_set.has_labels:
all_indices = list(range(len(training_set)))
rnd.shuffle(all_indices)
grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)]
else:
# Group training samples by label.
label_groups = dict() # label => [idx, ...]
for idx in range(len(training_set)):
label = tuple(training_set.get_details(idx).raw_label.flat[::-1])
if label not in label_groups:
label_groups[label] = []
label_groups[label].append(idx)
# Reorder.
label_order = sorted(label_groups.keys())
for label in label_order:
rnd.shuffle(label_groups[label])
# Organize into grid.
grid_indices = []
for y in range(gh):
label = label_order[y % len(label_order)]
indices = label_groups[label]
grid_indices += [indices[x % len(indices)] for x in range(gw)]
label_groups[label] = [indices[(i + gw) % len(indices)] for i in range(len(indices))]
# Load data.
images, labels = zip(*[training_set[i] for i in grid_indices])
return (gw, gh), np.stack(images), np.stack(labels)
#----------------------------------------------------------------------------
def save_image_grid(img, fname, drange, grid_size):
lo, hi = drange
img = np.asarray(img, dtype=np.float32)
img = (img - lo) * (255 / (hi - lo))
img = np.rint(img).clip(0, 255).astype(np.uint8)
gw, gh = grid_size
_N, C, H, W = img.shape
img = img.reshape([gh, gw, C, H, W])
img = img.transpose(0, 3, 1, 4, 2)
img = img.reshape([gh * H, gw * W, C])
assert C in [1, 3]
if C == 1:
PIL.Image.fromarray(img[:, :, 0], 'L').save(fname)
if C == 3:
PIL.Image.fromarray(img, 'RGB').save(fname)
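# Worked shape example for the reshape/transpose above: a (4, 3, 256, 256)
# batch with grid_size == (2, 2) becomes [2, 2, 3, 256, 256], then
# [2, 256, 2, 256, 3], and finally one 512x512x3 canvas with the images
# tiled row-major.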
#----------------------------------------------------------------------------
def training_loop(
run_dir = '.', # Output directory.
training_set_kwargs = {}, # Options for training set.
data_loader_kwargs = {}, # Options for torch.utils.data.DataLoader.
G_kwargs = {}, # Options for generator network.
D_kwargs = {}, # Options for discriminator network.
G_opt_kwargs = {}, # Options for generator optimizer.
D_opt_kwargs = {}, # Options for discriminator optimizer.
augment_kwargs = None, # Options for augmentation pipeline. None = disable.
loss_kwargs = {}, # Options for loss function.
metrics = [], # Metrics to evaluate during training.
random_seed = 0, # Global random seed.
num_gpus = 1, # Number of GPUs participating in the training.
rank = 0, # Rank of the current process in [0, num_gpus).
batch_size = 4, # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.
batch_gpu = 4, # Number of samples processed at a time by one GPU.
ema_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights.
ema_rampup = 0.05, # EMA ramp-up coefficient. None = no rampup.
G_reg_interval = None, # How often to perform regularization for G? None = disable lazy regularization.
D_reg_interval = 16, # How often to perform regularization for D? None = disable lazy regularization.
augment_p = 0, # Initial value of augmentation probability.
ada_target = None, # ADA target value. None = fixed p.
ada_interval = 4, # How often to perform ADA adjustment?
ada_kimg = 500, # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit.
total_kimg = 25000, # Total length of the training, measured in thousands of real images.
kimg_per_tick = 4, # Progress snapshot interval.
image_snapshot_ticks = 50, # How often to save image snapshots? None = disable.
network_snapshot_ticks = 50, # How often to save network snapshots? None = disable.
resume_pkl = None, # Network pickle to resume training from.
resume_kimg = 0, # First kimg to report when resuming training.
cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark?
abort_fn = None, # Callback function for determining whether to abort training. Must return consistent results across ranks.
progress_fn = None, # Callback function for updating training progress. Called for all ranks.
):
# Initialize.
start_time = time.time()
device = torch.device('cuda', rank)
np.random.seed(random_seed * num_gpus + rank)
torch.manual_seed(random_seed * num_gpus + rank)
torch.backends.cudnn.benchmark = cudnn_benchmark # Improves training speed.
torch.backends.cuda.matmul.allow_tf32 = False # Improves numerical accuracy.
torch.backends.cudnn.allow_tf32 = False # Improves numerical accuracy.
conv2d_gradfix.enabled = True # Improves training speed.
grid_sample_gradfix.enabled = True # Avoids errors with the augmentation pipe.
# Load training set.
if rank == 0:
print('Loading training set...')
training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset
training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))
if rank == 0:
print()
print('Num images: ', len(training_set))
print('Image shape:', training_set.image_shape)
print('Label shape:', training_set.label_shape)
print()
# Construct networks.
if rank == 0:
print('Constructing networks...')
common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels)
G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
G_ema = copy.deepcopy(G).eval()
# Resume from existing pickle.
if (resume_pkl is not None) and (rank == 0):
print(f'Resuming from "{resume_pkl}"')
with dnnlib.util.open_url(resume_pkl) as f:
resume_data = legacy.load_network_pkl(f)
for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]:
misc.copy_params_and_buffers(resume_data[name], module, require_all=False)
# Print network summary tables.
if rank == 0:
z = torch.empty([batch_gpu, G.z_dim], device=device)
c = torch.empty([batch_gpu, G.c_dim], device=device)
img = misc.print_module_summary(G, [z, c])
misc.print_module_summary(D, [img, c])
# Setup augmentation.
if rank == 0:
print('Setting up augmentation...')
augment_pipe = None
ada_stats = None
if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None):
augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
augment_pipe.p.copy_(torch.as_tensor(augment_p))
if ada_target is not None:
ada_stats = training_stats.Collector(regex='Loss/signs/real')
# Distribute across GPUs.
if rank == 0:
print(f'Distributing across {num_gpus} GPUs...')
for module in [G, D, G_ema, augment_pipe]:
if module is not None and num_gpus > 1:
for param in misc.params_and_buffers(module):
torch.distributed.broadcast(param, src=0)
# Setup training phases.
if rank == 0:
print('Setting up training phases...')
loss = dnnlib.util.construct_class_by_name(device=device, G=G, D=D, augment_pipe=augment_pipe, **loss_kwargs) # subclass of training.loss.Loss
phases = []
for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]:
if reg_interval is None:
opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)]
else: # Lazy regularization.
mb_ratio = reg_interval / (reg_interval + 1)
opt_kwargs = dnnlib.EasyDict(opt_kwargs)
opt_kwargs.lr = opt_kwargs.lr * mb_ratio
opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]
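# E.g. with D_reg_interval = 16: mb_ratio = 16/17 ~= 0.941, so a learning rate
# of 0.002 becomes ~0.00188 and beta2 = 0.99 becomes 0.99**0.941 ~= 0.9906
# (hyperparameter values are illustrative). The rescaling keeps the effective
# optimizer dynamics consistent with running the regularizer on every batch.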
opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)]
phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)]
for phase in phases:
phase.start_event = None
phase.end_event = None
if rank == 0:
phase.start_event = torch.cuda.Event(enable_timing=True)
phase.end_event = torch.cuda.Event(enable_timing=True)
# Export sample images.
grid_size = None
grid_z = None
grid_c = None
if rank == 0:
print('Exporting sample images...')
grid_size, images, labels = setup_snapshot_image_grid(training_set=training_set)
save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size)
grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu)
grid_c = torch.from_numpy(labels).to(device).split(batch_gpu)
images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
save_image_grid(images, os.path.join(run_dir, 'fakes_init.png'), drange=[-1,1], grid_size=grid_size)
# Initialize logs.
if rank == 0:
print('Initializing logs...')
stats_collector = training_stats.Collector(regex='.*')
stats_metrics = dict()
stats_jsonl = None
stats_tfevents = None
if rank == 0:
stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')
try:
import torch.utils.tensorboard as tensorboard
stats_tfevents = tensorboard.SummaryWriter(run_dir)
except ImportError as err:
print('Skipping tfevents export:', err)
# Train.
if rank == 0:
print(f'Training for {total_kimg} kimg...')
print()
cur_nimg = resume_kimg * 1000
cur_tick = 0
tick_start_nimg = cur_nimg
tick_start_time = time.time()
maintenance_time = tick_start_time - start_time
batch_idx = 0
if progress_fn is not None:
progress_fn(0, total_kimg)
while True:
# Fetch training data.
with torch.autograd.profiler.record_function('data_fetch'):
phase_real_img, phase_real_c = next(training_set_iterator)
phase_real_img = (phase_real_img.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu)
phase_real_c = phase_real_c.to(device).split(batch_gpu)
all_gen_z = torch.randn([len(phases) * batch_size, G.z_dim], device=device)
all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]
all_gen_c = [training_set.get_label(np.random.randint(len(training_set))) for _ in range(len(phases) * batch_size)]
all_gen_c = torch.from_numpy(np.stack(all_gen_c)).pin_memory().to(device)
all_gen_c = [phase_gen_c.split(batch_gpu) for phase_gen_c in all_gen_c.split(batch_size)]
# Execute training phases.
for phase, phase_gen_z, phase_gen_c in zip(phases, all_gen_z, all_gen_c):
if batch_idx % phase.interval != 0:
continue
if phase.start_event is not None:
phase.start_event.record(torch.cuda.current_stream(device))
# Accumulate gradients.
phase.opt.zero_grad(set_to_none=True)
phase.module.requires_grad_(True)
for real_img, real_c, gen_z, gen_c in zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c):
loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c, gen_z=gen_z, gen_c=gen_c, gain=phase.interval, cur_nimg=cur_nimg)
phase.module.requires_grad_(False)
# Update weights.
with torch.autograd.profiler.record_function(phase.name + '_opt'):
params = [param for param in phase.module.parameters() if param.grad is not None]
if len(params) > 0:
flat = torch.cat([param.grad.flatten() for param in params])
if num_gpus > 1:
torch.distributed.all_reduce(flat)
flat /= num_gpus
misc.nan_to_num(flat, nan=0, posinf=1e5, neginf=-1e5, out=flat)
grads = flat.split([param.numel() for param in params])
for param, grad in zip(params, grads):
param.grad = grad.reshape(param.shape)
phase.opt.step()
# Phase done.
if phase.end_event is not None:
phase.end_event.record(torch.cuda.current_stream(device))
# Update G_ema.
with torch.autograd.profiler.record_function('Gema'):
ema_nimg = ema_kimg * 1000
if ema_rampup is not None:
ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
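# E.g. with batch_size = 32 and ema_kimg = 10 (ema_nimg = 10000):
# ema_beta = 0.5 ** (32 / 10000) ~= 0.9978, so old generator weights decay
# to half influence after 10k images regardless of batch size.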
for p_ema, p in zip(G_ema.parameters(), G.parameters()):
p_ema.copy_(p.lerp(p_ema, ema_beta))
for b_ema, b in zip(G_ema.buffers(), G.buffers()):
b_ema.copy_(b)
# Update state.
cur_nimg += batch_size
batch_idx += 1
# Execute ADA heuristic.
if (ada_stats is not None) and (batch_idx % ada_interval == 0):
ada_stats.update()
adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * (batch_size * ada_interval) / (ada_kimg * 1000)
augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, device=device)))
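# E.g. with batch_size = 32, ada_interval = 4, ada_kimg = 500: each adjustment
# moves p by +-(32 * 4) / 500000 = +-0.000256, so sustained discriminator
# over- or under-fitting ramps p across the full [0, 1] range in ~500 kimg;
# the max() only clamps p from below at 0.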
# Perform maintenance tasks once per tick.
done = (cur_nimg >= total_kimg * 1000)
if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):
continue
# Print status line, accumulating the same information in training_stats.
tick_end_time = time.time()
fields = []
fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"]
fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"]
fields += [f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', torch.cuda.max_memory_reserved(device) / 2**30):<6.2f}"]
torch.cuda.reset_peak_memory_stats()
fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"]
training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))
training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60))
if rank == 0:
print(' '.join(fields))
# Check for abort.
if (not done) and (abort_fn is not None) and abort_fn():
done = True
if rank == 0:
print()
print('Aborting...')
# Save image snapshot.
if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):
images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}.png'), drange=[-1,1], grid_size=grid_size)
# Save network snapshot.
snapshot_pkl = None
snapshot_data = None
if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):
snapshot_data = dict(G=G, D=D, G_ema=G_ema, augment_pipe=augment_pipe, training_set_kwargs=dict(training_set_kwargs))
for key, value in snapshot_data.items():
if isinstance(value, torch.nn.Module):
value = copy.deepcopy(value).eval().requires_grad_(False)
if num_gpus > 1:
misc.check_ddp_consistency(value, ignore_regex=r'.*\.[^.]+_(avg|ema)')
for param in misc.params_and_buffers(value):
torch.distributed.broadcast(param, src=0)
snapshot_data[key] = value.cpu()
del value # conserve memory
snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl')
if rank == 0:
with open(snapshot_pkl, 'wb') as f:
pickle.dump(snapshot_data, f)
# Evaluate metrics.
if (snapshot_data is not None) and (len(metrics) > 0):
if rank == 0:
print('Evaluating metrics...')
for metric in metrics:
result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'],
dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
if rank == 0:
metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
stats_metrics.update(result_dict.results)
del snapshot_data # conserve memory
# Collect statistics.
for phase in phases:
value = []
if (phase.start_event is not None) and (phase.end_event is not None):
phase.end_event.synchronize()
value = phase.start_event.elapsed_time(phase.end_event)
training_stats.report0('Timing/' + phase.name, value)
stats_collector.update()
stats_dict = stats_collector.as_dict()
# Update logs.
timestamp = time.time()
if stats_jsonl is not None:
fields = dict(stats_dict, timestamp=timestamp)
stats_jsonl.write(json.dumps(fields) + '\n')
stats_jsonl.flush()
if stats_tfevents is not None:
global_step = int(cur_nimg / 1e3)
walltime = timestamp - start_time
for name, value in stats_dict.items():
stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)
for name, value in stats_metrics.items():
stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
stats_tfevents.flush()
if progress_fn is not None:
progress_fn(cur_nimg // 1000, total_kimg)
# Update state.
cur_tick += 1
tick_start_nimg = cur_nimg
tick_start_time = time.time()
maintenance_time = tick_start_time - tick_end_time
if done:
break
# Done.
if rank == 0:
print()
print('Exiting...')
#----------------------------------------------------------------------------
| 21,879 | 50.121495 | 168 | py |
stylegan3 | stylegan3-main/training/networks_stylegan2.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Network architectures from the paper
"Analyzing and Improving the Image Quality of StyleGAN".
Matches the original implementation of configs E-F by Karras et al. at
https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py"""
import numpy as np
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma
#----------------------------------------------------------------------------
@misc.profiled_function
def normalize_2nd_moment(x, dim=1, eps=1e-8):
return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()
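# E.g. for x of shape [N, 512] with dim=1, each row is rescaled so that
# mean(x_i**2) == 1, i.e. every latent lands on a hypersphere of radius
# sqrt(512) ~= 22.6; MappingNetwork.forward() below applies this to both z
# and the label embedding.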
#----------------------------------------------------------------------------
@misc.profiled_function
def modulated_conv2d(
x, # Input tensor of shape [batch_size, in_channels, in_height, in_width].
weight, # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
styles, # Modulation coefficients of shape [batch_size, in_channels].
noise = None, # Optional noise tensor to add to the output activations.
up = 1, # Integer upsampling factor.
down = 1, # Integer downsampling factor.
padding = 0, # Padding with respect to the upsampled image.
resample_filter = None, # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
demodulate = True, # Apply weight demodulation?
flip_weight = True, # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
fused_modconv = True, # Perform modulation, convolution, and demodulation as a single fused operation?
):
batch_size = x.shape[0]
out_channels, in_channels, kh, kw = weight.shape
misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
misc.assert_shape(styles, [batch_size, in_channels]) # [NI]
# Pre-normalize inputs to avoid FP16 overflow.
if x.dtype == torch.float16 and demodulate:
weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk
styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I
# Calculate per-sample weights and demodulation coefficients.
w = None
dcoefs = None
if demodulate or fused_modconv:
w = weight.unsqueeze(0) # [NOIkk]
w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
if demodulate:
dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
if demodulate and fused_modconv:
w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]
# Execute by scaling the activations before and after the convolution.
if not fused_modconv:
x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
if demodulate and noise is not None:
x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
elif demodulate:
x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
elif noise is not None:
x = x.add_(noise.to(x.dtype))
return x
# Execute as one fused op using grouped convolution.
with misc.suppress_tracer_warnings(): # this value will be treated as a constant
batch_size = int(batch_size)
misc.assert_shape(x, [batch_size, in_channels, None, None])
x = x.reshape(1, -1, *x.shape[2:])
w = w.reshape(-1, in_channels, kh, kw)
x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
x = x.reshape(batch_size, -1, *x.shape[2:])
if noise is not None:
x = x.add_(noise)
return x
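# Reference semantics of the fused path above, matching the non-fused branch:
# the effective per-sample weight is
#   w'[n,o,i,k,l] = w[o,i,k,l] * s[n,i] * dcoefs[n,o]
# with dcoefs[n,o] = rsqrt(sum_{i,k,l} (w[o,i,k,l] * s[n,i])**2 + 1e-8),
# realized as a single grouped convolution with groups = batch_size.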
#----------------------------------------------------------------------------
@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
def __init__(self,
in_features, # Number of input features.
out_features, # Number of output features.
bias = True, # Apply additive bias before the activation function?
activation = 'linear', # Activation function: 'relu', 'lrelu', etc.
lr_multiplier = 1, # Learning rate multiplier.
bias_init = 0, # Initial value for the additive bias.
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.activation = activation
self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier)
self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None
self.weight_gain = lr_multiplier / np.sqrt(in_features)
self.bias_gain = lr_multiplier
def forward(self, x):
w = self.weight.to(x.dtype) * self.weight_gain
b = self.bias
if b is not None:
b = b.to(x.dtype)
if self.bias_gain != 1:
b = b * self.bias_gain
if self.activation == 'linear' and b is not None:
x = torch.addmm(b.unsqueeze(0), x, w.t())
else:
x = x.matmul(w.t())
x = bias_act.bias_act(x, b, act=self.activation)
return x
def extra_repr(self):
return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class Conv2dLayer(torch.nn.Module):
def __init__(self,
in_channels, # Number of input channels.
out_channels, # Number of output channels.
kernel_size, # Width and height of the convolution kernel.
bias = True, # Apply additive bias before the activation function?
activation = 'linear', # Activation function: 'relu', 'lrelu', etc.
up = 1, # Integer upsampling factor.
down = 1, # Integer downsampling factor.
resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
conv_clamp = None, # Clamp the output to +-X, None = disable clamping.
channels_last = False, # Expect the input to have memory_format=channels_last?
trainable = True, # Update the weights of this layer during training?
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.activation = activation
self.up = up
self.down = down
self.conv_clamp = conv_clamp
self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
self.padding = kernel_size // 2
self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
self.act_gain = bias_act.activation_funcs[activation].def_gain
memory_format = torch.channels_last if channels_last else torch.contiguous_format
weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)
bias = torch.zeros([out_channels]) if bias else None
if trainable:
self.weight = torch.nn.Parameter(weight)
self.bias = torch.nn.Parameter(bias) if bias is not None else None
else:
self.register_buffer('weight', weight)
if bias is not None:
self.register_buffer('bias', bias)
else:
self.bias = None
def forward(self, x, gain=1):
w = self.weight * self.weight_gain
b = self.bias.to(x.dtype) if self.bias is not None else None
flip_weight = (self.up == 1) # slightly faster
x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)
act_gain = self.act_gain * gain
act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp)
return x
def extra_repr(self):
return ' '.join([
f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},',
f'up={self.up}, down={self.down}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class MappingNetwork(torch.nn.Module):
def __init__(self,
z_dim, # Input latent (Z) dimensionality, 0 = no latent.
c_dim, # Conditioning label (C) dimensionality, 0 = no label.
w_dim, # Intermediate latent (W) dimensionality.
num_ws, # Number of intermediate latents to output, None = do not broadcast.
num_layers = 8, # Number of mapping layers.
embed_features = None, # Label embedding dimensionality, None = same as w_dim.
layer_features = None, # Number of intermediate features in the mapping layers, None = same as w_dim.
activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
lr_multiplier = 0.01, # Learning rate multiplier for the mapping layers.
w_avg_beta = 0.998, # Decay for tracking the moving average of W during training, None = do not track.
):
super().__init__()
self.z_dim = z_dim
self.c_dim = c_dim
self.w_dim = w_dim
self.num_ws = num_ws
self.num_layers = num_layers
self.w_avg_beta = w_avg_beta
if embed_features is None:
embed_features = w_dim
if c_dim == 0:
embed_features = 0
if layer_features is None:
layer_features = w_dim
features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]
if c_dim > 0:
self.embed = FullyConnectedLayer(c_dim, embed_features)
for idx in range(num_layers):
in_features = features_list[idx]
out_features = features_list[idx + 1]
layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
setattr(self, f'fc{idx}', layer)
if num_ws is not None and w_avg_beta is not None:
self.register_buffer('w_avg', torch.zeros([w_dim]))
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
# Embed, normalize, and concat inputs.
x = None
with torch.autograd.profiler.record_function('input'):
if self.z_dim > 0:
misc.assert_shape(z, [None, self.z_dim])
x = normalize_2nd_moment(z.to(torch.float32))
if self.c_dim > 0:
misc.assert_shape(c, [None, self.c_dim])
y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
x = torch.cat([x, y], dim=1) if x is not None else y
# Main layers.
for idx in range(self.num_layers):
layer = getattr(self, f'fc{idx}')
x = layer(x)
# Update moving average of W.
if update_emas and self.w_avg_beta is not None:
with torch.autograd.profiler.record_function('update_w_avg'):
self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))
# Broadcast.
if self.num_ws is not None:
with torch.autograd.profiler.record_function('broadcast'):
x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
# Apply truncation.
if truncation_psi != 1:
with torch.autograd.profiler.record_function('truncate'):
assert self.w_avg_beta is not None
if self.num_ws is None or truncation_cutoff is None:
x = self.w_avg.lerp(x, truncation_psi)
else:
x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
return x
def extra_repr(self):
return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws}' # num_ws may be None (no broadcast), so no :d format here.
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
def __init__(self,
in_channels, # Number of input channels.
out_channels, # Number of output channels.
w_dim, # Intermediate latent (W) dimensionality.
resolution, # Resolution of this layer.
kernel_size = 3, # Convolution kernel size.
up = 1, # Integer upsampling factor.
use_noise = True, # Enable noise input?
activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping.
channels_last = False, # Use channels_last format for the weights?
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.w_dim = w_dim
self.resolution = resolution
self.up = up
self.use_noise = use_noise
self.activation = activation
self.conv_clamp = conv_clamp
self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
self.padding = kernel_size // 2
self.act_gain = bias_act.activation_funcs[activation].def_gain
self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
memory_format = torch.channels_last if channels_last else torch.contiguous_format
self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
if use_noise:
self.register_buffer('noise_const', torch.randn([resolution, resolution]))
self.noise_strength = torch.nn.Parameter(torch.zeros([]))
self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
assert noise_mode in ['random', 'const', 'none']
in_resolution = self.resolution // self.up
misc.assert_shape(x, [None, self.in_channels, in_resolution, in_resolution])
styles = self.affine(w)
noise = None
if self.use_noise and noise_mode == 'random':
noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
if self.use_noise and noise_mode == 'const':
noise = self.noise_const * self.noise_strength
flip_weight = (self.up == 1) # slightly faster
x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)
act_gain = self.act_gain * gain
act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
return x
def extra_repr(self):
return ' '.join([
f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},',
f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class ToRGBLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.w_dim = w_dim
self.conv_clamp = conv_clamp
self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
memory_format = torch.channels_last if channels_last else torch.contiguous_format
self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
def forward(self, x, w, fused_modconv=True):
styles = self.affine(w) * self.weight_gain
x = modulated_conv2d(x=x, weight=self.weight, styles=styles, demodulate=False, fused_modconv=fused_modconv)
x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)
return x
def extra_repr(self):
return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisBlock(torch.nn.Module):
def __init__(self,
in_channels, # Number of input channels, 0 = first block.
out_channels, # Number of output channels.
w_dim, # Intermediate latent (W) dimensionality.
resolution, # Resolution of this block.
img_channels, # Number of output color channels.
is_last, # Is this the last block?
architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'.
resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping.
use_fp16 = False, # Use FP16 for this block?
fp16_channels_last = False, # Use channels-last memory format with FP16?
fused_modconv_default = True, # Default value of fused_modconv. 'inference_only' = True for inference, False for training.
**layer_kwargs, # Arguments for SynthesisLayer.
):
assert architecture in ['orig', 'skip', 'resnet']
super().__init__()
self.in_channels = in_channels
self.w_dim = w_dim
self.resolution = resolution
self.img_channels = img_channels
self.is_last = is_last
self.architecture = architecture
self.use_fp16 = use_fp16
self.channels_last = (use_fp16 and fp16_channels_last)
self.fused_modconv_default = fused_modconv_default
self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
self.num_conv = 0
self.num_torgb = 0
if in_channels == 0:
self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))
if in_channels != 0:
self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
self.num_conv += 1
self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
self.num_conv += 1
if is_last or architecture == 'skip':
self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
conv_clamp=conv_clamp, channels_last=self.channels_last)
self.num_torgb += 1
if in_channels != 0 and architecture == 'resnet':
self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
resample_filter=resample_filter, channels_last=self.channels_last)
def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs):
_ = update_emas # unused
misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim])
w_iter = iter(ws.unbind(dim=1))
if ws.device.type != 'cuda':
force_fp32 = True
dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
if fused_modconv is None:
fused_modconv = self.fused_modconv_default
if fused_modconv == 'inference_only':
fused_modconv = (not self.training)
# Input.
if self.in_channels == 0:
x = self.const.to(dtype=dtype, memory_format=memory_format)
x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
else:
misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
x = x.to(dtype=dtype, memory_format=memory_format)
# Main layers.
if self.in_channels == 0:
x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
elif self.architecture == 'resnet':
y = self.skip(x, gain=np.sqrt(0.5))
x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs)
x = y.add_(x)
else:
x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
# ToRGB.
if img is not None:
misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
img = upfirdn2d.upsample2d(img, self.resample_filter)
if self.is_last or self.architecture == 'skip':
y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)
img = img.add_(y) if img is not None else y
assert x.dtype == dtype
assert img is None or img.dtype == torch.float32
return x, img
def extra_repr(self):
return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisNetwork(torch.nn.Module):
def __init__(self,
w_dim, # Intermediate latent (W) dimensionality.
img_resolution, # Output image resolution.
img_channels, # Number of color channels.
channel_base = 32768, # Overall multiplier for the number of channels.
channel_max = 512, # Maximum number of channels in any layer.
num_fp16_res = 4, # Use FP16 for the N highest resolutions.
**block_kwargs, # Arguments for SynthesisBlock.
):
assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
super().__init__()
self.w_dim = w_dim
self.img_resolution = img_resolution
self.img_resolution_log2 = int(np.log2(img_resolution))
self.img_channels = img_channels
self.num_fp16_res = num_fp16_res
self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)]
channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions}
fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
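# E.g. for img_resolution = 1024 with the defaults above: block_resolutions =
# [4, 8, ..., 1024], channel counts {4..64: 512, 128: 256, 256: 128, 512: 64,
# 1024: 32}, and fp16_resolution = max(2 ** (10 + 1 - 4), 8) = 128, so the
# four highest-resolution blocks (128..1024) run in FP16.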
self.num_ws = 0
for res in self.block_resolutions:
in_channels = channels_dict[res // 2] if res > 4 else 0
out_channels = channels_dict[res]
use_fp16 = (res >= fp16_resolution)
is_last = (res == self.img_resolution)
block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs)
self.num_ws += block.num_conv
if is_last:
self.num_ws += block.num_torgb
setattr(self, f'b{res}', block)
def forward(self, ws, **block_kwargs):
block_ws = []
with torch.autograd.profiler.record_function('split_ws'):
misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
ws = ws.to(torch.float32)
w_idx = 0
for res in self.block_resolutions:
block = getattr(self, f'b{res}')
block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
w_idx += block.num_conv
x = img = None
for res, cur_ws in zip(self.block_resolutions, block_ws):
block = getattr(self, f'b{res}')
x, img = block(x, img, cur_ws, **block_kwargs)
return img
def extra_repr(self):
return ' '.join([
f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
f'num_fp16_res={self.num_fp16_res:d}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class Generator(torch.nn.Module):
def __init__(self,
z_dim, # Input latent (Z) dimensionality.
c_dim, # Conditioning label (C) dimensionality.
w_dim, # Intermediate latent (W) dimensionality.
img_resolution, # Output resolution.
img_channels, # Number of output color channels.
mapping_kwargs = {}, # Arguments for MappingNetwork.
**synthesis_kwargs, # Arguments for SynthesisNetwork.
):
super().__init__()
self.z_dim = z_dim
self.c_dim = c_dim
self.w_dim = w_dim
self.img_resolution = img_resolution
self.img_channels = img_channels
self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
self.num_ws = self.synthesis.num_ws
self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
return img
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorBlock(torch.nn.Module):
def __init__(self,
in_channels, # Number of input channels, 0 = first block.
tmp_channels, # Number of intermediate channels.
out_channels, # Number of output channels.
resolution, # Resolution of this block.
img_channels, # Number of input color channels.
first_layer_idx, # Index of the first layer.
architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping.
use_fp16 = False, # Use FP16 for this block?
fp16_channels_last = False, # Use channels-last memory format with FP16?
freeze_layers = 0, # Freeze-D: Number of layers to freeze.
):
assert in_channels in [0, tmp_channels]
assert architecture in ['orig', 'skip', 'resnet']
super().__init__()
self.in_channels = in_channels
self.resolution = resolution
self.img_channels = img_channels
self.first_layer_idx = first_layer_idx
self.architecture = architecture
self.use_fp16 = use_fp16
self.channels_last = (use_fp16 and fp16_channels_last)
self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
self.num_layers = 0
def trainable_gen():
while True:
layer_idx = self.first_layer_idx + self.num_layers
trainable = (layer_idx >= freeze_layers)
self.num_layers += 1
yield trainable
trainable_iter = trainable_gen()
if in_channels == 0 or architecture == 'skip':
self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation,
trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
if architecture == 'resnet':
self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)
def forward(self, x, img, force_fp32=False):
if (x if x is not None else img).device.type != 'cuda':
force_fp32 = True
dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
# Input.
if x is not None:
misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution])
x = x.to(dtype=dtype, memory_format=memory_format)
# FromRGB.
if self.in_channels == 0 or self.architecture == 'skip':
misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
img = img.to(dtype=dtype, memory_format=memory_format)
y = self.fromrgb(img)
x = x + y if x is not None else y
img = upfirdn2d.downsample2d(img, self.resample_filter) if self.architecture == 'skip' else None
# Main layers.
if self.architecture == 'resnet':
y = self.skip(x, gain=np.sqrt(0.5))
x = self.conv0(x)
x = self.conv1(x, gain=np.sqrt(0.5))
x = y.add_(x)
else:
x = self.conv0(x)
x = self.conv1(x)
assert x.dtype == dtype
return x, img
def extra_repr(self):
return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class MinibatchStdLayer(torch.nn.Module):
def __init__(self, group_size, num_channels=1):
super().__init__()
self.group_size = group_size
self.num_channels = num_channels
def forward(self, x):
N, C, H, W = x.shape
with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants
G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N)) if self.group_size is not None else N
F = self.num_channels
c = C // F
y = x.reshape(G, -1, F, c, H, W) # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c.
y = y - y.mean(dim=0) # [GnFcHW] Subtract mean over group.
y = y.square().mean(dim=0) # [nFcHW] Calc variance over group.
y = (y + 1e-8).sqrt() # [nFcHW] Calc stddev over group.
y = y.mean(dim=[2,3,4]) # [nF] Take average over channels and pixels.
y = y.reshape(-1, F, 1, 1) # [nF11] Add missing dimensions.
y = y.repeat(G, 1, H, W) # [NFHW] Replicate over group and pixels.
x = torch.cat([x, y], dim=1) # [NCHW] Append to input as new channels.
return x
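# Shape example for forward() above: x of shape [8, 512, 4, 4] with
# group_size = 4 and num_channels = 1 computes stddev statistics over two
# groups of 4 samples and appends one broadcast feature map, returning
# a tensor of shape [8, 513, 4, 4].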
def extra_repr(self):
return f'group_size={self.group_size}, num_channels={self.num_channels:d}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorEpilogue(torch.nn.Module):
def __init__(self,
in_channels, # Number of input channels.
cmap_dim, # Dimensionality of mapped conditioning label, 0 = no label.
resolution, # Resolution of this block.
img_channels, # Number of input color channels.
architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, None = entire minibatch.
mbstd_num_channels = 1, # Number of features for the minibatch standard deviation layer, 0 = disable.
activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping.
):
assert architecture in ['orig', 'skip', 'resnet']
super().__init__()
self.in_channels = in_channels
self.cmap_dim = cmap_dim
self.resolution = resolution
self.img_channels = img_channels
self.architecture = architecture
if architecture == 'skip':
self.fromrgb = Conv2dLayer(img_channels, in_channels, kernel_size=1, activation=activation)
self.mbstd = MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None
self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels, kernel_size=3, activation=activation, conv_clamp=conv_clamp)
self.fc = FullyConnectedLayer(in_channels * (resolution ** 2), in_channels, activation=activation)
self.out = FullyConnectedLayer(in_channels, 1 if cmap_dim == 0 else cmap_dim)
def forward(self, x, img, cmap, force_fp32=False):
misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) # [NCHW]
_ = force_fp32 # unused
dtype = torch.float32
memory_format = torch.contiguous_format
# FromRGB.
x = x.to(dtype=dtype, memory_format=memory_format)
if self.architecture == 'skip':
misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
img = img.to(dtype=dtype, memory_format=memory_format)
x = x + self.fromrgb(img)
# Main layers.
if self.mbstd is not None:
x = self.mbstd(x)
x = self.conv(x)
x = self.fc(x.flatten(1))
x = self.out(x)
# Conditioning.
if self.cmap_dim > 0:
misc.assert_shape(cmap, [None, self.cmap_dim])
x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))
assert x.dtype == dtype
return x
def extra_repr(self):
return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class Discriminator(torch.nn.Module):
def __init__(self,
c_dim, # Conditioning label (C) dimensionality.
img_resolution, # Input resolution.
img_channels, # Number of input color channels.
architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
channel_base = 32768, # Overall multiplier for the number of channels.
channel_max = 512, # Maximum number of channels in any layer.
num_fp16_res = 4, # Use FP16 for the N highest resolutions.
conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping.
cmap_dim = None, # Dimensionality of mapped conditioning label, None = default.
block_kwargs = {}, # Arguments for DiscriminatorBlock.
mapping_kwargs = {}, # Arguments for MappingNetwork.
epilogue_kwargs = {}, # Arguments for DiscriminatorEpilogue.
):
super().__init__()
self.c_dim = c_dim
self.img_resolution = img_resolution
self.img_resolution_log2 = int(np.log2(img_resolution))
self.img_channels = img_channels
self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
if cmap_dim is None:
cmap_dim = channels_dict[4]
if c_dim == 0:
cmap_dim = 0
common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
cur_layer_idx = 0
for res in self.block_resolutions:
in_channels = channels_dict[res] if res < img_resolution else 0
tmp_channels = channels_dict[res]
out_channels = channels_dict[res // 2]
use_fp16 = (res >= fp16_resolution)
block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs)
setattr(self, f'b{res}', block)
cur_layer_idx += block.num_layers
if c_dim > 0:
self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)
def forward(self, img, c, update_emas=False, **block_kwargs):
_ = update_emas # unused
x = None
for res in self.block_resolutions:
block = getattr(self, f'b{res}')
x, img = block(x, img, **block_kwargs)
cmap = None
if self.c_dim > 0:
cmap = self.mapping(None, c)
x = self.b4(x, img, cmap)
return x
def extra_repr(self):
return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}'
#----------------------------------------------------------------------------
| 40,302 | 49.695597 | 164 | py |
stylegan3 | stylegan3-main/training/networks_stylegan3.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generator architecture from the paper
"Alias-Free Generative Adversarial Networks"."""
import numpy as np
import scipy.signal
import scipy.optimize
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import filtered_lrelu
from torch_utils.ops import bias_act
#----------------------------------------------------------------------------
@misc.profiled_function
def modulated_conv2d(
x, # Input tensor: [batch_size, in_channels, in_height, in_width]
w, # Weight tensor: [out_channels, in_channels, kernel_height, kernel_width]
s, # Style tensor: [batch_size, in_channels]
demodulate = True, # Apply weight demodulation?
padding = 0, # Padding: int or [padH, padW]
input_gain = None, # Optional scale factors for the input channels: [], [in_channels], or [batch_size, in_channels]
):
with misc.suppress_tracer_warnings(): # this value will be treated as a constant
batch_size = int(x.shape[0])
out_channels, in_channels, kh, kw = w.shape
misc.assert_shape(w, [out_channels, in_channels, kh, kw]) # [OIkk]
misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
misc.assert_shape(s, [batch_size, in_channels]) # [NI]
# Pre-normalize inputs.
if demodulate:
w = w * w.square().mean([1,2,3], keepdim=True).rsqrt()
s = s * s.square().mean().rsqrt()
# Modulate weights.
w = w.unsqueeze(0) # [NOIkk]
w = w * s.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]
# Demodulate weights.
if demodulate:
dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
w = w * dcoefs.unsqueeze(2).unsqueeze(3).unsqueeze(4) # [NOIkk]
# Apply input scaling.
if input_gain is not None:
input_gain = input_gain.expand(batch_size, in_channels) # [NI]
w = w * input_gain.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]
# Execute as one fused op using grouped convolution.
x = x.reshape(1, -1, *x.shape[2:])
w = w.reshape(-1, in_channels, kh, kw)
x = conv2d_gradfix.conv2d(input=x, weight=w.to(x.dtype), padding=padding, groups=batch_size)
x = x.reshape(batch_size, -1, *x.shape[2:])
return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
def __init__(self,
in_features, # Number of input features.
out_features, # Number of output features.
activation = 'linear', # Activation function: 'relu', 'lrelu', etc.
bias = True, # Apply additive bias before the activation function?
lr_multiplier = 1, # Learning rate multiplier.
weight_init = 1, # Initial standard deviation of the weight tensor.
bias_init = 0, # Initial value of the additive bias.
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.activation = activation
self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) * (weight_init / lr_multiplier))
bias_init = np.broadcast_to(np.asarray(bias_init, dtype=np.float32), [out_features])
self.bias = torch.nn.Parameter(torch.from_numpy(bias_init / lr_multiplier)) if bias else None
self.weight_gain = lr_multiplier / np.sqrt(in_features)
self.bias_gain = lr_multiplier
def forward(self, x):
w = self.weight.to(x.dtype) * self.weight_gain
b = self.bias
if b is not None:
b = b.to(x.dtype)
if self.bias_gain != 1:
b = b * self.bias_gain
if self.activation == 'linear' and b is not None:
x = torch.addmm(b.unsqueeze(0), x, w.t())
else:
x = x.matmul(w.t())
x = bias_act.bias_act(x, b, act=self.activation)
return x
def extra_repr(self):
return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class MappingNetwork(torch.nn.Module):
def __init__(self,
z_dim, # Input latent (Z) dimensionality.
c_dim, # Conditioning label (C) dimensionality, 0 = no labels.
w_dim, # Intermediate latent (W) dimensionality.
num_ws, # Number of intermediate latents to output.
num_layers = 2, # Number of mapping layers.
lr_multiplier = 0.01, # Learning rate multiplier for the mapping layers.
w_avg_beta = 0.998, # Decay for tracking the moving average of W during training.
):
super().__init__()
self.z_dim = z_dim
self.c_dim = c_dim
self.w_dim = w_dim
self.num_ws = num_ws
self.num_layers = num_layers
self.w_avg_beta = w_avg_beta
# Construct layers.
self.embed = FullyConnectedLayer(self.c_dim, self.w_dim) if self.c_dim > 0 else None
features = [self.z_dim + (self.w_dim if self.c_dim > 0 else 0)] + [self.w_dim] * self.num_layers
for idx, in_features, out_features in zip(range(num_layers), features[:-1], features[1:]):
layer = FullyConnectedLayer(in_features, out_features, activation='lrelu', lr_multiplier=lr_multiplier)
setattr(self, f'fc{idx}', layer)
self.register_buffer('w_avg', torch.zeros([w_dim]))
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
misc.assert_shape(z, [None, self.z_dim])
if truncation_cutoff is None:
truncation_cutoff = self.num_ws
# Embed, normalize, and concatenate inputs.
x = z.to(torch.float32)
x = x * (x.square().mean(1, keepdim=True) + 1e-8).rsqrt()
if self.c_dim > 0:
misc.assert_shape(c, [None, self.c_dim])
y = self.embed(c.to(torch.float32))
y = y * (y.square().mean(1, keepdim=True) + 1e-8).rsqrt()
x = torch.cat([x, y], dim=1) if x is not None else y
# Execute layers.
for idx in range(self.num_layers):
x = getattr(self, f'fc{idx}')(x)
# Update moving average of W.
if update_emas:
self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))
# Broadcast and apply truncation.
x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
if truncation_psi != 1:
x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
return x
def extra_repr(self):
return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'
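# Usage sketch: map latents to broadcast W vectors with truncation. Dimensions
# and truncation_psi are arbitrary example values; illustrative only.
def _demo_mapping_network():
    net = MappingNetwork(z_dim=64, c_dim=0, w_dim=64, num_ws=16)
    z = torch.randn([4, 64])
    ws = net(z, c=None, truncation_psi=0.7)    # lerp towards w_avg across all 16 slots
    assert ws.shape == (4, 16, 64)
    return ws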
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisInput(torch.nn.Module):
def __init__(self,
w_dim, # Intermediate latent (W) dimensionality.
channels, # Number of output channels.
size, # Output spatial size: int or [width, height].
sampling_rate, # Output sampling rate.
bandwidth, # Output bandwidth.
):
super().__init__()
self.w_dim = w_dim
self.channels = channels
self.size = np.broadcast_to(np.asarray(size), [2])
self.sampling_rate = sampling_rate
self.bandwidth = bandwidth
# Draw random frequencies from uniform 2D disc.
freqs = torch.randn([self.channels, 2])
radii = freqs.square().sum(dim=1, keepdim=True).sqrt()
freqs /= radii * radii.square().exp().pow(0.25)
freqs *= bandwidth
phases = torch.rand([self.channels]) - 0.5
# Setup parameters and buffers.
self.weight = torch.nn.Parameter(torch.randn([self.channels, self.channels]))
self.affine = FullyConnectedLayer(w_dim, 4, weight_init=0, bias_init=[1,0,0,0])
self.register_buffer('transform', torch.eye(3, 3)) # User-specified inverse transform wrt. resulting image.
self.register_buffer('freqs', freqs)
self.register_buffer('phases', phases)
def forward(self, w):
# Introduce batch dimension.
transforms = self.transform.unsqueeze(0) # [batch, row, col]
freqs = self.freqs.unsqueeze(0) # [batch, channel, xy]
phases = self.phases.unsqueeze(0) # [batch, channel]
# Apply learned transformation.
t = self.affine(w) # t = (r_c, r_s, t_x, t_y)
t = t / t[:, :2].norm(dim=1, keepdim=True) # t' = (r'_c, r'_s, t'_x, t'_y)
m_r = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse rotation wrt. resulting image.
m_r[:, 0, 0] = t[:, 0] # r'_c
m_r[:, 0, 1] = -t[:, 1] # r'_s
m_r[:, 1, 0] = t[:, 1] # r'_s
m_r[:, 1, 1] = t[:, 0] # r'_c
m_t = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse translation wrt. resulting image.
m_t[:, 0, 2] = -t[:, 2] # t'_x
m_t[:, 1, 2] = -t[:, 3] # t'_y
transforms = m_r @ m_t @ transforms # First rotate resulting image, then translate, and finally apply user-specified transform.
# Transform frequencies.
phases = phases + (freqs @ transforms[:, :2, 2:]).squeeze(2)
freqs = freqs @ transforms[:, :2, :2]
# Dampen out-of-band frequencies that may occur due to the user-specified transform.
amplitudes = (1 - (freqs.norm(dim=2) - self.bandwidth) / (self.sampling_rate / 2 - self.bandwidth)).clamp(0, 1)
# Construct sampling grid.
theta = torch.eye(2, 3, device=w.device)
theta[0, 0] = 0.5 * self.size[0] / self.sampling_rate
theta[1, 1] = 0.5 * self.size[1] / self.sampling_rate
grids = torch.nn.functional.affine_grid(theta.unsqueeze(0), [1, 1, self.size[1], self.size[0]], align_corners=False)
# Compute Fourier features.
x = (grids.unsqueeze(3) @ freqs.permute(0, 2, 1).unsqueeze(1).unsqueeze(2)).squeeze(3) # [batch, height, width, channel]
x = x + phases.unsqueeze(1).unsqueeze(2)
x = torch.sin(x * (np.pi * 2))
x = x * amplitudes.unsqueeze(1).unsqueeze(2)
# Apply trainable mapping.
weight = self.weight / np.sqrt(self.channels)
x = x @ weight.t()
# Ensure correct shape.
x = x.permute(0, 3, 1, 2) # [batch, channel, height, width]
misc.assert_shape(x, [w.shape[0], self.channels, int(self.size[1]), int(self.size[0])])
return x
def extra_repr(self):
return '\n'.join([
f'w_dim={self.w_dim:d}, channels={self.channels:d}, size={list(self.size)},',
f'sampling_rate={self.sampling_rate:g}, bandwidth={self.bandwidth:g}'])
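# Sanity sketch for the frequency initialization above: dividing isotropic
# Gaussian samples by radii * exp(r^2/4) places them uniformly on the unit
# disc (radius CDF t^2), which `bandwidth` then rescales. Illustrative only;
# sample count and tolerance are arbitrary.
def _demo_uniform_disc_freqs(n=100000):
    g = torch.Generator().manual_seed(0)
    v = torch.randn([n, 2], generator=g)
    radii = v.square().sum(dim=1, keepdim=True).sqrt()
    v = v / (radii * radii.square().exp().pow(0.25))
    r = v.norm(dim=1)
    assert abs(r.median().item() - 0.5 ** 0.5) < 0.01  # uniform-disc median radius
    return r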
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
def __init__(self,
w_dim, # Intermediate latent (W) dimensionality.
is_torgb, # Is this the final ToRGB layer?
is_critically_sampled, # Does this layer use critical sampling?
use_fp16, # Does this layer use FP16?
# Input & output specifications.
in_channels, # Number of input channels.
out_channels, # Number of output channels.
in_size, # Input spatial size: int or [width, height].
out_size, # Output spatial size: int or [width, height].
in_sampling_rate, # Input sampling rate (s).
out_sampling_rate, # Output sampling rate (s).
in_cutoff, # Input cutoff frequency (f_c).
out_cutoff, # Output cutoff frequency (f_c).
in_half_width, # Input transition band half-width (f_h).
        out_half_width,              # Output transition band half-width (f_h).
# Hyperparameters.
        conv_kernel         = 3,        # Convolution kernel size. Ignored for the final ToRGB layer.
filter_size = 6, # Low-pass filter size relative to the lower resolution when up/downsampling.
        lrelu_upsampling    = 2,        # Relative sampling rate for leaky ReLU. Ignored for the final ToRGB layer.
use_radial_filters = False, # Use radially symmetric downsampling filter? Ignored for critically sampled layers.
conv_clamp = 256, # Clamp the output to [-X, +X], None = disable clamping.
magnitude_ema_beta = 0.999, # Decay rate for the moving average of input magnitudes.
):
super().__init__()
self.w_dim = w_dim
self.is_torgb = is_torgb
self.is_critically_sampled = is_critically_sampled
self.use_fp16 = use_fp16
self.in_channels = in_channels
self.out_channels = out_channels
self.in_size = np.broadcast_to(np.asarray(in_size), [2])
self.out_size = np.broadcast_to(np.asarray(out_size), [2])
self.in_sampling_rate = in_sampling_rate
self.out_sampling_rate = out_sampling_rate
self.tmp_sampling_rate = max(in_sampling_rate, out_sampling_rate) * (1 if is_torgb else lrelu_upsampling)
self.in_cutoff = in_cutoff
self.out_cutoff = out_cutoff
self.in_half_width = in_half_width
self.out_half_width = out_half_width
self.conv_kernel = 1 if is_torgb else conv_kernel
self.conv_clamp = conv_clamp
self.magnitude_ema_beta = magnitude_ema_beta
# Setup parameters and buffers.
self.affine = FullyConnectedLayer(self.w_dim, self.in_channels, bias_init=1)
self.weight = torch.nn.Parameter(torch.randn([self.out_channels, self.in_channels, self.conv_kernel, self.conv_kernel]))
self.bias = torch.nn.Parameter(torch.zeros([self.out_channels]))
self.register_buffer('magnitude_ema', torch.ones([]))
# Design upsampling filter.
self.up_factor = int(np.rint(self.tmp_sampling_rate / self.in_sampling_rate))
assert self.in_sampling_rate * self.up_factor == self.tmp_sampling_rate
self.up_taps = filter_size * self.up_factor if self.up_factor > 1 and not self.is_torgb else 1
self.register_buffer('up_filter', self.design_lowpass_filter(
numtaps=self.up_taps, cutoff=self.in_cutoff, width=self.in_half_width*2, fs=self.tmp_sampling_rate))
# Design downsampling filter.
self.down_factor = int(np.rint(self.tmp_sampling_rate / self.out_sampling_rate))
assert self.out_sampling_rate * self.down_factor == self.tmp_sampling_rate
self.down_taps = filter_size * self.down_factor if self.down_factor > 1 and not self.is_torgb else 1
self.down_radial = use_radial_filters and not self.is_critically_sampled
self.register_buffer('down_filter', self.design_lowpass_filter(
numtaps=self.down_taps, cutoff=self.out_cutoff, width=self.out_half_width*2, fs=self.tmp_sampling_rate, radial=self.down_radial))
# Compute padding.
pad_total = (self.out_size - 1) * self.down_factor + 1 # Desired output size before downsampling.
pad_total -= (self.in_size + self.conv_kernel - 1) * self.up_factor # Input size after upsampling.
pad_total += self.up_taps + self.down_taps - 2 # Size reduction caused by the filters.
pad_lo = (pad_total + self.up_factor) // 2 # Shift sample locations according to the symmetric interpretation (Appendix C.3).
pad_hi = pad_total - pad_lo
self.padding = [int(pad_lo[0]), int(pad_hi[0]), int(pad_lo[1]), int(pad_hi[1])]
def forward(self, x, w, noise_mode='random', force_fp32=False, update_emas=False):
assert noise_mode in ['random', 'const', 'none'] # unused
misc.assert_shape(x, [None, self.in_channels, int(self.in_size[1]), int(self.in_size[0])])
misc.assert_shape(w, [x.shape[0], self.w_dim])
# Track input magnitude.
if update_emas:
with torch.autograd.profiler.record_function('update_magnitude_ema'):
magnitude_cur = x.detach().to(torch.float32).square().mean()
self.magnitude_ema.copy_(magnitude_cur.lerp(self.magnitude_ema, self.magnitude_ema_beta))
input_gain = self.magnitude_ema.rsqrt()
# Execute affine layer.
styles = self.affine(w)
if self.is_torgb:
weight_gain = 1 / np.sqrt(self.in_channels * (self.conv_kernel ** 2))
styles = styles * weight_gain
# Execute modulated conv2d.
dtype = torch.float16 if (self.use_fp16 and not force_fp32 and x.device.type == 'cuda') else torch.float32
x = modulated_conv2d(x=x.to(dtype), w=self.weight, s=styles,
padding=self.conv_kernel-1, demodulate=(not self.is_torgb), input_gain=input_gain)
# Execute bias, filtered leaky ReLU, and clamping.
gain = 1 if self.is_torgb else np.sqrt(2)
slope = 1 if self.is_torgb else 0.2
x = filtered_lrelu.filtered_lrelu(x=x, fu=self.up_filter, fd=self.down_filter, b=self.bias.to(x.dtype),
up=self.up_factor, down=self.down_factor, padding=self.padding, gain=gain, slope=slope, clamp=self.conv_clamp)
# Ensure correct shape and dtype.
misc.assert_shape(x, [None, self.out_channels, int(self.out_size[1]), int(self.out_size[0])])
assert x.dtype == dtype
return x
@staticmethod
def design_lowpass_filter(numtaps, cutoff, width, fs, radial=False):
assert numtaps >= 1
# Identity filter.
if numtaps == 1:
return None
# Separable Kaiser low-pass filter.
if not radial:
f = scipy.signal.firwin(numtaps=numtaps, cutoff=cutoff, width=width, fs=fs)
return torch.as_tensor(f, dtype=torch.float32)
# Radially symmetric jinc-based filter.
x = (np.arange(numtaps) - (numtaps - 1) / 2) / fs
r = np.hypot(*np.meshgrid(x, x))
f = scipy.special.j1(2 * cutoff * (np.pi * r)) / (np.pi * r)
beta = scipy.signal.kaiser_beta(scipy.signal.kaiser_atten(numtaps, width / (fs / 2)))
w = np.kaiser(numtaps, beta)
f *= np.outer(w, w)
f /= np.sum(f)
return torch.as_tensor(f, dtype=torch.float32)
def extra_repr(self):
return '\n'.join([
f'w_dim={self.w_dim:d}, is_torgb={self.is_torgb},',
f'is_critically_sampled={self.is_critically_sampled}, use_fp16={self.use_fp16},',
f'in_sampling_rate={self.in_sampling_rate:g}, out_sampling_rate={self.out_sampling_rate:g},',
f'in_cutoff={self.in_cutoff:g}, out_cutoff={self.out_cutoff:g},',
f'in_half_width={self.in_half_width:g}, out_half_width={self.out_half_width:g},',
f'in_size={list(self.in_size)}, out_size={list(self.out_size)},',
f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}'])
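# Usage sketch for design_lowpass_filter() with example parameters: a separable
# Kaiser filter and its radially symmetric counterpart for fs=32. Illustrative
# only; the real parameters are derived in SynthesisNetwork below.
def _demo_lowpass_filter():
    f1 = SynthesisLayer.design_lowpass_filter(numtaps=12, cutoff=6.0, width=4.0, fs=32.0)
    assert f1.shape == (12,)                   # separable filters are 1D taps
    f2 = SynthesisLayer.design_lowpass_filter(numtaps=12, cutoff=6.0, width=4.0, fs=32.0, radial=True)
    assert f2.shape == (12, 12)                # radial filters are full 2D kernels
    return f1, f2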
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisNetwork(torch.nn.Module):
def __init__(self,
w_dim, # Intermediate latent (W) dimensionality.
img_resolution, # Output image resolution.
img_channels, # Number of color channels.
channel_base = 32768, # Overall multiplier for the number of channels.
channel_max = 512, # Maximum number of channels in any layer.
num_layers = 14, # Total number of layers, excluding Fourier features and ToRGB.
num_critical = 2, # Number of critically sampled layers at the end.
first_cutoff = 2, # Cutoff frequency of the first layer (f_{c,0}).
first_stopband = 2**2.1, # Minimum stopband of the first layer (f_{t,0}).
last_stopband_rel = 2**0.3, # Minimum stopband of the last layer, expressed relative to the cutoff.
margin_size = 10, # Number of additional pixels outside the image.
output_scale = 0.25, # Scale factor for the output image.
num_fp16_res = 4, # Use FP16 for the N highest resolutions.
**layer_kwargs, # Arguments for SynthesisLayer.
):
super().__init__()
self.w_dim = w_dim
self.num_ws = num_layers + 2
self.img_resolution = img_resolution
self.img_channels = img_channels
self.num_layers = num_layers
self.num_critical = num_critical
self.margin_size = margin_size
self.output_scale = output_scale
self.num_fp16_res = num_fp16_res
# Geometric progression of layer cutoffs and min. stopbands.
last_cutoff = self.img_resolution / 2 # f_{c,N}
last_stopband = last_cutoff * last_stopband_rel # f_{t,N}
exponents = np.minimum(np.arange(self.num_layers + 1) / (self.num_layers - self.num_critical), 1)
cutoffs = first_cutoff * (last_cutoff / first_cutoff) ** exponents # f_c[i]
stopbands = first_stopband * (last_stopband / first_stopband) ** exponents # f_t[i]
# Compute remaining layer parameters.
sampling_rates = np.exp2(np.ceil(np.log2(np.minimum(stopbands * 2, self.img_resolution)))) # s[i]
half_widths = np.maximum(stopbands, sampling_rates / 2) - cutoffs # f_h[i]
sizes = sampling_rates + self.margin_size * 2
sizes[-2:] = self.img_resolution
channels = np.rint(np.minimum((channel_base / 2) / cutoffs, channel_max))
channels[-1] = self.img_channels
# Construct layers.
self.input = SynthesisInput(
w_dim=self.w_dim, channels=int(channels[0]), size=int(sizes[0]),
sampling_rate=sampling_rates[0], bandwidth=cutoffs[0])
self.layer_names = []
for idx in range(self.num_layers + 1):
prev = max(idx - 1, 0)
is_torgb = (idx == self.num_layers)
is_critically_sampled = (idx >= self.num_layers - self.num_critical)
use_fp16 = (sampling_rates[idx] * (2 ** self.num_fp16_res) > self.img_resolution)
layer = SynthesisLayer(
w_dim=self.w_dim, is_torgb=is_torgb, is_critically_sampled=is_critically_sampled, use_fp16=use_fp16,
                in_channels=int(channels[prev]), out_channels=int(channels[idx]),
in_size=int(sizes[prev]), out_size=int(sizes[idx]),
in_sampling_rate=int(sampling_rates[prev]), out_sampling_rate=int(sampling_rates[idx]),
in_cutoff=cutoffs[prev], out_cutoff=cutoffs[idx],
in_half_width=half_widths[prev], out_half_width=half_widths[idx],
**layer_kwargs)
name = f'L{idx}_{layer.out_size[0]}_{layer.out_channels}'
setattr(self, name, layer)
self.layer_names.append(name)
def forward(self, ws, **layer_kwargs):
misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
ws = ws.to(torch.float32).unbind(dim=1)
# Execute layers.
x = self.input(ws[0])
for name, w in zip(self.layer_names, ws[1:]):
x = getattr(self, name)(x, w, **layer_kwargs)
if self.output_scale != 1:
x = x * self.output_scale
# Ensure correct shape and dtype.
misc.assert_shape(x, [None, self.img_channels, self.img_resolution, self.img_resolution])
x = x.to(torch.float32)
return x
def extra_repr(self):
return '\n'.join([
f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
f'num_layers={self.num_layers:d}, num_critical={self.num_critical:d},',
f'margin_size={self.margin_size:d}, num_fp16_res={self.num_fp16_res:d}'])
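# Standalone sketch of the layer schedule computed in __init__ above, using the
# default hyperparameters at an example resolution. Mirrors the constructor
# logic for illustration only.
def _demo_layer_schedule(img_resolution=256, num_layers=14, num_critical=2,
                         first_cutoff=2, first_stopband=2**2.1, last_stopband_rel=2**0.3):
    last_cutoff = img_resolution / 2
    last_stopband = last_cutoff * last_stopband_rel
    exponents = np.minimum(np.arange(num_layers + 1) / (num_layers - num_critical), 1)
    cutoffs = first_cutoff * (last_cutoff / first_cutoff) ** exponents
    stopbands = first_stopband * (last_stopband / first_stopband) ** exponents
    sampling_rates = np.exp2(np.ceil(np.log2(np.minimum(stopbands * 2, img_resolution))))
    return cutoffs, stopbands, sampling_rates  # geometric ramp, pinned at the end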
#----------------------------------------------------------------------------
@persistence.persistent_class
class Generator(torch.nn.Module):
def __init__(self,
z_dim, # Input latent (Z) dimensionality.
c_dim, # Conditioning label (C) dimensionality.
w_dim, # Intermediate latent (W) dimensionality.
img_resolution, # Output resolution.
img_channels, # Number of output color channels.
mapping_kwargs = {}, # Arguments for MappingNetwork.
**synthesis_kwargs, # Arguments for SynthesisNetwork.
):
super().__init__()
self.z_dim = z_dim
self.c_dim = c_dim
self.w_dim = w_dim
self.img_resolution = img_resolution
self.img_channels = img_channels
self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
self.num_ws = self.synthesis.num_ws
self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
return img
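# End-to-end usage sketch: sample images from a freshly initialized generator.
# All dimensions are small arbitrary example values; illustrative only.
def _demo_generator():
    G = Generator(z_dim=64, c_dim=0, w_dim=64, img_resolution=64, img_channels=3,
                  channel_base=4096, channel_max=128)
    z = torch.randn([2, G.z_dim])
    img = G(z, c=None, truncation_psi=0.8)
    assert img.shape == (2, 3, 64, 64)
    return img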
#----------------------------------------------------------------------------
| 26,208 | 49.792636 | 141 | py |
stylegan3 | stylegan3-main/torch_utils/custom_ops.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import glob
import hashlib
import importlib
import os
import re
import shutil
import uuid
import torch
import torch.utils.cpp_extension
from torch.utils.file_baton import FileBaton
#----------------------------------------------------------------------------
# Global options.
verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full'
#----------------------------------------------------------------------------
# Internal helper funcs.
def _find_compiler_bindir():
patterns = [
'C:/Program Files*/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files*/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files*/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files*/Microsoft Visual Studio */vc/bin',
]
for pattern in patterns:
matches = sorted(glob.glob(pattern))
if len(matches):
return matches[-1]
return None
#----------------------------------------------------------------------------
def _get_mangled_gpu_name():
name = torch.cuda.get_device_name().lower()
out = []
for c in name:
if re.match('[a-z0-9_-]+', c):
out.append(c)
else:
out.append('-')
return ''.join(out)
#----------------------------------------------------------------------------
# Main entry point for compiling and loading C++/CUDA plugins.
_cached_plugins = dict()
def get_plugin(module_name, sources, headers=None, source_dir=None, **build_kwargs):
assert verbosity in ['none', 'brief', 'full']
if headers is None:
headers = []
if source_dir is not None:
sources = [os.path.join(source_dir, fname) for fname in sources]
headers = [os.path.join(source_dir, fname) for fname in headers]
# Already cached?
if module_name in _cached_plugins:
return _cached_plugins[module_name]
# Print status.
if verbosity == 'full':
print(f'Setting up PyTorch plugin "{module_name}"...')
elif verbosity == 'brief':
print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)
verbose_build = (verbosity == 'full')
# Compile and load.
try: # pylint: disable=too-many-nested-blocks
# Make sure we can find the necessary compiler binaries.
if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0:
compiler_bindir = _find_compiler_bindir()
if compiler_bindir is None:
raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
os.environ['PATH'] += ';' + compiler_bindir
# Some containers set TORCH_CUDA_ARCH_LIST to a list that can either
# break the build or unnecessarily restrict what's available to nvcc.
# Unset it to let nvcc decide based on what's available on the
# machine.
os.environ['TORCH_CUDA_ARCH_LIST'] = ''
# Incremental build md5sum trickery. Copies all the input source files
# into a cached build directory under a combined md5 digest of the input
# source files. Copying is done only if the combined digest has changed.
# This keeps input file timestamps and filenames the same as in previous
# extension builds, allowing for fast incremental rebuilds.
#
# This optimization is done only in case all the source files reside in
# a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR
# environment variable is set (we take this as a signal that the user
# actually cares about this.)
#
        # EDIT: We now do it regardless of TORCH_EXTENSIONS_DIR, in order to work
# around the *.cu dependency bug in ninja config.
#
all_source_files = sorted(sources + headers)
all_source_dirs = set(os.path.dirname(fname) for fname in all_source_files)
if len(all_source_dirs) == 1: # and ('TORCH_EXTENSIONS_DIR' in os.environ):
# Compute combined hash digest for all source files.
hash_md5 = hashlib.md5()
for src in all_source_files:
with open(src, 'rb') as f:
hash_md5.update(f.read())
# Select cached build directory name.
source_digest = hash_md5.hexdigest()
build_top_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access
cached_build_dir = os.path.join(build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}')
if not os.path.isdir(cached_build_dir):
tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}'
os.makedirs(tmpdir)
for src in all_source_files:
shutil.copyfile(src, os.path.join(tmpdir, os.path.basename(src)))
try:
os.replace(tmpdir, cached_build_dir) # atomic
except OSError:
# source directory already exists, delete tmpdir and its contents.
shutil.rmtree(tmpdir)
if not os.path.isdir(cached_build_dir): raise
# Compile.
cached_sources = [os.path.join(cached_build_dir, os.path.basename(fname)) for fname in sources]
torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir,
verbose=verbose_build, sources=cached_sources, **build_kwargs)
else:
torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
# Load.
module = importlib.import_module(module_name)
except:
if verbosity == 'brief':
print('Failed!')
raise
# Print status and add to cache dict.
if verbosity == 'full':
print(f'Done setting up PyTorch plugin "{module_name}".')
elif verbosity == 'brief':
print('Done.')
_cached_plugins[module_name] = module
return module
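# Usage sketch mirroring how the ops modules invoke this entry point (see
# torch_utils/ops/bias_act.py for the real call site):
#
#   _plugin = custom_ops.get_plugin(
#       module_name='bias_act_plugin',
#       sources=['bias_act.cpp', 'bias_act.cu'],
#       headers=['bias_act.h'],
#       source_dir=os.path.dirname(__file__),
#       extra_cuda_cflags=['--use_fast_math'],
#   )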
#----------------------------------------------------------------------------
| 6,646 | 41.06962 | 146 | py |
stylegan3 | stylegan3-main/torch_utils/training_stats.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Facilities for reporting and collecting training statistics across
multiple processes and devices. The interface is designed to minimize
synchronization overhead as well as the amount of boilerplate in user
code."""
import re
import numpy as np
import torch
import dnnlib
from . import misc
#----------------------------------------------------------------------------
_num_moments = 3 # [num_scalars, sum_of_scalars, sum_of_squares]
_reduce_dtype = torch.float32 # Data type to use for initial per-tensor reduction.
_counter_dtype = torch.float64 # Data type to use for the internal counters.
_rank = 0 # Rank of the current process.
_sync_device = None # Device to use for multiprocess communication. None = single-process.
_sync_called = False # Has _sync() been called yet?
_counters = dict() # Running counters on each device, updated by report(): name => device => torch.Tensor
_cumulative = dict() # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor
#----------------------------------------------------------------------------
def init_multiprocessing(rank, sync_device):
r"""Initializes `torch_utils.training_stats` for collecting statistics
across multiple processes.
This function must be called after
`torch.distributed.init_process_group()` and before `Collector.update()`.
The call is not necessary if multi-process collection is not needed.
Args:
rank: Rank of the current process.
sync_device: PyTorch device to use for inter-process
communication, or None to disable multi-process
collection. Typically `torch.device('cuda', rank)`.
"""
global _rank, _sync_device
assert not _sync_called
_rank = rank
_sync_device = sync_device
#----------------------------------------------------------------------------
@misc.profiled_function
def report(name, value):
r"""Broadcasts the given set of scalars to all interested instances of
`Collector`, across device and process boundaries.
This function is expected to be extremely cheap and can be safely
called from anywhere in the training loop, loss function, or inside a
`torch.nn.Module`.
Warning: The current implementation expects the set of unique names to
be consistent across processes. Please make sure that `report()` is
called at least once for each unique name by each process, and in the
same order. If a given process has no scalars to broadcast, it can do
`report(name, [])` (empty list).
Args:
name: Arbitrary string specifying the name of the statistic.
Averages are accumulated separately for each unique name.
value: Arbitrary set of scalars. Can be a list, tuple,
NumPy array, PyTorch tensor, or Python scalar.
Returns:
The same `value` that was passed in.
"""
if name not in _counters:
_counters[name] = dict()
elems = torch.as_tensor(value)
if elems.numel() == 0:
return value
elems = elems.detach().flatten().to(_reduce_dtype)
moments = torch.stack([
torch.ones_like(elems).sum(),
elems.sum(),
elems.square().sum(),
])
assert moments.ndim == 1 and moments.shape[0] == _num_moments
moments = moments.to(_counter_dtype)
device = moments.device
if device not in _counters[name]:
_counters[name][device] = torch.zeros_like(moments)
_counters[name][device].add_(moments)
return value
#----------------------------------------------------------------------------
def report0(name, value):
r"""Broadcasts the given set of scalars by the first process (`rank = 0`),
but ignores any scalars provided by the other processes.
See `report()` for further details.
"""
report(name, value if _rank == 0 else [])
return value
#----------------------------------------------------------------------------
class Collector:
r"""Collects the scalars broadcasted by `report()` and `report0()` and
computes their long-term averages (mean and standard deviation) over
user-defined periods of time.
The averages are first collected into internal counters that are not
directly visible to the user. They are then copied to the user-visible
state as a result of calling `update()` and can then be queried using
`mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the
internal counters for the next round, so that the user-visible state
effectively reflects averages collected between the last two calls to
`update()`.
Args:
regex: Regular expression defining which statistics to
collect. The default is to collect everything.
keep_previous: Whether to retain the previous averages if no
scalars were collected on a given round
(default: True).
"""
def __init__(self, regex='.*', keep_previous=True):
self._regex = re.compile(regex)
self._keep_previous = keep_previous
self._cumulative = dict()
self._moments = dict()
self.update()
self._moments.clear()
def names(self):
r"""Returns the names of all statistics broadcasted so far that
match the regular expression specified at construction time.
"""
return [name for name in _counters if self._regex.fullmatch(name)]
def update(self):
r"""Copies current values of the internal counters to the
user-visible state and resets them for the next round.
If `keep_previous=True` was specified at construction time, the
operation is skipped for statistics that have received no scalars
since the last update, retaining their previous averages.
This method performs a number of GPU-to-CPU transfers and one
`torch.distributed.all_reduce()`. It is intended to be called
periodically in the main training loop, typically once every
N training steps.
"""
if not self._keep_previous:
self._moments.clear()
for name, cumulative in _sync(self.names()):
if name not in self._cumulative:
self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
delta = cumulative - self._cumulative[name]
self._cumulative[name].copy_(cumulative)
if float(delta[0]) != 0:
self._moments[name] = delta
def _get_delta(self, name):
r"""Returns the raw moments that were accumulated for the given
statistic between the last two calls to `update()`, or zero if
no scalars were collected.
"""
assert self._regex.fullmatch(name)
if name not in self._moments:
self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
return self._moments[name]
def num(self, name):
r"""Returns the number of scalars that were accumulated for the given
statistic between the last two calls to `update()`, or zero if
no scalars were collected.
"""
delta = self._get_delta(name)
return int(delta[0])
def mean(self, name):
r"""Returns the mean of the scalars that were accumulated for the
given statistic between the last two calls to `update()`, or NaN if
no scalars were collected.
"""
delta = self._get_delta(name)
if int(delta[0]) == 0:
return float('nan')
return float(delta[1] / delta[0])
def std(self, name):
r"""Returns the standard deviation of the scalars that were
accumulated for the given statistic between the last two calls to
`update()`, or NaN if no scalars were collected.
"""
delta = self._get_delta(name)
if int(delta[0]) == 0 or not np.isfinite(float(delta[1])):
return float('nan')
if int(delta[0]) == 1:
return float(0)
mean = float(delta[1] / delta[0])
raw_var = float(delta[2] / delta[0])
return np.sqrt(max(raw_var - np.square(mean), 0))
def as_dict(self):
r"""Returns the averages accumulated between the last two calls to
        `update()` as a `dnnlib.EasyDict`. The contents are as follows:
dnnlib.EasyDict(
NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT),
...
)
"""
stats = dnnlib.EasyDict()
for name in self.names():
stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name))
return stats
def __getitem__(self, name):
r"""Convenience getter.
`collector[name]` is a synonym for `collector.mean(name)`.
"""
return self.mean(name)
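# Usage sketch: report a few scalars and read back their running averages.
# Single-process example with an arbitrary statistic name; in multi-GPU
# training, init_multiprocessing() must be called first so that update()
# can all-reduce across ranks.
def _demo_collector():
    collector = Collector(regex='Demo/.*')
    report('Demo/loss', [0.5, 1.5])
    report('Demo/loss', 1.0)
    collector.update()
    assert collector.num('Demo/loss') == 3
    assert abs(collector.mean('Demo/loss') - 1.0) < 1e-6
    return collector.as_dict()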
#----------------------------------------------------------------------------
def _sync(names):
r"""Synchronize the global cumulative counters across devices and
processes. Called internally by `Collector.update()`.
"""
if len(names) == 0:
return []
global _sync_called
_sync_called = True
# Collect deltas within current rank.
deltas = []
device = _sync_device if _sync_device is not None else torch.device('cpu')
for name in names:
delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device)
for counter in _counters[name].values():
delta.add_(counter.to(device))
counter.copy_(torch.zeros_like(counter))
deltas.append(delta)
deltas = torch.stack(deltas)
# Sum deltas across ranks.
if _sync_device is not None:
torch.distributed.all_reduce(deltas)
# Update cumulative values.
deltas = deltas.cpu()
for idx, name in enumerate(names):
if name not in _cumulative:
_cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
_cumulative[name].add_(deltas[idx])
# Return name-value pairs.
return [(name, _cumulative[name]) for name in names]
#----------------------------------------------------------------------------
| 10,720 | 38.855019 | 118 | py |
stylegan3 | stylegan3-main/torch_utils/persistence.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Facilities for pickling Python code alongside other data.
The pickled code is automatically imported into a separate Python module
during unpickling. This way, any previously exported pickles will remain
usable even if the original code is no longer available, or if the current
version of the code is not consistent with what was originally pickled."""
import sys
import pickle
import io
import inspect
import copy
import uuid
import types
import dnnlib
#----------------------------------------------------------------------------
_version = 6 # internal version number
_decorators = set() # {decorator_class, ...}
_import_hooks = [] # [hook_function, ...]
_module_to_src_dict = dict() # {module: src, ...}
_src_to_module_dict = dict() # {src: module, ...}
#----------------------------------------------------------------------------
def persistent_class(orig_class):
r"""Class decorator that extends a given class to save its source code
when pickled.
Example:
from torch_utils import persistence
@persistence.persistent_class
class MyNetwork(torch.nn.Module):
def __init__(self, num_inputs, num_outputs):
super().__init__()
self.fc = MyLayer(num_inputs, num_outputs)
...
@persistence.persistent_class
class MyLayer(torch.nn.Module):
...
When pickled, any instance of `MyNetwork` and `MyLayer` will save its
source code alongside other internal state (e.g., parameters, buffers,
and submodules). This way, any previously exported pickle will remain
usable even if the class definitions have been modified or are no
longer available.
The decorator saves the source code of the entire Python module
containing the decorated class. It does *not* save the source code of
any imported modules. Thus, the imported modules must be available
during unpickling, also including `torch_utils.persistence` itself.
It is ok to call functions defined in the same module from the
decorated class. However, if the decorated class depends on other
classes defined in the same module, they must be decorated as well.
This is illustrated in the above example in the case of `MyLayer`.
It is also possible to employ the decorator just-in-time before
calling the constructor. For example:
cls = MyLayer
if want_to_make_it_persistent:
cls = persistence.persistent_class(cls)
layer = cls(num_inputs, num_outputs)
As an additional feature, the decorator also keeps track of the
arguments that were used to construct each instance of the decorated
class. The arguments can be queried via `obj.init_args` and
`obj.init_kwargs`, and they are automatically pickled alongside other
object state. A typical use case is to first unpickle a previous
instance of a persistent class, and then upgrade it to use the latest
version of the source code:
with open('old_pickle.pkl', 'rb') as f:
old_net = pickle.load(f)
            new_net = MyNetwork(*old_net.init_args, **old_net.init_kwargs)
misc.copy_params_and_buffers(old_net, new_net, require_all=True)
"""
assert isinstance(orig_class, type)
if is_persistent(orig_class):
return orig_class
assert orig_class.__module__ in sys.modules
orig_module = sys.modules[orig_class.__module__]
orig_module_src = _module_to_src(orig_module)
class Decorator(orig_class):
_orig_module_src = orig_module_src
_orig_class_name = orig_class.__name__
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._init_args = copy.deepcopy(args)
self._init_kwargs = copy.deepcopy(kwargs)
assert orig_class.__name__ in orig_module.__dict__
_check_pickleable(self.__reduce__())
@property
def init_args(self):
return copy.deepcopy(self._init_args)
@property
def init_kwargs(self):
return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs))
def __reduce__(self):
fields = list(super().__reduce__())
fields += [None] * max(3 - len(fields), 0)
if fields[0] is not _reconstruct_persistent_obj:
meta = dict(type='class', version=_version, module_src=self._orig_module_src, class_name=self._orig_class_name, state=fields[2])
fields[0] = _reconstruct_persistent_obj # reconstruct func
fields[1] = (meta,) # reconstruct args
fields[2] = None # state dict
return tuple(fields)
Decorator.__name__ = orig_class.__name__
_decorators.add(Decorator)
return Decorator
#----------------------------------------------------------------------------
def is_persistent(obj):
r"""Test whether the given object or class is persistent, i.e.,
whether it will save its source code when pickled.
"""
try:
if obj in _decorators:
return True
except TypeError:
pass
return type(obj) in _decorators # pylint: disable=unidiomatic-typecheck
#----------------------------------------------------------------------------
def import_hook(hook):
r"""Register an import hook that is called whenever a persistent object
is being unpickled. A typical use case is to patch the pickled source
code to avoid errors and inconsistencies when the API of some imported
module has changed.
The hook should have the following signature:
hook(meta) -> modified meta
`meta` is an instance of `dnnlib.EasyDict` with the following fields:
type: Type of the persistent object, e.g. `'class'`.
version: Internal version number of `torch_utils.persistence`.
        module_src:  Original source code of the Python module.
class_name: Class name in the original Python module.
state: Internal state of the object.
Example:
@persistence.import_hook
def wreck_my_network(meta):
if meta.class_name == 'MyNetwork':
print('MyNetwork is being imported. I will wreck it!')
meta.module_src = meta.module_src.replace("True", "False")
return meta
"""
assert callable(hook)
_import_hooks.append(hook)
#----------------------------------------------------------------------------
def _reconstruct_persistent_obj(meta):
r"""Hook that is called internally by the `pickle` module to unpickle
a persistent object.
"""
meta = dnnlib.EasyDict(meta)
meta.state = dnnlib.EasyDict(meta.state)
for hook in _import_hooks:
meta = hook(meta)
assert meta is not None
assert meta.version == _version
module = _src_to_module(meta.module_src)
assert meta.type == 'class'
orig_class = module.__dict__[meta.class_name]
decorator_class = persistent_class(orig_class)
obj = decorator_class.__new__(decorator_class)
setstate = getattr(obj, '__setstate__', None)
if callable(setstate):
setstate(meta.state) # pylint: disable=not-callable
else:
obj.__dict__.update(meta.state)
return obj
#----------------------------------------------------------------------------
def _module_to_src(module):
r"""Query the source code of a given Python module.
"""
src = _module_to_src_dict.get(module, None)
if src is None:
src = inspect.getsource(module)
_module_to_src_dict[module] = src
_src_to_module_dict[src] = module
return src
def _src_to_module(src):
r"""Get or create a Python module for the given source code.
"""
module = _src_to_module_dict.get(src, None)
if module is None:
module_name = "_imported_module_" + uuid.uuid4().hex
module = types.ModuleType(module_name)
sys.modules[module_name] = module
_module_to_src_dict[module] = src
_src_to_module_dict[src] = module
exec(src, module.__dict__) # pylint: disable=exec-used
return module
#----------------------------------------------------------------------------
def _check_pickleable(obj):
r"""Check that the given object is pickleable, raising an exception if
it is not. This function is expected to be considerably more efficient
than actually pickling the object.
"""
def recurse(obj):
if isinstance(obj, (list, tuple, set)):
return [recurse(x) for x in obj]
if isinstance(obj, dict):
return [[recurse(x), recurse(y)] for x, y in obj.items()]
if isinstance(obj, (str, int, float, bool, bytes, bytearray)):
return None # Python primitive types are pickleable.
if f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor', 'torch.nn.parameter.Parameter']:
return None # NumPy arrays and PyTorch tensors are pickleable.
if is_persistent(obj):
return None # Persistent objects are pickleable, by virtue of the constructor check.
return obj
with io.BytesIO() as f:
pickle.dump(recurse(obj), f)
#----------------------------------------------------------------------------
| 9,752 | 37.702381 | 144 | py |
stylegan3 | stylegan3-main/torch_utils/misc.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import re
import contextlib
import numpy as np
import torch
import warnings
import dnnlib
#----------------------------------------------------------------------------
# Cached construction of constant tensors. Avoids CPU=>GPU copy when the
# same constant is used multiple times.
_constant_cache = dict()
def constant(value, shape=None, dtype=None, device=None, memory_format=None):
value = np.asarray(value)
if shape is not None:
shape = tuple(shape)
if dtype is None:
dtype = torch.get_default_dtype()
if device is None:
device = torch.device('cpu')
if memory_format is None:
memory_format = torch.contiguous_format
key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format)
tensor = _constant_cache.get(key, None)
if tensor is None:
tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device)
if shape is not None:
tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape))
tensor = tensor.contiguous(memory_format=memory_format)
_constant_cache[key] = tensor
return tensor
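# Usage sketch: identical arguments hit the cache and return the very same
# tensor object, skipping repeated construction and device copies.
def _demo_constant_cache():
    a = constant([1.0, 2.0], device=torch.device('cpu'))
    b = constant([1.0, 2.0], device=torch.device('cpu'))
    assert a is b                              # served from _constant_cache
    return a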
#----------------------------------------------------------------------------
# Replace NaN/Inf with specified numerical values.
try:
nan_to_num = torch.nan_to_num # 1.8.0a0
except AttributeError:
def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin
assert isinstance(input, torch.Tensor)
if posinf is None:
posinf = torch.finfo(input.dtype).max
if neginf is None:
neginf = torch.finfo(input.dtype).min
assert nan == 0
return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out)
#----------------------------------------------------------------------------
# Symbolic assert.
try:
symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access
except AttributeError:
symbolic_assert = torch.Assert # 1.7.0
#----------------------------------------------------------------------------
# Context manager to temporarily suppress known warnings in torch.jit.trace().
# Note: Cannot use catch_warnings because of https://bugs.python.org/issue29672
@contextlib.contextmanager
def suppress_tracer_warnings():
flt = ('ignore', None, torch.jit.TracerWarning, None, 0)
warnings.filters.insert(0, flt)
yield
warnings.filters.remove(flt)
#----------------------------------------------------------------------------
# Assert that the shape of a tensor matches the given list of integers.
# None indicates that the size of a dimension is allowed to vary.
# Performs symbolic assertion when used in torch.jit.trace().
def assert_shape(tensor, ref_shape):
if tensor.ndim != len(ref_shape):
raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}')
for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)):
if ref_size is None:
pass
elif isinstance(ref_size, torch.Tensor):
with suppress_tracer_warnings(): # as_tensor results are registered as constants
symbolic_assert(torch.equal(torch.as_tensor(size), ref_size), f'Wrong size for dimension {idx}')
elif isinstance(size, torch.Tensor):
with suppress_tracer_warnings(): # as_tensor results are registered as constants
symbolic_assert(torch.equal(size, torch.as_tensor(ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}')
elif size != ref_size:
raise AssertionError(f'Wrong size for dimension {idx}: got {size}, expected {ref_size}')
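# Usage sketch: None entries leave the corresponding dimension unconstrained.
def _demo_assert_shape():
    x = torch.zeros([4, 3, 8, 8])
    assert_shape(x, [None, 3, 8, 8])           # passes; batch size may vary
    try:
        assert_shape(x, [None, 4, 8, 8])       # wrong channel count
    except AssertionError:
        return True
    return False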
#----------------------------------------------------------------------------
# Function decorator that calls torch.autograd.profiler.record_function().
def profiled_function(fn):
def decorator(*args, **kwargs):
with torch.autograd.profiler.record_function(fn.__name__):
return fn(*args, **kwargs)
decorator.__name__ = fn.__name__
return decorator
#----------------------------------------------------------------------------
# Sampler for torch.utils.data.DataLoader that loops over the dataset
# indefinitely, shuffling items as it goes.
class InfiniteSampler(torch.utils.data.Sampler):
def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
assert len(dataset) > 0
assert num_replicas > 0
assert 0 <= rank < num_replicas
assert 0 <= window_size <= 1
super().__init__(dataset)
self.dataset = dataset
self.rank = rank
self.num_replicas = num_replicas
self.shuffle = shuffle
self.seed = seed
self.window_size = window_size
def __iter__(self):
order = np.arange(len(self.dataset))
rnd = None
window = 0
if self.shuffle:
rnd = np.random.RandomState(self.seed)
rnd.shuffle(order)
window = int(np.rint(order.size * self.window_size))
idx = 0
while True:
i = idx % order.size
if idx % self.num_replicas == self.rank:
yield order[i]
if window >= 2:
j = (i - rnd.randint(window)) % order.size
order[i], order[j] = order[j], order[i]
idx += 1
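# Usage sketch: stream an endless shuffled index sequence into a DataLoader.
# The toy dataset and batch size are arbitrary example values.
def _demo_infinite_sampler():
    dataset = torch.utils.data.TensorDataset(torch.arange(10))
    sampler = InfiniteSampler(dataset, rank=0, num_replicas=1, seed=0)
    loader = torch.utils.data.DataLoader(dataset, sampler=sampler, batch_size=4)
    return [batch for _, batch in zip(range(3), loader)]  # take 3 of infinitely many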
#----------------------------------------------------------------------------
# Utilities for operating with torch.nn.Module parameters and buffers.
def params_and_buffers(module):
assert isinstance(module, torch.nn.Module)
return list(module.parameters()) + list(module.buffers())
def named_params_and_buffers(module):
assert isinstance(module, torch.nn.Module)
return list(module.named_parameters()) + list(module.named_buffers())
def copy_params_and_buffers(src_module, dst_module, require_all=False):
assert isinstance(src_module, torch.nn.Module)
assert isinstance(dst_module, torch.nn.Module)
src_tensors = dict(named_params_and_buffers(src_module))
for name, tensor in named_params_and_buffers(dst_module):
assert (name in src_tensors) or (not require_all)
if name in src_tensors:
tensor.copy_(src_tensors[name].detach()).requires_grad_(tensor.requires_grad)
#----------------------------------------------------------------------------
# Context manager for easily enabling/disabling DistributedDataParallel
# synchronization.
@contextlib.contextmanager
def ddp_sync(module, sync):
assert isinstance(module, torch.nn.Module)
if sync or not isinstance(module, torch.nn.parallel.DistributedDataParallel):
yield
else:
with module.no_sync():
yield
#----------------------------------------------------------------------------
# Check DistributedDataParallel consistency across processes.
def check_ddp_consistency(module, ignore_regex=None):
assert isinstance(module, torch.nn.Module)
for name, tensor in named_params_and_buffers(module):
fullname = type(module).__name__ + '.' + name
if ignore_regex is not None and re.fullmatch(ignore_regex, fullname):
continue
tensor = tensor.detach()
if tensor.is_floating_point():
tensor = nan_to_num(tensor)
other = tensor.clone()
torch.distributed.broadcast(tensor=other, src=0)
assert (tensor == other).all(), fullname
#----------------------------------------------------------------------------
# Print summary table of module hierarchy.
def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):
assert isinstance(module, torch.nn.Module)
assert not isinstance(module, torch.jit.ScriptModule)
assert isinstance(inputs, (tuple, list))
# Register hooks.
entries = []
nesting = [0]
def pre_hook(_mod, _inputs):
nesting[0] += 1
def post_hook(mod, _inputs, outputs):
nesting[0] -= 1
if nesting[0] <= max_nesting:
outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]
outputs = [t for t in outputs if isinstance(t, torch.Tensor)]
entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs))
hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()]
hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()]
# Run module.
outputs = module(*inputs)
for hook in hooks:
hook.remove()
# Identify unique outputs, parameters, and buffers.
tensors_seen = set()
for e in entries:
e.unique_params = [t for t in e.mod.parameters() if id(t) not in tensors_seen]
e.unique_buffers = [t for t in e.mod.buffers() if id(t) not in tensors_seen]
e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen]
tensors_seen |= {id(t) for t in e.unique_params + e.unique_buffers + e.unique_outputs}
# Filter out redundant entries.
if skip_redundant:
entries = [e for e in entries if len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs)]
# Construct table.
rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']]
rows += [['---'] * len(rows[0])]
param_total = 0
buffer_total = 0
submodule_names = {mod: name for name, mod in module.named_modules()}
for e in entries:
name = '<top-level>' if e.mod is module else submodule_names[e.mod]
param_size = sum(t.numel() for t in e.unique_params)
buffer_size = sum(t.numel() for t in e.unique_buffers)
output_shapes = [str(list(t.shape)) for t in e.outputs]
output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs]
rows += [[
name + (':0' if len(e.outputs) >= 2 else ''),
str(param_size) if param_size else '-',
str(buffer_size) if buffer_size else '-',
(output_shapes + ['-'])[0],
(output_dtypes + ['-'])[0],
]]
for idx in range(1, len(e.outputs)):
rows += [[name + f':{idx}', '-', '-', output_shapes[idx], output_dtypes[idx]]]
param_total += param_size
buffer_total += buffer_size
rows += [['---'] * len(rows[0])]
rows += [['Total', str(param_total), str(buffer_total), '-', '-']]
# Print table.
widths = [max(len(cell) for cell in column) for column in zip(*rows)]
print()
for row in rows:
print(' '.join(cell + ' ' * (width - len(cell)) for cell, width in zip(row, widths)))
print()
return outputs
#----------------------------------------------------------------------------
| 11,106 | 40.599251 | 133 | py |
stylegan3 | stylegan3-main/torch_utils/ops/bias_act.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom PyTorch ops for efficient bias and activation."""
import os
import numpy as np
import torch
import dnnlib
from .. import custom_ops
from .. import misc
#----------------------------------------------------------------------------
activation_funcs = {
'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False),
'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False),
'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False),
'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True),
'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True),
'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True),
'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True),
'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True),
'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True),
}
#----------------------------------------------------------------------------
_plugin = None
_null_tensor = torch.empty([0])
def _init():
global _plugin
if _plugin is None:
_plugin = custom_ops.get_plugin(
module_name='bias_act_plugin',
sources=['bias_act.cpp', 'bias_act.cu'],
headers=['bias_act.h'],
source_dir=os.path.dirname(__file__),
extra_cuda_cflags=['--use_fast_math', '--allow-unsupported-compiler'],
)
return True
#----------------------------------------------------------------------------
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):
r"""Fused bias and activation function.
Adds bias `b` to activation tensor `x`, evaluates activation function `act`,
and scales the result by `gain`. Each of the steps is optional. In most cases,
the fused op is considerably more efficient than performing the same calculation
using standard PyTorch ops. It supports first and second order gradients,
but not third order gradients.
Args:
x: Input activation tensor. Can be of any shape.
b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
as `x`. The shape must be known, and it must match the dimension of `x`
corresponding to `dim`.
dim: The dimension in `x` corresponding to the elements of `b`.
The value of `dim` is ignored if `b` is not specified.
act: Name of the activation function to evaluate, or `"linear"` to disable.
Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc.
See `activation_funcs` for a full list. `None` is not allowed.
alpha: Shape parameter for the activation function, or `None` to use the default.
gain: Scaling factor for the output tensor, or `None` to use default.
See `activation_funcs` for the default scaling of each activation function.
If unsure, consider specifying 1.
clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable
the clamping (default).
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the same shape and datatype as `x`.
"""
assert isinstance(x, torch.Tensor)
assert impl in ['ref', 'cuda']
if impl == 'cuda' and x.device.type == 'cuda' and _init():
return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b)
return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)
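# Usage sketch: fused bias + leaky ReLU on a conv-style activation tensor.
# Example shapes only; impl='ref' forces the slow reference path so the
# sketch also runs without the compiled CUDA plugin.
def _demo_bias_act():
    x = torch.randn([2, 8, 4, 4])
    b = torch.zeros([8])
    y = bias_act(x, b, dim=1, act='lrelu', gain=1.0, impl='ref')
    assert y.shape == x.shape
    return y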
#----------------------------------------------------------------------------
@misc.profiled_function
def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
"""Slow reference implementation of `bias_act()` using standard TensorFlow ops.
"""
assert isinstance(x, torch.Tensor)
assert clamp is None or clamp >= 0
spec = activation_funcs[act]
alpha = float(alpha if alpha is not None else spec.def_alpha)
gain = float(gain if gain is not None else spec.def_gain)
clamp = float(clamp if clamp is not None else -1)
# Add bias.
if b is not None:
assert isinstance(b, torch.Tensor) and b.ndim == 1
assert 0 <= dim < x.ndim
assert b.shape[0] == x.shape[dim]
x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)])
# Evaluate activation function.
alpha = float(alpha)
x = spec.func(x, alpha=alpha)
# Scale by gain.
gain = float(gain)
if gain != 1:
x = x * gain
# Clamp.
if clamp >= 0:
x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type
return x
#----------------------------------------------------------------------------
_bias_act_cuda_cache = dict()
def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
"""Fast CUDA implementation of `bias_act()` using custom ops.
"""
# Parse arguments.
assert clamp is None or clamp >= 0
spec = activation_funcs[act]
alpha = float(alpha if alpha is not None else spec.def_alpha)
gain = float(gain if gain is not None else spec.def_gain)
clamp = float(clamp if clamp is not None else -1)
# Lookup from cache.
key = (dim, act, alpha, gain, clamp)
if key in _bias_act_cuda_cache:
return _bias_act_cuda_cache[key]
# Forward op.
class BiasActCuda(torch.autograd.Function):
@staticmethod
def forward(ctx, x, b): # pylint: disable=arguments-differ
ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride(1) == 1 else torch.contiguous_format
x = x.contiguous(memory_format=ctx.memory_format)
b = b.contiguous() if b is not None else _null_tensor
y = x
if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
ctx.save_for_backward(
x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
y if 'y' in spec.ref else _null_tensor)
return y
@staticmethod
def backward(ctx, dy): # pylint: disable=arguments-differ
dy = dy.contiguous(memory_format=ctx.memory_format)
x, b, y = ctx.saved_tensors
dx = None
db = None
if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
dx = dy
if act != 'linear' or gain != 1 or clamp >= 0:
dx = BiasActCudaGrad.apply(dy, x, b, y)
if ctx.needs_input_grad[1]:
db = dx.sum([i for i in range(dx.ndim) if i != dim])
return dx, db
# Backward op.
class BiasActCudaGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ
ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride(1) == 1 else torch.contiguous_format
dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
ctx.save_for_backward(
dy if spec.has_2nd_grad else _null_tensor,
x, b, y)
return dx
@staticmethod
def backward(ctx, d_dx): # pylint: disable=arguments-differ
d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
dy, x, b, y = ctx.saved_tensors
d_dy = None
d_x = None
d_b = None
d_y = None
if ctx.needs_input_grad[0]:
d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)
if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)
if spec.has_2nd_grad and ctx.needs_input_grad[2]:
d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])
return d_dy, d_x, d_b, d_y
# Add to cache.
_bias_act_cuda_cache[key] = BiasActCuda
return BiasActCuda
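# [Illustrative example, not part of the original NVIDIA source.] A quick
# sanity check of the gradients against torch.autograd.gradcheck, run through
# the reference path so no CUDA plugin is required; float64 inputs are
# assumed for numerical stability of the check.
def _example_bias_act_gradcheck(): # hypothetical helper, safe to remove
    x = torch.randn([2, 3, 4, 4], dtype=torch.float64, requires_grad=True)
    b = torch.randn([3], dtype=torch.float64, requires_grad=True)
    fn = lambda x, b: _bias_act_ref(x, b, act='lrelu', gain=1.0, clamp=1.0)
    return torch.autograd.gradcheck(fn, (x, b))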
#----------------------------------------------------------------------------
| 9,845 | 45.885714 | 185 | py |
stylegan3 | stylegan3-main/torch_utils/ops/grid_sample_gradfix.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom replacement for `torch.nn.functional.grid_sample` that
supports arbitrarily high order gradients between the input and output.
Only works on 2D images and assumes
`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""
import torch
from pkg_resources import parse_version
# pylint: disable=redefined-builtin
# pylint: disable=arguments-differ
# pylint: disable=protected-access
#----------------------------------------------------------------------------
enabled = False # Enable the custom op by setting this to true.
_use_pytorch_1_11_api = parse_version(torch.__version__) >= parse_version('1.11.0a') # Allow prerelease builds of 1.11
_use_pytorch_1_12_api = parse_version(torch.__version__) >= parse_version('1.12.0a') # Allow prerelease builds of 1.12
#----------------------------------------------------------------------------
def grid_sample(input, grid):
if _should_use_custom_op():
return _GridSample2dForward.apply(input, grid)
return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
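# [Illustrative example, not part of the original NVIDIA source.] With
# `enabled` left at False this dispatches to the standard PyTorch op, so the
# sketch below runs on CPU; the grid is a uniform [-1, 1] sampling lattice,
# which under align_corners=False resamples the image only approximately
# identically.
def _example_grid_sample(): # hypothetical helper, safe to remove
    image = torch.randn([1, 3, 8, 8]) # [N, C, H, W]
    ys, xs = torch.meshgrid(torch.linspace(-1, 1, 8), torch.linspace(-1, 1, 8))
    grid = torch.stack([xs, ys], dim=-1).unsqueeze(0) # [N, H, W, 2]
    return grid_sample(image, grid) # -> [1, 3, 8, 8]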
#----------------------------------------------------------------------------
def _should_use_custom_op():
return enabled
#----------------------------------------------------------------------------
class _GridSample2dForward(torch.autograd.Function):
@staticmethod
def forward(ctx, input, grid):
assert input.ndim == 4
assert grid.ndim == 4
output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
ctx.save_for_backward(input, grid)
return output
@staticmethod
def backward(ctx, grad_output):
input, grid = ctx.saved_tensors
grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid)
return grad_input, grad_grid
#----------------------------------------------------------------------------
class _GridSample2dBackward(torch.autograd.Function):
@staticmethod
def forward(ctx, grad_output, input, grid):
op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
if _use_pytorch_1_12_api:
op = op[0]
if _use_pytorch_1_11_api:
output_mask = (ctx.needs_input_grad[1], ctx.needs_input_grad[2])
grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False, output_mask)
else:
grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
ctx.save_for_backward(grid)
return grad_input, grad_grid
@staticmethod
def backward(ctx, grad2_grad_input, grad2_grad_grid):
_ = grad2_grad_grid # unused
grid, = ctx.saved_tensors
grad2_grad_output = None
grad2_input = None
grad2_grid = None
if ctx.needs_input_grad[0]:
grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid)
assert not ctx.needs_input_grad[2]
return grad2_grad_output, grad2_input, grad2_grid
#----------------------------------------------------------------------------
| 3,575 | 40.103448 | 132 | py |
stylegan3 | stylegan3-main/torch_utils/ops/conv2d_gradfix.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom replacement for `torch.nn.functional.conv2d` that supports
arbitrarily high order gradients with zero performance penalty."""
import contextlib
import torch
from pkg_resources import parse_version
# pylint: disable=redefined-builtin
# pylint: disable=arguments-differ
# pylint: disable=protected-access
#----------------------------------------------------------------------------
enabled = False # Enable the custom op by setting this to true.
weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights.
_use_pytorch_1_11_api = parse_version(torch.__version__) >= parse_version('1.11.0a') # Allow prerelease builds of 1.11
@contextlib.contextmanager
def no_weight_gradients(disable=True):
global weight_gradients_disabled
old = weight_gradients_disabled
if disable:
weight_gradients_disabled = True
yield
weight_gradients_disabled = old
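# [Illustrative example, not part of the original NVIDIA source.] The context
# manager above simply toggles the module-level flag for the duration of a
# block and restores the previous value afterwards:
def _example_no_weight_gradients(): # hypothetical helper, safe to remove
    assert not weight_gradients_disabled
    with no_weight_gradients():
        assert weight_gradients_disabled # grad_weight is skipped in backward()
    assert not weight_gradients_disabled # previous value restored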
#----------------------------------------------------------------------------
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
if _should_use_custom_op(input):
return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias)
return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
if _should_use_custom_op(input):
return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias)
return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
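# [Illustrative example, not part of the original NVIDIA source.] With
# `enabled` left at False both wrappers fall through to torch.nn.functional,
# so this sketch runs on CPU; shapes and hyperparameters are arbitrary
# assumptions.
def _example_conv2d_wrappers(): # hypothetical helper, safe to remove
    x = torch.randn([2, 8, 16, 16])
    w = torch.randn([4, 8, 3, 3]) # [out_channels, in_channels, kh, kw]
    y = conv2d(x, w, stride=2, padding=1) # -> [2, 4, 8, 8]
    wt = torch.randn([4, 8, 3, 3]) # [in_channels, out_channels, kh, kw]
    z = conv_transpose2d(y, wt, stride=2, padding=1, output_padding=1) # -> [2, 8, 16, 16]
    return z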
#----------------------------------------------------------------------------
def _should_use_custom_op(input):
assert isinstance(input, torch.Tensor)
if (not enabled) or (not torch.backends.cudnn.enabled):
return False
if _use_pytorch_1_11_api:
# The work-around code doesn't work on PyTorch 1.11.0 onwards
return False
if input.device.type != 'cuda':
return False
return True
def _tuple_of_ints(xs, ndim):
xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
assert len(xs) == ndim
assert all(isinstance(x, int) for x in xs)
return xs
#----------------------------------------------------------------------------
_conv2d_gradfix_cache = dict()
_null_tensor = torch.empty([0])
def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
# Parse arguments.
ndim = 2
weight_shape = tuple(weight_shape)
stride = _tuple_of_ints(stride, ndim)
padding = _tuple_of_ints(padding, ndim)
output_padding = _tuple_of_ints(output_padding, ndim)
dilation = _tuple_of_ints(dilation, ndim)
# Lookup from cache.
key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
if key in _conv2d_gradfix_cache:
return _conv2d_gradfix_cache[key]
# Validate arguments.
assert groups >= 1
assert len(weight_shape) == ndim + 2
assert all(stride[i] >= 1 for i in range(ndim))
assert all(padding[i] >= 0 for i in range(ndim))
assert all(dilation[i] >= 0 for i in range(ndim))
if not transpose:
assert all(output_padding[i] == 0 for i in range(ndim))
else: # transpose
assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim))
# Helpers.
common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups)
def calc_output_padding(input_shape, output_shape):
if transpose:
return [0, 0]
return [
input_shape[i + 2]
- (output_shape[i + 2] - 1) * stride[i]
- (1 - 2 * padding[i])
- dilation[i] * (weight_shape[i + 2] - 1)
for i in range(ndim)
]
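    # [Editor's note, worked example; not in the original source.] For the
    # non-transposed case with an 8x8 input, stride 2, padding 1, dilation 1,
    # and a 3x3 kernel, the forward output is 4x4, so the formula gives
    # p = 8 - (4-1)*2 - (1 - 2*1) - 1*(3-1) = 1; conv_transpose2d with
    # stride=2, padding=1, output_padding=1 then maps 4 back to 8 as required.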
# Forward & backward.
class Conv2d(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight, bias):
assert weight.shape == weight_shape
ctx.save_for_backward(
input if weight.requires_grad else _null_tensor,
weight if input.requires_grad else _null_tensor,
)
ctx.input_shape = input.shape
# Simple 1x1 convolution => cuBLAS (only on Volta, not on Ampere).
if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0) and torch.cuda.get_device_capability(input.device) < (8, 0):
a = weight.reshape(groups, weight_shape[0] // groups, weight_shape[1])
b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1)
c = (a.transpose(1, 2) if transpose else a) @ b.permute(1, 2, 0, 3).flatten(2)
c = c.reshape(-1, input.shape[0], *input.shape[2:]).transpose(0, 1)
c = c if bias is None else c + bias.unsqueeze(0).unsqueeze(2).unsqueeze(3)
return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format))
# General case => cuDNN.
if transpose:
return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
@staticmethod
def backward(ctx, grad_output):
input, weight = ctx.saved_tensors
input_shape = ctx.input_shape
grad_input = None
grad_weight = None
grad_bias = None
if ctx.needs_input_grad[0]:
p = calc_output_padding(input_shape=input_shape, output_shape=grad_output.shape)
op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs)
grad_input = op.apply(grad_output, weight, None)
assert grad_input.shape == input_shape
if ctx.needs_input_grad[1] and not weight_gradients_disabled:
grad_weight = Conv2dGradWeight.apply(grad_output, input)
assert grad_weight.shape == weight_shape
if ctx.needs_input_grad[2]:
grad_bias = grad_output.sum([0, 2, 3])
return grad_input, grad_weight, grad_bias
# Gradient with respect to the weights.
class Conv2dGradWeight(torch.autograd.Function):
@staticmethod
def forward(ctx, grad_output, input):
ctx.save_for_backward(
grad_output if input.requires_grad else _null_tensor,
input if grad_output.requires_grad else _null_tensor,
)
ctx.grad_output_shape = grad_output.shape
ctx.input_shape = input.shape
# Simple 1x1 convolution => cuBLAS (on both Volta and Ampere).
if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0):
a = grad_output.reshape(grad_output.shape[0], groups, grad_output.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2)
b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2)
c = (b @ a.transpose(1, 2) if transpose else a @ b.transpose(1, 2)).reshape(weight_shape)
return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format))
# General case => cuDNN.
name = 'aten::cudnn_convolution_transpose_backward_weight' if transpose else 'aten::cudnn_convolution_backward_weight'
flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32]
return torch._C._jit_get_operation(name)(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags)
@staticmethod
def backward(ctx, grad2_grad_weight):
grad_output, input = ctx.saved_tensors
grad_output_shape = ctx.grad_output_shape
input_shape = ctx.input_shape
grad2_grad_output = None
grad2_input = None
if ctx.needs_input_grad[0]:
grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None)
assert grad2_grad_output.shape == grad_output_shape
if ctx.needs_input_grad[1]:
p = calc_output_padding(input_shape=input_shape, output_shape=grad_output_shape)
op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs)
grad2_input = op.apply(grad_output, grad2_grad_weight, None)
assert grad2_input.shape == input_shape
return grad2_grad_output, grad2_input
_conv2d_gradfix_cache[key] = Conv2d
return Conv2d
#----------------------------------------------------------------------------
| 9,745 | 46.77451 | 197 | py |
stylegan3 | stylegan3-main/torch_utils/ops/upfirdn2d.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom PyTorch ops for efficient resampling of 2D images."""
import os
import numpy as np
import torch
from .. import custom_ops
from .. import misc
from . import conv2d_gradfix
#----------------------------------------------------------------------------
_plugin = None
def _init():
global _plugin
if _plugin is None:
_plugin = custom_ops.get_plugin(
module_name='upfirdn2d_plugin',
sources=['upfirdn2d.cpp', 'upfirdn2d.cu'],
headers=['upfirdn2d.h'],
source_dir=os.path.dirname(__file__),
extra_cuda_cflags=['--use_fast_math', '--allow-unsupported-compiler'],
)
return True
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
def _get_filter_size(f):
if f is None:
return 1, 1
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
fw = f.shape[-1]
fh = f.shape[0]
with misc.suppress_tracer_warnings():
fw = int(fw)
fh = int(fh)
misc.assert_shape(f, [fh, fw][:f.ndim])
assert fw >= 1 and fh >= 1
return fw, fh
#----------------------------------------------------------------------------
def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None):
r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`.
Args:
f: Torch tensor, numpy array, or python list of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable),
`[]` (impulse), or
`None` (identity).
device: Result device (default: cpu).
normalize: Normalize the filter so that it retains the magnitude
for constant input signal (DC)? (default: True).
flip_filter: Flip the filter? (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
separable: Return a separable filter? (default: select automatically).
Returns:
Float32 tensor of the shape
`[filter_height, filter_width]` (non-separable) or
`[filter_taps]` (separable).
"""
# Validate.
if f is None:
f = 1
f = torch.as_tensor(f, dtype=torch.float32)
assert f.ndim in [0, 1, 2]
assert f.numel() > 0
if f.ndim == 0:
f = f[np.newaxis]
# Separable?
if separable is None:
separable = (f.ndim == 1 and f.numel() >= 8)
if f.ndim == 1 and not separable:
f = f.ger(f)
assert f.ndim == (1 if separable else 2)
# Apply normalize, flip, gain, and device.
if normalize:
f /= f.sum()
if flip_filter:
f = f.flip(list(range(f.ndim)))
f = f * (gain ** (f.ndim / 2))
f = f.to(device=device)
return f
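# [Illustrative example, not part of the original NVIDIA source.] Preparing
# the [1, 3, 3, 1] binomial lowpass filter used throughout StyleGAN; with
# fewer than 8 taps the filter is expanded into a non-separable outer product.
def _example_setup_filter(): # hypothetical helper, safe to remove
    f = setup_filter([1, 3, 3, 1]) # -> 4x4 outer product, normalized
    assert f.shape == (4, 4)
    assert torch.isclose(f.sum(), torch.tensor(1.0)) # unit DC gain
    return f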
#----------------------------------------------------------------------------
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):
r"""Pad, upsample, filter, and downsample a batch of 2D images.
Performs the following sequence of operations for each channel:
1. Upsample the image by inserting N-1 zeros after each pixel (`up`).
2. Pad the image with the specified number of zeros on each side (`padding`).
Negative padding corresponds to cropping the image.
3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it
so that the footprint of all output pixels lies within the input image.
4. Downsample the image by keeping every Nth pixel (`down`).
This sequence of operations bears close resemblance to scipy.signal.upfirdn().
The fused op is considerably more efficient than performing the same calculation
using standard PyTorch ops. It supports gradients of arbitrary order.
Args:
x: Float32/float64/float16 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
f: Float32 FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
up: Integer upsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
down: Integer downsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
padding: Padding with respect to the upsampled image. Can be a single number
or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
flip_filter: False = convolution, True = correlation (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
assert isinstance(x, torch.Tensor)
assert impl in ['ref', 'cuda']
if impl == 'cuda' and x.device.type == 'cuda' and _init():
return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f)
return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)
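# [Illustrative example, not part of the original NVIDIA source.] 2x
# upsampling with the binomial filter via the reference path (impl='ref'
# avoids the CUDA plugin); gain=4 compensates for the zeros inserted by up=2,
# and the asymmetric padding matches what upsample2d() computes internally.
def _example_upfirdn2d(): # hypothetical helper, safe to remove
    x = torch.randn([1, 3, 16, 16])
    f = setup_filter([1, 3, 3, 1])
    y = upfirdn2d(x, f, up=2, padding=[2, 1, 2, 1], gain=4, impl='ref')
    assert y.shape == (1, 3, 32, 32)
    return y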
#----------------------------------------------------------------------------
@misc.profiled_function
def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
"""Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.
"""
# Validate arguments.
assert isinstance(x, torch.Tensor) and x.ndim == 4
if f is None:
f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
assert f.dtype == torch.float32 and not f.requires_grad
batch_size, num_channels, in_height, in_width = x.shape
upx, upy = _parse_scaling(up)
downx, downy = _parse_scaling(down)
padx0, padx1, pady0, pady1 = _parse_padding(padding)
# Check that upsampled buffer is not smaller than the filter.
upW = in_width * upx + padx0 + padx1
upH = in_height * upy + pady0 + pady1
assert upW >= f.shape[-1] and upH >= f.shape[0]
# Upsample by inserting zeros.
x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
# Pad or crop.
x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])
x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)]
# Setup filter.
f = f * (gain ** (f.ndim / 2))
f = f.to(x.dtype)
if not flip_filter:
f = f.flip(list(range(f.ndim)))
# Convolve with the filter.
f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)
if f.ndim == 4:
x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels)
else:
x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)
x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)
# Downsample by throwing away pixels.
x = x[:, :, ::downy, ::downx]
return x
#----------------------------------------------------------------------------
_upfirdn2d_cuda_cache = dict()
def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
"""Fast CUDA implementation of `upfirdn2d()` using custom ops.
"""
# Parse arguments.
upx, upy = _parse_scaling(up)
downx, downy = _parse_scaling(down)
padx0, padx1, pady0, pady1 = _parse_padding(padding)
# Lookup from cache.
key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
if key in _upfirdn2d_cuda_cache:
return _upfirdn2d_cuda_cache[key]
# Forward op.
class Upfirdn2dCuda(torch.autograd.Function):
@staticmethod
def forward(ctx, x, f): # pylint: disable=arguments-differ
assert isinstance(x, torch.Tensor) and x.ndim == 4
if f is None:
f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
if f.ndim == 1 and f.shape[0] == 1:
f = f.square().unsqueeze(0) # Convert separable-1 into full-1x1.
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
y = x
if f.ndim == 2:
y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
else:
y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, 1.0)
y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, gain)
ctx.save_for_backward(f)
ctx.x_shape = x.shape
return y
@staticmethod
def backward(ctx, dy): # pylint: disable=arguments-differ
f, = ctx.saved_tensors
_, _, ih, iw = ctx.x_shape
_, _, oh, ow = dy.shape
fw, fh = _get_filter_size(f)
p = [
fw - padx0 - 1,
iw * upx - ow * downx + padx0 - upx + 1,
fh - pady0 - 1,
ih * upy - oh * downy + pady0 - upy + 1,
]
dx = None
df = None
if ctx.needs_input_grad[0]:
dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f)
assert not ctx.needs_input_grad[1]
return dx, df
# Add to cache.
_upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
return Upfirdn2dCuda
#----------------------------------------------------------------------------
def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'):
r"""Filter a batch of 2D images using the given 2D FIR filter.
By default, the result is padded so that its shape matches the input.
User-specified padding is applied on top of that, with negative values
indicating cropping. Pixels outside the image are assumed to be zero.
Args:
x: Float32/float64/float16 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
f: Float32 FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
padding: Padding with respect to the output. Can be a single number or a
list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
flip_filter: False = convolution, True = correlation (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
padx0, padx1, pady0, pady1 = _parse_padding(padding)
fw, fh = _get_filter_size(f)
p = [
padx0 + fw // 2,
padx1 + (fw - 1) // 2,
pady0 + fh // 2,
pady1 + (fh - 1) // 2,
]
return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl)
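# [Illustrative example, not part of the original NVIDIA source.] Shape-
# preserving lowpass filtering; the wrapper above derives the padding so the
# output size matches the input size.
def _example_filter2d(): # hypothetical helper, safe to remove
    x = torch.randn([1, 3, 16, 16])
    f = setup_filter([1, 2, 1])
    y = filter2d(x, f, impl='ref')
    assert y.shape == x.shape
    return y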
#----------------------------------------------------------------------------
def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
r"""Upsample a batch of 2D images using the given 2D FIR filter.
By default, the result is padded so that its shape is a multiple of the input.
User-specified padding is applied on top of that, with negative values
indicating cropping. Pixels outside the image are assumed to be zero.
Args:
x: Float32/float64/float16 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
f: Float32 FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
up: Integer upsampling factor. Can be a single int or a list/tuple
            `[x, y]` (default: 2).
padding: Padding with respect to the output. Can be a single number or a
list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
flip_filter: False = convolution, True = correlation (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
upx, upy = _parse_scaling(up)
padx0, padx1, pady0, pady1 = _parse_padding(padding)
fw, fh = _get_filter_size(f)
p = [
padx0 + (fw + upx - 1) // 2,
padx1 + (fw - upx) // 2,
pady0 + (fh + upy - 1) // 2,
pady1 + (fh - upy) // 2,
]
return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl)
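# [Illustrative example, not part of the original NVIDIA source.] 2x
# upsampling with automatic padding; equivalent to the explicit upfirdn2d()
# call sketched earlier in this file.
def _example_upsample2d(): # hypothetical helper, safe to remove
    x = torch.randn([1, 3, 16, 16])
    f = setup_filter([1, 3, 3, 1])
    y = upsample2d(x, f, up=2, impl='ref')
    assert y.shape == (1, 3, 32, 32)
    return y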
#----------------------------------------------------------------------------
def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
r"""Downsample a batch of 2D images using the given 2D FIR filter.
By default, the result is padded so that its shape is a fraction of the input.
User-specified padding is applied on top of that, with negative values
indicating cropping. Pixels outside the image are assumed to be zero.
Args:
x: Float32/float64/float16 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
f: Float32 FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
down: Integer downsampling factor. Can be a single int or a list/tuple
            `[x, y]` (default: 2).
padding: Padding with respect to the input. Can be a single number or a
list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
flip_filter: False = convolution, True = correlation (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
downx, downy = _parse_scaling(down)
padx0, padx1, pady0, pady1 = _parse_padding(padding)
fw, fh = _get_filter_size(f)
p = [
padx0 + (fw - downx + 1) // 2,
padx1 + (fw - downx) // 2,
pady0 + (fh - downy + 1) // 2,
pady1 + (fh - downy) // 2,
]
return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl)
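# [Illustrative example, not part of the original NVIDIA source.] 2x
# downsampling; round-tripping with upsample2d() changes the signal only by
# the lowpass filtering, not by its shape.
def _example_downsample2d(): # hypothetical helper, safe to remove
    x = torch.randn([1, 3, 32, 32])
    f = setup_filter([1, 3, 3, 1])
    y = downsample2d(x, f, down=2, impl='ref')
    assert y.shape == (1, 3, 16, 16)
    return y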
#----------------------------------------------------------------------------
| 16,424 | 41.115385 | 120 | py |
stylegan3 | stylegan3-main/torch_utils/ops/filtered_lrelu.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import numpy as np
import torch
import warnings
from .. import custom_ops
from .. import misc
from . import upfirdn2d
from . import bias_act
#----------------------------------------------------------------------------
_plugin = None
def _init():
global _plugin
if _plugin is None:
_plugin = custom_ops.get_plugin(
module_name='filtered_lrelu_plugin',
sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'],
headers=['filtered_lrelu.h', 'filtered_lrelu.cu'],
source_dir=os.path.dirname(__file__),
extra_cuda_cflags=['--use_fast_math', '--allow-unsupported-compiler'],
)
return True
def _get_filter_size(f):
if f is None:
return 1, 1
assert isinstance(f, torch.Tensor)
assert 1 <= f.ndim <= 2
return f.shape[-1], f.shape[0] # width, height
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, (int, np.integer)) for x in padding)
padding = [int(x) for x in padding]
if len(padding) == 2:
px, py = padding
padding = [px, px, py, py]
px0, px1, py0, py1 = padding
return px0, px1, py0, py1
#----------------------------------------------------------------------------
def filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'):
r"""Filtered leaky ReLU for a batch of 2D images.
Performs the following sequence of operations for each channel:
1. Add channel-specific bias if provided (`b`).
2. Upsample the image by inserting N-1 zeros after each pixel (`up`).
3. Pad the image with the specified number of zeros on each side (`padding`).
Negative padding corresponds to cropping the image.
4. Convolve the image with the specified upsampling FIR filter (`fu`), shrinking it
so that the footprint of all output pixels lies within the input image.
5. Multiply each value by the provided gain factor (`gain`).
6. Apply leaky ReLU activation function to each value.
7. Clamp each value between -clamp and +clamp, if `clamp` parameter is provided.
8. Convolve the image with the specified downsampling FIR filter (`fd`), shrinking
it so that the footprint of all output pixels lies within the input image.
9. Downsample the image by keeping every Nth pixel (`down`).
The fused op is considerably more efficient than performing the same calculation
using standard PyTorch ops. It supports gradients of arbitrary order.
Args:
x: Float32/float16/float64 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
fu: Float32 upsampling FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
fd: Float32 downsampling FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
            as `x`. The length of the vector must match the channel dimension of `x`.
up: Integer upsampling factor (default: 1).
        down: Integer downsampling factor (default: 1).
padding: Padding with respect to the upsampled image. Can be a single number
or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
gain: Overall scaling factor for signal magnitude (default: sqrt(2)).
slope: Slope on the negative side of leaky ReLU (default: 0.2).
clamp: Maximum magnitude for leaky ReLU output (default: None).
flip_filter: False = convolution, True = correlation (default: False).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
assert isinstance(x, torch.Tensor)
assert impl in ['ref', 'cuda']
if impl == 'cuda' and x.device.type == 'cuda' and _init():
return _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter).apply(x, fu, fd, b, None, 0, 0)
return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter)
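# [Illustrative example, not part of the original NVIDIA source.] A 2x-up /
# 2x-down filtered leaky ReLU in the style of the StyleGAN3 synthesis layers,
# run through the reference path so no CUDA plugin is needed; the shapes,
# filters, and padding are arbitrary assumptions.
def _example_filtered_lrelu(): # hypothetical helper, safe to remove
    x = torch.randn([1, 4, 16, 16])
    b = torch.zeros([4])
    fu = upfirdn2d.setup_filter([1, 3, 3, 1])
    fd = upfirdn2d.setup_filter([1, 3, 3, 1])
    y = filtered_lrelu(x, fu=fu, fd=fd, b=b, up=2, down=2, padding=3, impl='ref')
    assert y.shape == (1, 4, 16, 16) # resolution is preserved for this padding
    return y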
#----------------------------------------------------------------------------
@misc.profiled_function
def _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
"""Slow and memory-inefficient reference implementation of `filtered_lrelu()` using
    existing `upfirdn2d()` and `bias_act()` ops.
"""
assert isinstance(x, torch.Tensor) and x.ndim == 4
fu_w, fu_h = _get_filter_size(fu)
fd_w, fd_h = _get_filter_size(fd)
if b is not None:
assert isinstance(b, torch.Tensor) and b.dtype == x.dtype
misc.assert_shape(b, [x.shape[1]])
assert isinstance(up, int) and up >= 1
assert isinstance(down, int) and down >= 1
px0, px1, py0, py1 = _parse_padding(padding)
assert gain == float(gain) and gain > 0
assert slope == float(slope) and slope >= 0
assert clamp is None or (clamp == float(clamp) and clamp >= 0)
# Calculate output size.
batch_size, channels, in_h, in_w = x.shape
in_dtype = x.dtype
out_w = (in_w * up + (px0 + px1) - (fu_w - 1) - (fd_w - 1) + (down - 1)) // down
out_h = (in_h * up + (py0 + py1) - (fu_h - 1) - (fd_h - 1) + (down - 1)) // down
# Compute using existing ops.
x = bias_act.bias_act(x=x, b=b) # Apply bias.
x = upfirdn2d.upfirdn2d(x=x, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample.
x = bias_act.bias_act(x=x, act='lrelu', alpha=slope, gain=gain, clamp=clamp) # Bias, leaky ReLU, clamp.
x = upfirdn2d.upfirdn2d(x=x, f=fd, down=down, flip_filter=flip_filter) # Downsample.
# Check output shape & dtype.
misc.assert_shape(x, [batch_size, channels, out_h, out_w])
assert x.dtype == in_dtype
return x
#----------------------------------------------------------------------------
_filtered_lrelu_cuda_cache = dict()
def _filtered_lrelu_cuda(up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
"""Fast CUDA implementation of `filtered_lrelu()` using custom ops.
"""
assert isinstance(up, int) and up >= 1
assert isinstance(down, int) and down >= 1
px0, px1, py0, py1 = _parse_padding(padding)
assert gain == float(gain) and gain > 0
gain = float(gain)
assert slope == float(slope) and slope >= 0
slope = float(slope)
assert clamp is None or (clamp == float(clamp) and clamp >= 0)
clamp = float(clamp if clamp is not None else 'inf')
# Lookup from cache.
key = (up, down, px0, px1, py0, py1, gain, slope, clamp, flip_filter)
if key in _filtered_lrelu_cuda_cache:
return _filtered_lrelu_cuda_cache[key]
# Forward op.
class FilteredLReluCuda(torch.autograd.Function):
@staticmethod
def forward(ctx, x, fu, fd, b, si, sx, sy): # pylint: disable=arguments-differ
assert isinstance(x, torch.Tensor) and x.ndim == 4
# Replace empty up/downsample kernels with full 1x1 kernels (faster than separable).
if fu is None:
fu = torch.ones([1, 1], dtype=torch.float32, device=x.device)
if fd is None:
fd = torch.ones([1, 1], dtype=torch.float32, device=x.device)
assert 1 <= fu.ndim <= 2
assert 1 <= fd.ndim <= 2
# Replace separable 1x1 kernels with full 1x1 kernels when scale factor is 1.
if up == 1 and fu.ndim == 1 and fu.shape[0] == 1:
fu = fu.square()[None]
if down == 1 and fd.ndim == 1 and fd.shape[0] == 1:
fd = fd.square()[None]
# Missing sign input tensor.
if si is None:
si = torch.empty([0])
# Missing bias tensor.
if b is None:
b = torch.zeros([x.shape[1]], dtype=x.dtype, device=x.device)
# Construct internal sign tensor only if gradients are needed.
write_signs = (si.numel() == 0) and (x.requires_grad or b.requires_grad)
# Warn if input storage strides are not in decreasing order due to e.g. channels-last layout.
strides = [x.stride(i) for i in range(x.ndim) if x.size(i) > 1]
if any(a < b for a, b in zip(strides[:-1], strides[1:])):
warnings.warn("low-performance memory layout detected in filtered_lrelu input", RuntimeWarning)
# Call C++/Cuda plugin if datatype is supported.
if x.dtype in [torch.float16, torch.float32]:
if torch.cuda.current_stream(x.device) != torch.cuda.default_stream(x.device):
warnings.warn("filtered_lrelu called with non-default cuda stream but concurrent execution is not supported", RuntimeWarning)
y, so, return_code = _plugin.filtered_lrelu(x, fu, fd, b, si, up, down, px0, px1, py0, py1, sx, sy, gain, slope, clamp, flip_filter, write_signs)
else:
return_code = -1
# No Cuda kernel found? Fall back to generic implementation. Still more memory efficient than the reference implementation because
# only the bit-packed sign tensor is retained for gradient computation.
if return_code < 0:
warnings.warn("filtered_lrelu called with parameters that have no optimized CUDA kernel, using generic fallback", RuntimeWarning)
y = x.add(b.unsqueeze(-1).unsqueeze(-1)) # Add bias.
y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample.
so = _plugin.filtered_lrelu_act_(y, si, sx, sy, gain, slope, clamp, write_signs) # Activation function and sign handling. Modifies y in-place.
y = upfirdn2d.upfirdn2d(x=y, f=fd, down=down, flip_filter=flip_filter) # Downsample.
# Prepare for gradient computation.
ctx.save_for_backward(fu, fd, (si if si.numel() else so))
ctx.x_shape = x.shape
ctx.y_shape = y.shape
ctx.s_ofs = sx, sy
return y
@staticmethod
def backward(ctx, dy): # pylint: disable=arguments-differ
fu, fd, si = ctx.saved_tensors
_, _, xh, xw = ctx.x_shape
_, _, yh, yw = ctx.y_shape
sx, sy = ctx.s_ofs
dx = None # 0
dfu = None; assert not ctx.needs_input_grad[1]
dfd = None; assert not ctx.needs_input_grad[2]
db = None # 3
dsi = None; assert not ctx.needs_input_grad[4]
dsx = None; assert not ctx.needs_input_grad[5]
dsy = None; assert not ctx.needs_input_grad[6]
if ctx.needs_input_grad[0] or ctx.needs_input_grad[3]:
pp = [
(fu.shape[-1] - 1) + (fd.shape[-1] - 1) - px0,
xw * up - yw * down + px0 - (up - 1),
(fu.shape[0] - 1) + (fd.shape[0] - 1) - py0,
xh * up - yh * down + py0 - (up - 1),
]
gg = gain * (up ** 2) / (down ** 2)
ff = (not flip_filter)
sx = sx - (fu.shape[-1] - 1) + px0
sy = sy - (fu.shape[0] - 1) + py0
dx = _filtered_lrelu_cuda(up=down, down=up, padding=pp, gain=gg, slope=slope, clamp=None, flip_filter=ff).apply(dy, fd, fu, None, si, sx, sy)
if ctx.needs_input_grad[3]:
db = dx.sum([0, 2, 3])
return dx, dfu, dfd, db, dsi, dsx, dsy
# Add to cache.
_filtered_lrelu_cuda_cache[key] = FilteredLReluCuda
return FilteredLReluCuda
#----------------------------------------------------------------------------
| 12,916 | 45.970909 | 164 | py |
stylegan3 | stylegan3-main/torch_utils/ops/conv2d_resample.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""2D convolution with optional up/downsampling."""
import torch
from .. import misc
from . import conv2d_gradfix
from . import upfirdn2d
from .upfirdn2d import _parse_padding
from .upfirdn2d import _get_filter_size
#----------------------------------------------------------------------------
def _get_weight_shape(w):
with misc.suppress_tracer_warnings(): # this value will be treated as a constant
shape = [int(sz) for sz in w.shape]
misc.assert_shape(w, shape)
return shape
#----------------------------------------------------------------------------
def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):
"""Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations.
"""
_out_channels, _in_channels_per_group, kh, kw = _get_weight_shape(w)
# Flip weight if requested.
# Note: conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False).
if not flip_weight and (kw > 1 or kh > 1):
w = w.flip([2, 3])
# Execute using conv2d_gradfix.
op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d
return op(x, w, stride=stride, padding=padding, groups=groups)
#----------------------------------------------------------------------------
@misc.profiled_function
def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
r"""2D convolution with optional up/downsampling.
Padding is performed only once at the beginning, not between the operations.
Args:
x: Input tensor of shape
`[batch_size, in_channels, in_height, in_width]`.
w: Weight tensor of shape
`[out_channels, in_channels//groups, kernel_height, kernel_width]`.
f: Low-pass filter for up/downsampling. Must be prepared beforehand by
calling upfirdn2d.setup_filter(). None = identity (default).
up: Integer upsampling factor (default: 1).
down: Integer downsampling factor (default: 1).
padding: Padding with respect to the upsampled image. Can be a single number
or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
groups: Split input channels into N groups (default: 1).
flip_weight: False = convolution, True = correlation (default: True).
flip_filter: False = convolution, True = correlation (default: False).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
# Validate arguments.
assert isinstance(x, torch.Tensor) and (x.ndim == 4)
assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
assert isinstance(up, int) and (up >= 1)
assert isinstance(down, int) and (down >= 1)
assert isinstance(groups, int) and (groups >= 1)
out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
fw, fh = _get_filter_size(f)
px0, px1, py0, py1 = _parse_padding(padding)
# Adjust padding to account for up/downsampling.
if up > 1:
px0 += (fw + up - 1) // 2
px1 += (fw - up) // 2
py0 += (fh + up - 1) // 2
py1 += (fh - up) // 2
if down > 1:
px0 += (fw - down + 1) // 2
px1 += (fw - down) // 2
py0 += (fh - down + 1) // 2
py1 += (fh - down) // 2
# Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
if kw == 1 and kh == 1 and (down > 1 and up == 1):
x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
return x
# Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
if kw == 1 and kh == 1 and (up > 1 and down == 1):
x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
return x
# Fast path: downsampling only => use strided convolution.
if down > 1 and up == 1:
x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
return x
# Fast path: upsampling with optional downsampling => use transpose strided convolution.
if up > 1:
if groups == 1:
w = w.transpose(0, 1)
else:
w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
w = w.transpose(1, 2)
w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
px0 -= kw - 1
px1 -= kw - up
py0 -= kh - 1
py1 -= kh - up
pxt = max(min(-px0, -px1), 0)
pyt = max(min(-py0, -py1), 0)
x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
if down > 1:
x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
return x
# Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
if up == 1 and down == 1:
if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)
# Fallback: Generic reference implementation.
x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
if down > 1:
x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
return x
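# [Illustrative example, not part of the original NVIDIA source.] A 3x3
# convolution fused with 2x upsampling, as used by StyleGAN-style synthesis
# layers; the filter must be prepared with upfirdn2d.setup_filter()
# beforehand, and the shapes below are arbitrary assumptions.
def _example_conv2d_resample(): # hypothetical helper, safe to remove
    x = torch.randn([1, 8, 16, 16])
    w = torch.randn([4, 8, 3, 3])
    f = upfirdn2d.setup_filter([1, 3, 3, 1])
    y = conv2d_resample(x, w, f=f, up=2, padding=1)
    assert y.shape == (1, 4, 32, 32) # resolution doubles, channels follow w
    return y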
#----------------------------------------------------------------------------
| 6,765 | 45.986111 | 130 | py |
stylegan3 | stylegan3-main/torch_utils/ops/fma.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`."""
import torch
#----------------------------------------------------------------------------
def fma(a, b, c): # => a * b + c
return _FusedMultiplyAdd.apply(a, b, c)
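# [Illustrative example, not part of the original NVIDIA source.] fma()
# matches torch.addcmul() in the forward pass, including broadcasting; only
# the backward pass differs in how the gradients are reduced.
def _example_fma(): # hypothetical helper, safe to remove
    a = torch.randn([4, 1, 8])
    b = torch.randn([1, 3, 8])
    c = torch.randn([8])
    out = fma(a, b, c) # -> [4, 3, 8]
    assert torch.allclose(out, a * b + c)
    return out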
#----------------------------------------------------------------------------
class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c
@staticmethod
def forward(ctx, a, b, c): # pylint: disable=arguments-differ
out = torch.addcmul(c, a, b)
ctx.save_for_backward(a, b)
ctx.c_shape = c.shape
return out
@staticmethod
def backward(ctx, dout): # pylint: disable=arguments-differ
a, b = ctx.saved_tensors
c_shape = ctx.c_shape
da = None
db = None
dc = None
if ctx.needs_input_grad[0]:
da = _unbroadcast(dout * b, a.shape)
if ctx.needs_input_grad[1]:
db = _unbroadcast(dout * a, b.shape)
if ctx.needs_input_grad[2]:
dc = _unbroadcast(dout, c_shape)
return da, db, dc
#----------------------------------------------------------------------------
def _unbroadcast(x, shape):
extra_dims = x.ndim - len(shape)
assert extra_dims >= 0
dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)]
if len(dim):
x = x.sum(dim=dim, keepdim=True)
if extra_dims:
x = x.reshape(-1, *x.shape[extra_dims+1:])
assert x.shape == shape
return x
#----------------------------------------------------------------------------
| 2,047 | 32.57377 | 105 | py |
stylegan3 | stylegan3-main/viz/renderer.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys
import copy
import traceback
import numpy as np
import torch
import torch.fft
import torch.nn
import matplotlib.cm
import dnnlib
from torch_utils.ops import upfirdn2d
import legacy # pylint: disable=import-error
#----------------------------------------------------------------------------
class CapturedException(Exception):
def __init__(self, msg=None):
if msg is None:
_type, value, _traceback = sys.exc_info()
assert value is not None
if isinstance(value, CapturedException):
msg = str(value)
else:
msg = traceback.format_exc()
assert isinstance(msg, str)
super().__init__(msg)
#----------------------------------------------------------------------------
class CaptureSuccess(Exception):
def __init__(self, out):
super().__init__()
self.out = out
#----------------------------------------------------------------------------
def _sinc(x):
y = (x * np.pi).abs()
z = torch.sin(y) / y.clamp(1e-30, float('inf'))
return torch.where(y < 1e-30, torch.ones_like(x), z)
def _lanczos_window(x, a):
x = x.abs() / a
return torch.where(x < 1, _sinc(x), torch.zeros_like(x))
#----------------------------------------------------------------------------
def _construct_affine_bandlimit_filter(mat, a=3, amax=16, aflt=64, up=4, cutoff_in=1, cutoff_out=1):
assert a <= amax < aflt
mat = torch.as_tensor(mat).to(torch.float32)
# Construct 2D filter taps in input & output coordinate spaces.
taps = ((torch.arange(aflt * up * 2 - 1, device=mat.device) + 1) / up - aflt).roll(1 - aflt * up)
yi, xi = torch.meshgrid(taps, taps)
xo, yo = (torch.stack([xi, yi], dim=2) @ mat[:2, :2].t()).unbind(2)
# Convolution of two oriented 2D sinc filters.
fi = _sinc(xi * cutoff_in) * _sinc(yi * cutoff_in)
fo = _sinc(xo * cutoff_out) * _sinc(yo * cutoff_out)
f = torch.fft.ifftn(torch.fft.fftn(fi) * torch.fft.fftn(fo)).real
# Convolution of two oriented 2D Lanczos windows.
wi = _lanczos_window(xi, a) * _lanczos_window(yi, a)
wo = _lanczos_window(xo, a) * _lanczos_window(yo, a)
w = torch.fft.ifftn(torch.fft.fftn(wi) * torch.fft.fftn(wo)).real
# Construct windowed FIR filter.
f = f * w
# Finalize.
c = (aflt - amax) * up
f = f.roll([aflt * up - 1] * 2, dims=[0,1])[c:-c, c:-c]
f = torch.nn.functional.pad(f, [0, 1, 0, 1]).reshape(amax * 2, up, amax * 2, up)
f = f / f.sum([0,2], keepdim=True) / (up ** 2)
f = f.reshape(amax * 2 * up, amax * 2 * up)[:-1, :-1]
return f
#----------------------------------------------------------------------------
def _apply_affine_transformation(x, mat, up=4, **filter_kwargs):
_N, _C, H, W = x.shape
mat = torch.as_tensor(mat).to(dtype=torch.float32, device=x.device)
# Construct filter.
f = _construct_affine_bandlimit_filter(mat, up=up, **filter_kwargs)
assert f.ndim == 2 and f.shape[0] == f.shape[1] and f.shape[0] % 2 == 1
p = f.shape[0] // 2
# Construct sampling grid.
theta = mat.inverse()
theta[:2, 2] *= 2
theta[0, 2] += 1 / up / W
theta[1, 2] += 1 / up / H
theta[0, :] *= W / (W + p / up * 2)
theta[1, :] *= H / (H + p / up * 2)
theta = theta[:2, :3].unsqueeze(0).repeat([x.shape[0], 1, 1])
g = torch.nn.functional.affine_grid(theta, x.shape, align_corners=False)
# Resample image.
y = upfirdn2d.upsample2d(x=x, f=f, up=up, padding=p)
z = torch.nn.functional.grid_sample(y, g, mode='bilinear', padding_mode='zeros', align_corners=False)
# Form mask.
m = torch.zeros_like(y)
c = p * 2 + 1
m[:, :, c:-c, c:-c] = 1
m = torch.nn.functional.grid_sample(m, g, mode='nearest', padding_mode='zeros', align_corners=False)
return z, m
#----------------------------------------------------------------------------
class Renderer:
def __init__(self):
self._device = torch.device('cuda')
self._pkl_data = dict() # {pkl: dict | CapturedException, ...}
self._networks = dict() # {cache_key: torch.nn.Module, ...}
self._pinned_bufs = dict() # {(shape, dtype): torch.Tensor, ...}
self._cmaps = dict() # {name: torch.Tensor, ...}
self._is_timing = False
self._start_event = torch.cuda.Event(enable_timing=True)
self._end_event = torch.cuda.Event(enable_timing=True)
self._net_layers = dict() # {cache_key: [dnnlib.EasyDict, ...], ...}
def render(self, **args):
self._is_timing = True
self._start_event.record(torch.cuda.current_stream(self._device))
res = dnnlib.EasyDict()
try:
self._render_impl(res, **args)
except:
res.error = CapturedException()
self._end_event.record(torch.cuda.current_stream(self._device))
if 'image' in res:
res.image = self.to_cpu(res.image).numpy()
if 'stats' in res:
res.stats = self.to_cpu(res.stats).numpy()
if 'error' in res:
res.error = str(res.error)
if self._is_timing:
self._end_event.synchronize()
res.render_time = self._start_event.elapsed_time(self._end_event) * 1e-3
self._is_timing = False
return res
def get_network(self, pkl, key, **tweak_kwargs):
data = self._pkl_data.get(pkl, None)
if data is None:
print(f'Loading "{pkl}"... ', end='', flush=True)
try:
with dnnlib.util.open_url(pkl, verbose=False) as f:
data = legacy.load_network_pkl(f)
print('Done.')
except:
data = CapturedException()
print('Failed!')
self._pkl_data[pkl] = data
self._ignore_timing()
if isinstance(data, CapturedException):
raise data
orig_net = data[key]
cache_key = (orig_net, self._device, tuple(sorted(tweak_kwargs.items())))
net = self._networks.get(cache_key, None)
if net is None:
try:
net = copy.deepcopy(orig_net)
net = self._tweak_network(net, **tweak_kwargs)
net.to(self._device)
except:
net = CapturedException()
self._networks[cache_key] = net
self._ignore_timing()
if isinstance(net, CapturedException):
raise net
return net
def _tweak_network(self, net):
# Print diagnostics.
#for name, value in misc.named_params_and_buffers(net):
# if name.endswith('.magnitude_ema'):
# value = value.rsqrt().numpy()
# print(f'{name:<50s}{np.min(value):<16g}{np.max(value):g}')
# if name.endswith('.weight') and value.ndim == 4:
# value = value.square().mean([1,2,3]).sqrt().numpy()
# print(f'{name:<50s}{np.min(value):<16g}{np.max(value):g}')
return net
def _get_pinned_buf(self, ref):
key = (tuple(ref.shape), ref.dtype)
buf = self._pinned_bufs.get(key, None)
if buf is None:
buf = torch.empty(ref.shape, dtype=ref.dtype).pin_memory()
self._pinned_bufs[key] = buf
return buf
def to_device(self, buf):
return self._get_pinned_buf(buf).copy_(buf).to(self._device)
def to_cpu(self, buf):
return self._get_pinned_buf(buf).copy_(buf).clone()
def _ignore_timing(self):
self._is_timing = False
def _apply_cmap(self, x, name='viridis'):
cmap = self._cmaps.get(name, None)
if cmap is None:
cmap = matplotlib.cm.get_cmap(name)
cmap = cmap(np.linspace(0, 1, num=1024), bytes=True)[:, :3]
cmap = self.to_device(torch.from_numpy(cmap))
self._cmaps[name] = cmap
hi = cmap.shape[0] - 1
x = (x * hi + 0.5).clamp(0, hi).to(torch.int64)
x = torch.nn.functional.embedding(x, cmap)
return x
def _render_impl(self, res,
pkl = None,
w0_seeds = [[0, 1]],
stylemix_idx = [],
stylemix_seed = 0,
trunc_psi = 1,
trunc_cutoff = 0,
random_seed = 0,
noise_mode = 'const',
force_fp32 = False,
layer_name = None,
sel_channels = 3,
base_channel = 0,
img_scale_db = 0,
img_normalize = False,
fft_show = False,
fft_all = True,
fft_range_db = 50,
fft_beta = 8,
input_transform = None,
untransform = False,
):
# Dig up network details.
G = self.get_network(pkl, 'G_ema')
res.img_resolution = G.img_resolution
res.num_ws = G.num_ws
res.has_noise = any('noise_const' in name for name, _buf in G.synthesis.named_buffers())
res.has_input_transform = (hasattr(G.synthesis, 'input') and hasattr(G.synthesis.input, 'transform'))
# Set input transform.
if res.has_input_transform:
m = np.eye(3)
try:
if input_transform is not None:
m = np.linalg.inv(np.asarray(input_transform))
except np.linalg.LinAlgError:
res.error = CapturedException()
G.synthesis.input.transform.copy_(torch.from_numpy(m))
# Generate random latents.
all_seeds = [seed for seed, _weight in w0_seeds] + [stylemix_seed]
all_seeds = list(set(all_seeds))
all_zs = np.zeros([len(all_seeds), G.z_dim], dtype=np.float32)
all_cs = np.zeros([len(all_seeds), G.c_dim], dtype=np.float32)
for idx, seed in enumerate(all_seeds):
rnd = np.random.RandomState(seed)
all_zs[idx] = rnd.randn(G.z_dim)
if G.c_dim > 0:
all_cs[idx, rnd.randint(G.c_dim)] = 1
# Run mapping network.
w_avg = G.mapping.w_avg
all_zs = self.to_device(torch.from_numpy(all_zs))
all_cs = self.to_device(torch.from_numpy(all_cs))
all_ws = G.mapping(z=all_zs, c=all_cs, truncation_psi=trunc_psi, truncation_cutoff=trunc_cutoff) - w_avg
all_ws = dict(zip(all_seeds, all_ws))
# Calculate final W.
w = torch.stack([all_ws[seed] * weight for seed, weight in w0_seeds]).sum(dim=0, keepdim=True)
stylemix_idx = [idx for idx in stylemix_idx if 0 <= idx < G.num_ws]
if len(stylemix_idx) > 0:
w[:, stylemix_idx] = all_ws[stylemix_seed][np.newaxis, stylemix_idx]
w += w_avg
# Run synthesis network.
synthesis_kwargs = dnnlib.EasyDict(noise_mode=noise_mode, force_fp32=force_fp32)
torch.manual_seed(random_seed)
out, layers = self.run_synthesis_net(G.synthesis, w, capture_layer=layer_name, **synthesis_kwargs)
# Update layer list.
cache_key = (G.synthesis, tuple(sorted(synthesis_kwargs.items())))
if cache_key not in self._net_layers:
if layer_name is not None:
torch.manual_seed(random_seed)
_out, layers = self.run_synthesis_net(G.synthesis, w, **synthesis_kwargs)
self._net_layers[cache_key] = layers
res.layers = self._net_layers[cache_key]
# Untransform.
if untransform and res.has_input_transform:
out, _mask = _apply_affine_transformation(out.to(torch.float32), G.synthesis.input.transform, amax=6) # Override amax to hit the fast path in upfirdn2d.
# Select channels and compute statistics.
out = out[0].to(torch.float32)
if sel_channels > out.shape[0]:
sel_channels = 1
base_channel = max(min(base_channel, out.shape[0] - sel_channels), 0)
sel = out[base_channel : base_channel + sel_channels]
res.stats = torch.stack([
out.mean(), sel.mean(),
out.std(), sel.std(),
out.norm(float('inf')), sel.norm(float('inf')),
])
# Scale and convert to uint8.
img = sel
if img_normalize:
img = img / img.norm(float('inf'), dim=[1,2], keepdim=True).clip(1e-8, 1e8)
img = img * (10 ** (img_scale_db / 20))
img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8).permute(1, 2, 0)
res.image = img
# FFT.
if fft_show:
sig = out if fft_all else sel
sig = sig.to(torch.float32)
sig = sig - sig.mean(dim=[1,2], keepdim=True)
sig = sig * torch.kaiser_window(sig.shape[1], periodic=False, beta=fft_beta, device=self._device)[None, :, None]
sig = sig * torch.kaiser_window(sig.shape[2], periodic=False, beta=fft_beta, device=self._device)[None, None, :]
fft = torch.fft.fftn(sig, dim=[1,2]).abs().square().sum(dim=0)
fft = fft.roll(shifts=[fft.shape[0] // 2, fft.shape[1] // 2], dims=[0,1])
fft = (fft / fft.mean()).log10() * 10 # dB
fft = self._apply_cmap((fft / fft_range_db + 1) / 2)
res.image = torch.cat([img.expand_as(fft), fft], dim=1)
@staticmethod
def run_synthesis_net(net, *args, capture_layer=None, **kwargs): # => out, layers
submodule_names = {mod: name for name, mod in net.named_modules()}
unique_names = set()
layers = []
def module_hook(module, _inputs, outputs):
outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]
outputs = [out for out in outputs if isinstance(out, torch.Tensor) and out.ndim in [4, 5]]
for idx, out in enumerate(outputs):
if out.ndim == 5: # G-CNN => remove group dimension.
out = out.mean(2)
name = submodule_names[module]
if name == '':
name = 'output'
if len(outputs) > 1:
name += f':{idx}'
if name in unique_names:
suffix = 2
while f'{name}_{suffix}' in unique_names:
suffix += 1
name += f'_{suffix}'
unique_names.add(name)
shape = [int(x) for x in out.shape]
dtype = str(out.dtype).split('.')[-1]
layers.append(dnnlib.EasyDict(name=name, shape=shape, dtype=dtype))
if name == capture_layer:
raise CaptureSuccess(out)
hooks = [module.register_forward_hook(module_hook) for module in net.modules()]
try:
out = net(*args, **kwargs)
except CaptureSuccess as e:
out = e.out
for hook in hooks:
hook.remove()
return out, layers
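# --- Illustrative sketch (editor's addition, not part of the original file) ---
# run_synthesis_net() enumerates intermediate activations by registering a
# forward hook on every submodule and aborting the forward pass early (via
# CaptureSuccess) once the requested layer has produced its output. The
# self-contained sketch below demonstrates the same hook pattern on a toy
# network; `_demo_capture_activations` is a hypothetical name.
def _demo_capture_activations():
    import torch
    net = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2))
    captured = {}
    def hook(module, _inputs, output):
        captured[id(module)] = output.detach()
    hooks = [m.register_forward_hook(hook) for m in net.modules() if len(list(m.children())) == 0]
    try:
        net(torch.randn(1, 4))
    finally:
        # Always remove hooks, even if the forward pass raises.
        for h in hooks:
            h.remove()
    return captured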
#----------------------------------------------------------------------------
| 15,309 | 39.502646 | 164 | py |
stylegan3 | stylegan3-main/metrics/metric_utils.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Miscellaneous utilities used internally by the quality metrics."""
import os
import time
import hashlib
import pickle
import copy
import uuid
import numpy as np
import torch
import dnnlib
#----------------------------------------------------------------------------
class MetricOptions:
def __init__(self, G=None, G_kwargs={}, dataset_kwargs={}, num_gpus=1, rank=0, device=None, progress=None, cache=True):
assert 0 <= rank < num_gpus
self.G = G
self.G_kwargs = dnnlib.EasyDict(G_kwargs)
self.dataset_kwargs = dnnlib.EasyDict(dataset_kwargs)
self.num_gpus = num_gpus
self.rank = rank
self.device = device if device is not None else torch.device('cuda', rank)
self.progress = progress.sub() if progress is not None and rank == 0 else ProgressMonitor()
self.cache = cache
#----------------------------------------------------------------------------
_feature_detector_cache = dict()
def get_feature_detector_name(url):
return os.path.splitext(url.split('/')[-1])[0]
def get_feature_detector(url, device=torch.device('cpu'), num_gpus=1, rank=0, verbose=False):
assert 0 <= rank < num_gpus
key = (url, device)
if key not in _feature_detector_cache:
is_leader = (rank == 0)
if not is_leader and num_gpus > 1:
torch.distributed.barrier() # leader goes first
with dnnlib.util.open_url(url, verbose=(verbose and is_leader)) as f:
_feature_detector_cache[key] = pickle.load(f).to(device)
if is_leader and num_gpus > 1:
torch.distributed.barrier() # others follow
return _feature_detector_cache[key]
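# --- Illustrative sketch (editor's addition, not part of the original file) ---
# get_feature_detector() serializes the potentially slow download with a
# "leader goes first" double barrier: non-leader ranks wait before touching
# the cache, and the leader waits afterwards, so every rank proceeds with a
# warm cache. A minimal sketch of the same pattern, assuming torch.distributed
# is already initialized; `_demo_leader_goes_first` and `expensive_init` are
# hypothetical names.
def _demo_leader_goes_first(rank, num_gpus, expensive_init):
    if rank != 0 and num_gpus > 1:
        torch.distributed.barrier()  # followers wait for the leader
    result = expensive_init()        # leader fills the cache; followers hit it
    if rank == 0 and num_gpus > 1:
        torch.distributed.barrier()  # leader releases the waiting followers
    return result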
#----------------------------------------------------------------------------
def iterate_random_labels(opts, batch_size):
if opts.G.c_dim == 0:
c = torch.zeros([batch_size, opts.G.c_dim], device=opts.device)
while True:
yield c
else:
dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
while True:
c = [dataset.get_label(np.random.randint(len(dataset))) for _i in range(batch_size)]
c = torch.from_numpy(np.stack(c)).pin_memory().to(opts.device)
yield c
#----------------------------------------------------------------------------
class FeatureStats:
def __init__(self, capture_all=False, capture_mean_cov=False, max_items=None):
self.capture_all = capture_all
self.capture_mean_cov = capture_mean_cov
self.max_items = max_items
self.num_items = 0
self.num_features = None
self.all_features = None
self.raw_mean = None
self.raw_cov = None
def set_num_features(self, num_features):
if self.num_features is not None:
assert num_features == self.num_features
else:
self.num_features = num_features
self.all_features = []
self.raw_mean = np.zeros([num_features], dtype=np.float64)
self.raw_cov = np.zeros([num_features, num_features], dtype=np.float64)
def is_full(self):
return (self.max_items is not None) and (self.num_items >= self.max_items)
def append(self, x):
x = np.asarray(x, dtype=np.float32)
assert x.ndim == 2
if (self.max_items is not None) and (self.num_items + x.shape[0] > self.max_items):
if self.num_items >= self.max_items:
return
x = x[:self.max_items - self.num_items]
self.set_num_features(x.shape[1])
self.num_items += x.shape[0]
if self.capture_all:
self.all_features.append(x)
if self.capture_mean_cov:
x64 = x.astype(np.float64)
self.raw_mean += x64.sum(axis=0)
self.raw_cov += x64.T @ x64
def append_torch(self, x, num_gpus=1, rank=0):
assert isinstance(x, torch.Tensor) and x.ndim == 2
assert 0 <= rank < num_gpus
if num_gpus > 1:
ys = []
for src in range(num_gpus):
y = x.clone()
torch.distributed.broadcast(y, src=src)
ys.append(y)
x = torch.stack(ys, dim=1).flatten(0, 1) # interleave samples
self.append(x.cpu().numpy())
def get_all(self):
assert self.capture_all
return np.concatenate(self.all_features, axis=0)
def get_all_torch(self):
return torch.from_numpy(self.get_all())
def get_mean_cov(self):
assert self.capture_mean_cov
mean = self.raw_mean / self.num_items
cov = self.raw_cov / self.num_items
cov = cov - np.outer(mean, mean)
return mean, cov
def save(self, pkl_file):
with open(pkl_file, 'wb') as f:
pickle.dump(self.__dict__, f)
@staticmethod
def load(pkl_file):
with open(pkl_file, 'rb') as f:
s = dnnlib.EasyDict(pickle.load(f))
obj = FeatureStats(capture_all=s.capture_all, max_items=s.max_items)
obj.__dict__.update(s)
return obj
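# --- Illustrative sketch (editor's addition, not part of the original file) ---
# FeatureStats only ever stores the running sums sum(x) and sum(x x^T); the
# (biased) covariance is recovered in get_mean_cov() via the identity
# cov = E[x x^T] - mean mean^T. A quick numeric check of that identity;
# `_demo_streaming_cov` is a hypothetical name.
def _demo_streaming_cov():
    x = np.random.randn(1000, 8)
    mean = x.sum(axis=0) / len(x)
    cov = (x.T @ x) / len(x) - np.outer(mean, mean)
    assert np.allclose(cov, np.cov(x, rowvar=False, bias=True))
    return mean, cov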
#----------------------------------------------------------------------------
class ProgressMonitor:
def __init__(self, tag=None, num_items=None, flush_interval=1000, verbose=False, progress_fn=None, pfn_lo=0, pfn_hi=1000, pfn_total=1000):
self.tag = tag
self.num_items = num_items
self.verbose = verbose
self.flush_interval = flush_interval
self.progress_fn = progress_fn
self.pfn_lo = pfn_lo
self.pfn_hi = pfn_hi
self.pfn_total = pfn_total
self.start_time = time.time()
self.batch_time = self.start_time
self.batch_items = 0
if self.progress_fn is not None:
self.progress_fn(self.pfn_lo, self.pfn_total)
def update(self, cur_items):
assert (self.num_items is None) or (cur_items <= self.num_items)
if (cur_items < self.batch_items + self.flush_interval) and (self.num_items is None or cur_items < self.num_items):
return
cur_time = time.time()
total_time = cur_time - self.start_time
time_per_item = (cur_time - self.batch_time) / max(cur_items - self.batch_items, 1)
if (self.verbose) and (self.tag is not None):
print(f'{self.tag:<19s} items {cur_items:<7d} time {dnnlib.util.format_time(total_time):<12s} ms/item {time_per_item*1e3:.2f}')
self.batch_time = cur_time
self.batch_items = cur_items
if (self.progress_fn is not None) and (self.num_items is not None):
self.progress_fn(self.pfn_lo + (self.pfn_hi - self.pfn_lo) * (cur_items / self.num_items), self.pfn_total)
def sub(self, tag=None, num_items=None, flush_interval=1000, rel_lo=0, rel_hi=1):
return ProgressMonitor(
tag = tag,
num_items = num_items,
flush_interval = flush_interval,
verbose = self.verbose,
progress_fn = self.progress_fn,
pfn_lo = self.pfn_lo + (self.pfn_hi - self.pfn_lo) * rel_lo,
pfn_hi = self.pfn_lo + (self.pfn_hi - self.pfn_lo) * rel_hi,
pfn_total = self.pfn_total,
)
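# --- Illustrative sketch (editor's addition, not part of the original file) ---
# ProgressMonitor.sub() nests progress bars by linearly remapping a child's
# relative range [rel_lo, rel_hi] into the parent's absolute window
# [pfn_lo, pfn_hi]. With the values below, the child's work advances overall
# progress from 250/1000 to 750/1000; `_demo_nested_progress` is a
# hypothetical name.
def _demo_nested_progress():
    parent = ProgressMonitor(pfn_lo=0, pfn_hi=1000, pfn_total=1000)
    child = parent.sub(rel_lo=0.25, rel_hi=0.75)
    assert (child.pfn_lo, child.pfn_hi) == (250.0, 750.0)
    return child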
#----------------------------------------------------------------------------
def compute_feature_stats_for_dataset(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, data_loader_kwargs=None, max_items=None, **stats_kwargs):
dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
if data_loader_kwargs is None:
data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)
# Try to lookup from cache.
cache_file = None
if opts.cache:
# Choose cache file name.
args = dict(dataset_kwargs=opts.dataset_kwargs, detector_url=detector_url, detector_kwargs=detector_kwargs, stats_kwargs=stats_kwargs)
md5 = hashlib.md5(repr(sorted(args.items())).encode('utf-8'))
cache_tag = f'{dataset.name}-{get_feature_detector_name(detector_url)}-{md5.hexdigest()}'
cache_file = dnnlib.make_cache_dir_path('gan-metrics', cache_tag + '.pkl')
# Check if the file exists (all processes must agree).
flag = os.path.isfile(cache_file) if opts.rank == 0 else False
if opts.num_gpus > 1:
flag = torch.as_tensor(flag, dtype=torch.float32, device=opts.device)
torch.distributed.broadcast(tensor=flag, src=0)
flag = (float(flag.cpu()) != 0)
# Load.
if flag:
return FeatureStats.load(cache_file)
# Initialize.
num_items = len(dataset)
if max_items is not None:
num_items = min(num_items, max_items)
stats = FeatureStats(max_items=num_items, **stats_kwargs)
progress = opts.progress.sub(tag='dataset features', num_items=num_items, rel_lo=rel_lo, rel_hi=rel_hi)
detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)
# Main loop.
item_subset = [(i * opts.num_gpus + opts.rank) % num_items for i in range((num_items - 1) // opts.num_gpus + 1)]
for images, _labels in torch.utils.data.DataLoader(dataset=dataset, sampler=item_subset, batch_size=batch_size, **data_loader_kwargs):
if images.shape[1] == 1:
images = images.repeat([1, 3, 1, 1])
features = detector(images.to(opts.device), **detector_kwargs)
stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
progress.update(stats.num_items)
# Save to cache.
if cache_file is not None and opts.rank == 0:
os.makedirs(os.path.dirname(cache_file), exist_ok=True)
temp_file = cache_file + '.' + uuid.uuid4().hex
stats.save(temp_file)
os.replace(temp_file, cache_file) # atomic
return stats
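# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The cache write above stays atomic by dumping to a uniquely named temp file
# and then os.replace()-ing it over the final path: concurrent readers see
# either the old file or the complete new one, never a half-written pickle.
# The same pattern in isolation; `_demo_atomic_write` is a hypothetical name.
def _demo_atomic_write(path, data):
    temp_path = path + '.' + uuid.uuid4().hex
    with open(temp_path, 'wb') as f:
        pickle.dump(data, f)
    os.replace(temp_path, path)  # atomic on both POSIX and Windows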
#----------------------------------------------------------------------------
def compute_feature_stats_for_generator(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, batch_gen=None, **stats_kwargs):
if batch_gen is None:
batch_gen = min(batch_size, 4)
assert batch_size % batch_gen == 0
# Setup generator and labels.
G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device)
c_iter = iterate_random_labels(opts=opts, batch_size=batch_gen)
# Initialize.
stats = FeatureStats(**stats_kwargs)
assert stats.max_items is not None
progress = opts.progress.sub(tag='generator features', num_items=stats.max_items, rel_lo=rel_lo, rel_hi=rel_hi)
detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)
# Main loop.
while not stats.is_full():
images = []
for _i in range(batch_size // batch_gen):
z = torch.randn([batch_gen, G.z_dim], device=opts.device)
img = G(z=z, c=next(c_iter), **opts.G_kwargs)
img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8)
images.append(img)
images = torch.cat(images)
if images.shape[1] == 1:
images = images.repeat([1, 3, 1, 1])
features = detector(images, **detector_kwargs)
stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
progress.update(stats.num_items)
return stats
#----------------------------------------------------------------------------
| 11,936 | 41.632143 | 167 | py |
stylegan3 | stylegan3-main/metrics/equivariance.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Equivariance metrics (EQ-T, EQ-T_frac, and EQ-R) from the paper
"Alias-Free Generative Adversarial Networks"."""
import copy
import numpy as np
import torch
import torch.fft
from torch_utils.ops import upfirdn2d
from . import metric_utils
#----------------------------------------------------------------------------
# Utilities.
def sinc(x):
y = (x * np.pi).abs()
z = torch.sin(y) / y.clamp(1e-30, float('inf'))
return torch.where(y < 1e-30, torch.ones_like(x), z)
def lanczos_window(x, a):
x = x.abs() / a
return torch.where(x < 1, sinc(x), torch.zeros_like(x))
def rotation_matrix(angle):
angle = torch.as_tensor(angle).to(torch.float32)
mat = torch.eye(3, device=angle.device)
mat[0, 0] = angle.cos()
mat[0, 1] = angle.sin()
mat[1, 0] = -angle.sin()
mat[1, 1] = angle.cos()
return mat
#----------------------------------------------------------------------------
# Apply integer translation to a batch of 2D images. Corresponds to the
# operator T_x in Appendix E.1.
def apply_integer_translation(x, tx, ty):
_N, _C, H, W = x.shape
tx = torch.as_tensor(tx * W).to(dtype=torch.float32, device=x.device)
ty = torch.as_tensor(ty * H).to(dtype=torch.float32, device=x.device)
ix = tx.round().to(torch.int64)
iy = ty.round().to(torch.int64)
z = torch.zeros_like(x)
m = torch.zeros_like(x)
if abs(ix) < W and abs(iy) < H:
y = x[:, :, max(-iy,0) : H+min(-iy,0), max(-ix,0) : W+min(-ix,0)]
z[:, :, max(iy,0) : H+min(iy,0), max(ix,0) : W+min(ix,0)] = y
m[:, :, max(iy,0) : H+min(iy,0), max(ix,0) : W+min(ix,0)] = 1
return z, m
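# --- Illustrative sketch (editor's addition, not part of the original file) ---
# apply_integer_translation() shifts the image by whole pixels and returns a
# validity mask that is 1 exactly where shifted content landed. A quick sanity
# check for a shift of +2 px in x on an 8x8 image (tx is expressed as a
# fraction of the width, hence 2/8); `_demo_integer_translation` is a
# hypothetical name.
def _demo_integer_translation():
    x = torch.arange(64, dtype=torch.float32).reshape(1, 1, 8, 8)
    z, m = apply_integer_translation(x, tx=2/8, ty=0)
    assert torch.equal(z[:, :, :, 2:], x[:, :, :, :6])  # content moved right by 2 px
    assert m[:, :, :, :2].sum() == 0                    # vacated columns are masked out
    return z, m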
#----------------------------------------------------------------------------
# Apply fractional translation to a batch of 2D images. Corresponds to the
# operator T_x in Appendix E.2.
def apply_fractional_translation(x, tx, ty, a=3):
_N, _C, H, W = x.shape
tx = torch.as_tensor(tx * W).to(dtype=torch.float32, device=x.device)
ty = torch.as_tensor(ty * H).to(dtype=torch.float32, device=x.device)
ix = tx.floor().to(torch.int64)
iy = ty.floor().to(torch.int64)
fx = tx - ix
fy = ty - iy
b = a - 1
z = torch.zeros_like(x)
zx0 = max(ix - b, 0)
zy0 = max(iy - b, 0)
zx1 = min(ix + a, 0) + W
zy1 = min(iy + a, 0) + H
if zx0 < zx1 and zy0 < zy1:
taps = torch.arange(a * 2, device=x.device) - b
filter_x = (sinc(taps - fx) * sinc((taps - fx) / a)).unsqueeze(0)
filter_y = (sinc(taps - fy) * sinc((taps - fy) / a)).unsqueeze(1)
y = x
y = upfirdn2d.filter2d(y, filter_x / filter_x.sum(), padding=[b,a,0,0])
y = upfirdn2d.filter2d(y, filter_y / filter_y.sum(), padding=[0,0,b,a])
y = y[:, :, max(b-iy,0) : H+b+a+min(-iy-a,0), max(b-ix,0) : W+b+a+min(-ix-a,0)]
z[:, :, zy0:zy1, zx0:zx1] = y
m = torch.zeros_like(x)
mx0 = max(ix + a, 0)
my0 = max(iy + a, 0)
mx1 = min(ix - b, 0) + W
my1 = min(iy - b, 0) + H
if mx0 < mx1 and my0 < my1:
m[:, :, my0:my1, mx0:mx1] = 1
return z, m
#----------------------------------------------------------------------------
# Construct an oriented low-pass filter that applies the appropriate
# bandlimit with respect to the input and output of the given affine 2D
# image transformation.
def construct_affine_bandlimit_filter(mat, a=3, amax=16, aflt=64, up=4, cutoff_in=1, cutoff_out=1):
assert a <= amax < aflt
mat = torch.as_tensor(mat).to(torch.float32)
# Construct 2D filter taps in input & output coordinate spaces.
taps = ((torch.arange(aflt * up * 2 - 1, device=mat.device) + 1) / up - aflt).roll(1 - aflt * up)
yi, xi = torch.meshgrid(taps, taps)
xo, yo = (torch.stack([xi, yi], dim=2) @ mat[:2, :2].t()).unbind(2)
# Convolution of two oriented 2D sinc filters.
fi = sinc(xi * cutoff_in) * sinc(yi * cutoff_in)
fo = sinc(xo * cutoff_out) * sinc(yo * cutoff_out)
f = torch.fft.ifftn(torch.fft.fftn(fi) * torch.fft.fftn(fo)).real
# Convolution of two oriented 2D Lanczos windows.
wi = lanczos_window(xi, a) * lanczos_window(yi, a)
wo = lanczos_window(xo, a) * lanczos_window(yo, a)
w = torch.fft.ifftn(torch.fft.fftn(wi) * torch.fft.fftn(wo)).real
# Construct windowed FIR filter.
f = f * w
# Finalize.
c = (aflt - amax) * up
f = f.roll([aflt * up - 1] * 2, dims=[0,1])[c:-c, c:-c]
f = torch.nn.functional.pad(f, [0, 1, 0, 1]).reshape(amax * 2, up, amax * 2, up)
f = f / f.sum([0,2], keepdim=True) / (up ** 2)
f = f.reshape(amax * 2 * up, amax * 2 * up)[:-1, :-1]
return f
#----------------------------------------------------------------------------
# Apply the given affine transformation to a batch of 2D images.
def apply_affine_transformation(x, mat, up=4, **filter_kwargs):
_N, _C, H, W = x.shape
mat = torch.as_tensor(mat).to(dtype=torch.float32, device=x.device)
# Construct filter.
f = construct_affine_bandlimit_filter(mat, up=up, **filter_kwargs)
assert f.ndim == 2 and f.shape[0] == f.shape[1] and f.shape[0] % 2 == 1
p = f.shape[0] // 2
# Construct sampling grid.
theta = mat.inverse()
theta[:2, 2] *= 2
theta[0, 2] += 1 / up / W
theta[1, 2] += 1 / up / H
theta[0, :] *= W / (W + p / up * 2)
theta[1, :] *= H / (H + p / up * 2)
theta = theta[:2, :3].unsqueeze(0).repeat([x.shape[0], 1, 1])
g = torch.nn.functional.affine_grid(theta, x.shape, align_corners=False)
# Resample image.
y = upfirdn2d.upsample2d(x=x, f=f, up=up, padding=p)
z = torch.nn.functional.grid_sample(y, g, mode='bilinear', padding_mode='zeros', align_corners=False)
# Form mask.
m = torch.zeros_like(y)
c = p * 2 + 1
m[:, :, c:-c, c:-c] = 1
m = torch.nn.functional.grid_sample(m, g, mode='nearest', padding_mode='zeros', align_corners=False)
return z, m
#----------------------------------------------------------------------------
# Apply fractional rotation to a batch of 2D images. Corresponds to the
# operator R_\alpha in Appendix E.3.
def apply_fractional_rotation(x, angle, a=3, **filter_kwargs):
angle = torch.as_tensor(angle).to(dtype=torch.float32, device=x.device)
mat = rotation_matrix(angle)
return apply_affine_transformation(x, mat, a=a, amax=a*2, **filter_kwargs)
#----------------------------------------------------------------------------
# Modify the frequency content of a batch of 2D images as if they had undergone
# fractional rotation -- but without actually rotating them. Corresponds to
# the operator R^*_\alpha in Appendix E.3.
def apply_fractional_pseudo_rotation(x, angle, a=3, **filter_kwargs):
angle = torch.as_tensor(angle).to(dtype=torch.float32, device=x.device)
mat = rotation_matrix(-angle)
f = construct_affine_bandlimit_filter(mat, a=a, amax=a*2, up=1, **filter_kwargs)
y = upfirdn2d.filter2d(x=x, f=f)
m = torch.zeros_like(y)
c = f.shape[0] // 2
m[:, :, c:-c, c:-c] = 1
return y, m
#----------------------------------------------------------------------------
# Compute the selected equivariance metrics for the given generator.
def compute_equivariance_metrics(opts, num_samples, batch_size, translate_max=0.125, rotate_max=1, compute_eqt_int=False, compute_eqt_frac=False, compute_eqr=False):
assert compute_eqt_int or compute_eqt_frac or compute_eqr
# Setup generator and labels.
G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device)
I = torch.eye(3, device=opts.device)
M = getattr(getattr(getattr(G, 'synthesis', None), 'input', None), 'transform', None)
if M is None:
raise ValueError('Cannot compute equivariance metrics; the given generator does not support user-specified image transformations')
c_iter = metric_utils.iterate_random_labels(opts=opts, batch_size=batch_size)
# Sampling loop.
sums = None
progress = opts.progress.sub(tag='eq sampling', num_items=num_samples)
for batch_start in range(0, num_samples, batch_size * opts.num_gpus):
progress.update(batch_start)
s = []
# Randomize noise buffers, if any.
for name, buf in G.named_buffers():
if name.endswith('.noise_const'):
buf.copy_(torch.randn_like(buf))
# Run mapping network.
z = torch.randn([batch_size, G.z_dim], device=opts.device)
c = next(c_iter)
ws = G.mapping(z=z, c=c)
# Generate reference image.
M[:] = I
orig = G.synthesis(ws=ws, noise_mode='const', **opts.G_kwargs)
# Integer translation (EQ-T).
if compute_eqt_int:
t = (torch.rand(2, device=opts.device) * 2 - 1) * translate_max
t = (t * G.img_resolution).round() / G.img_resolution
M[:] = I
M[:2, 2] = -t
img = G.synthesis(ws=ws, noise_mode='const', **opts.G_kwargs)
ref, mask = apply_integer_translation(orig, t[0], t[1])
s += [(ref - img).square() * mask, mask]
# Fractional translation (EQ-T_frac).
if compute_eqt_frac:
t = (torch.rand(2, device=opts.device) * 2 - 1) * translate_max
M[:] = I
M[:2, 2] = -t
img = G.synthesis(ws=ws, noise_mode='const', **opts.G_kwargs)
ref, mask = apply_fractional_translation(orig, t[0], t[1])
s += [(ref - img).square() * mask, mask]
# Rotation (EQ-R).
if compute_eqr:
angle = (torch.rand([], device=opts.device) * 2 - 1) * (rotate_max * np.pi)
M[:] = rotation_matrix(-angle)
img = G.synthesis(ws=ws, noise_mode='const', **opts.G_kwargs)
ref, ref_mask = apply_fractional_rotation(orig, angle)
pseudo, pseudo_mask = apply_fractional_pseudo_rotation(img, angle)
mask = ref_mask * pseudo_mask
s += [(ref - pseudo).square() * mask, mask]
# Accumulate results.
s = torch.stack([x.to(torch.float64).sum() for x in s])
sums = sums + s if sums is not None else s
progress.update(num_samples)
# Compute PSNRs.
if opts.num_gpus > 1:
torch.distributed.all_reduce(sums)
sums = sums.cpu()
mses = sums[0::2] / sums[1::2]
psnrs = np.log10(2) * 20 - mses.log10() * 10
psnrs = tuple(psnrs.numpy())
return psnrs[0] if len(psnrs) == 1 else psnrs
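# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The PSNR conversion above assumes generator output in [-1, 1], i.e. a peak
# value of 2: PSNR = 10*log10(peak^2 / MSE) = 20*log10(2) - 10*log10(MSE),
# which is exactly `np.log10(2) * 20 - mses.log10() * 10`. A quick numeric
# check of the rearrangement; `_demo_psnr_identity` is a hypothetical name.
def _demo_psnr_identity(mse=0.01):
    direct = 10 * np.log10(2 ** 2 / mse)
    rearranged = np.log10(2) * 20 - np.log10(mse) * 10
    assert np.isclose(direct, rearranged)
    return direct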
#----------------------------------------------------------------------------
| 10,868 | 39.55597 | 165 | py |
stylegan3 | stylegan3-main/metrics/perceptual_path_length.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Perceptual Path Length (PPL) from the paper "A Style-Based Generator
Architecture for Generative Adversarial Networks". Matches the original
implementation by Karras et al. at
https://github.com/NVlabs/stylegan/blob/master/metrics/perceptual_path_length.py"""
import copy
import numpy as np
import torch
from . import metric_utils
#----------------------------------------------------------------------------
# Spherical interpolation of a batch of vectors.
def slerp(a, b, t):
a = a / a.norm(dim=-1, keepdim=True)
b = b / b.norm(dim=-1, keepdim=True)
d = (a * b).sum(dim=-1, keepdim=True)
p = t * torch.acos(d)
c = b - d * a
c = c / c.norm(dim=-1, keepdim=True)
d = a * torch.cos(p) + c * torch.sin(p)
d = d / d.norm(dim=-1, keepdim=True)
return d
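# --- Illustrative sketch (editor's addition, not part of the original file) ---
# slerp() interpolates along the great circle between the two re-normalized
# inputs, so every interpolant lies on the unit sphere and the endpoints are
# recovered (up to normalization) at t=0 and t=1. A small sanity check;
# `_demo_slerp` is a hypothetical name.
def _demo_slerp():
    a, b = torch.randn(4, 16), torch.randn(4, 16)
    mid = slerp(a, b, torch.tensor(0.5))
    assert torch.allclose(mid.norm(dim=-1), torch.ones(4), atol=1e-4)  # stays on the unit sphere
    assert torch.allclose(slerp(a, b, torch.tensor(0.0)), a / a.norm(dim=-1, keepdim=True), atol=1e-4)
    return mid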
#----------------------------------------------------------------------------
class PPLSampler(torch.nn.Module):
def __init__(self, G, G_kwargs, epsilon, space, sampling, crop, vgg16):
assert space in ['z', 'w']
assert sampling in ['full', 'end']
super().__init__()
self.G = copy.deepcopy(G)
self.G_kwargs = G_kwargs
self.epsilon = epsilon
self.space = space
self.sampling = sampling
self.crop = crop
self.vgg16 = copy.deepcopy(vgg16)
def forward(self, c):
# Generate random latents and interpolation t-values.
t = torch.rand([c.shape[0]], device=c.device) * (1 if self.sampling == 'full' else 0)
z0, z1 = torch.randn([c.shape[0] * 2, self.G.z_dim], device=c.device).chunk(2)
# Interpolate in W or Z.
if self.space == 'w':
w0, w1 = self.G.mapping(z=torch.cat([z0,z1]), c=torch.cat([c,c])).chunk(2)
wt0 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2))
wt1 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2) + self.epsilon)
else: # space == 'z'
zt0 = slerp(z0, z1, t.unsqueeze(1))
zt1 = slerp(z0, z1, t.unsqueeze(1) + self.epsilon)
wt0, wt1 = self.G.mapping(z=torch.cat([zt0,zt1]), c=torch.cat([c,c])).chunk(2)
# Randomize noise buffers.
for name, buf in self.G.named_buffers():
if name.endswith('.noise_const'):
buf.copy_(torch.randn_like(buf))
# Generate images.
img = self.G.synthesis(ws=torch.cat([wt0,wt1]), noise_mode='const', force_fp32=True, **self.G_kwargs)
# Center crop.
if self.crop:
assert img.shape[2] == img.shape[3]
c = img.shape[2] // 8
img = img[:, :, c*3 : c*7, c*2 : c*6]
# Downsample to 256x256.
factor = self.G.img_resolution // 256
if factor > 1:
img = img.reshape([-1, img.shape[1], img.shape[2] // factor, factor, img.shape[3] // factor, factor]).mean([3, 5])
# Scale dynamic range from [-1,1] to [0,255].
img = (img + 1) * (255 / 2)
if self.G.img_channels == 1:
img = img.repeat([1, 3, 1, 1])
# Evaluate differential LPIPS.
lpips_t0, lpips_t1 = self.vgg16(img, resize_images=False, return_lpips=True).chunk(2)
dist = (lpips_t0 - lpips_t1).square().sum(1) / self.epsilon ** 2
return dist
#----------------------------------------------------------------------------
def compute_ppl(opts, num_samples, epsilon, space, sampling, crop, batch_size):
vgg16_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/vgg16.pkl'
vgg16 = metric_utils.get_feature_detector(vgg16_url, num_gpus=opts.num_gpus, rank=opts.rank, verbose=opts.progress.verbose)
# Setup sampler and labels.
sampler = PPLSampler(G=opts.G, G_kwargs=opts.G_kwargs, epsilon=epsilon, space=space, sampling=sampling, crop=crop, vgg16=vgg16)
sampler.eval().requires_grad_(False).to(opts.device)
c_iter = metric_utils.iterate_random_labels(opts=opts, batch_size=batch_size)
# Sampling loop.
dist = []
progress = opts.progress.sub(tag='ppl sampling', num_items=num_samples)
for batch_start in range(0, num_samples, batch_size * opts.num_gpus):
progress.update(batch_start)
x = sampler(next(c_iter))
for src in range(opts.num_gpus):
y = x.clone()
if opts.num_gpus > 1:
torch.distributed.broadcast(y, src=src)
dist.append(y)
progress.update(num_samples)
# Compute PPL.
if opts.rank != 0:
return float('nan')
dist = torch.cat(dist)[:num_samples].cpu().numpy()
lo = np.percentile(dist, 1, interpolation='lower')
hi = np.percentile(dist, 99, interpolation='higher')
ppl = np.extract(np.logical_and(dist >= lo, dist <= hi), dist).mean()
return float(ppl)
#----------------------------------------------------------------------------
| 5,256 | 40.722222 | 131 | py |
stylegan3 | stylegan3-main/metrics/metric_main.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Main API for computing and reporting quality metrics."""
import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
#----------------------------------------------------------------------------
_metric_dict = dict() # name => fn
def register_metric(fn):
assert callable(fn)
_metric_dict[fn.__name__] = fn
return fn
def is_valid_metric(metric):
return metric in _metric_dict
def list_valid_metrics():
return list(_metric_dict.keys())
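# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Metrics plug into calc_metric() through a decorator registry: @register_metric
# stores each function in _metric_dict under its own __name__, which then
# becomes the metric string accepted by calc_metric(). A hypothetical custom
# metric (`dummy10` and its settings are illustrative, not a recommended
# configuration) would be registered like this:
def _demo_register_custom_metric():
    @register_metric
    def dummy10(opts):
        fid = frechet_inception_distance.compute_fid(opts, max_real=10, num_gen=10)
        return dict(dummy10=fid)
    assert is_valid_metric('dummy10')  # calc_metric('dummy10', ...) now works
    return dummy10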
#----------------------------------------------------------------------------
def calc_metric(metric, **kwargs): # See metric_utils.MetricOptions for the full list of arguments.
assert is_valid_metric(metric)
opts = metric_utils.MetricOptions(**kwargs)
# Calculate.
start_time = time.time()
results = _metric_dict[metric](opts)
total_time = time.time() - start_time
# Broadcast results.
for key, value in list(results.items()):
if opts.num_gpus > 1:
value = torch.as_tensor(value, dtype=torch.float64, device=opts.device)
torch.distributed.broadcast(tensor=value, src=0)
value = float(value.cpu())
results[key] = value
# Decorate with metadata.
return dnnlib.EasyDict(
results = dnnlib.EasyDict(results),
metric = metric,
total_time = total_time,
total_time_str = dnnlib.util.format_time(total_time),
num_gpus = opts.num_gpus,
)
#----------------------------------------------------------------------------
def report_metric(result_dict, run_dir=None, snapshot_pkl=None):
metric = result_dict['metric']
assert is_valid_metric(metric)
if run_dir is not None and snapshot_pkl is not None:
snapshot_pkl = os.path.relpath(snapshot_pkl, run_dir)
jsonl_line = json.dumps(dict(result_dict, snapshot_pkl=snapshot_pkl, timestamp=time.time()))
print(jsonl_line)
if run_dir is not None and os.path.isdir(run_dir):
with open(os.path.join(run_dir, f'metric-{metric}.jsonl'), 'at') as f:
f.write(jsonl_line + '\n')
#----------------------------------------------------------------------------
# Recommended metrics.
@register_metric
def fid50k_full(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=50000)
return dict(fid50k_full=fid)
@register_metric
def kid50k_full(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
kid = kernel_inception_distance.compute_kid(opts, max_real=1000000, num_gen=50000, num_subsets=100, max_subset_size=1000)
return dict(kid50k_full=kid)
@register_metric
def pr50k3_full(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
precision, recall = precision_recall.compute_pr(opts, max_real=200000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000)
return dict(pr50k3_full_precision=precision, pr50k3_full_recall=recall)
@register_metric
def ppl2_wend(opts):
ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='w', sampling='end', crop=False, batch_size=2)
return dict(ppl2_wend=ppl)
@register_metric
def eqt50k_int(opts):
opts.G_kwargs.update(force_fp32=True)
psnr = equivariance.compute_equivariance_metrics(opts, num_samples=50000, batch_size=4, compute_eqt_int=True)
return dict(eqt50k_int=psnr)
@register_metric
def eqt50k_frac(opts):
opts.G_kwargs.update(force_fp32=True)
psnr = equivariance.compute_equivariance_metrics(opts, num_samples=50000, batch_size=4, compute_eqt_frac=True)
return dict(eqt50k_frac=psnr)
@register_metric
def eqr50k(opts):
opts.G_kwargs.update(force_fp32=True)
psnr = equivariance.compute_equivariance_metrics(opts, num_samples=50000, batch_size=4, compute_eqr=True)
return dict(eqr50k=psnr)
#----------------------------------------------------------------------------
# Legacy metrics.
@register_metric
def fid50k(opts):
opts.dataset_kwargs.update(max_size=None)
fid = frechet_inception_distance.compute_fid(opts, max_real=50000, num_gen=50000)
return dict(fid50k=fid)
@register_metric
def kid50k(opts):
opts.dataset_kwargs.update(max_size=None)
kid = kernel_inception_distance.compute_kid(opts, max_real=50000, num_gen=50000, num_subsets=100, max_subset_size=1000)
return dict(kid50k=kid)
@register_metric
def pr50k3(opts):
opts.dataset_kwargs.update(max_size=None)
precision, recall = precision_recall.compute_pr(opts, max_real=50000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000)
return dict(pr50k3_precision=precision, pr50k3_recall=recall)
@register_metric
def is50k(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
mean, std = inception_score.compute_is(opts, num_gen=50000, num_splits=10)
return dict(is50k_mean=mean, is50k_std=std)
#----------------------------------------------------------------------------
| 5,675 | 35.857143 | 147 | py |
stylegan3 | stylegan3-main/metrics/precision_recall.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Precision/Recall (PR) from the paper "Improved Precision and Recall
Metric for Assessing Generative Models". Matches the original implementation
by Kynkaanniemi et al. at
https://github.com/kynkaat/improved-precision-and-recall-metric/blob/master/precision_recall.py"""
import torch
from . import metric_utils
#----------------------------------------------------------------------------
def compute_distances(row_features, col_features, num_gpus, rank, col_batch_size):
assert 0 <= rank < num_gpus
num_cols = col_features.shape[0]
num_batches = ((num_cols - 1) // col_batch_size // num_gpus + 1) * num_gpus
col_batches = torch.nn.functional.pad(col_features, [0, 0, 0, -num_cols % num_batches]).chunk(num_batches)
dist_batches = []
for col_batch in col_batches[rank :: num_gpus]:
dist_batch = torch.cdist(row_features.unsqueeze(0), col_batch.unsqueeze(0))[0]
for src in range(num_gpus):
dist_broadcast = dist_batch.clone()
if num_gpus > 1:
torch.distributed.broadcast(dist_broadcast, src=src)
dist_batches.append(dist_broadcast.cpu() if rank == 0 else None)
return torch.cat(dist_batches, dim=1)[:, :num_cols] if rank == 0 else None
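# --- Illustrative sketch (editor's addition, not part of the original file) ---
# compute_distances() pads the column features with `-num_cols % num_batches`
# zero rows so they split into equal chunks, and crops the padding back off at
# the end with `[:, :num_cols]`. A worked example of the padding arithmetic;
# `_demo_even_chunks` is a hypothetical name.
def _demo_even_chunks():
    num_cols, num_batches = 10, 4
    pad = -num_cols % num_batches  # 2 extra zero rows
    x = torch.randn(num_cols, 3)
    chunks = torch.nn.functional.pad(x, [0, 0, 0, pad]).chunk(num_batches)
    assert all(chunk.shape[0] == 3 for chunk in chunks)  # 12 rows -> 4 chunks of 3
    return chunks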
#----------------------------------------------------------------------------
def compute_pr(opts, max_real, num_gen, nhood_size, row_batch_size, col_batch_size):
detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/vgg16.pkl'
detector_kwargs = dict(return_features=True)
real_features = metric_utils.compute_feature_stats_for_dataset(
opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all_torch().to(torch.float16).to(opts.device)
gen_features = metric_utils.compute_feature_stats_for_generator(
opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all_torch().to(torch.float16).to(opts.device)
results = dict()
for name, manifold, probes in [('precision', real_features, gen_features), ('recall', gen_features, real_features)]:
kth = []
for manifold_batch in manifold.split(row_batch_size):
dist = compute_distances(row_features=manifold_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
kth.append(dist.to(torch.float32).kthvalue(nhood_size + 1).values.to(torch.float16) if opts.rank == 0 else None)
kth = torch.cat(kth) if opts.rank == 0 else None
pred = []
for probes_batch in probes.split(row_batch_size):
dist = compute_distances(row_features=probes_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
pred.append((dist <= kth).any(dim=1) if opts.rank == 0 else None)
results[name] = float(torch.cat(pred).to(torch.float32).mean() if opts.rank == 0 else 'nan')
return results['precision'], results['recall']
#----------------------------------------------------------------------------
| 3,644 | 56.857143 | 159 | py |
MaskTextSpotter | MaskTextSpotter-master/setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#!/usr/bin/env python
import glob
import os
import torch
from setuptools import find_packages
from setuptools import setup
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
requirements = ["torch", "torchvision"]
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "maskrcnn_benchmark", "csrc")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"maskrcnn_benchmark._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
setup(
name="maskrcnn_benchmark",
version="0.1",
author="fmassa",
url="https://github.com/facebookresearch/maskrnn-benchmark",
description="object detection in pytorch",
# packages=find_packages(exclude=("configs", "examples", "test",)),
# install_requires=requirements,
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| 2,055 | 28.371429 | 73 | py |
MaskTextSpotter | MaskTextSpotter-master/tools/test_net.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (do not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.engine.text_inference import inference
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.logging import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
parser.add_argument(
"--config-file",
default="./configs/seq.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
distributed = num_gpus > 1
if distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.deprecated.init_process_group(
backend="nccl", init_method="env://"
)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
save_dir = ""
logger = setup_logger("maskrcnn_benchmark", save_dir, get_rank())
logger.info("Using {} GPUs".format(num_gpus))
logger.info(cfg)
logger.info("Collecting env info (might take some time)")
logger.info("\n" + collect_env_info())
model = build_detection_model(cfg)
model.to(cfg.MODEL.DEVICE)
checkpointer = DetectronCheckpointer(cfg, model)
_ = checkpointer.load(cfg.MODEL.WEIGHT)
iou_types = ("bbox",)
if cfg.MODEL.MASK_ON:
iou_types = iou_types + ("segm",)
output_folders = [None] * len(cfg.DATASETS.TEST)
if cfg.OUTPUT_DIR:
dataset_names = cfg.DATASETS.TEST
for idx, dataset_name in enumerate(dataset_names):
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
mkdir(output_folder)
output_folders[idx] = output_folder
data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
model_name = cfg.MODEL.WEIGHT.split('/')[-1]
for output_folder, data_loader_val in zip(output_folders, data_loaders_val):
inference(
model,
data_loader_val,
iou_types=iou_types,
box_only=cfg.MODEL.RPN_ONLY,
device=cfg.MODEL.DEVICE,
expected_results=cfg.TEST.EXPECTED_RESULTS,
expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
output_folder=output_folder,
model_name=model_name,
cfg=cfg,
)
synchronize()
if __name__ == "__main__":
main()
| 3,340 | 33.802083 | 88 | py |
MaskTextSpotter | MaskTextSpotter-master/tools/demo.py | import os
import cv2
import torch
from torchvision import transforms as T
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.utils.chars import getstr_grid, get_tight_rect
from PIL import Image
import numpy as np
import argparse
class TextDemo(object):
def __init__(
self,
cfg,
confidence_threshold=0.7,
min_image_size=224,
output_polygon=True
):
self.cfg = cfg.clone()
self.model = build_detection_model(cfg)
self.model.eval()
self.device = torch.device(cfg.MODEL.DEVICE)
self.model.to(self.device)
self.min_image_size = min_image_size
checkpointer = DetectronCheckpointer(cfg, self.model)
_ = checkpointer.load(cfg.MODEL.WEIGHT)
self.transforms = self.build_transform()
self.cpu_device = torch.device("cpu")
self.confidence_threshold = confidence_threshold
self.output_polygon = output_polygon
def build_transform(self):
"""
Creates a basic transformation that was used to train the models
"""
cfg = self.cfg
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
to_bgr_transform = T.Lambda(lambda x: x * 255)
else:
to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])
normalize_transform = T.Normalize(
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
)
transform = T.Compose(
[
T.ToPILImage(),
T.Resize(self.min_image_size),
T.ToTensor(),
to_bgr_transform,
normalize_transform,
]
)
return transform
def run_on_opencv_image(self, image):
"""
Arguments:
image (np.ndarray): an image as returned by OpenCV
Returns:
result_polygons (list): detection results
result_words (list): recognition results
"""
result_polygons, result_words = self.compute_prediction(image)
return result_polygons, result_words
def compute_prediction(self, original_image):
# apply pre-processing to image
image = self.transforms(original_image)
# convert to an ImageList, padded so that it is divisible by
# cfg.DATALOADER.SIZE_DIVISIBILITY
image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
image_list = image_list.to(self.device)
# compute predictions
with torch.no_grad():
predictions = self.model(image_list)
global_predictions = predictions[0]
char_predictions = predictions[1]
char_mask = char_predictions['char_mask']
char_boxes = char_predictions['boxes']
words, rec_scores = self.process_char_mask(char_mask, char_boxes)
seq_words = char_predictions['seq_outputs']
seq_scores = char_predictions['seq_scores']
global_predictions = [o.to(self.cpu_device) for o in global_predictions]
# a single image is always passed at a time
global_prediction = global_predictions[0]
# reshape prediction (a BoxList) into the original image size
height, width = original_image.shape[:-1]
global_prediction = global_prediction.resize((width, height))
boxes = global_prediction.bbox.tolist()
scores = global_prediction.get_field("scores").tolist()
masks = global_prediction.get_field("mask").cpu().numpy()
result_polygons = []
result_words = []
for k, box in enumerate(boxes):
score = scores[k]
if score < self.confidence_threshold:
continue
box = list(map(int, box))
mask = masks[k,0,:,:]
polygon = self.mask2polygon(mask, box, original_image.shape, threshold=0.5, output_polygon=self.output_polygon)
if polygon is None:
polygon = [box[0], box[1], box[2], box[1], box[2], box[3], box[0], box[3]]
result_polygons.append(polygon)
word = words[k]
rec_score = rec_scores[k]
seq_word = seq_words[k]
seq_char_scores = seq_scores[k]
seq_score = sum(seq_char_scores) / float(len(seq_char_scores))
if seq_score > rec_score:
result_words.append(seq_word)
else:
result_words.append(word)
return result_polygons, result_words
def process_char_mask(self, char_masks, boxes, threshold=192):
texts, rec_scores = [], []
for index in range(char_masks.shape[0]):
box = list(boxes[index])
box = list(map(int, box))
text, rec_score, _, _ = getstr_grid(char_masks[index,:,:,:].copy(), box, threshold=threshold)
texts.append(text)
rec_scores.append(rec_score)
return texts, rec_scores
def mask2polygon(self, mask, box, im_size, threshold=0.5, output_polygon=True):
# mask 32*128
image_width, image_height = im_size[1], im_size[0]
box_h = box[3] - box[1]
box_w = box[2] - box[0]
cls_polys = (mask*255).astype(np.uint8)
poly_map = np.array(Image.fromarray(cls_polys).resize((box_w, box_h)))
poly_map = poly_map.astype(np.float32) / 255
poly_map=cv2.GaussianBlur(poly_map,(3,3),sigmaX=3)
ret, poly_map = cv2.threshold(poly_map,0.5,1,cv2.THRESH_BINARY)
if output_polygon:
SE1=cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
poly_map = cv2.erode(poly_map,SE1)
poly_map = cv2.dilate(poly_map, SE1)
poly_map = cv2.morphologyEx(poly_map,cv2.MORPH_CLOSE,SE1)
try:
_, contours, _ = cv2.findContours((poly_map * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
except ValueError:  # OpenCV 4.x returns (contours, hierarchy) instead of 3 values
contours, _ = cv2.findContours((poly_map * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
if len(contours)==0:
return None
max_area=0
max_cnt = contours[0]
for cnt in contours:
area=cv2.contourArea(cnt)
if area > max_area:
max_area = area
max_cnt = cnt
perimeter = cv2.arcLength(max_cnt,True)
epsilon = 0.01 * perimeter
approx = cv2.approxPolyDP(max_cnt,epsilon,True)
pts = approx.reshape((-1,2))
pts[:,0] = pts[:,0] + box[0]
pts[:,1] = pts[:,1] + box[1]
polygon = list(pts.reshape((-1,)))
polygon = list(map(int, polygon))
if len(polygon)<6:
return None
else:
SE1=cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
poly_map = cv2.erode(poly_map,SE1)
poly_map = cv2.dilate(poly_map, SE1)
poly_map = cv2.morphologyEx(poly_map,cv2.MORPH_CLOSE,SE1)
idy,idx=np.where(poly_map == 1)
xy=np.vstack((idx,idy))
xy=np.transpose(xy)
hull = cv2.convexHull(xy, clockwise=True)
#reverse order of points.
if hull is None:
return None
hull=hull[::-1]
#find minimum area bounding box.
rect = cv2.minAreaRect(hull)
corners = cv2.boxPoints(rect)
corners = np.array(corners, dtype="int")
pts = get_tight_rect(corners, box[0], box[1], image_height, image_width, 1)
polygon = [x * 1.0 for x in pts]
polygon = list(map(int, polygon))
return polygon
def visualization(self, image, polygons, words):
for polygon, word in zip(polygons, words):
pts = np.array(polygon, np.int32)
pts = pts.reshape((-1,1,2))
xmin = min(pts[:,0,0])
ymin = min(pts[:,0,1])
cv2.polylines(image,[pts],True,(0,0,255))
cv2.putText(image, word, (xmin, ymin), cv2.FONT_HERSHEY_COMPLEX, 1, (0,0,255), 2)
def main(args):
# update the config options with the config file
cfg.merge_from_file(args.config_file)
# manual override some options
# cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
text_demo = TextDemo(
cfg,
min_image_size=800,
confidence_threshold=0.7,
output_polygon=True
)
# load image and then run prediction
image = cv2.imread(args.image_path)
result_polygons, result_words = text_demo.run_on_opencv_image(image)
text_demo.visualization(image, result_polygons, result_words)
cv2.imwrite(args.visu_path, image)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='parameters for demo')
parser.add_argument("--config-file", type=str, default='configs/finetune.yaml')
parser.add_argument("--image_path", type=str, default='./demo_images/demo.jpg')
parser.add_argument("--visu_path", type=str, default='./demo_images/demo_results.jpg')
args = parser.parse_args()
main(args)
| 9,596 | 39.154812 | 123 | py |
MaskTextSpotter | MaskTextSpotter-master/tools/train_net.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
r"""
Basic training script for PyTorch
"""
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (no not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.solver import make_lr_scheduler
from maskrcnn_benchmark.solver import make_optimizer
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.engine.trainer import do_train
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.logging import setup_logger, Logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
def train(cfg, local_rank, distributed):
model = build_detection_model(cfg)
device = torch.device(cfg.MODEL.DEVICE)
model.to(device)
optimizer = make_optimizer(cfg, model)
scheduler = make_lr_scheduler(cfg, optimizer)
if distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[local_rank], output_device=local_rank,
# this should be removed if we update BatchNorm stats
broadcast_buffers=False,
)
arguments = {}
arguments["iteration"] = 0
output_dir = cfg.OUTPUT_DIR
save_to_disk = get_rank() == 0
checkpointer = DetectronCheckpointer(
cfg, model, optimizer, scheduler, output_dir, save_to_disk
)
extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT, resume=cfg.SOLVER.RESUME)
if cfg.SOLVER.RESUME:
arguments.update(extra_checkpoint_data)
data_loader = make_data_loader(
cfg,
is_train=True,
is_distributed=distributed,
start_iter=arguments["iteration"],
)
checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
tb_logger = Logger(cfg.OUTPUT_DIR)
do_train(
model,
data_loader,
optimizer,
scheduler,
checkpointer,
device,
checkpoint_period,
arguments,
tb_logger,
cfg,
)
return model
def test(cfg, model, distributed):
if distributed:
model = model.module
torch.cuda.empty_cache() # TODO check if it helps
iou_types = ("bbox",)
if cfg.MODEL.MASK_ON:
iou_types = iou_types + ("segm",)
output_folders = [None] * len(cfg.DATASETS.TEST)
if cfg.OUTPUT_DIR:
dataset_names = cfg.DATASETS.TEST
for idx, dataset_name in enumerate(dataset_names):
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
mkdir(output_folder)
output_folders[idx] = output_folder
data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
for output_folder, data_loader_val in zip(output_folders, data_loaders_val):
inference(
model,
data_loader_val,
iou_types=iou_types,
box_only=cfg.MODEL.RPN_ONLY,
device=cfg.MODEL.DEVICE,
expected_results=cfg.TEST.EXPECTED_RESULTS,
expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
output_folder=output_folder,
)
synchronize()
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
parser.add_argument(
"--config-file",
default="",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"--skip-test",
dest="skip_test",
help="Do not test the final model",
action="store_true",
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = num_gpus > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(
backend="nccl", init_method="env://"
)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
output_dir = cfg.OUTPUT_DIR
if output_dir:
mkdir(output_dir)
logger = setup_logger("maskrcnn_benchmark", output_dir, get_rank())
logger.info("Using {} GPUs".format(num_gpus))
logger.info(args)
logger.info("Collecting env info (might take some time)")
logger.info("\n" + collect_env_info())
logger.info("Loaded configuration file {}".format(args.config_file))
with open(args.config_file, "r") as cf:
config_str = "\n" + cf.read()
logger.info(config_str)
logger.info("Running with config:\n{}".format(cfg))
model = train(cfg, args.local_rank, args.distributed)
if not args.skip_test:
test(cfg, model, args.distributed)
if __name__ == "__main__":
main()
| 5,444 | 30.293103 | 89 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/solver/lr_scheduler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from bisect import bisect_right
import torch
# FIXME ideally this would be achieved with a CombinedLRScheduler,
# separating MultiStepLR with WarmupLR
# but the current LRScheduler design doesn't allow it
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
warmup_factor=1.0 / 3,
warmup_iters=500,
warmup_method="linear",
last_epoch=-1,
):
if not list(milestones) == sorted(milestones):
raise ValueError(
"Milestones should be a list of" " increasing integers. Got {}",
milestones,
)
if warmup_method not in ("constant", "linear"):
raise ValueError(
"Only 'constant' or 'linear' warmup_method accepted"
"got {}".format(warmup_method)
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = self.last_epoch / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr
* warmup_factor
* self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
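# --- Illustrative sketch (editor's addition, not part of the original file) ---
# During warmup the factor ramps linearly from warmup_factor to 1; afterwards
# the rate decays by gamma at every milestone (bisect_right counts how many
# milestones have passed). A short trace with base lr 0.1:
# 0.05 -> 0.075 -> 0.1, dropping to 0.01 after iteration 4 and 0.001 after 8.
# `_demo_warmup_schedule` is a hypothetical name.
def _demo_warmup_schedule():
    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = WarmupMultiStepLR(
        optimizer, milestones=[4, 8], gamma=0.1, warmup_factor=0.5, warmup_iters=2
    )
    lrs = []
    for _ in range(10):
        lrs.append(optimizer.param_groups[0]["lr"])
        optimizer.step()
        scheduler.step()
    return lrs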
| 1,810 | 33.169811 | 80 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/solver/build.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .lr_scheduler import WarmupMultiStepLR
def make_optimizer(cfg, model):
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = cfg.SOLVER.BASE_LR
weight_decay = cfg.SOLVER.WEIGHT_DECAY
if "bias" in key:
lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
if cfg.SOLVER.USE_ADAM:
# Use the configured base LR explicitly: after the loop above, `lr` still
# holds the value from the last parameter visited (possibly the bias LR).
optimizer = torch.optim.Adam(model.parameters(), lr=cfg.SOLVER.BASE_LR)
else:
optimizer = torch.optim.SGD(params, lr, momentum=cfg.SOLVER.MOMENTUM)
return optimizer
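# --- Illustrative sketch (editor's addition, not part of the original file) ---
# make_optimizer() above builds one parameter group per tensor so that biases
# can receive their own learning rate (BASE_LR * BIAS_LR_FACTOR) and weight
# decay. The same grouping idea reduced to two groups; `_demo_param_groups`
# and its default values are hypothetical.
def _demo_param_groups(model, base_lr=0.01, bias_lr_factor=2.0):
    biases = [p for n, p in model.named_parameters() if "bias" in n]
    weights = [p for n, p in model.named_parameters() if "bias" not in n]
    return torch.optim.SGD(
        [
            {"params": weights, "lr": base_lr, "weight_decay": 1e-4},
            {"params": biases, "lr": base_lr * bias_lr_factor, "weight_decay": 0.0},
        ],
        lr=base_lr,
        momentum=0.9,
    )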
def make_lr_scheduler(cfg, optimizer):
return WarmupMultiStepLR(
optimizer,
cfg.SOLVER.STEPS,
cfg.SOLVER.GAMMA,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
)
| 1,083 | 29.111111 | 79 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/config/paths_catalog.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Centralized catalog of paths."""
import os
def add_gt_path(data_dir, attrs):
if len(attrs)>1:
return os.path.join(data_dir,attrs[1])
else:
return None
class DatasetCatalog(object):
DATA_DIR = "datasets"
DATASETS = {
"coco_2014_train": (
"coco/train2014",
"coco/annotations/instances_train2014.json",
),
"coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
"coco_2014_minival": (
"coco/val2014",
"coco/annotations/instances_minival2014.json",
),
"coco_2014_valminusminival": (
"coco/val2014",
"coco/annotations/instances_valminusminival2014.json",
),
"icdar_2013_train":(
"icdar2013/train_images",
"icdar2013/train_gts",
),
"icdar_2013_test":(
"icdar2013/test_images",
"icdar2013/test_gts",
),
"icdar_2015_train": (
"icdar2015/train_images",
"icdar2015/train_gts",
),
"icdar_2015_test": (
"icdar2015/test_images",
# "icdar2015/test_gts",
),
"synthtext_train":(
"synthtext/train_images",
"synthtext/train_gts",
),
"synthtext_test": (
"synthtext/test_images",
"synthtext/test_gts",
),
"total_text_train": (
"total_text/train_images",
"total_text/train_gts",
),
"total_text_test": (
"total_text/test_images",
# "total_text/test_gts",
),
"scut-eng-char_train":(
"scut-eng-char/train_images",
"scut-eng-char/train_gts",
),
}
@staticmethod
def get(name):
if "coco" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
root=os.path.join(data_dir, attrs[0]),
ann_file=os.path.join(data_dir, attrs[1]),
)
return dict(
factory="COCODataset",
args=args,
)
elif "icdar_2013" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs=DatasetCatalog.DATASETS[name]
args=dict(
use_charann=True,
imgs_dir=os.path.join(data_dir,attrs[0]),
gts_dir=add_gt_path(data_dir,attrs),
)
return dict(
args=args,
factory="IcdarDataset",
)
elif "icdar_2015" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs=DatasetCatalog.DATASETS[name]
if len(attrs)>1:
gts_dir = os.path.join(data_dir,attrs[1])
else:
gts_dir = None
args=dict(
use_charann=False,
imgs_dir=os.path.join(data_dir,attrs[0]),
gts_dir=gts_dir,
)
return dict(
args=args,
factory="IcdarDataset",
)
elif "synthtext" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs=DatasetCatalog.DATASETS[name]
args=dict(
use_charann=True,
list_file_path=os.path.join(data_dir, 'synthtext/train_list.txt'),
imgs_dir=os.path.join(data_dir,attrs[0]),
gts_dir=add_gt_path(data_dir,attrs),
)
return dict(
args=args,
factory="SynthtextDataset",
)
elif "total_text" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
if len(attrs)>1:
gts_dir = os.path.join(data_dir,attrs[1])
else:
gts_dir = None
args = dict(
use_charann=False,
imgs_dir=os.path.join(data_dir, attrs[0]),
gts_dir=gts_dir,
)
return dict(
args=args,
factory="TotaltextDataset",
)
elif "scut-eng-char" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
use_charann=True,
imgs_dir=os.path.join(data_dir, attrs[0]),
gts_dir=os.path.join(data_dir, attrs[1]),
)
return dict(
args=args,
factory="ScutDataset",
)
raise RuntimeError("Dataset not available: {}".format(name))
class ModelCatalog(object):
S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron"
C2_IMAGENET_MODELS = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
"MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
"MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
"MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
"MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
"FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
}
C2_DETECTRON_SUFFIX = "output/train/{}coco_2014_train%3A{}coco_2014_valminusminival/generalized_rcnn/model_final.pkl"
C2_DETECTRON_MODELS = {
"35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW",
"35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I",
"35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7",
"36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ",
"35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB",
"35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC",
"35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT",
"36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI",
"37129812/e2e_mask_rcnn_X-152-32x8d-FPN-IN5k_1.44x": "09_35_36.8pzTQKYK",
# keypoints
"37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "08_42_54.kdzV35ao"
}
@staticmethod
def get(name):
if name.startswith("Caffe2Detectron/COCO"):
return ModelCatalog.get_c2_detectron_12_2017_baselines(name)
if name.startswith("ImageNetPretrained"):
return ModelCatalog.get_c2_imagenet_pretrained(name)
raise RuntimeError("model not present in the catalog {}".format(name))
@staticmethod
def get_c2_imagenet_pretrained(name):
prefix = ModelCatalog.S3_C2_DETECTRON_URL
name = name[len("ImageNetPretrained/"):]
name = ModelCatalog.C2_IMAGENET_MODELS[name]
url = "/".join([prefix, name])
return url
@staticmethod
def get_c2_detectron_12_2017_baselines(name):
# Detectron C2 models are stored following the structure
# prefix/<model_id>/2012_2017_baselines/<model_name>.yaml.<signature>/suffix
# we use as identifiers in the catalog Caffe2Detectron/COCO/<model_id>/<model_name>
prefix = ModelCatalog.S3_C2_DETECTRON_URL
        # fill the dataset placeholders in the suffix; keypoint models were
        # trained on the keypoints_* splits of COCO
        dataset_tag = "keypoints_" if "keypoint" in name else ""
        suffix = ModelCatalog.C2_DETECTRON_SUFFIX.format(dataset_tag, dataset_tag)
# remove identification prefix
name = name[len("Caffe2Detectron/COCO/"):]
# split in <model_id> and <model_name>
model_id, model_name = name.split("/")
# parsing to make it match the url address from the Caffe2 models
model_name = "{}.yaml".format(model_name)
signature = ModelCatalog.C2_DETECTRON_MODELS[name]
unique_name = ".".join([model_name, signature])
url = "/".join([prefix, model_id, "12_2017_baselines", unique_name, suffix])
return url
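# Usage sketch: both catalogs only assemble paths/URLs and never touch the
# filesystem or the network, so they can be exercised standalone; the entries
# below are keys from the dictionaries above.
if __name__ == "__main__":
    spec = DatasetCatalog.get("icdar_2015_train")
    print(spec["factory"], spec["args"]["imgs_dir"])
    # IcdarDataset datasets/icdar2015/train_images
    print(ModelCatalog.get("ImageNetPretrained/MSRA/R-50"))
    # https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl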
| 8,907 | 36.745763 | 121 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/layers/batch_norm.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
class FrozenBatchNorm2d(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters
are fixed
"""
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def forward(self, x):
scale = self.weight * self.running_var.rsqrt()
bias = self.bias - self.running_mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return x * scale + bias
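# Usage sketch: with freshly constructed buffers (zero mean, unit variance,
# identity affine) the frozen layer is a no-op; after loading pretrained
# statistics it reduces to a fixed per-channel affine transform, which is why
# it is safe to use with very small per-GPU batch sizes.
if __name__ == "__main__":
    bn = FrozenBatchNorm2d(3)
    x = torch.randn(2, 3, 4, 4)
    assert torch.allclose(bn(x), x)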
| 799 | 31 | 71 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/layers/roi_pool.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from maskrcnn_benchmark import _C
class _ROIPool(Function):
@staticmethod
def forward(ctx, input, roi, output_size, spatial_scale):
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.input_shape = input.size()
        output, argmax = _C.roi_pool_forward(
            input, roi, spatial_scale, ctx.output_size[0], ctx.output_size[1]
        )
ctx.save_for_backward(input, roi, argmax)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
input, rois, argmax = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
bs, ch, h, w = ctx.input_shape
grad_input = _C.roi_pool_backward(
grad_output,
input,
rois,
argmax,
spatial_scale,
output_size[0],
output_size[1],
bs,
ch,
h,
w,
)
return grad_input, None, None, None
roi_pool = _ROIPool.apply
class ROIPool(nn.Module):
def __init__(self, output_size, spatial_scale):
super(ROIPool, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
def forward(self, input, rois):
return roi_pool(input, rois, self.output_size, self.spatial_scale)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ")"
return tmpstr
| 1,855 | 28 | 74 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/layers/roi_align.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from maskrcnn_benchmark import _C
class _ROIAlign(Function):
@staticmethod
def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
ctx.save_for_backward(roi)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.sampling_ratio = sampling_ratio
ctx.input_shape = input.size()
        output = _C.roi_align_forward(
            input,
            roi,
            spatial_scale,
            ctx.output_size[0],
            ctx.output_size[1],
            sampling_ratio,
        )
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
rois, = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
sampling_ratio = ctx.sampling_ratio
bs, ch, h, w = ctx.input_shape
grad_input = _C.roi_align_backward(
grad_output,
rois,
spatial_scale,
output_size[0],
output_size[1],
bs,
ch,
h,
w,
sampling_ratio,
)
return grad_input, None, None, None, None
roi_align = _ROIAlign.apply
class ROIAlign(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio):
super(ROIAlign, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
def forward(self, input, rois):
return roi_align(
input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ")"
return tmpstr
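# Usage sketch: `rois` is an (N, 5) float tensor of (batch_index, x1, y1, x2,
# y2) boxes in input-image coordinates; `spatial_scale` maps them onto the
# feature map. Guarded because the compiled `_C` extension provides the CUDA
# kernel only when built with CUDA; the tensor shapes here are illustrative.
if __name__ == "__main__" and torch.cuda.is_available():
    pooler = ROIAlign(output_size=(7, 7), spatial_scale=0.25, sampling_ratio=2)
    feats = torch.randn(1, 256, 50, 50, device="cuda")
    rois = torch.tensor([[0.0, 10.0, 10.0, 100.0, 80.0]], device="cuda")
    print(pooler(feats, rois).shape)  # torch.Size([1, 256, 7, 7])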
| 2,110 | 29.594203 | 85 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/layers/smooth_l1_loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
# TODO maybe push this to nn?
def smooth_l1_loss(input, target, beta=1. / 9, size_average=True):
"""
very similar to the smooth_l1_loss from pytorch, but with
the extra beta parameter
"""
n = torch.abs(input - target)
cond = n < beta
loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)
if size_average:
return loss.mean()
return loss.sum()
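# Worked example: with the default beta = 1/9, residuals below beta fall in
# the quadratic branch (0.5 * n**2 / beta) and larger ones in the linear
# branch (n - 0.5 * beta); the two branches meet at n = beta with value
# beta / 2. The inputs below are illustrative.
if __name__ == "__main__":
    pred = torch.tensor([0.0, 0.0])
    target = torch.tensor([0.05, 2.0])
    total = smooth_l1_loss(pred, target, size_average=False)
    print(total)  # 0.5 * 0.05**2 / (1/9) + (2 - 0.5 / 9) ~= 1.9557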
| 481 | 27.352941 | 71 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/layers/_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import glob
import os.path
import torch
try:
from torch.utils.cpp_extension import load as load_ext
from torch.utils.cpp_extension import CUDA_HOME
except ImportError:
raise ImportError("The cpp layer extensions requires PyTorch 0.4 or higher")
def _load_C_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
this_dir = os.path.dirname(this_dir)
this_dir = os.path.join(this_dir, "csrc")
main_file = glob.glob(os.path.join(this_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(this_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(this_dir, "cuda", "*.cu"))
source = main_file + source_cpu
extra_cflags = []
if torch.cuda.is_available() and CUDA_HOME is not None:
source.extend(source_cuda)
extra_cflags = ["-DWITH_CUDA"]
source = [os.path.join(this_dir, s) for s in source]
extra_include_paths = [this_dir]
return load_ext(
"torchvision",
source,
extra_cflags=extra_cflags,
extra_include_paths=extra_include_paths,
)
_C = _load_C_extensions()
| 1,165 | 28.15 | 80 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/layers/misc.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
helper class that supports empty tensors on some nn functions.
Ideally, add support directly in PyTorch to empty tensors in
those functions.
This can be removed once https://github.com/pytorch/pytorch/issues/12013
is implemented
"""
import math
import torch
from torch.nn.modules.utils import _ntuple
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
class Conv2d(torch.nn.Conv2d):
def forward(self, x):
if x.numel() > 0:
return super(Conv2d, self).forward(x)
# get output shape
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // d + 1
for i, p, di, k, d in zip(
x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
)
]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
class ConvTranspose2d(torch.nn.ConvTranspose2d):
def forward(self, x):
if x.numel() > 0:
return super(ConvTranspose2d, self).forward(x)
# get output shape
output_shape = [
(i - 1) * d - 2 * p + (di * (k - 1) + 1) + op
for i, p, di, k, d, op in zip(
x.shape[-2:],
self.padding,
self.dilation,
self.kernel_size,
self.stride,
self.output_padding,
)
]
output_shape = [x.shape[0], self.bias.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
def interpolate(
input, size=None, scale_factor=None, mode="nearest", align_corners=None
):
if input.numel() > 0:
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
def _check_size_scale_factor(dim):
if size is None and scale_factor is None:
raise ValueError("either size or scale_factor should be defined")
if size is not None and scale_factor is not None:
raise ValueError("only one of size or scale_factor should be defined")
if (
scale_factor is not None
and isinstance(scale_factor, tuple)
and len(scale_factor) != dim
):
raise ValueError(
"scale_factor shape must match input shape. "
"Input is {}D, scale_factor size is {}".format(dim, len(scale_factor))
)
def _output_size(dim):
_check_size_scale_factor(dim)
if size is not None:
return size
scale_factors = _ntuple(dim)(scale_factor)
# math.floor might return float in py2.7
return [
int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim)
]
output_shape = tuple(_output_size(2))
output_shape = input.shape[:-2] + output_shape
return _NewEmptyTensorOp.apply(input, output_shape)
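# Usage sketch: the whole point of these wrappers is shape propagation through
# empty batches, e.g. when a detection head receives zero proposals for an
# image; the channel counts and sizes below are illustrative.
if __name__ == "__main__":
    conv = Conv2d(16, 32, kernel_size=3, padding=1)
    empty = torch.empty(0, 16, 14, 14)
    print(conv(empty).shape)  # torch.Size([0, 32, 14, 14])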
| 3,241 | 30.475728 | 88 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/layers/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .batch_norm import FrozenBatchNorm2d
from .misc import Conv2d
from .misc import ConvTranspose2d
from .misc import interpolate
from .nms import nms
from .roi_align import ROIAlign
from .roi_align import roi_align
from .roi_pool import ROIPool
from .roi_pool import roi_pool
from .smooth_l1_loss import smooth_l1_loss
__all__ = ["nms", "roi_align", "ROIAlign", "roi_pool", "ROIPool", "smooth_l1_loss", "Conv2d", "ConvTranspose2d", "interpolate", "FrozenBatchNorm2d"]
| 557 | 33.875 | 148 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/engine/inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import datetime
import logging
import tempfile
import time
import os
from collections import OrderedDict
import torch
from tqdm import tqdm
from ..structures.bounding_box import BoxList
from ..utils.comm import is_main_process
from ..utils.comm import scatter_gather
from ..utils.comm import synchronize
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
def compute_on_dataset(model, data_loader, device):
model.eval()
results_dict = {}
cpu_device = torch.device("cpu")
for i, batch in tqdm(enumerate(data_loader)):
images, targets, image_ids = batch
images = images.to(device)
with torch.no_grad():
output = model(images)
output = [o.to(cpu_device) for o in output]
results_dict.update(
{img_id: result for img_id, result in zip(image_ids, output)}
)
return results_dict
def prepare_for_coco_detection(predictions, dataset):
# assert isinstance(dataset, COCODataset)
coco_results = []
for image_id, prediction in enumerate(predictions):
original_id = dataset.id_to_img_map[image_id]
if len(prediction) == 0:
continue
# TODO replace with get_img_info?
image_width = dataset.coco.imgs[original_id]["width"]
image_height = dataset.coco.imgs[original_id]["height"]
prediction = prediction.resize((image_width, image_height))
prediction = prediction.convert("xywh")
boxes = prediction.bbox.tolist()
scores = prediction.get_field("scores").tolist()
labels = prediction.get_field("labels").tolist()
mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
coco_results.extend(
[
{
"image_id": original_id,
"category_id": mapped_labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def prepare_for_coco_segmentation(predictions, dataset):
import pycocotools.mask as mask_util
import numpy as np
masker = Masker(threshold=0.5, padding=1)
# assert isinstance(dataset, COCODataset)
coco_results = []
for image_id, prediction in tqdm(enumerate(predictions)):
original_id = dataset.id_to_img_map[image_id]
if len(prediction) == 0:
continue
# TODO replace with get_img_info?
image_width = dataset.coco.imgs[original_id]["width"]
image_height = dataset.coco.imgs[original_id]["height"]
prediction = prediction.resize((image_width, image_height))
masks = prediction.get_field("mask")
# t = time.time()
masks = masker(masks, prediction)
# logger.info('Time mask: {}'.format(time.time() - t))
# prediction = prediction.convert('xywh')
# boxes = prediction.bbox.tolist()
scores = prediction.get_field("scores").tolist()
labels = prediction.get_field("labels").tolist()
# rles = prediction.get_field('mask')
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
coco_results.extend(
[
{
"image_id": original_id,
"category_id": mapped_labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
# inspired from Detectron
def evaluate_box_proposals(
predictions, dataset, thresholds=None, area="all", limit=None
):
"""Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
"all": 0,
"small": 1,
"medium": 2,
"large": 3,
"96-128": 4,
"128-256": 5,
"256-512": 6,
"512-inf": 7,
}
area_ranges = [
[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2],
] # 512-inf
assert area in areas, "Unknown area range: {}".format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = []
num_pos = 0
for image_id, prediction in enumerate(predictions):
original_id = dataset.id_to_img_map[image_id]
# TODO replace with get_img_info?
image_width = dataset.coco.imgs[original_id]["width"]
image_height = dataset.coco.imgs[original_id]["height"]
prediction = prediction.resize((image_width, image_height))
# sort predictions in descending order
# TODO maybe remove this and make it explicit in the documentation
inds = prediction.get_field("objectness").sort(descending=True)[1]
prediction = prediction[inds]
ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
anno = dataset.coco.loadAnns(ann_ids)
gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert(
"xyxy"
)
gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
if len(gt_boxes) == 0:
continue
valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
gt_boxes = gt_boxes[valid_gt_inds]
num_pos += len(gt_boxes)
if len(gt_boxes) == 0:
continue
if len(prediction) == 0:
continue
if limit is not None and len(prediction) > limit:
prediction = prediction[:limit]
overlaps = boxlist_iou(prediction, gt_boxes)
_gt_overlaps = torch.zeros(len(gt_boxes))
for j in range(min(len(prediction), len(gt_boxes))):
# find which proposal box maximally covers each gt box
# and get the iou amount of coverage for each gt box
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ovr, gt_ind = max_overlaps.max(dim=0)
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps.append(_gt_overlaps)
gt_overlaps = torch.cat(gt_overlaps, dim=0)
gt_overlaps, _ = torch.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {
"ar": ar,
"recalls": recalls,
"thresholds": thresholds,
"gt_overlaps": gt_overlaps,
"num_pos": num_pos,
}
def evaluate_predictions_on_coco(
coco_gt, coco_results, json_result_file, iou_type="bbox"
):
import json
with open(json_result_file, "w") as f:
json.dump(coco_results, f)
from pycocotools.cocoeval import COCOeval
coco_dt = coco_gt.loadRes(str(json_result_file))
# coco_dt = coco_gt.loadRes(coco_results)
coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
all_predictions = scatter_gather(predictions_per_gpu)
if not is_main_process():
return
# merge the list of dicts
predictions = {}
for p in all_predictions:
predictions.update(p)
# convert a dict where the key is the index in a list
image_ids = list(sorted(predictions.keys()))
if len(image_ids) != image_ids[-1] + 1:
logger = logging.getLogger("maskrcnn_benchmark.inference")
logger.warning(
"Number of images that were gathered from multiple processes is not "
"a contiguous set. Some images might be missing from the evaluation"
)
# convert to a list
predictions = [predictions[i] for i in image_ids]
return predictions
class COCOResults(object):
METRICS = {
"bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"box_proposal": [
"AR@100",
"ARs@100",
"ARm@100",
"ARl@100",
"AR@1000",
"ARs@1000",
"ARm@1000",
"ARl@1000",
],
"keypoint": ["AP", "AP50", "AP75", "APm", "APl"],
}
def __init__(self, *iou_types):
allowed_types = ("box_proposal", "bbox", "segm")
assert all(iou_type in allowed_types for iou_type in iou_types)
results = OrderedDict()
for iou_type in iou_types:
results[iou_type] = OrderedDict(
[(metric, -1) for metric in COCOResults.METRICS[iou_type]]
)
self.results = results
def update(self, coco_eval):
if coco_eval is None:
return
from pycocotools.cocoeval import COCOeval
assert isinstance(coco_eval, COCOeval)
s = coco_eval.stats
iou_type = coco_eval.params.iouType
res = self.results[iou_type]
metrics = COCOResults.METRICS[iou_type]
for idx, metric in enumerate(metrics):
res[metric] = s[idx]
def __repr__(self):
# TODO make it pretty
return repr(self.results)
def check_expected_results(results, expected_results, sigma_tol):
if not expected_results:
return
logger = logging.getLogger("maskrcnn_benchmark.inference")
for task, metric, (mean, std) in expected_results:
actual_val = results.results[task][metric]
lo = mean - sigma_tol * std
hi = mean + sigma_tol * std
ok = (lo < actual_val) and (actual_val < hi)
msg = (
"{} > {} sanity check (actual vs. expected): "
"{:.3f} vs. mean={:.4f}, std={:.4}, range=({:.4f}, {:.4f})"
).format(task, metric, actual_val, mean, std, lo, hi)
if not ok:
msg = "FAIL: " + msg
logger.error(msg)
else:
msg = "PASS: " + msg
logger.info(msg)
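# Usage sketch: `expected_results` entries are (task, metric, (mean, std))
# triples; the check passes when the measured value lies within `sigma_tol`
# standard deviations of the stated mean. The numbers here are illustrative.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    res = COCOResults("bbox")
    res.results["bbox"]["AP"] = 0.386
    check_expected_results(res, [("bbox", "AP", (0.387, 0.003))], 4)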
def inference(
model,
data_loader,
iou_types=("bbox",),
box_only=False,
device="cuda",
expected_results=(),
expected_results_sigma_tol=4,
output_folder=None,
):
# convert to a torch.device for efficiency
device = torch.device(device)
    # torch.distributed.deprecated was removed in modern PyTorch; use the
    # current torch.distributed API
    num_devices = (
        torch.distributed.get_world_size()
        if torch.distributed.is_initialized()
        else 1
    )
logger = logging.getLogger("maskrcnn_benchmark.inference")
dataset = data_loader.dataset
logger.info("Start evaluation on {} images".format(len(dataset)))
start_time = time.time()
predictions = compute_on_dataset(model, data_loader, device)
# wait for all processes to complete before measuring the time
synchronize()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=total_time))
logger.info(
"Total inference time: {} ({} s / img per device, on {} devices)".format(
total_time_str, total_time * num_devices / len(dataset), num_devices
)
)
predictions = _accumulate_predictions_from_multiple_gpus(predictions)
if not is_main_process():
return
if output_folder:
torch.save(predictions, os.path.join(output_folder, "predictions.pth"))
if box_only:
logger.info("Evaluating bbox proposals")
areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
res = COCOResults("box_proposal")
for limit in [100, 1000]:
for area, suffix in areas.items():
stats = evaluate_box_proposals(
predictions, dataset, area=area, limit=limit
)
key = "AR{}@{:d}".format(suffix, limit)
res.results["box_proposal"][key] = stats["ar"].item()
logger.info(res)
check_expected_results(res, expected_results, expected_results_sigma_tol)
if output_folder:
torch.save(res, os.path.join(output_folder, "box_proposals.pth"))
return
logger.info("Preparing results for COCO format")
coco_results = {}
if "bbox" in iou_types:
logger.info("Preparing bbox results")
coco_results["bbox"] = prepare_for_coco_detection(predictions, dataset)
if "segm" in iou_types:
logger.info("Preparing segm results")
coco_results["segm"] = prepare_for_coco_segmentation(predictions, dataset)
results = COCOResults(*iou_types)
logger.info("Evaluating predictions")
for iou_type in iou_types:
with tempfile.NamedTemporaryFile() as f:
file_path = f.name
if output_folder:
file_path = os.path.join(output_folder, iou_type + ".json")
res = evaluate_predictions_on_coco(
dataset.coco, coco_results[iou_type], file_path, iou_type
)
results.update(res)
logger.info(results)
check_expected_results(results, expected_results, expected_results_sigma_tol)
if output_folder:
torch.save(results, os.path.join(output_folder, "coco_results.pth"))
| 14,699 | 33.265734 | 88 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/engine/text_inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import datetime
import logging
import tempfile
import time
import os
from collections import OrderedDict
import torch
from tqdm import tqdm
from ..structures.bounding_box import BoxList
from ..utils.comm import is_main_process
from ..utils.comm import scatter_gather
from ..utils.comm import synchronize
import torch.distributed as dist
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
# get_tight_rect is redefined locally below (it also clamps points to the
# image bounds), so only the helpers actually used are imported here
from maskrcnn_benchmark.utils.chars import getstr_grid, char2num
from PIL import Image, ImageDraw
import numpy as np
import cv2
import pickle
def compute_on_dataset(model, data_loader, device):
model.eval()
results_dict = {}
cpu_device = torch.device("cpu")
for i, batch in tqdm(enumerate(data_loader)):
images, targets, image_paths = batch
images = images.to(device)
with torch.no_grad():
predictions = model(images)
if predictions is not None:
global_predictions = predictions[0]
char_predictions = predictions[1]
char_mask = char_predictions['char_mask']
boxes = char_predictions['boxes']
seq_words = char_predictions['seq_outputs']
seq_scores = char_predictions['seq_scores']
detailed_seq_scores = char_predictions['detailed_seq_scores']
global_predictions = [o.to(cpu_device) for o in global_predictions]
results_dict.update(
{image_paths[0]: [global_predictions[0], char_mask, boxes, seq_words, seq_scores, detailed_seq_scores]}
)
return results_dict
def get_tight_rect(points, start_x, start_y, image_height, image_width, scale):
    # sort the four corners into (top-left, top-right, bottom-right,
    # bottom-left) order, map them back into original-image coordinates, and
    # clamp every coordinate to the image boundaries
points = list(points)
ps = sorted(points,key = lambda x:x[0])
if ps[1][1] > ps[0][1]:
px1 = ps[0][0] * scale + start_x
py1 = ps[0][1] * scale + start_y
px4 = ps[1][0] * scale + start_x
py4 = ps[1][1] * scale + start_y
else:
px1 = ps[1][0] * scale + start_x
py1 = ps[1][1] * scale + start_y
px4 = ps[0][0] * scale + start_x
py4 = ps[0][1] * scale + start_y
if ps[3][1] > ps[2][1]:
px2 = ps[2][0] * scale + start_x
py2 = ps[2][1] * scale + start_y
px3 = ps[3][0] * scale + start_x
py3 = ps[3][1] * scale + start_y
else:
px2 = ps[3][0] * scale + start_x
py2 = ps[3][1] * scale + start_y
px3 = ps[2][0] * scale + start_x
py3 = ps[2][1] * scale + start_y
if px1<0:
px1=1
if px1>image_width:
px1 = image_width - 1
if px2<0:
px2=1
if px2>image_width:
px2 = image_width - 1
if px3<0:
px3=1
if px3>image_width:
px3 = image_width - 1
if px4<0:
px4=1
if px4>image_width:
px4 = image_width - 1
if py1<0:
py1=1
if py1>image_height:
py1 = image_height - 1
if py2<0:
py2=1
if py2>image_height:
py2 = image_height - 1
if py3<0:
py3=1
if py3>image_height:
py3 = image_height - 1
if py4<0:
py4=1
if py4>image_height:
py4 = image_height - 1
return [px1, py1, px2, py2, px3, py3, px4, py4]
def mask2polygon(mask, box, im_size, threshold=0.5, output_folder=None):
# mask 32*128
image_width, image_height = im_size[0], im_size[1]
box_h = box[3] - box[1]
box_w = box[2] - box[0]
cls_polys = (mask*255).astype(np.uint8)
poly_map = np.array(Image.fromarray(cls_polys).resize((box_w, box_h)))
poly_map = poly_map.astype(np.float32) / 255
    poly_map = cv2.GaussianBlur(poly_map, (3, 3), sigmaX=3)
    ret, poly_map = cv2.threshold(poly_map, 0.5, 1, cv2.THRESH_BINARY)
if 'total_text' in output_folder or 'cute80' in output_folder:
        SE1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        poly_map = cv2.erode(poly_map, SE1)
        poly_map = cv2.dilate(poly_map, SE1)
        poly_map = cv2.morphologyEx(poly_map, cv2.MORPH_CLOSE, SE1)
        try:
            # OpenCV 3.x returns (image, contours, hierarchy)
            _, contours, _ = cv2.findContours((poly_map * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        except ValueError:
            # OpenCV 4.x returns (contours, hierarchy)
            contours, _ = cv2.findContours((poly_map * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
if len(contours)==0:
print(contours)
print(len(contours))
return None
max_area=0
max_cnt = contours[0]
for cnt in contours:
area=cv2.contourArea(cnt)
if area > max_area:
max_area = area
max_cnt = cnt
perimeter = cv2.arcLength(max_cnt,True)
epsilon = 0.01*cv2.arcLength(max_cnt,True)
approx = cv2.approxPolyDP(max_cnt,epsilon,True)
pts = approx.reshape((-1,2))
pts[:,0] = pts[:,0] + box[0]
pts[:,1] = pts[:,1] + box[1]
polygon = list(pts.reshape((-1,)))
polygon = list(map(int, polygon))
if len(polygon)<6:
return None
else:
        SE1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        poly_map = cv2.erode(poly_map, SE1)
        poly_map = cv2.dilate(poly_map, SE1)
        poly_map = cv2.morphologyEx(poly_map, cv2.MORPH_CLOSE, SE1)
idy,idx=np.where(poly_map == 1)
xy=np.vstack((idx,idy))
xy=np.transpose(xy)
hull = cv2.convexHull(xy, clockwise=True)
#reverse order of points.
if hull is None:
return None
hull=hull[::-1]
#find minimum area bounding box.
rect = cv2.minAreaRect(hull)
corners = cv2.boxPoints(rect)
corners = np.array(corners, dtype="int")
pts = get_tight_rect(corners, box[0], box[1], image_height, image_width, 1)
polygon = [x * 1.0 for x in pts]
polygon = list(map(int, polygon))
return polygon
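# Usage sketch: mask2polygon upsamples a soft mask to its detection box,
# binarises it and returns a flat [x1, y1, x2, y2, ...] polygon in image
# coordinates. The all-ones mask below is synthetic, so the returned polygon
# should roughly hug the box itself; the paths and sizes are illustrative.
if __name__ == "__main__":
    fake_mask = np.ones((32, 128), dtype=np.float32)
    poly = mask2polygon(fake_mask, box=[10, 20, 138, 52], im_size=(640, 480),
                        output_folder="output/icdar2015")
    print(poly)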
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
all_predictions = scatter_gather(predictions_per_gpu)
if not is_main_process():
return
# merge the list of dicts
predictions = {}
for p in all_predictions:
predictions.update(p)
return predictions
def format_output(out_dir, boxes, img_name):
res = open(os.path.join(out_dir, 'res_' + img_name.split('.')[0] + '.txt'), 'wt')
## char score save dir
ssur_name = os.path.join(out_dir, 'res_' + img_name.split('.')[0])
for i, box in enumerate(boxes):
save_name = ssur_name + '_' + str(i) + '.pkl'
save_dict = {}
if 'total_text' in out_dir or 'cute80' in out_dir:
# np.save(save_name, box[-2])
save_dict['seg_char_scores'] = box[-3]
save_dict['seq_char_scores'] = box[-2]
box = ','.join([str(x) for x in box[:4]]) + ';' + ','.join([str(x) for x in box[4:4+int(box[-1])]]) + ';' + ','.join([str(x) for x in box[4+int(box[-1]):-3]]) + ',' + save_name
else:
save_dict['seg_char_scores'] = box[-2]
save_dict['seq_char_scores'] = box[-1]
np.save(save_name, box[-1])
box = ','.join([str(x) for x in box[:-2]]) + ',' + save_name
with open(save_name,'wb') as f:
pickle.dump(save_dict, f, protocol = 2)
res.write(box + '\n')
res.close()
def process_char_mask(char_masks, boxes, threshold=192):
texts, rec_scores, rec_char_scores, char_polygons = [], [], [], []
for index in range(char_masks.shape[0]):
box = list(boxes[index])
box = list(map(int, box))
text, rec_score, rec_char_score, char_polygon = getstr_grid(char_masks[index,:,:,:].copy(), box, threshold=threshold)
texts.append(text)
rec_scores.append(rec_score)
rec_char_scores.append(rec_char_score)
char_polygons.append(char_polygon)
# segmss.append(segms)
return texts, rec_scores, rec_char_scores, char_polygons
def creat_color_map(n_class, width):
splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
maps = []
for i in range(splits):
r = int(i * width * 1.0 / (splits-1))
for j in range(splits):
g = int(j * width * 1.0 / (splits-1))
for k in range(splits-1):
b = int(k * width * 1.0 / (splits-1))
maps.append((r, g, b, 200))
return maps
def visualization(image, polygons, char_polygons, words, resize_ratio, colors):
draw = ImageDraw.Draw(image, 'RGBA')
for polygon in polygons:
draw.polygon(polygon, fill=None, outline=(0, 255, 0, 255))
for i, char_polygon in enumerate(char_polygons):
for j, polygon in enumerate(char_polygon):
polygon = [int(x*resize_ratio) for x in polygon]
char = words[i][j]
color = colors[char2num(char)]
draw.polygon(polygon, fill=color, outline=color)
def prepare_results_for_evaluation(predictions, output_folder, model_name, vis=False):
results_dir = os.path.join(output_folder, model_name+'_results')
if not os.path.isdir(results_dir):
os.mkdir(results_dir)
if vis:
visu_dir = os.path.join(output_folder, model_name+'_visu')
if not os.path.isdir(visu_dir):
os.mkdir(visu_dir)
for image_path, prediction in predictions.items():
im_name = image_path.split('/')[-1]
global_prediction, char_mask, boxes_char, seq_words, seq_scores, detailed_seq_scores = prediction[0], prediction[1], prediction[2], prediction[3], prediction[4], prediction[5]
words, rec_scores, rec_char_scoress, char_polygons = process_char_mask(char_mask, boxes_char)
test_image_width, test_image_height = global_prediction.size
img = Image.open(image_path)
width, height = img.size
resize_ratio = float(height) / test_image_height
global_prediction = global_prediction.resize((width, height))
boxes = global_prediction.bbox.tolist()
scores = global_prediction.get_field("scores").tolist()
masks = global_prediction.get_field("mask").cpu().numpy()
result_logs = []
polygons = []
for k, box in enumerate(boxes):
box = list(map(int, box))
mask = masks[k,0,:,:]
polygon = mask2polygon(mask, box, img.size, threshold=0.5, output_folder=output_folder)
if polygon is None:
polygon = [box[0], box[1], box[2], box[1], box[2], box[3], box[0], box[3]]
polygons.append(polygon)
score = scores[k]
word = words[k]
rec_score = rec_scores[k]
char_score = rec_char_scoress[k]
seq_word = seq_words[k]
seq_char_scores = seq_scores[k]
seq_score = sum(seq_char_scores) / float(len(seq_char_scores))
detailed_seq_score = detailed_seq_scores[k]
detailed_seq_score = np.squeeze(np.array(detailed_seq_score), axis=1)
if 'total_text' in output_folder or 'cute80' in output_folder:
result_log = [int(x * 1.0) for x in box[:4]] + polygon + [word] + [seq_word] + [score] + [rec_score] + [seq_score] + [char_score] + [detailed_seq_score] + [len(polygon)]
else:
result_log = [int(x * 1.0) for x in box[:4]] + polygon + [word] + [seq_word] + [score] + [rec_score] + [seq_score] + [char_score] + [detailed_seq_score]
result_logs.append(result_log)
if vis:
colors = creat_color_map(37, 255)
visualization(img, polygons, char_polygons, words, resize_ratio, colors)
img.save(os.path.join(visu_dir,im_name))
format_output(results_dir, result_logs, im_name)
def inference(
model,
data_loader,
iou_types=("bbox",),
box_only=False,
device="cuda",
expected_results=(),
expected_results_sigma_tol=4,
output_folder=None,
model_name=None,
cfg=None
):
# convert to a torch.device for efficiency
model_name = model_name.split('.')[0] + '_' + str(cfg.INPUT.MIN_SIZE_TEST)
predictions_path = os.path.join(output_folder, model_name + '_predictions.pth')
if os.path.isfile(predictions_path):
predictions = torch.load(predictions_path)
else:
device = torch.device(device)
num_devices = (
dist.get_world_size()
if dist.is_initialized()
else 1
)
logger = logging.getLogger("maskrcnn_benchmark.inference")
dataset = data_loader.dataset
logger.info("Start evaluation on {} images".format(len(dataset)))
start_time = time.time()
predictions = compute_on_dataset(model, data_loader, device)
# wait for all processes to complete before measuring the time
# synchronize()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=total_time))
logger.info(
"Total inference time: {} ({} s / img per device, on {} devices)".format(
total_time_str, total_time * num_devices / len(dataset), num_devices
)
)
# predictions = _accumulate_predictions_from_multiple_gpus(predictions)
# if not is_main_process():
# return
if output_folder:
torch.save(predictions, predictions_path)
prepare_results_for_evaluation(predictions, output_folder, model_name, vis=cfg.TEST.VIS) | 15,120 | 36.992462 | 203 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/engine/trainer.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import datetime
import logging
import time
import torch
import torch.distributed as dist
from maskrcnn_benchmark.utils.comm import get_world_size, is_main_process
from maskrcnn_benchmark.utils.metric_logger import MetricLogger
def reduce_loss_dict(loss_dict):
"""
Reduce the loss dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
loss_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return loss_dict
with torch.no_grad():
loss_names = []
all_losses = []
for k, v in loss_dict.items():
loss_names.append(k)
all_losses.append(v)
all_losses = torch.stack(all_losses, dim=0)
dist.reduce(all_losses, dst=0)
if dist.get_rank() == 0:
# only main process gets accumulated, so only divide by
# world_size in this case
all_losses /= world_size
reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}
return reduced_losses
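# Usage sketch: in single-process runs reduce_loss_dict is the identity, so
# logging code can call it unconditionally; the loss names are illustrative.
if __name__ == "__main__":
    fake_losses = {"loss_cls": torch.tensor(0.7), "loss_box": torch.tensor(0.3)}
    print(reduce_loss_dict(fake_losses))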
def do_train(
model,
data_loader,
optimizer,
scheduler,
checkpointer,
device,
checkpoint_period,
arguments,
tb_logger,
cfg,
):
logger = logging.getLogger("maskrcnn_benchmark.trainer")
logger.info("Start training")
meters = MetricLogger(delimiter=" ")
max_iter = len(data_loader)
start_iter = arguments["iteration"]
model.train()
start_training_time = time.time()
end = time.time()
for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
data_time = time.time() - end
arguments["iteration"] = iteration
scheduler.step()
images = images.to(device)
targets = [target.to(device) for target in targets]
loss_dict = model(images, targets)
losses = sum(loss for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = reduce_loss_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
meters.update(loss=losses_reduced, **loss_dict_reduced)
optimizer.zero_grad()
losses.backward()
if cfg.SOLVER.USE_ADAM:
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optimizer.step()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
eta_seconds = meters.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if iteration % 20 == 0 or iteration == (max_iter - 1):
logger.info(
meters.delimiter.join(
[
"eta: {eta}",
"iter: {iter}",
"{meters}",
"lr: {lr:.6f}",
"max mem: {memory:.0f}",
]
).format(
eta=eta_string,
iter=iteration,
meters=str(meters),
lr=optimizer.param_groups[0]["lr"],
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
)
)
if is_main_process():
for tag, value in loss_dict_reduced.items():
tb_logger.scalar_summary(tag, value.item(), iteration)
if iteration % checkpoint_period == 0 and iteration > 0:
checkpointer.save("model_{:07d}".format(iteration), **arguments)
checkpointer.save("model_{:07d}".format(iteration), **arguments)
total_training_time = time.time() - start_training_time
total_time_str = str(datetime.timedelta(seconds=total_training_time))
logger.info(
"Total training time: {} ({:.4f} s / it)".format(
total_time_str, total_training_time / (max_iter)
)
)
| 4,044 | 32.991597 | 79 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/utils/c2_model_loading.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import pickle
from collections import OrderedDict
import torch
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
def _rename_basic_resnet_weights(layer_keys):
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [k.replace(".w", ".weight") for k in layer_keys]
layer_keys = [k.replace(".bn", "_bn") for k in layer_keys]
layer_keys = [k.replace(".b", ".bias") for k in layer_keys]
layer_keys = [k.replace("_bn.s", "_bn.scale") for k in layer_keys]
layer_keys = [k.replace(".biasranch", ".branch") for k in layer_keys]
layer_keys = [k.replace("bbox.pred", "bbox_pred") for k in layer_keys]
layer_keys = [k.replace("cls.score", "cls_score") for k in layer_keys]
layer_keys = [k.replace("res.conv1_", "conv1_") for k in layer_keys]
# RPN / Faster RCNN
layer_keys = [k.replace(".biasbox", ".bbox") for k in layer_keys]
layer_keys = [k.replace("conv.rpn", "rpn.conv") for k in layer_keys]
layer_keys = [k.replace("rpn.bbox.pred", "rpn.bbox_pred") for k in layer_keys]
layer_keys = [k.replace("rpn.cls.logits", "rpn.cls_logits") for k in layer_keys]
    # Affine-Channel -> BatchNorm renaming
layer_keys = [k.replace("_bn.scale", "_bn.weight") for k in layer_keys]
# Make torchvision-compatible
layer_keys = [k.replace("conv1_bn.", "bn1.") for k in layer_keys]
layer_keys = [k.replace("res2.", "layer1.") for k in layer_keys]
layer_keys = [k.replace("res3.", "layer2.") for k in layer_keys]
layer_keys = [k.replace("res4.", "layer3.") for k in layer_keys]
layer_keys = [k.replace("res5.", "layer4.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2a_bn.", ".bn1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2b_bn.", ".bn2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
layer_keys = [k.replace(".branch2c_bn.", ".bn3.") for k in layer_keys]
layer_keys = [k.replace(".branch1.", ".downsample.0.") for k in layer_keys]
layer_keys = [k.replace(".branch1_bn.", ".downsample.1.") for k in layer_keys]
return layer_keys
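# Worked example: tracing one Caffe2 blob name through the substitutions
# above, "res2_0_branch2a_w" becomes "res2.0.branch2a.weight" (separator and
# suffix rules) and then "layer1.0.conv1.weight" (torchvision stage/block
# naming). The odd ".biasranch" -> ".branch" rule undoes the ".b" -> ".bias"
# substitution where it fires inside "branch".
if __name__ == "__main__":
    print(_rename_basic_resnet_weights(["res2_0_branch2a_w"]))
    # ['layer1.0.conv1.weight']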
def _rename_fpn_weights(layer_keys, stage_names):
for mapped_idx, stage_name in enumerate(stage_names, 1):
suffix = ""
if mapped_idx < 4:
suffix = ".lateral"
layer_keys = [
k.replace("fpn.inner.layer{}.sum{}".format(stage_name, suffix), "fpn_inner{}".format(mapped_idx)) for k in layer_keys
]
layer_keys = [k.replace("fpn.layer{}.sum".format(stage_name), "fpn_layer{}".format(mapped_idx)) for k in layer_keys]
layer_keys = [k.replace("rpn.conv.fpn2", "rpn.conv") for k in layer_keys]
layer_keys = [k.replace("rpn.bbox_pred.fpn2", "rpn.bbox_pred") for k in layer_keys]
layer_keys = [
k.replace("rpn.cls_logits.fpn2", "rpn.cls_logits") for k in layer_keys
]
return layer_keys
def _rename_weights_for_resnet(weights, stage_names):
original_keys = sorted(weights.keys())
layer_keys = sorted(weights.keys())
# for X-101, rename output to fc1000 to avoid conflicts afterwards
layer_keys = [k if k != "pred_b" else "fc1000_b" for k in layer_keys]
layer_keys = [k if k != "pred_w" else "fc1000_w" for k in layer_keys]
# performs basic renaming: _ -> . , etc
layer_keys = _rename_basic_resnet_weights(layer_keys)
# FPN
layer_keys = _rename_fpn_weights(layer_keys, stage_names)
# Mask R-CNN
layer_keys = [k.replace("mask.fcn.logits", "mask_fcn_logits") for k in layer_keys]
layer_keys = [k.replace(".[mask].fcn", "mask_fcn") for k in layer_keys]
layer_keys = [k.replace("conv5.mask", "conv5_mask") for k in layer_keys]
# Keypoint R-CNN
layer_keys = [k.replace("kps.score.lowres", "kps_score_lowres") for k in layer_keys]
layer_keys = [k.replace("kps.score", "kps_score") for k in layer_keys]
layer_keys = [k.replace("conv.fcn", "conv_fcn") for k in layer_keys]
# Rename for our RPN structure
layer_keys = [k.replace("rpn.", "rpn.head.") for k in layer_keys]
key_map = {k: v for k, v in zip(original_keys, layer_keys)}
logger = logging.getLogger(__name__)
logger.info("Remapping C2 weights")
max_c2_key_size = max([len(k) for k in original_keys if "_momentum" not in k])
new_weights = OrderedDict()
for k in original_keys:
v = weights[k]
if "_momentum" in k:
continue
# if 'fc1000' in k:
# continue
w = torch.from_numpy(v)
# if "bn" in k:
# w = w.view(1, -1, 1, 1)
logger.info("C2 name: {: <{}} mapped name: {}".format(k, max_c2_key_size, key_map[k]))
new_weights[key_map[k]] = w
return new_weights
def _load_c2_pickled_weights(file_path):
with open(file_path, "rb") as f:
data = pickle.load(f, encoding="latin1")
if "blobs" in data:
weights = data["blobs"]
else:
weights = data
return weights
_C2_STAGE_NAMES = {
"R-50": ["1.2", "2.3", "3.5", "4.2"],
"R-101": ["1.2", "2.3", "3.22", "4.2"],
}
def load_c2_format(cfg, f):
# TODO make it support other architectures
state_dict = _load_c2_pickled_weights(f)
conv_body = cfg.MODEL.BACKBONE.CONV_BODY
arch = conv_body.replace("-C4", "").replace("-FPN", "")
stages = _C2_STAGE_NAMES[arch]
state_dict = _rename_weights_for_resnet(state_dict, stages)
return dict(model=state_dict)
| 5,667 | 38.636364 | 129 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/utils/metric_logger.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import defaultdict
from collections import deque
import torch
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20):
self.deque = deque(maxlen=window_size)
self.series = []
self.total = 0.0
self.count = 0
def update(self, value):
self.deque.append(value)
self.series.append(value)
self.count += 1
self.total += value
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque))
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
    def __getattr__(self, attr):
        if attr in self.meters:
            return self.meters[attr]
        # fall back to the default lookup so that missing attributes raise a
        # proper AttributeError (object has no __getattr__ method to delegate to)
        return object.__getattribute__(self, attr)
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {:.4f} ({:.4f})".format(name, meter.median, meter.global_avg)
)
return self.delimiter.join(loss_str)
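# Usage sketch: each keyword passed to update() gets its own SmoothedValue,
# and the string form reports "median (global average)" per metric; the
# metric names and values below are illustrative.
if __name__ == "__main__":
    meters = MetricLogger(delimiter="  ")
    for step in range(5):
        meters.update(loss=1.0 / (step + 1), time=0.1 * step)
    print(str(meters))
    print(meters.loss.global_avg)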
| 1,714 | 25.796875 | 82 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/utils/checkpoint.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import os
import torch
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
from maskrcnn_benchmark.utils.c2_model_loading import load_c2_format
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.model_zoo import cache_url
class Checkpointer(object):
def __init__(
self,
model,
optimizer=None,
scheduler=None,
save_dir="",
save_to_disk=None,
logger=None,
):
self.model = model
self.optimizer = optimizer
self.scheduler = scheduler
self.save_dir = save_dir
self.save_to_disk = save_to_disk
if logger is None:
logger = logging.getLogger(__name__)
self.logger = logger
def save(self, name, **kwargs):
if not self.save_dir:
return
if not self.save_to_disk:
return
data = {}
data["model"] = self.model.state_dict()
if self.optimizer is not None:
data["optimizer"] = self.optimizer.state_dict()
if self.scheduler is not None:
data["scheduler"] = self.scheduler.state_dict()
data.update(kwargs)
save_file = os.path.join(self.save_dir, "{}.pth".format(name))
self.logger.info("Saving checkpoint to {}".format(save_file))
torch.save(data, save_file)
self.tag_last_checkpoint(save_file)
def load(self, f=None, resume=False):
if self.has_checkpoint():
# override argument with existing checkpoint
f = self.get_checkpoint_file()
if not f:
# no checkpoint could be found
self.logger.info("No checkpoint found. Initializing model from scratch")
return {}
self.logger.info("Loading checkpoint from {}".format(f))
checkpoint = self._load_file(f)
self._load_model(checkpoint)
if resume:
if "optimizer" in checkpoint and self.optimizer:
self.logger.info("Loading optimizer from {}".format(f))
self.optimizer.load_state_dict(checkpoint.pop("optimizer"))
if "scheduler" in checkpoint and self.scheduler:
self.logger.info("Loading scheduler from {}".format(f))
self.scheduler.load_state_dict(checkpoint.pop("scheduler"))
# return any further checkpoint data
return checkpoint
def has_checkpoint(self):
save_file = os.path.join(self.save_dir, "last_checkpoint")
return os.path.exists(save_file)
def get_checkpoint_file(self):
save_file = os.path.join(self.save_dir, "last_checkpoint")
try:
with open(save_file, "r") as f:
last_saved = f.read()
except IOError:
# if file doesn't exist, maybe because it has just been
# deleted by a separate process
last_saved = ""
return last_saved
def tag_last_checkpoint(self, last_filename):
save_file = os.path.join(self.save_dir, "last_checkpoint")
with open(save_file, "w") as f:
f.write(last_filename)
def _load_file(self, f):
return torch.load(f, map_location=torch.device("cpu"))
def _load_model(self, checkpoint):
load_state_dict(self.model, checkpoint.pop("model"))
class DetectronCheckpointer(Checkpointer):
def __init__(
self,
cfg,
model,
optimizer=None,
scheduler=None,
save_dir="",
save_to_disk=None,
logger=None,
):
super(DetectronCheckpointer, self).__init__(
model, optimizer, scheduler, save_dir, save_to_disk, logger
)
self.cfg = cfg.clone()
def _load_file(self, f):
# catalog lookup
if f.startswith("catalog://"):
paths_catalog = import_file(
"maskrcnn_benchmark.config.paths_catalog", self.cfg.PATHS_CATALOG, True
)
catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://") :])
self.logger.info("{} points to {}".format(f, catalog_f))
f = catalog_f
# download url files
if f.startswith("http"):
# if the file is a url path, download it and cache it
cached_f = cache_url(f)
self.logger.info("url {} cached in {}".format(f, cached_f))
f = cached_f
# convert Caffe2 checkpoint from pkl
if f.endswith(".pkl"):
return load_c2_format(self.cfg, f)
# load native detectron.pytorch checkpoint
loaded = super(DetectronCheckpointer, self)._load_file(f)
if "model" not in loaded:
loaded = dict(model=loaded)
return loaded
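# Usage sketch: a save/load round trip with the plain Checkpointer; the
# Detectron variant only adds catalog://, URL and Caffe2-.pkl resolution on
# top of this. The model and filenames below are illustrative.
if __name__ == "__main__":
    import tempfile
    model = torch.nn.Linear(4, 2)
    with tempfile.TemporaryDirectory() as run_dir:
        saver = Checkpointer(model, save_dir=run_dir, save_to_disk=True)
        saver.save("model_0000000", iteration=0)
        extra = saver.load()  # follows the last_checkpoint tag written above
        print(extra["iteration"])  # 0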
| 4,813 | 33.385714 | 87 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/utils/comm.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import os
import pickle
import tempfile
import time
import torch
import torch.distributed as dist
def get_world_size():
if not dist.is_initialized():
return 1
return dist.get_world_size()
def is_main_process():
if not dist.is_initialized():
return True
return dist.get_rank() == 0
def get_rank():
if not dist.is_initialized():
return 0
return dist.get_rank()
def synchronize():
"""
Helper function to synchronize between multiple processes when
using distributed training
"""
if not dist.is_initialized():
return
world_size = dist.get_world_size()
rank = dist.get_rank()
if world_size == 1:
return
def _send_and_wait(r):
if rank == r:
tensor = torch.tensor(0, device="cuda")
else:
tensor = torch.tensor(1, device="cuda")
dist.broadcast(tensor, r)
while tensor.item() == 1:
time.sleep(1)
_send_and_wait(0)
# now sync on the main process
_send_and_wait(1)
def _encode(encoded_data, data):
# gets a byte representation for the data
encoded_bytes = pickle.dumps(data)
# convert this byte string into a byte tensor
storage = torch.ByteStorage.from_buffer(encoded_bytes)
tensor = torch.ByteTensor(storage).to("cuda")
# encoding: first byte is the size and then rest is the data
s = tensor.numel()
assert s <= 255, "Can't encode data greater than 255 bytes"
# put the encoded data in encoded_data
encoded_data[0] = s
encoded_data[1 : (s + 1)] = tensor
def _decode(encoded_data):
size = encoded_data[0]
encoded_tensor = encoded_data[1 : (size + 1)].to("cpu")
return pickle.loads(bytearray(encoded_tensor.tolist()))
# TODO try to use tensor in shared-memory instead of serializing to disk
# this involves getting the all_gather to work
def scatter_gather(data):
"""
This function gathers data from multiple processes, and returns them
in a list, as they were obtained from each process.
This function is useful for retrieving data from multiple processes,
when launching the code with torch.distributed.launch
Note: this function is slow and should not be used in tight loops, i.e.,
do not use it in the training loop.
Arguments:
data: the object to be gathered from multiple processes.
It must be serializable
Returns:
result (list): a list with as many elements as there are processes,
where each element i in the list corresponds to the data that was
gathered from the process of rank i.
"""
# strategy: the main process creates a temporary directory, and communicates
# the location of the temporary directory to all other processes.
# each process will then serialize the data to the folder defined by
# the main process, and then the main process reads all of the serialized
# files and returns them in a list
if not dist.is_initialized():
return [data]
synchronize()
# get rank of the current process
rank = dist.get_rank()
# the data to communicate should be small
data_to_communicate = torch.empty(256, dtype=torch.uint8, device="cuda")
if rank == 0:
# manually creates a temporary directory, that needs to be cleaned
# afterwards
tmp_dir = tempfile.mkdtemp()
_encode(data_to_communicate, tmp_dir)
synchronize()
# the main process (rank=0) communicates the data to all processes
dist.broadcast(data_to_communicate, 0)
# get the data that was communicated
tmp_dir = _decode(data_to_communicate)
# each process serializes to a different file
file_template = "file{}.pth"
tmp_file = os.path.join(tmp_dir, file_template.format(rank))
torch.save(data, tmp_file)
# synchronize before loading the data
synchronize()
# only the master process returns the data
if rank == 0:
data_list = []
world_size = dist.get_world_size()
for r in range(world_size):
file_path = os.path.join(tmp_dir, file_template.format(r))
d = torch.load(file_path)
data_list.append(d)
# cleanup
os.remove(file_path)
# cleanup
os.rmdir(tmp_dir)
return data_list
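# Hedged usage sketch (illustrative, not part of the original file): gathering
# per-process results after distributed evaluation. `predictions` stands in for
# any picklable object produced by each rank.
#
#     all_predictions = scatter_gather(predictions)
#     if is_main_process():          # only rank 0 receives the full list
#         merged = {}
#         for p in all_predictions:  # one entry per rank, in rank order
#             merged.update(p)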
| 4,514 | 29.714286 | 80 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/utils/model_zoo.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import sys
try:
from torch.hub import _download_url_to_file
from torch.hub import urlparse
from torch.hub import HASH_REGEX
except ImportError:
from torch.utils.model_zoo import _download_url_to_file
from torch.utils.model_zoo import urlparse
from torch.utils.model_zoo import HASH_REGEX
from maskrcnn_benchmark.utils.comm import is_main_process
from maskrcnn_benchmark.utils.comm import synchronize
# very similar to https://github.com/pytorch/pytorch/blob/master/torch/utils/model_zoo.py
# but with a few improvements and modifications
def cache_url(url, model_dir=None, progress=True):
r"""Loads the Torch serialized object at the given URL.
If the object is already present in `model_dir`, it's deserialized and
returned. The filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file.
The default value of `model_dir` is ``$TORCH_HOME/models`` where
``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
Args:
url (string): URL of the object to download
model_dir (string, optional): directory in which to save the object
progress (bool, optional): whether or not to display a progress bar to stderr
Example:
>>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
"""
if model_dir is None:
torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))
model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
parts = urlparse(url)
filename = os.path.basename(parts.path)
if filename == "model_final.pkl":
# workaround as pre-trained Caffe2 models from Detectron have all the same filename
# so make the full path the filename by replacing / with _
filename = parts.path.replace("/", "_")
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file) and is_main_process():
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
hash_prefix = HASH_REGEX.search(filename)
if hash_prefix is not None:
hash_prefix = hash_prefix.group(1)
# workaround: Caffe2 models don't have a hash, but follow the R-50 convention,
# which matches the hash PyTorch uses. So we skip the hash matching
# if the hash_prefix is less than 6 characters
if len(hash_prefix) < 6:
hash_prefix = None
_download_url_to_file(url, cached_file, hash_prefix, progress=progress)
synchronize()
return cached_file
| 3,045 | 48.129032 | 135 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/utils/collect_env.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import PIL
from torch.utils.collect_env import get_pretty_env_info
def get_pil_version():
return "\n Pillow ({})".format(PIL.__version__)
def collect_env_info():
env_str = get_pretty_env_info()
env_str += get_pil_version()
return env_str
| 338 | 21.6 | 71 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/utils/model_serialization.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import OrderedDict
import logging
import torch
from maskrcnn_benchmark.utils.imports import import_file
def align_and_update_state_dicts(model_state_dict, loaded_state_dict):
"""
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
    If multiple matches exist, take the one with the longest matching name.
    For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
current_keys = sorted(list(model_state_dict.keys()))
loaded_keys = sorted(list(loaded_state_dict.keys()))
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# loaded_key string, if it matches
match_matrix = [
len(j) if i.endswith(j) else 0 for i in current_keys for j in loaded_keys
]
match_matrix = torch.as_tensor(match_matrix).view(
len(current_keys), len(loaded_keys)
)
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
# used for logging
max_size = max([len(key) for key in current_keys]) if current_keys else 1
max_size_loaded = max([len(key) for key in loaded_keys]) if loaded_keys else 1
log_str_template = "{: <{}} loaded from {: <{}} of shape {}"
logger = logging.getLogger(__name__)
for idx_new, idx_old in enumerate(idxs.tolist()):
if idx_old == -1:
continue
key = current_keys[idx_new]
key_old = loaded_keys[idx_old]
model_state_dict[key] = loaded_state_dict[key_old]
logger.info(
log_str_template.format(
key,
max_size,
key_old,
max_size_loaded,
tuple(loaded_state_dict[key_old].shape),
)
)
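# Worked example of the suffix matching above (hypothetical key names): if the
# model has "backbone.body.res2.conv1.weight" and the checkpoint contains both
# "conv1.weight" and "res2.conv1.weight", both are suffixes of the model key;
# the longer loaded key ("res2.conv1.weight") wins, so the unrelated stem
# weight "conv1.weight" is not mistakenly copied into the res2 block.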
def strip_prefix_if_present(state_dict, prefix):
keys = sorted(state_dict.keys())
if not all(key.startswith(prefix) for key in keys):
return state_dict
stripped_state_dict = OrderedDict()
for key, value in state_dict.items():
stripped_state_dict[key.replace(prefix, "")] = value
return stripped_state_dict
def load_state_dict(model, loaded_state_dict):
model_state_dict = model.state_dict()
# if the state_dict comes from a model that was wrapped in a
# DataParallel or DistributedDataParallel during serialization,
# remove the "module" prefix before performing the matching
loaded_state_dict = strip_prefix_if_present(loaded_state_dict, prefix="module.")
align_and_update_state_dicts(model_state_dict, loaded_state_dict)
# use strict loading
model.load_state_dict(model_state_dict)
| 3,464 | 41.777778 | 91 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/data/build.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect
import logging
import torch.utils.data
from maskrcnn_benchmark.utils.comm import get_world_size
from maskrcnn_benchmark.utils.imports import import_file
from . import datasets as D
from . import samplers
from .collate_batch import BatchCollator
from .transforms import build_transforms
def build_dataset(cfg,dataset_list, transforms, dataset_catalog, is_train=True):
"""
    Arguments:
        cfg: the global config node; used here to read cfg.DATASETS.RATIOS
            when several training datasets are mixed
        dataset_list (list[str]): Contains the names of the datasets, i.e.,
            coco_2014_train, coco_2014_val, etc.
transforms (callable): transforms to apply to each (image, target) sample
dataset_catalog (DatasetCatalog): contains the information on how to
construct a dataset.
is_train (bool): whether to setup the dataset for training or testing
"""
if not isinstance(dataset_list, (list, tuple)):
raise RuntimeError(
"dataset_list should be a list of strings, got {}".format(dataset_list))
datasets = []
for dataset_name in dataset_list:
data = dataset_catalog.get(dataset_name)
factory = getattr(D, data["factory"])
args = data["args"]
# for COCODataset, we want to remove images without annotations
# during training
if data["factory"] == "COCODataset":
args["remove_images_without_annotations"] = is_train
args["transforms"] = transforms
# make dataset from factory
dataset = factory(**args)
datasets.append(dataset)
# for testing, return a list of datasets
if not is_train:
return datasets
# for training, concatenate all datasets into a single one
dataset = datasets[0]
if len(datasets) > 1:
dataset=D.MixDataset(datasets,cfg.DATASETS.RATIOS)
# dataset = D.ConcatDataset(datasets)
return [dataset]
def make_data_sampler(dataset, shuffle, distributed):
if distributed:
return samplers.DistributedSampler(dataset, shuffle=shuffle)
if shuffle:
sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
return sampler
def _quantize(x, bins):
bins = sorted(bins.copy())
quantized = list(map(lambda y: bisect.bisect_right(bins, y), x))
return quantized
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
img_info = dataset.get_img_info(i)
aspect_ratio = float(img_info["height"]) / float(img_info["width"])
aspect_ratios.append(aspect_ratio)
return aspect_ratios
def make_batch_data_sampler(
dataset, sampler, aspect_grouping, images_per_batch, num_iters=None, start_iter=0
):
if aspect_grouping:
if not isinstance(aspect_grouping, (list, tuple)):
aspect_grouping = [aspect_grouping]
aspect_ratios = _compute_aspect_ratios(dataset)
group_ids = _quantize(aspect_ratios, aspect_grouping)
batch_sampler = samplers.GroupedBatchSampler(
sampler, group_ids, images_per_batch, drop_uneven=False
)
else:
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, images_per_batch, drop_last=False
)
if num_iters is not None:
batch_sampler = samplers.IterationBasedBatchSampler(batch_sampler, num_iters, start_iter)
return batch_sampler
def make_data_loader(cfg, is_train=True, is_distributed=False, start_iter=0):
num_gpus = get_world_size()
if is_train:
images_per_batch = cfg.SOLVER.IMS_PER_BATCH
        assert (
            images_per_batch % num_gpus == 0
        ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.".format(
            images_per_batch, num_gpus
        )
images_per_gpu = images_per_batch // num_gpus
shuffle = True
num_iters = cfg.SOLVER.MAX_ITER
else:
images_per_batch = cfg.TEST.IMS_PER_BATCH
        assert (
            images_per_batch % num_gpus == 0
        ), "TEST.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.".format(
            images_per_batch, num_gpus
        )
images_per_gpu = images_per_batch // num_gpus
        shuffle = is_distributed  # DistributedSampler shuffles at test time; single-process testing stays sequential
num_iters = None
start_iter = 0
if images_per_gpu > 1:
logger = logging.getLogger(__name__)
logger.warning(
"When using more than one image per GPU you may encounter "
"an out-of-memory (OOM) error if your GPU does not have "
"sufficient memory. If this happens, you can reduce "
"SOLVER.IMS_PER_BATCH (for training) or "
"TEST.IMS_PER_BATCH (for inference). For training, you must "
"also adjust the learning rate and schedule length according "
"to the linear scaling rule. See for example: "
"https://github.com/facebookresearch/Detectron/blob/master/configs/getting_started/tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml#L14"
)
# group images which have similar aspect ratio. In this case, we only
# group in two cases: those with width / height > 1, and the other way around,
# but the code supports more general grouping strategy
aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []
paths_catalog = import_file(
"maskrcnn_benchmark.config.paths_catalog", cfg.PATHS_CATALOG, True
)
DatasetCatalog = paths_catalog.DatasetCatalog
dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST
transforms = build_transforms(cfg, is_train)
datasets = build_dataset(cfg,dataset_list, transforms, DatasetCatalog, is_train)
data_loaders = []
for dataset in datasets:
sampler = make_data_sampler(dataset, shuffle, is_distributed)
batch_sampler = make_batch_data_sampler(
dataset, sampler, aspect_grouping, images_per_gpu, num_iters, start_iter
)
collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
num_workers = cfg.DATALOADER.NUM_WORKERS
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=collator,
)
data_loaders.append(data_loader)
if is_train:
# during training, a single (possibly concatenated) data_loader is returned
assert len(data_loaders) == 1
return data_loaders[0]
return data_loaders
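# Hedged usage sketch (illustrative, not part of the original file): typical
# call sites. For training a single loader is returned; for testing, one
# loader per dataset in cfg.DATASETS.TEST.
#
#     train_loader = make_data_loader(cfg, is_train=True, is_distributed=(num_gpus > 1), start_iter=0)
#     test_loaders = make_data_loader(cfg, is_train=False, is_distributed=(num_gpus > 1))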
| 6,569 | 37.647059 | 143 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/data/datasets/concat_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect
import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
class ConcatDataset(_ConcatDataset):
"""
Same as torch.utils.data.dataset.ConcatDataset, but exposes an extra
method for querying the sizes of the image
"""
def get_idxs(self, idx):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return dataset_idx, sample_idx
def get_img_info(self, idx):
dataset_idx, sample_idx = self.get_idxs(idx)
return self.datasets[dataset_idx].get_img_info(sample_idx)
class MixDataset(object):
def __init__(self,datasets,ratios):
self.datasets=datasets
self.ratios=ratios
self.lengths=[]
for dataset in self.datasets:
self.lengths.append(len(dataset))
self.lengths=np.array(self.lengths)
self.seperate_inds=[]
s=0
for i in self.ratios[:-1]:
s+=i
self.seperate_inds.append(s)
def __len__(self):
return self.lengths.sum()
def __getitem__(self, item):
i=np.random.rand()
ind=bisect.bisect_right(self.seperate_inds,i)
b_ind=np.random.randint(self.lengths[ind])
return self.datasets[ind][b_ind]
| 1,508 | 25.946429 | 72 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/data/datasets/synthtext.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
from PIL import Image,ImageDraw
import os
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask, SegmentationCharMask, CharPolygons
import numpy as np
import torch
import pickle
class SynthtextDataset(object):
def __init__(self, use_charann, list_file_path, imgs_dir, gts_dir, transforms=None):
self.use_charann=use_charann
list_file = open(list_file_path, 'r')
image_lines = list_file.readlines()
self.image_lists = [os.path.join(imgs_dir, line.strip()) for line in image_lines]
self.gt_lists=[os.path.join(gts_dir, line.strip()+'.txt') for line in image_lines]
self.filtered_gts = []
self.transforms = transforms
self.min_proposal_size=2
self.char_classes = '_0123456789abcdefghijklmnopqrstuvwxyz'
self.vis=False
def __getitem__(self, item):
im_name=os.path.basename(self.image_lists[item])
img = Image.open(self.image_lists[item]).convert("RGB")
width,height=img.size
gt_path=self.gt_lists[item]
words,boxes,charsbbs,segmentations=self.load_gt_from_txt(gt_path,height,width)
target = BoxList(boxes[:,:4], img.size, mode="xyxy",use_char_ann=self.use_charann)
classes=torch.ones(len(boxes))
target.add_field("labels", classes)
masks = SegmentationMask(segmentations, img.size)
target.add_field("masks", masks)
if words[0]=='':
use_char_ann = False
else:
use_char_ann = True
if not self.use_charann:
use_char_ann = False
char_masks=SegmentationCharMask(charsbbs, words=words, use_char_ann=use_char_ann, size=img.size)
target.add_field("char_masks", char_masks)
if self.transforms is not None:
img, target = self.transforms(img, target)
if self.vis:
new_im=img.numpy().copy().transpose([1,2,0])+[102.9801,115.9465,122.7717]
new_im=Image.fromarray(new_im.astype(np.uint8)).convert('RGB')
mask = target.extra_fields['masks'].polygons[0].convert('mask')
mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert('RGB')
if self.use_charann:
m, _ = target.extra_fields['char_masks'].chars_boxes[0].convert('char_mask')
color=self.creat_color_map(37,255)
color_map=color[m.numpy().astype(np.uint8)]
char=Image.fromarray(color_map.astype(np.uint8)).convert('RGB')
char = Image.blend(char, new_im, 0.5)
else:
char =new_im
new=Image.blend(char,mask,0.5)
img_draw = ImageDraw.Draw(new)
for box in target.bbox.numpy():
box=list(box)
box = box[:2]+[box[2],box[1]]+box[2:] + [box[0],box[3]] + box[:2]
img_draw.line(box, fill=(255, 0, 0), width=2)
new.save('./vis/char_'+im_name)
return img, target, self.image_lists[item]
def creat_color_map(self,n_class, width):
splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
maps = []
for i in range(splits):
r = int(i * width * 1.0 / (splits-1))
for j in range(splits):
g = int(j * width * 1.0 / (splits-1))
for k in range(splits-1):
b = int(k * width * 1.0 / (splits-1))
maps.append([r, g, b])
return np.array(maps)
def __len__(self):
return len(self.image_lists)
def load_gt_from_txt(self, gt_path, height=None, width=None):
words, boxes, charsboxes, segmentations=[], [], [], []
lines = open(gt_path).readlines()
for line in lines:
charbbs = []
strs, loc = self.line2boxes(line)
word = strs[0]
if word == '###':
continue
else:
rect = list(loc[0])
min_x = min(rect[::2]) - 1
min_y = min(rect[1::2]) - 1
max_x = max(rect[::2]) - 1
max_y = max(rect[1::2]) - 1
box = [min_x, min_y, max_x, max_y]
segmentations.append([loc[0,:]])
tindex = len(boxes)
boxes.append(box)
words.append(word)
c_class = self.char2num(strs[1:])
charbb = np.zeros((10,), dtype=np.float32)
if loc.shape[0] > 1:
for i in range(1, loc.shape[0]):
charbb[:8] = loc[i, :]
charbb[8] = c_class[i-1]
charbb[9] = tindex
charbbs.append(charbb.copy())
else:
charbbs.append(charbb.copy())
charsboxes.append(charbbs)
num_boxes=len(boxes)
if len(boxes) > 0:
keep_boxes=np.zeros((num_boxes,5))
keep_boxes[:,:4]=np.array(boxes)
            keep_boxes[:,4]=range(num_boxes)  # the 5th column is the box label, the same as the 10th column of the char boxes that belong to this box
if self.use_charann:
return words, np.array(keep_boxes), charsboxes, segmentations
else:
                # char annotations are unused: the main loop above already
                # appended one entry per word, so only fill in dummy char
                # boxes when the list is empty, and return the full list
                # (returning a single [[charbbs]] would drop all but one entry)
                charbbs = np.zeros((10,), dtype=np.float32)
                if len(charsboxes) == 0:
                    for i in range(len(words)):
                        charsboxes.append([charbbs])
                return words, np.array(keep_boxes), charsboxes, segmentations
else:
words.append('')
charbbs = np.zeros((10,), dtype=np.float32)
return words, np.zeros((1, 5), dtype=np.float32), [[charbbs]], [[np.zeros((8,), dtype=np.float32)]]
def line2boxes(self, line):
parts = line.strip().split(',')
if '\xef\xbb\xbf' in parts[0]:
parts[0] = parts[0][3:]
if '\ufeff' in parts[0]:
parts[0] = parts[0].replace('\ufeff', '')
x1 = np.array([int(float(x)) for x in parts[::9]])
y1 = np.array([int(float(x)) for x in parts[1::9]])
x2 = np.array([int(float(x)) for x in parts[2::9]])
y2 = np.array([int(float(x)) for x in parts[3::9]])
x3 = np.array([int(float(x)) for x in parts[4::9]])
y3 = np.array([int(float(x)) for x in parts[5::9]])
x4 = np.array([int(float(x)) for x in parts[6::9]])
y4 = np.array([int(float(x)) for x in parts[7::9]])
strs = parts[8::9]
loc = np.vstack((x1, y1, x2, y2, x3, y3, x4, y4)).transpose()
return strs, loc
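    # Hedged format note (inferred from the parsing above, not from official
    # docs): each gt line is a flat comma-separated sequence of 9-field groups,
    #     x1,y1,x2,y2,x3,y3,x4,y4,transcription
    # where the first group is the word quadrilateral with its full
    # transcription and each following group is one character box with a
    # single character, e.g.
    #     10,10,50,10,50,30,10,30,ab,10,10,30,10,30,30,10,30,a,30,10,50,10,50,30,30,30,b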
def check_charbbs(self, charbbs):
xmins = np.minimum.reduce([charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]])
xmaxs = np.maximum.reduce([charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]])
ymins = np.minimum.reduce([charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]])
ymaxs = np.maximum.reduce([charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]])
return np.logical_and(xmaxs - xmins > self.min_proposal_size, ymaxs - ymins > self.min_proposal_size)
def check_charbb(self, charbb):
xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
return xmaxs - xmins > self.min_proposal_size and ymaxs - ymins > self.min_proposal_size
def char2num(self, chars):
## chars ['h', 'e', 'l', 'l', 'o']
nums = [self.char_classes.index(c.lower()) for c in chars]
return nums
def get_img_info(self, item):
"""
Return the image dimensions for the image, without
loading and pre-processing it
"""
im_name = os.path.basename(self.image_lists[item])
img = Image.open(self.image_lists[item])
width,height=img.size
img_info = {'im_name': im_name, 'height': height, 'width': width}
return img_info
| 8,241 | 44.038251 | 143 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/data/datasets/scut.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
from PIL import Image, ImageDraw
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import os
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask, SegmentationCharMask, CharPolygons
import numpy as np
import torch
class ScutDataset(object):
def __init__(self, use_charann, imgs_dir, gts_dir,transforms=None):
self.use_charann=use_charann
self.image_lists = [os.path.join(imgs_dir,img) for img in os.listdir(imgs_dir)]
self.gts_dir=gts_dir
self.transforms = transforms
self.min_proposal_size=2
self.char_classes = '_0123456789abcdefghijklmnopqrstuvwxyz'
self.vis=False
def __getitem__(self, item):
im_name=os.path.basename(self.image_lists[item])
img = Image.open(self.image_lists[item]).convert("RGB")
width,height=img.size
gt_path=os.path.join(self.gts_dir,im_name+'.txt')
words,boxes,charsbbs,segmentations=self.load_gt_from_txt(gt_path,height,width)
if words[0]=='':
use_char_ann = False
else:
use_char_ann = True
if not self.use_charann:
use_char_ann = False
target = BoxList(boxes[:, :4], img.size, mode="xyxy", use_char_ann=use_char_ann)
classes=torch.ones(len(boxes))
target.add_field("labels", classes)
masks = SegmentationMask(segmentations, img.size)
target.add_field("masks", masks)
char_masks=SegmentationCharMask(charsbbs, words=words, use_char_ann=use_char_ann, size=img.size)
target.add_field("char_masks", char_masks)
if self.transforms is not None:
img, target = self.transforms(img, target)
if self.vis:
new_im=img.numpy().copy().transpose([1,2,0])+[102.9801,115.9465,122.7717]
new_im=Image.fromarray(new_im.astype(np.uint8)).convert('RGB')
mask = target.extra_fields['masks'].polygons[0].convert('mask')
mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert('RGB')
if self.use_charann:
m, _ = target.extra_fields['char_masks'].chars_boxes[0].convert('char_mask')
color=self.creat_color_map(37,255)
color_map=color[m.numpy().astype(np.uint8)]
char=Image.fromarray(color_map.astype(np.uint8)).convert('RGB')
char = Image.blend(char, new_im, 0.5)
else:
char =new_im
new=Image.blend(char,mask,0.5)
img_draw = ImageDraw.Draw(new)
for box in target.bbox.numpy():
box=list(box)
box = box[:2]+[box[2],box[1]]+box[2:] + [box[0],box[3]] + box[:2]
img_draw.line(box, fill=(255, 0, 0), width=2)
new.save('./vis/char_'+im_name)
return img, target, self.image_lists[item]
def creat_color_map(self,n_class, width):
splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
maps = []
for i in range(splits):
r = int(i * width * 1.0 / (splits-1))
for j in range(splits):
g = int(j * width * 1.0 / (splits-1))
for k in range(splits-1):
b = int(k * width * 1.0 / (splits-1))
maps.append([r, g, b])
return np.array(maps)
def __len__(self):
return len(self.image_lists)
def load_gt_from_txt(self, gt_path, height=None, width=None):
words, boxes, charsboxes, segmentations=[], [], [], []
lines = open(gt_path).readlines()
for line in lines:
charbbs = []
strs, loc = self.line2boxes(line)
word = strs[0]
if word == '###':
continue
else:
rect = list(loc[0])
min_x = min(rect[::2]) - 1
min_y = min(rect[1::2]) - 1
max_x = max(rect[::2]) - 1
max_y = max(rect[1::2]) - 1
box = [min_x, min_y, max_x, max_y]
segmentations.append([loc[0,:]])
tindex = len(boxes)
boxes.append(box)
words.append(word)
c_class = self.char2num(strs[1:])
charbb = np.zeros((10,), dtype=np.float32)
if loc.shape[0] > 1:
for i in range(1, loc.shape[0]):
charbb[:8] = loc[i, :]
charbb[8] = c_class[i-1]
charbb[9] = tindex
charbbs.append(charbb.copy())
charsboxes.append(charbbs)
num_boxes=len(boxes)
if len(boxes) > 0:
keep_boxes=np.zeros((num_boxes,5))
keep_boxes[:,:4]=np.array(boxes)
            keep_boxes[:,4]=range(num_boxes)  # the 5th column is the box label, the same as the 10th column of the char boxes that belong to this box
if self.use_charann:
return words, np.array(keep_boxes), charsboxes, segmentations
else:
charbbs = np.zeros((10,), dtype=np.float32)
if len(charsboxes)==0:
for i in range(len(words)):
charsboxes.append([charbbs])
return words, np.array(keep_boxes), charsboxes, segmentations
else:
words.append('')
charbbs = np.zeros((10,), dtype=np.float32)
return words, np.zeros((1, 5), dtype=np.float32), [[charbbs]], [[np.zeros((8,), dtype=np.float32)]]
def line2boxes(self, line):
parts = line.strip().split(',')
if '\xef\xbb\xbf' in parts[0]:
parts[0] = parts[0][3:]
if '\ufeff' in parts[0]:
parts[0] = parts[0].replace('\ufeff', '')
x1 = np.array([int(float(x)) for x in parts[::9]])
y1 = np.array([int(float(x)) for x in parts[1::9]])
x2 = np.array([int(float(x)) for x in parts[2::9]])
y2 = np.array([int(float(x)) for x in parts[3::9]])
x3 = np.array([int(float(x)) for x in parts[4::9]])
y3 = np.array([int(float(x)) for x in parts[5::9]])
x4 = np.array([int(float(x)) for x in parts[6::9]])
y4 = np.array([int(float(x)) for x in parts[7::9]])
strs = parts[8::9]
loc = np.vstack((x1, y1, x2, y2, x3, y3, x4, y4)).transpose()
return strs, loc
def check_charbbs(self, charbbs):
xmins = np.minimum.reduce([charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]])
xmaxs = np.maximum.reduce([charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]])
ymins = np.minimum.reduce([charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]])
ymaxs = np.maximum.reduce([charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]])
return np.logical_and(xmaxs - xmins > self.min_proposal_size, ymaxs - ymins > self.min_proposal_size)
def check_charbb(self, charbb):
xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
return xmaxs - xmins > self.min_proposal_size and ymaxs - ymins > self.min_proposal_size
def char2num(self, chars):
## chars ['h', 'e', 'l', 'l', 'o']
nums = [self.char_classes.index(c.lower()) for c in chars]
return nums
def get_img_info(self, item):
"""
Return the image dimensions for the image, without
loading and pre-processing it
"""
im_name = os.path.basename(self.image_lists[item])
img = Image.open(self.image_lists[item])
width,height=img.size
img_info = {'im_name': im_name, 'height': height, 'width': width}
        return img_info
| 8,112 | 44.324022 | 143 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/data/datasets/icdar.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
from PIL import Image, ImageDraw
import os
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask, SegmentationCharMask, CharPolygons
import numpy as np
import torch
class IcdarDataset(object):
def __init__(self, use_charann, imgs_dir, gts_dir,transforms=None):
self.use_charann=use_charann
self.image_lists = [os.path.join(imgs_dir,img) for img in os.listdir(imgs_dir)]
self.gts_dir=gts_dir
self.transforms = transforms
self.min_proposal_size=2
self.char_classes = '_0123456789abcdefghijklmnopqrstuvwxyz'
self.vis=False
def __getitem__(self, item):
im_name=os.path.basename(self.image_lists[item])
img = Image.open(self.image_lists[item]).convert("RGB")
width,height=img.size
if self.gts_dir is not None:
gt_path=os.path.join(self.gts_dir,im_name+'.txt')
if not os.path.isfile(gt_path):
gt_path=os.path.join(self.gts_dir,'gt_'+im_name.split('.')[0]+'.txt')
words,boxes,charsbbs,segmentations=self.load_gt_from_txt(gt_path,height,width)
target = BoxList(boxes[:,:4], img.size, mode="xyxy",use_char_ann=self.use_charann)
classes=torch.ones(len(boxes))
target.add_field("labels", classes)
masks = SegmentationMask(segmentations, img.size)
target.add_field("masks", masks)
if words[0]=='':
use_char_ann = False
else:
use_char_ann = True
if not self.use_charann:
use_char_ann = False
char_masks=SegmentationCharMask(charsbbs, words=words, use_char_ann=use_char_ann, size=img.size)
target.add_field("char_masks", char_masks)
else:
target = None
if self.transforms is not None:
img, target = self.transforms(img, target)
if self.vis:
new_im=img.numpy().copy().transpose([1,2,0])+[102.9801,115.9465,122.7717]
new_im=Image.fromarray(new_im.astype(np.uint8)).convert('RGB')
mask = target.extra_fields['masks'].polygons[0].convert('mask')
mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert('RGB')
if self.use_charann:
m, _ = target.extra_fields['char_masks'].chars_boxes[0].convert('char_mask')
color=self.creat_color_map(37,255)
color_map=color[m.numpy().astype(np.uint8)]
char=Image.fromarray(color_map.astype(np.uint8)).convert('RGB')
char = Image.blend(char, new_im, 0.5)
else:
char =new_im
new=Image.blend(char,mask,0.5)
img_draw = ImageDraw.Draw(new)
for box in target.bbox.numpy():
box=list(box)
box = box[:2]+[box[2],box[1]]+box[2:] + [box[0],box[3]] + box[:2]
img_draw.line(box, fill=(255, 0, 0), width=2)
new.save('./vis/char_'+im_name)
return img, target, self.image_lists[item]
def creat_color_map(self,n_class, width):
splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
maps = []
for i in range(splits):
r = int(i * width * 1.0 / (splits-1))
for j in range(splits):
g = int(j * width * 1.0 / (splits-1))
for k in range(splits-1):
b = int(k * width * 1.0 / (splits-1))
maps.append([r, g, b])
return np.array(maps)
def __len__(self):
return len(self.image_lists)
def load_gt_from_txt(self, gt_path, height=None, width=None):
words, boxes, charsboxes, segmentations=[], [], [], []
lines = open(gt_path).readlines()
for line in lines:
charbbs = []
strs, loc = self.line2boxes(line)
word = strs[0]
if word == '###':
continue
else:
rect = list(loc[0])
min_x = min(rect[::2]) - 1
min_y = min(rect[1::2]) - 1
max_x = max(rect[::2]) - 1
max_y = max(rect[1::2]) - 1
box = [min_x, min_y, max_x, max_y]
segmentations.append([loc[0,:]])
tindex = len(boxes)
boxes.append(box)
words.append(word)
c_class = self.char2num(strs[1:])
charbb = np.zeros((10,), dtype=np.float32)
if loc.shape[0] > 1:
for i in range(1, loc.shape[0]):
charbb[:8] = loc[i, :]
charbb[8] = c_class[i-1]
charbb[9] = tindex
charbbs.append(charbb.copy())
charsboxes.append(charbbs)
num_boxes=len(boxes)
if len(boxes) > 0:
keep_boxes=np.zeros((num_boxes,5))
keep_boxes[:,:4]=np.array(boxes)
            keep_boxes[:,4]=range(num_boxes)  # the 5th column is the box label, the same as the 10th column of the char boxes that belong to this box
if self.use_charann:
return words, np.array(keep_boxes), charsboxes, segmentations
else:
charbbs = np.zeros((10,), dtype=np.float32)
if len(charsboxes)==0:
for i in range(len(words)):
charsboxes.append([charbbs])
return words, np.array(keep_boxes), charsboxes, segmentations
else:
words.append('')
charbbs = np.zeros((10,), dtype=np.float32)
return words, np.zeros((1, 5), dtype=np.float32), [[charbbs]], [[np.zeros((8,), dtype=np.float32)]]
def line2boxes(self, line):
parts = line.strip().split(',', 8)
if '\xef\xbb\xbf' in parts[0]:
parts[0] = parts[0][3:]
if '\ufeff' in parts[0]:
parts[0] = parts[0].replace('\ufeff', '')
x1 = np.array([int(float(x)) for x in parts[::9]])
y1 = np.array([int(float(x)) for x in parts[1::9]])
x2 = np.array([int(float(x)) for x in parts[2::9]])
y2 = np.array([int(float(x)) for x in parts[3::9]])
x3 = np.array([int(float(x)) for x in parts[4::9]])
y3 = np.array([int(float(x)) for x in parts[5::9]])
x4 = np.array([int(float(x)) for x in parts[6::9]])
y4 = np.array([int(float(x)) for x in parts[7::9]])
strs = parts[8::9]
loc = np.vstack((x1, y1, x2, y2, x3, y3, x4, y4)).transpose()
return strs, loc
def check_charbbs(self, charbbs):
xmins = np.minimum.reduce([charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]])
xmaxs = np.maximum.reduce([charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]])
ymins = np.minimum.reduce([charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]])
ymaxs = np.maximum.reduce([charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]])
return np.logical_and(xmaxs - xmins > self.min_proposal_size, ymaxs - ymins > self.min_proposal_size)
def check_charbb(self, charbb):
xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
return xmaxs - xmins > self.min_proposal_size and ymaxs - ymins > self.min_proposal_size
def char2num(self, chars):
## chars ['h', 'e', 'l', 'l', 'o']
nums = [self.char_classes.index(c.lower()) for c in chars]
return nums
def get_img_info(self, item):
"""
Return the image dimensions for the image, without
loading and pre-processing it
"""
im_name = os.path.basename(self.image_lists[item])
img = Image.open(self.image_lists[item])
width,height=img.size
img_info = {'im_name': im_name, 'height': height, 'width': width}
        return img_info
| 8,290 | 44.80663 | 143 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/data/datasets/total_text.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
from PIL import Image, ImageDraw
import os
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask, SegmentationCharMask, CharPolygons
import numpy as np
import torch
class TotaltextDataset(object):
def __init__(self, use_charann, imgs_dir, gts_dir,transforms=None):
self.use_charann=use_charann
self.image_lists = [os.path.join(imgs_dir,img) for img in os.listdir(imgs_dir)]
self.gts_dir=gts_dir
self.transforms = transforms
self.min_proposal_size=2
self.char_classes = '_0123456789abcdefghijklmnopqrstuvwxyz'
self.vis=False
def __getitem__(self, item):
im_name=os.path.basename(self.image_lists[item])
img = Image.open(self.image_lists[item]).convert("RGB")
width,height=img.size
if self.gts_dir is not None:
gt_path=os.path.join(self.gts_dir,im_name+'.txt')
words,boxes,charsbbs,segmentations=self.load_gt_from_txt(gt_path,height,width)
if words[0]=='':
use_char_ann = False
else:
use_char_ann = True
if not self.use_charann:
use_char_ann = False
target = BoxList(boxes[:,:4], img.size, mode="xyxy",use_char_ann=use_char_ann)
classes=torch.ones(len(boxes))
target.add_field("labels", classes)
masks = SegmentationMask(segmentations, img.size)
target.add_field("masks", masks)
char_masks=SegmentationCharMask(charsbbs, words=words, use_char_ann=use_char_ann, size=img.size)
target.add_field("char_masks", char_masks)
else:
target = None
if self.transforms is not None:
img, target = self.transforms(img, target)
if self.vis:
new_im=img.numpy().copy().transpose([1,2,0])+[102.9801,115.9465,122.7717]
new_im=Image.fromarray(new_im.astype(np.uint8)).convert('RGB')
mask = target.extra_fields['masks'].polygons[0].convert('mask')
mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert('RGB')
if self.use_charann:
m, _ = target.extra_fields['char_masks'].chars_boxes[0].convert('char_mask')
color=self.creat_color_map(37,255)
color_map=color[m.numpy().astype(np.uint8)]
char=Image.fromarray(color_map.astype(np.uint8)).convert('RGB')
char = Image.blend(char, new_im, 0.5)
else:
char =new_im
new=Image.blend(char,mask,0.5)
img_draw = ImageDraw.Draw(new)
for box in target.bbox.numpy():
box=list(box)
box = box[:2]+[box[2],box[1]]+box[2:] + [box[0],box[3]] + box[:2]
img_draw.line(box, fill=(255, 0, 0), width=2)
new.save('./vis/char_'+im_name)
return img, target, self.image_lists[item]
def creat_color_map(self,n_class, width):
splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
maps = []
for i in range(splits):
r = int(i * width * 1.0 / (splits-1))
for j in range(splits):
g = int(j * width * 1.0 / (splits-1))
for k in range(splits-1):
b = int(k * width * 1.0 / (splits-1))
maps.append([r, g, b])
return np.array(maps)
def __len__(self):
return len(self.image_lists)
def load_gt_from_txt(self, gt_path, height=None, width=None):
words, boxes, charsboxes, segmentations=[], [], [], []
lines = open(gt_path).readlines()
for line in lines:
charbbs = []
strs, loc = self.line2boxes(line)
word = strs[0]
if word == '###':
continue
else:
rect = list(loc[0])
min_x = min(rect[::2]) - 1
min_y = min(rect[1::2]) - 1
max_x = max(rect[::2]) - 1
max_y = max(rect[1::2]) - 1
box = [min_x, min_y, max_x, max_y]
segmentations.append([loc[0,:]])
tindex = len(boxes)
boxes.append(box)
words.append(word)
c_class = self.char2num(strs[1:])
charbb = np.zeros((10,), dtype=np.float32)
if loc.shape[0] > 1:
for i in range(1, loc.shape[0]):
charbb[:8] = loc[i, :]
charbb[8] = c_class[i-1]
charbb[9] = tindex
charbbs.append(charbb.copy())
charsboxes.append(charbbs)
num_boxes=len(boxes)
if len(boxes) > 0:
keep_boxes=np.zeros((num_boxes,5))
keep_boxes[:,:4]=np.array(boxes)
            keep_boxes[:,4]=range(num_boxes)  # the 5th column is the box label, the same as the 10th column of the char boxes that belong to this box
if self.use_charann:
return words, np.array(keep_boxes), charsboxes, segmentations
else:
                # char annotations are unused: the main loop above already
                # appended one entry per word, so only fill in dummy char
                # boxes when the list is empty (avoids duplicating entries)
                charbbs = np.zeros((10,), dtype=np.float32)
                if len(charsboxes) == 0:
                    for i in range(len(words)):
                        charsboxes.append([charbbs])
                return words, np.array(keep_boxes), charsboxes, segmentations
else:
words.append('')
charbbs = np.zeros((10,), dtype=np.float32)
return words, np.zeros((1, 5), dtype=np.float32), [[charbbs]], [[np.zeros((8,), dtype=np.float32)]]
def line2boxes(self, line):
parts = line.strip().split(',')
return [parts[-1]], np.array([[float(x) for x in parts[:-1]]])
def check_charbbs(self, charbbs):
xmins = np.minimum.reduce([charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]])
xmaxs = np.maximum.reduce([charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]])
ymins = np.minimum.reduce([charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]])
ymaxs = np.maximum.reduce([charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]])
return np.logical_and(xmaxs - xmins > self.min_proposal_size, ymaxs - ymins > self.min_proposal_size)
def check_charbb(self, charbb):
xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
return xmaxs - xmins > self.min_proposal_size and ymaxs - ymins > self.min_proposal_size
def char2num(self, chars):
## chars ['h', 'e', 'l', 'l', 'o']
nums = [self.char_classes.index(c.lower()) for c in chars]
return nums
def get_img_info(self, item):
"""
Return the image dimensions for the image, without
loading and pre-processing it
"""
im_name = os.path.basename(self.image_lists[item])
img = Image.open(self.image_lists[item])
width,height=img.size
img_info = {'im_name': im_name, 'height': height, 'width': width}
        return img_info
| 7,448 | 44.145455 | 143 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/data/datasets/coco.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torchvision
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
class COCODataset(torchvision.datasets.coco.CocoDetection):
def __init__(
self, ann_file, root, remove_images_without_annotations, transforms=None
):
super(COCODataset, self).__init__(root, ann_file)
# sort indices for reproducible results
self.ids = sorted(self.ids)
# filter images without detection annotations
if remove_images_without_annotations:
self.ids = [
img_id
for img_id in self.ids
if len(self.coco.getAnnIds(imgIds=img_id, iscrowd=None)) > 0
]
self.json_category_id_to_contiguous_id = {
v: i + 1 for i, v in enumerate(self.coco.getCatIds())
}
self.contiguous_category_id_to_json_id = {
v: k for k, v in self.json_category_id_to_contiguous_id.items()
}
self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
self.transforms = transforms
def __getitem__(self, idx):
img, anno = super(COCODataset, self).__getitem__(idx)
# filter crowd annotations
# TODO might be better to add an extra field
anno = [obj for obj in anno if obj["iscrowd"] == 0]
boxes = [obj["bbox"] for obj in anno]
boxes = torch.as_tensor(boxes).reshape(-1, 4) # guard against no boxes
target = BoxList(boxes, img.size, mode="xywh",use_char_ann=False).convert("xyxy")
classes = [obj["category_id"] for obj in anno]
classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
classes = torch.tensor(classes)
target.add_field("labels", classes)
masks = [obj["segmentation"] for obj in anno]
masks = SegmentationMask(masks, img.size)
target.add_field("masks", masks)
target = target.clip_to_image(remove_empty=True)
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target, idx
def get_img_info(self, index):
img_id = self.id_to_img_map[index]
img_data = self.coco.imgs[img_id]
return img_data
| 2,363 | 34.818182 | 89 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/data/samplers/grouped_batch_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import torch
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
class GroupedBatchSampler(BatchSampler):
"""
Wraps another sampler to yield a mini-batch of indices.
It enforces that elements from the same group should appear in groups of batch_size.
It also tries to provide mini-batches which follows an ordering which is
as close as possible to the ordering from the original sampler.
Arguments:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_uneven (bool): If ``True``, the sampler will drop the batches whose
size is less than ``batch_size``
"""
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
if not isinstance(sampler, Sampler):
raise ValueError(
"sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}".format(sampler)
)
self.sampler = sampler
self.group_ids = torch.as_tensor(group_ids)
assert self.group_ids.dim() == 1
self.batch_size = batch_size
self.drop_uneven = drop_uneven
self.groups = torch.unique(self.group_ids).sort(0)[0]
self._can_reuse_batches = False
def _prepare_batches(self):
dataset_size = len(self.group_ids)
# get the sampled indices from the sampler
sampled_ids = torch.as_tensor(list(self.sampler))
# potentially not all elements of the dataset were sampled
# by the sampler (e.g., DistributedSampler).
# construct a tensor which contains -1 if the element was
# not sampled, and a non-negative number indicating the
# order where the element was sampled.
# for example. if sampled_ids = [3, 1] and dataset_size = 5,
# the order is [-1, 1, -1, 0, -1]
order = torch.full((dataset_size,), -1, dtype=torch.int64)
order[sampled_ids] = torch.arange(len(sampled_ids))
# get a mask with the elements that were sampled
mask = order >= 0
# find the elements that belong to each individual cluster
clusters = [(self.group_ids == i) & mask for i in self.groups]
# get relative order of the elements inside each cluster
# that follows the order from the sampler
relative_order = [order[cluster] for cluster in clusters]
# with the relative order, find the absolute order in the
# sampled space
permutation_ids = [s[s.sort()[1]] for s in relative_order]
# permute each cluster so that they follow the order from
# the sampler
permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
# splits each cluster in batch_size, and merge as a list of tensors
splits = [c.split(self.batch_size) for c in permuted_clusters]
merged = tuple(itertools.chain.from_iterable(splits))
# now each batch internally has the right order, but
# they are grouped by clusters. Find the permutation between
# different batches that brings them as close as possible to
# the order that we have in the sampler. For that, we will consider the
# ordering as coming from the first element of each batch, and sort
# correspondingly
first_element_of_batch = [t[0].item() for t in merged]
# get and inverse mapping from sampled indices and the position where
# they occur (as returned by the sampler)
inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())}
# from the first element in each batch, get a relative ordering
first_index_of_batch = torch.as_tensor(
[inv_sampled_ids_map[s] for s in first_element_of_batch]
)
# permute the batches so that they approximately follow the order
# from the sampler
permutation_order = first_index_of_batch.sort(0)[1].tolist()
# finally, permute the batches
batches = [merged[i].tolist() for i in permutation_order]
if self.drop_uneven:
kept = []
for batch in batches:
if len(batch) == self.batch_size:
kept.append(batch)
batches = kept
return batches
def __iter__(self):
if self._can_reuse_batches:
batches = self._batches
self._can_reuse_batches = False
else:
batches = self._prepare_batches()
self._batches = batches
return iter(batches)
def __len__(self):
if not hasattr(self, "_batches"):
self._batches = self._prepare_batches()
self._can_reuse_batches = True
return len(self._batches)
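# Hedged usage sketch (illustrative, not part of the original file): grouping
# a dataset by aspect ratio so that a mini-batch never mixes groups.
# `group_ids` here is a hypothetical 0/1 list (e.g. portrait vs. landscape).
#
#     from torch.utils.data.sampler import SequentialSampler
#     sampler = SequentialSampler(range(6))
#     group_ids = [0, 1, 0, 0, 1, 1]
#     batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size=2)
#     list(batch_sampler)  # [[0, 2], [1, 4], [3], [5]] -- batches never mix groups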
| 4,844 | 41.130435 | 88 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch.utils.data.sampler import BatchSampler
class IterationBasedBatchSampler(BatchSampler):
"""
Wraps a BatchSampler, resampling from it until
a specified number of iterations have been sampled
"""
def __init__(self, batch_sampler, num_iterations, start_iter=0):
self.batch_sampler = batch_sampler
self.num_iterations = num_iterations
self.start_iter = start_iter
def __iter__(self):
iteration = self.start_iter
while iteration <= self.num_iterations:
# if the underlying sampler has a set_epoch method, like
# DistributedSampler, used for making each process see
# a different split of the dataset, then set it
if hasattr(self.batch_sampler.sampler, "set_epoch"):
self.batch_sampler.sampler.set_epoch(iteration)
for batch in self.batch_sampler:
iteration += 1
if iteration > self.num_iterations:
break
yield batch
def __len__(self):
return self.num_iterations
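# Hedged usage sketch (illustrative, not part of the original file): wrapping
# any BatchSampler so that the data loader yields exactly `num_iterations`
# batches, restarting the underlying sampler as many times as needed.
#
#     from torch.utils.data.sampler import BatchSampler, SequentialSampler
#     base = BatchSampler(SequentialSampler(range(4)), batch_size=2, drop_last=False)
#     looped = IterationBasedBatchSampler(base, num_iterations=5)
#     len(list(looped))  # 5, even though `base` alone yields only 2 batches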
| 1,164 | 35.40625 | 71 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/data/samplers/distributed.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed,
# with a modification in the import to use the deprecated backend
# FIXME remove this once c10d fixes the bug it has
import math
import torch
# import torch.distributed.deprecated as dist
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset : offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
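# Hedged usage sketch (illustrative, not part of the original file): the
# set_epoch pattern. Re-seeding per epoch keeps the shuffle deterministic and
# identical across processes while still varying between epochs.
#
#     sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank)
#     for epoch in range(num_epochs):
#         sampler.set_epoch(epoch)   # same permutation on every rank
#         for batch in loader:       # loader built with sampler=sampler
#             ...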
| 2,678 | 37.826087 | 86 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/data/transforms/transforms.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import random
import numpy as np
import torch
import torchvision
from torchvision.transforms import functional as F
from shapely.geometry import box, Polygon
from shapely import affinity
from PIL import Image
import cv2
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class Resize(object):
def __init__(self, min_size, max_size):
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
if isinstance(self.min_size, tuple):
if len(self.min_size) == 1:
size = self.min_size[0]
else:
random_size_index = random.randint(0,len(self.min_size)-1)
size = self.min_size[random_size_index]
else:
size = self.min_size
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
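    # Worked example (illustrative): with min_size=800 and max_size=1333, a
    # 1920x1080 image first tries short side 800, but 1920/1080*800 ≈ 1422
    # exceeds max_size, so size becomes round(1333*1080/1920) = 750 and the
    # returned (oh, ow) is (750, 1333).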
def __call__(self, image, target):
size = self.get_size(image.size)
image = F.resize(image, size)
if target is not None:
target = target.resize(image.size)
return image, target
class RandomCrop(object):
def __init__(self, prob, crop_min_size=500,crop_max_size=1000,max_trys=50):
self.min_size=crop_min_size
self.max_size=crop_max_size
self.max_trys=max_trys
self.prob=prob
def __call__(self,image,target):
if random.random()<self.prob:
im=np.array(image)
w, h = image.size
h_array = np.zeros((h), dtype=np.int32)
w_array = np.zeros((w), dtype=np.int32)
boxes=target.bbox.numpy()
if len(boxes)==0:
return image,target
for box in boxes:
box = np.round(box, decimals=0).astype(np.int32)
minx = box[0]
maxx = box[2]
w_array[minx:maxx] = 1
miny = box[1]
maxy = box[3]
h_array[miny:maxy] = 1
h_axis = np.where(h_array == 0)[0]
w_axis = np.where(w_array == 0)[0]
if len(h_axis) == 0 or len(w_axis) == 0:
return image,target
for i in range(self.max_trys):
xx = np.random.choice(w_axis, size=2)
xmin=min(xx)
xmax=max(xx)
x_size=xmax-xmin
if x_size > self.max_size or x_size < self.min_size:
continue
yy = np.random.choice(h_axis, size=2)
ymin=min(yy)
ymax=max(yy)
y_size=ymax-ymin
if y_size > self.max_size or y_size < self.min_size:
continue
box_in_area=(boxes[:,0]>=xmin)&(boxes[:,1]>=ymin)&(boxes[:,2]<=xmax)&(boxes[:,3]<=ymax)
if len(np.where(box_in_area)[0]) == 0:
continue
im=im[ymin:ymax,xmin:xmax]
target=target.crop([xmin, ymin, xmax, ymax])
return Image.fromarray(im), target
return image, target
else:
return image, target
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
image = F.hflip(image)
target = target.transpose(0)
return image, target
class ToTensor(object):
def __call__(self, image, target):
return F.to_tensor(image), target
class Normalize(object):
def __init__(self, mean, std, to_bgr255=True):
self.mean = mean
self.std = std
self.to_bgr255 = to_bgr255
def __call__(self, image, target):
if self.to_bgr255:
image = image[[2, 1, 0]] * 255
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
class RandomBrightness(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
brightness_factor = random.uniform(0.5, 2)
image = F.adjust_brightness(image, brightness_factor)
return image, target
class RandomContrast(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
contrast_factor = random.uniform(0.5, 2)
image = F.adjust_contrast(image, contrast_factor)
return image, target
class RandomHue(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
hue_factor = random.uniform(-0.25, 0.25)
image = F.adjust_hue(image, hue_factor)
return image, target
class RandomSaturation(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
saturation_factor = random.uniform(0.5, 2)
image = F.adjust_saturation(image, saturation_factor)
return image, target
class RandomGamma(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
gamma_factor = random.uniform(0.5, 2)
image = F.adjust_gamma(image, gamma_factor)
return image, target
class RandomRotate(object):
    def __init__(self, prob, max_theta=30):
        self.prob = prob
        self.max_theta = max_theta
    def __call__(self, image, target):
        if random.random() < self.prob and target is not None:
            delta = random.uniform(-1 * self.max_theta, self.max_theta)
            width, height = image.size
            ## get the minimal axis-aligned rect that covers the rotated image
            img_box = [[[0, 0], [width, 0], [width, height], [0, height]]]
            rotated_img_box = _quad2minrect(_rotate_polygons(img_box, delta, (width / 2, height / 2)))
            r_height = int(max(rotated_img_box[0][3], rotated_img_box[0][1]) - min(rotated_img_box[0][3], rotated_img_box[0][1]))
            r_width = int(max(rotated_img_box[0][2], rotated_img_box[0][0]) - min(rotated_img_box[0][2], rotated_img_box[0][0]))
            r_height = max(r_height, height + 1)
            r_width = max(r_width, width + 1)
            ## pad the image into the larger canvas before rotating, so that
            ## no pixels are clipped by the rotation
            im_padding = np.zeros((r_height, r_width, 3))
            start_h, start_w = int((r_height - height) / 2.0), int((r_width - width) / 2.0)
            end_h, end_w = start_h + height, start_w + width
            im_padding[start_h:end_h, start_w:end_w, :] = image
            M = cv2.getRotationMatrix2D((r_width / 2, r_height / 2), delta, 1)
            im = cv2.warpAffine(im_padding, M, (r_width, r_height))
            im = Image.fromarray(im.astype(np.uint8))
            target = target.rotate(-delta, (r_width / 2, r_height / 2), start_h, start_w)
            return im, target
        else:
            return image, target
def _quad2minrect(boxes):
    ## convert quads (N*8) to the axis-aligned rectangles (N*4) of minimal area that cover them
    return np.hstack((boxes[:, ::2].min(axis=1).reshape((-1, 1)), boxes[:, 1::2].min(axis=1).reshape((-1, 1)), boxes[:, ::2].max(axis=1).reshape((-1, 1)), boxes[:, 1::2].max(axis=1).reshape((-1, 1))))
def _boxlist2quads(boxlist):
    res = np.zeros((len(boxlist), 8))
    for i, box in enumerate(boxlist):
        res[i] = np.array([box[0][0], box[0][1], box[1][0], box[1][1], box[2][0], box[2][1], box[3][0], box[3][1]])
    return res
def _rotate_polygons(polygons, angle, r_c):
    ## polygons: N polygons, each given as four (x, y) corner points
    ## r_c: rotation center (x, y)
    ## angle: rotation angle in degrees, e.g. -15~15
    rotate_boxes_list = []
    for poly in polygons:
        box = Polygon(poly)
        rbox = affinity.rotate(box, angle, r_c)
        if len(list(rbox.exterior.coords)) < 5:
            print('img_box_ori:', poly)
            print('img_box_rotated:', rbox)
        rotate_boxes_list.append(rbox.boundary.coords[:-1])
    res = _boxlist2quads(rotate_boxes_list)
    return res
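# Illustrative helper (assumption, not part of the original file): every
# transform above shares the (image, target) calling convention, so a full
# augmentation pipeline is just sequential application; the repo's own
# Compose-style wrapper (if defined earlier in this file) does the same thing.
def _apply_transforms(transforms, image, target):
    for t in transforms:
        image, target = t(image, target)
    return image, target
# Example (values are illustrative only):
#   augment = [RandomHorizontalFlip(prob=0.5), RandomBrightness(prob=0.5),
#              RandomRotate(prob=0.3, max_theta=30)]
#   image, target = _apply_transforms(augment, image, target)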
| 9,303 | 33.716418 | 200 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/matcher.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
class Matcher(object):
"""
This class assigns to each predicted "element" (e.g., a box) a ground-truth
element. Each predicted element will have exactly zero or one matches; each
ground-truth element may be assigned to zero or more predicted elements.
    Matching is based on the MxN match_quality_matrix, which characterizes how well
    each (ground-truth, predicted) pair matches. For example, if the elements are
    boxes, the matrix may contain box IoU overlap values.
The matcher returns a tensor of size N containing the index of the ground-truth
element m that matches to prediction n. If there is no match, a negative value
is returned.
"""
BELOW_LOW_THRESHOLD = -1
BETWEEN_THRESHOLDS = -2
def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):
"""
Args:
high_threshold (float): quality values greater than or equal to
this value are candidate matches.
low_threshold (float): a lower quality threshold used to stratify
matches into three levels:
1) matches >= high_threshold
2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold)
3) BELOW_LOW_THRESHOLD matches in [0, low_threshold)
allow_low_quality_matches (bool): if True, produce additional matches
for predictions that have only low-quality match candidates. See
set_low_quality_matches_ for more details.
"""
assert low_threshold <= high_threshold
self.high_threshold = high_threshold
self.low_threshold = low_threshold
self.allow_low_quality_matches = allow_low_quality_matches
def __call__(self, match_quality_matrix):
"""
Args:
match_quality_matrix (Tensor[float]): an MxN tensor, containing the
pairwise quality between M ground-truth elements and N predicted elements.
Returns:
matches (Tensor[int64]): an N tensor where N[i] is a matched gt in
[0, M - 1] or a negative value indicating that prediction i could not
be matched.
"""
if match_quality_matrix.numel() == 0:
# handle empty case
device = match_quality_matrix.device
return torch.empty((0,), dtype=torch.int64, device=device)
# match_quality_matrix is M (gt) x N (predicted)
# Max over gt elements (dim 0) to find best gt candidate for each prediction
matched_vals, matches = match_quality_matrix.max(dim=0)
if self.allow_low_quality_matches:
all_matches = matches.clone()
# Assign candidate matches with low quality to negative (unassigned) values
below_low_threshold = matched_vals < self.low_threshold
between_thresholds = (matched_vals >= self.low_threshold) & (
matched_vals < self.high_threshold
)
matches[below_low_threshold] = Matcher.BELOW_LOW_THRESHOLD
matches[between_thresholds] = Matcher.BETWEEN_THRESHOLDS
if self.allow_low_quality_matches:
self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)
return matches
def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
"""
Produce additional matches for predictions that have only low-quality matches.
Specifically, for each ground-truth find the set of predictions that have
maximum overlap with it (including ties); for each prediction in that set, if
it is unmatched, then match it to the ground-truth with which it has the highest
quality value.
"""
# For each gt, find the prediction with which it has highest quality
highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
# Find highest quality match available, even if it is low, including ties
gt_pred_pairs_of_highest_quality = torch.nonzero(
match_quality_matrix == highest_quality_foreach_gt[:, None]
)
# Example gt_pred_pairs_of_highest_quality:
# tensor([[ 0, 39796],
# [ 1, 32055],
# [ 1, 32070],
# [ 2, 39190],
# [ 2, 40255],
# [ 3, 40390],
# [ 3, 41455],
# [ 4, 45470],
# [ 5, 45325],
# [ 5, 46390]])
# Each row is a (gt index, prediction index)
# Note how gt items 1, 2, 3, and 5 each have two ties
pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]
matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
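# Usage sketch (illustrative, not part of the original file): a 2 (gt) x 4
# (predictions) quality matrix with thresholds 0.7/0.3 partitions the
# predictions into matched, ignored, and background.
def _demo_matcher():
    quality = torch.tensor([[0.9, 0.4, 0.1, 0.0],
                            [0.2, 0.5, 0.8, 0.1]])
    matcher = Matcher(high_threshold=0.7, low_threshold=0.3)
    # -> tensor([ 0, -2,  1, -1]): prediction 0 matches gt 0, prediction 2
    # matches gt 1, prediction 1 falls BETWEEN_THRESHOLDS, and prediction 3
    # is BELOW_LOW_THRESHOLD.
    return matcher(quality)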
| 4,845 | 44.28972 | 88 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Miscellaneous utility functions
"""
import torch
def cat(tensors, dim=0):
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
| 404 | 22.823529 | 97 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/poolers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.layers import ROIAlign
from .utils import cat
class LevelMapper(object):
"""Determine which FPN level each RoI in a set of RoIs should map to based
on the heuristic in the FPN paper.
"""
def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-6):
"""
Arguments:
k_min (int)
k_max (int)
canonical_scale (int)
canonical_level (int)
eps (float)
"""
self.k_min = k_min
self.k_max = k_max
self.s0 = canonical_scale
self.lvl0 = canonical_level
self.eps = eps
def __call__(self, boxlists):
"""
Arguments:
boxlists (list[BoxList])
"""
# Compute level ids
s = torch.sqrt(cat([boxlist.area() for boxlist in boxlists]))
# Eqn.(1) in FPN paper
target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0 + self.eps))
target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max)
return target_lvls.to(torch.int64) - self.k_min
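# Numeric sketch of the Eqn.(1) mapping above (illustrative, not part of the
# original file): RoIs with sqrt-areas 224 and 112 land on FPN levels 4 and 3,
# i.e. indices 2 and 1 once k_min=2 is subtracted.
def _demo_level_mapper_arithmetic():
    s = torch.tensor([224.0, 112.0])
    k_min, k_max, s0, lvl0, eps = 2, 5, 224, 4, 1e-6
    target_lvls = torch.clamp(torch.floor(lvl0 + torch.log2(s / s0 + eps)),
                              min=k_min, max=k_max)
    return target_lvls.to(torch.int64) - k_min  # -> tensor([2, 1])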
class Pooler(nn.Module):
"""
Pooler for Detection with or without FPN.
    It currently hard-codes ROIAlign in the implementation,
but that can be made more generic later on.
Also, the requirement of passing the scales is not strictly necessary, as they
can be inferred from the size of the feature map / size of original image,
which is available thanks to the BoxList.
"""
def __init__(self, output_size, scales, sampling_ratio):
"""
Arguments:
output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
sampling_ratio (int): sampling ratio for ROIAlign
"""
super(Pooler, self).__init__()
poolers = []
for scale in scales:
poolers.append(
ROIAlign(
output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
)
)
self.poolers = nn.ModuleList(poolers)
self.output_size = output_size
# get the levels in the feature map by leveraging the fact that the network always
# downsamples by a factor of 2 at each level.
lvl_min = -math.log2(scales[0])
lvl_max = -math.log2(scales[-1])
self.map_levels = LevelMapper(lvl_min, lvl_max)
def convert_to_roi_format(self, boxes):
concat_boxes = cat([b.bbox for b in boxes], dim=0)
device, dtype = concat_boxes.device, concat_boxes.dtype
ids = cat(
[
torch.full((len(b), 1), i, dtype=dtype, device=device)
for i, b in enumerate(boxes)
],
dim=0,
)
rois = torch.cat([ids, concat_boxes], dim=1)
return rois
def forward(self, x, boxes):
"""
Arguments:
x (list[Tensor]): feature maps for each level
boxes (list[BoxList]): boxes to be used to perform the pooling operation.
Returns:
result (Tensor)
"""
num_levels = len(self.poolers)
rois = self.convert_to_roi_format(boxes)
if num_levels == 1:
return self.poolers[0](x[0], rois)
levels = self.map_levels(boxes)
num_rois = len(rois)
num_channels = x[0].shape[1]
output_size_h = self.output_size[0]
output_size_w = self.output_size[1]
dtype, device = x[0].dtype, x[0].device
result = torch.zeros(
(num_rois, num_channels, output_size_h, output_size_w),
dtype=dtype,
device=device,
)
for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)):
idx_in_level = torch.nonzero(levels == level).squeeze(1)
rois_per_level = rois[idx_in_level]
result[idx_in_level] = pooler(per_level_feature, rois_per_level)
return result
| 4,171 | 32.645161 | 90 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/balanced_positive_negative_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
class BalancedPositiveNegativeSampler(object):
"""
This class samples batches, ensuring that they contain a fixed proportion of positives
"""
def __init__(self, batch_size_per_image, positive_fraction):
"""
Arguments:
batch_size_per_image (int): number of elements to be selected per image
            positive_fraction (float): percentage of positive elements per batch
"""
self.batch_size_per_image = batch_size_per_image
self.positive_fraction = positive_fraction
def __call__(self, matched_idxs):
"""
Arguments:
matched idxs: list of tensors containing -1, 0 or positive values.
Each tensor corresponds to a specific image.
-1 values are ignored, 0 are considered as negatives and > 0 as
positives.
Returns:
pos_idx (list[tensor])
neg_idx (list[tensor])
Returns two lists of binary masks for each image.
The first list contains the positive elements that were selected,
        and the second list the negative examples.
"""
pos_idx = []
neg_idx = []
for matched_idxs_per_image in matched_idxs:
positive = torch.nonzero(matched_idxs_per_image >= 1).squeeze(1)
negative = torch.nonzero(matched_idxs_per_image == 0).squeeze(1)
num_pos = int(self.batch_size_per_image * self.positive_fraction)
# protect against not enough positive examples
num_pos = min(positive.numel(), num_pos)
num_neg = self.batch_size_per_image - num_pos
# protect against not enough negative examples
num_neg = min(negative.numel(), num_neg)
# randomly select positive and negative examples
            # keep the permutations on the same device as the labels
            perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
            perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]
pos_idx_per_image = positive[perm1]
neg_idx_per_image = negative[perm2]
# create binary mask from indices
pos_idx_per_image_mask = torch.zeros_like(
matched_idxs_per_image, dtype=torch.bool
)
neg_idx_per_image_mask = torch.zeros_like(
matched_idxs_per_image, dtype=torch.bool
)
pos_idx_per_image_mask[pos_idx_per_image] = 1
neg_idx_per_image_mask[neg_idx_per_image] = 1
pos_idx.append(pos_idx_per_image_mask)
neg_idx.append(neg_idx_per_image_mask)
return pos_idx, neg_idx
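# Usage sketch (illustrative, not part of the original file): with a budget of
# 4 samples and positive_fraction=0.5, at most 2 positives are drawn and the
# rest of the budget is filled with negatives. Labels follow the convention
# documented above (-1 ignored, 0 negative, >= 1 positive).
def _demo_sampler():
    sampler = BalancedPositiveNegativeSampler(batch_size_per_image=4,
                                              positive_fraction=0.5)
    labels = [torch.tensor([1, 0, 0, -1, 2, 0, 0, 0])]
    pos_mask, neg_mask = sampler(labels)
    return pos_mask[0].sum().item(), neg_mask[0].sum().item()  # -> (2, 2)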
| 2,668 | 37.681159 | 90 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/box_coder.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import torch
class BoxCoder(object):
"""
This class encodes and decodes a set of bounding boxes into
the representation used for training the regressors.
"""
def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):
"""
Arguments:
weights (4-element tuple)
bbox_xform_clip (float)
"""
self.weights = weights
self.bbox_xform_clip = bbox_xform_clip
def encode(self, reference_boxes, proposals):
"""
Encode a set of proposals with respect to some
reference boxes
Arguments:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
"""
TO_REMOVE = 1 # TODO remove
ex_widths = proposals[:, 2] - proposals[:, 0] + TO_REMOVE
ex_heights = proposals[:, 3] - proposals[:, 1] + TO_REMOVE
ex_ctr_x = proposals[:, 0] + 0.5 * ex_widths
ex_ctr_y = proposals[:, 1] + 0.5 * ex_heights
gt_widths = reference_boxes[:, 2] - reference_boxes[:, 0] + TO_REMOVE
gt_heights = reference_boxes[:, 3] - reference_boxes[:, 1] + TO_REMOVE
gt_ctr_x = reference_boxes[:, 0] + 0.5 * gt_widths
gt_ctr_y = reference_boxes[:, 1] + 0.5 * gt_heights
wx, wy, ww, wh = self.weights
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
return targets
def decode(self, rel_codes, boxes):
"""
From a set of original boxes and encoded relative box offsets,
get the decoded boxes.
Arguments:
rel_codes (Tensor): encoded boxes
boxes (Tensor): reference boxes.
"""
boxes = boxes.to(rel_codes.dtype)
TO_REMOVE = 1 # TODO remove
widths = boxes[:, 2] - boxes[:, 0] + TO_REMOVE
heights = boxes[:, 3] - boxes[:, 1] + TO_REMOVE
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = self.weights
dx = rel_codes[:, 0::4] / wx
dy = rel_codes[:, 1::4] / wy
dw = rel_codes[:, 2::4] / ww
dh = rel_codes[:, 3::4] / wh
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=self.bbox_xform_clip)
dh = torch.clamp(dh, max=self.bbox_xform_clip)
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
pred_boxes = torch.zeros_like(rel_codes)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1
# y2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1
return pred_boxes
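# Round-trip sketch (illustrative, not part of the original file): encoding a
# box against itself yields zero deltas, and decoding zero deltas recovers the
# box exactly under the TO_REMOVE = 1 convention above.
def _demo_box_coder_roundtrip():
    coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
    boxes = torch.tensor([[10.0, 10.0, 50.0, 30.0]])
    deltas = coder.encode(boxes, boxes)   # -> tensor([[0., 0., 0., 0.]])
    return coder.decode(deltas, boxes)    # -> tensor([[10., 10., 50., 30.]])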
| 3,367 | 34.083333 | 86 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/backbone/resnet.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Variant of the resnet module that takes cfg as an argument.
Example usage. Strings may be specified in the config file.
model = ResNet(
"StemWithFixedBatchNorm",
"BottleneckWithFixedBatchNorm",
"ResNet50StagesTo4",
)
Custom implementations may be written in user code and hooked in via the
`register_*` functions.
"""
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.layers import FrozenBatchNorm2d
from maskrcnn_benchmark.layers import Conv2d
# ResNet stage specification
StageSpec = namedtuple(
"StageSpec",
[
"index", # Index of the stage, eg 1, 2, ..,. 5
"block_count", # Numer of residual blocks in the stage
"return_features", # True => return the last feature map from this stage
],
)
# -----------------------------------------------------------------------------
# Standard ResNet models
# -----------------------------------------------------------------------------
# ResNet-50 (including all stages)
ResNet50StagesTo5 = (
StageSpec(index=i, block_count=c, return_features=r)
for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, False), (4, 3, True))
)
# ResNet-50 up to stage 4 (excludes stage 5)
ResNet50StagesTo4 = (
StageSpec(index=i, block_count=c, return_features=r)
for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, True))
)
# ResNet-50-FPN (including all stages)
ResNet50FPNStagesTo5 = (
StageSpec(index=i, block_count=c, return_features=r)
for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 6, True), (4, 3, True))
)
# ResNet-101-FPN (including all stages)
ResNet101FPNStagesTo5 = (
StageSpec(index=i, block_count=c, return_features=r)
for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 23, True), (4, 3, True))
)
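# Note (illustrative): the block counts above follow the standard ResNet
# definitions (3+4+6+3 bottleneck blocks for ResNet-50, 3+4+23+3 for
# ResNet-101); return_features marks which stage outputs are exposed, all
# four for the FPN variants and only the last stage otherwise.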
class ResNet(nn.Module):
def __init__(self, cfg):
super(ResNet, self).__init__()
# If we want to use the cfg in forward(), then we should make a copy
# of it and store it for later use:
# self.cfg = cfg.clone()
# Translate string names to implementations
stem_module = _STEM_MODULES[cfg.MODEL.RESNETS.STEM_FUNC]
stage_specs = _STAGE_SPECS[cfg.MODEL.BACKBONE.CONV_BODY]
transformation_module = _TRANSFORMATION_MODULES[cfg.MODEL.RESNETS.TRANS_FUNC]
# Construct the stem module
self.stem = stem_module(cfg)
        # Construct the specified ResNet stages
num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
stage2_bottleneck_channels = num_groups * width_per_group
stage2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
self.stages = []
self.return_features = {}
for stage_spec in stage_specs:
name = "layer" + str(stage_spec.index)
stage2_relative_factor = 2 ** (stage_spec.index - 1)
bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor
out_channels = stage2_out_channels * stage2_relative_factor
module = _make_stage(
transformation_module,
in_channels,
bottleneck_channels,
out_channels,
stage_spec.block_count,
num_groups,
cfg.MODEL.RESNETS.STRIDE_IN_1X1,
first_stride=int(stage_spec.index > 1) + 1,
)
in_channels = out_channels
self.add_module(name, module)
self.stages.append(name)
self.return_features[name] = stage_spec.return_features
# Optionally freeze (requires_grad=False) parts of the backbone
self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT)
def _freeze_backbone(self, freeze_at):
for stage_index in range(freeze_at):
if stage_index == 0:
m = self.stem # stage 0 is the stem
else:
m = getattr(self, "layer" + str(stage_index))
for p in m.parameters():
p.requires_grad = False
def forward(self, x):
outputs = []
x = self.stem(x)
for stage_name in self.stages:
x = getattr(self, stage_name)(x)
if self.return_features[stage_name]:
outputs.append(x)
return outputs
class ResNetHead(nn.Module):
def __init__(
self,
block_module,
stages,
num_groups=1,
width_per_group=64,
stride_in_1x1=True,
stride_init=None,
res2_out_channels=256,
):
super(ResNetHead, self).__init__()
stage2_relative_factor = 2 ** (stages[0].index - 1)
stage2_bottleneck_channels = num_groups * width_per_group
out_channels = res2_out_channels * stage2_relative_factor
in_channels = out_channels // 2
bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor
block_module = _TRANSFORMATION_MODULES[block_module]
self.stages = []
stride = stride_init
for stage in stages:
name = "layer" + str(stage.index)
if not stride:
stride = int(stage.index > 1) + 1
module = _make_stage(
block_module,
in_channels,
bottleneck_channels,
out_channels,
stage.block_count,
num_groups,
stride_in_1x1,
first_stride=stride,
)
stride = None
self.add_module(name, module)
self.stages.append(name)
def forward(self, x):
for stage in self.stages:
x = getattr(self, stage)(x)
return x
def _make_stage(
transformation_module,
in_channels,
bottleneck_channels,
out_channels,
block_count,
num_groups,
stride_in_1x1,
first_stride,
):
blocks = []
stride = first_stride
for _ in range(block_count):
blocks.append(
transformation_module(
in_channels,
bottleneck_channels,
out_channels,
num_groups,
stride_in_1x1,
stride,
)
)
stride = 1
in_channels = out_channels
return nn.Sequential(*blocks)
class BottleneckWithFixedBatchNorm(nn.Module):
def __init__(
self,
in_channels,
bottleneck_channels,
out_channels,
num_groups=1,
stride_in_1x1=True,
stride=1,
):
super(BottleneckWithFixedBatchNorm, self).__init__()
self.downsample = None
if in_channels != out_channels:
self.downsample = nn.Sequential(
Conv2d(
in_channels, out_channels, kernel_size=1, stride=stride, bias=False
),
FrozenBatchNorm2d(out_channels),
)
# The original MSRA ResNet models have stride in the first 1x1 conv
# The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
# stride in the 3x3 conv
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.conv1 = Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=stride_1x1,
bias=False,
)
self.bn1 = FrozenBatchNorm2d(bottleneck_channels)
# TODO: specify init for the above
self.conv2 = Conv2d(
bottleneck_channels,
bottleneck_channels,
kernel_size=3,
stride=stride_3x3,
padding=1,
bias=False,
groups=num_groups,
)
self.bn2 = FrozenBatchNorm2d(bottleneck_channels)
self.conv3 = Conv2d(
bottleneck_channels, out_channels, kernel_size=1, bias=False
)
self.bn3 = FrozenBatchNorm2d(out_channels)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = F.relu_(out)
out = self.conv2(out)
out = self.bn2(out)
out = F.relu_(out)
out0 = self.conv3(out)
out = self.bn3(out0)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = F.relu_(out)
return out
class StemWithFixedBatchNorm(nn.Module):
def __init__(self, cfg):
super(StemWithFixedBatchNorm, self).__init__()
out_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
self.conv1 = Conv2d(
3, out_channels, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = FrozenBatchNorm2d(out_channels)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu_(x)
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
return x
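# Shape sketch (illustrative): for a 3 x 800 x 1344 input, the stride-2 7x7
# conv gives 64 x 400 x 672 and the stride-2 max pool gives 64 x 200 x 336,
# so the stem downsamples by a factor of 4 overall.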
_TRANSFORMATION_MODULES = {"BottleneckWithFixedBatchNorm": BottleneckWithFixedBatchNorm}
_STEM_MODULES = {"StemWithFixedBatchNorm": StemWithFixedBatchNorm}
_STAGE_SPECS = {
"R-50-C4": ResNet50StagesTo4,
"R-50-C5": ResNet50StagesTo5,
"R-50-FPN": ResNet50FPNStagesTo5,
"R-101-FPN": ResNet101FPNStagesTo5,
}
def register_transformation_module(module_name, module):
_register_generic(_TRANSFORMATION_MODULES, module_name, module)
def register_stem_module(module_name, module):
_register_generic(_STEM_MODULES, module_name, module)
def register_stage_spec(stage_spec_name, stage_spec):
_register_generic(_STAGE_SPECS, stage_spec_name, stage_spec)
def _register_generic(module_dict, module_name, module):
assert module_name not in module_dict
module_dict[module_name] = module
| 9,902 | 29.946875 | 88 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/backbone/backbone.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import OrderedDict
from torch import nn
from . import fpn as fpn_module
from . import resnet
def build_resnet_backbone(cfg):
body = resnet.ResNet(cfg)
model = nn.Sequential(OrderedDict([("body", body)]))
return model
def build_resnet_fpn_backbone(cfg):
body = resnet.ResNet(cfg)
in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
out_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS
fpn = fpn_module.FPN(
in_channels_list=[
in_channels_stage2,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
],
out_channels=out_channels,
top_blocks=fpn_module.LastLevelMaxPool(),
)
model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)]))
return model
_BACKBONES = {"resnet": build_resnet_backbone, "resnet-fpn": build_resnet_fpn_backbone}
def build_backbone(cfg):
assert cfg.MODEL.BACKBONE.CONV_BODY.startswith(
"R-"
), "Only ResNet and ResNeXt models are currently implemented"
# Models using FPN end with "-FPN"
if cfg.MODEL.BACKBONE.CONV_BODY.endswith("-FPN"):
return build_resnet_fpn_backbone(cfg)
return build_resnet_backbone(cfg)
| 1,310 | 28.133333 | 87 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/backbone/fpn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
class FPN(nn.Module):
"""
Module that adds FPN on top of a list of feature maps.
The feature maps are currently supposed to be in increasing depth
order, and must be consecutive
"""
def __init__(self, in_channels_list, out_channels, top_blocks=None):
"""
Arguments:
in_channels_list (list[int]): number of channels for each feature map that
will be fed
out_channels (int): number of channels of the FPN representation
top_blocks (nn.Module or None): if provided, an extra operation will
be performed on the output of the last (smallest resolution)
FPN output, and the result will extend the result list
"""
super(FPN, self).__init__()
self.inner_blocks = []
self.layer_blocks = []
for idx, in_channels in enumerate(in_channels_list, 1):
inner_block = "fpn_inner{}".format(idx)
layer_block = "fpn_layer{}".format(idx)
inner_block_module = nn.Conv2d(in_channels, out_channels, 1)
layer_block_module = nn.Conv2d(out_channels, out_channels, 3, 1, 1)
for module in [inner_block_module, layer_block_module]:
# Caffe2 implementation uses XavierFill, which in fact
# corresponds to kaiming_uniform_ in PyTorch
nn.init.kaiming_uniform_(module.weight, a=1)
nn.init.constant_(module.bias, 0)
self.add_module(inner_block, inner_block_module)
self.add_module(layer_block, layer_block_module)
self.inner_blocks.append(inner_block)
self.layer_blocks.append(layer_block)
self.top_blocks = top_blocks
def forward(self, x):
"""
Arguments:
x (list[Tensor]): feature maps for each feature level.
Returns:
results (tuple[Tensor]): feature maps after FPN layers.
They are ordered from highest resolution first.
"""
last_inner = getattr(self, self.inner_blocks[-1])(x[-1])
results = []
results.append(getattr(self, self.layer_blocks[-1])(last_inner))
for feature, inner_block, layer_block in zip(
x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1]
):
inner_top_down = F.interpolate(last_inner, scale_factor=2, mode="nearest")
inner_lateral = getattr(self, inner_block)(feature)
# TODO use size instead of scale to make it robust to different sizes
# inner_top_down = F.upsample(last_inner, size=inner_lateral.shape[-2:],
# mode='bilinear', align_corners=False)
last_inner = inner_lateral + inner_top_down
results.insert(0, getattr(self, layer_block)(last_inner))
if self.top_blocks is not None:
last_results = self.top_blocks(results[-1])
results.extend(last_results)
return tuple(results)
class LastLevelMaxPool(nn.Module):
def forward(self, x):
return [F.max_pool2d(x, 1, 2, 0)]
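# Minimal smoke test (illustrative, not part of the original file): four
# feature maps with a ResNet-like channel progression go in, and five maps
# (the extra one from LastLevelMaxPool) with a uniform channel count come
# out, ordered highest resolution first.
def _demo_fpn_shapes():
    fpn = FPN(in_channels_list=[4, 8, 16, 32], out_channels=8,
              top_blocks=LastLevelMaxPool())
    feats = [torch.rand(1, c, s, s) for c, s in zip([4, 8, 16, 32],
                                                    [64, 32, 16, 8])]
    outs = fpn(feats)
    # -> [(1, 8, 64, 64), (1, 8, 32, 32), (1, 8, 16, 16), (1, 8, 8, 8), (1, 8, 4, 4)]
    return [tuple(o.shape) for o in outs]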
| 3,234 | 42.133333 | 86 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/detector/generalized_rcnn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Implements the Generalized R-CNN framework
"""
import torch
from torch import nn
from maskrcnn_benchmark.structures.image_list import to_image_list
from ..backbone import build_backbone
from ..rpn.rpn import build_rpn
from ..roi_heads.roi_heads import build_roi_heads
class GeneralizedRCNN(nn.Module):
"""
Main class for Generalized R-CNN. Currently supports boxes and masks.
It consists of three main parts:
- backbone
    - rpn
- heads: takes the features + the proposals from the RPN and computes
detections / masks from it.
"""
def __init__(self, cfg):
super(GeneralizedRCNN, self).__init__()
self.backbone = build_backbone(cfg)
self.rpn = build_rpn(cfg)
self.roi_heads = build_roi_heads(cfg)
def forward(self, images, targets=None):
"""
Arguments:
images (list[Tensor] or ImageList): images to be processed
targets (list[BoxList]): ground-truth boxes present in the image (optional)
Returns:
result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] containing additional fields
like `scores`, `labels` and `mask` (for Mask R-CNN models).
"""
if self.training and targets is None:
raise ValueError("In training mode, targets should be passed")
images = to_image_list(images)
features = self.backbone(images.tensors)
proposals, proposal_losses = self.rpn(images, features, targets)
if self.roi_heads:
x, result, detector_losses = self.roi_heads(features, proposals, targets)
else:
# RPN-only models don't have roi_heads
x = features
result = proposals
detector_losses = {}
if self.training:
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
return result
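# Usage sketch (illustrative; cfg construction is repo-specific and omitted):
#   model = GeneralizedRCNN(cfg)
#   model.train(); losses = model(images, targets)   # dict of scalar losses
#   model.eval(); detections = model(images)         # list[BoxList] with
#       extra fields such as scores, labels and mask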
| 2,175 | 31.969697 | 87 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/rpn/inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms
from maskrcnn_benchmark.structures.boxlist_ops import remove_small_boxes
from ..utils import cat
class RPNPostProcessor(torch.nn.Module):
"""
Performs post-processing on the outputs of the RPN boxes, before feeding the
proposals to the heads
"""
def __init__(
self,
pre_nms_top_n,
post_nms_top_n,
nms_thresh,
min_size,
box_coder=None,
fpn_post_nms_top_n=None,
):
"""
Arguments:
pre_nms_top_n (int)
post_nms_top_n (int)
nms_thresh (float)
min_size (int)
box_coder (BoxCoder)
fpn_post_nms_top_n (int)
"""
super(RPNPostProcessor, self).__init__()
self.pre_nms_top_n = pre_nms_top_n
self.post_nms_top_n = post_nms_top_n
self.nms_thresh = nms_thresh
self.min_size = min_size
if box_coder is None:
box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
self.box_coder = box_coder
if fpn_post_nms_top_n is None:
fpn_post_nms_top_n = post_nms_top_n
self.fpn_post_nms_top_n = fpn_post_nms_top_n
def add_gt_proposals(self, proposals, targets):
"""
Arguments:
proposals: list[BoxList]
targets: list[BoxList]
"""
# Get the device we're operating on
device = proposals[0].bbox.device
gt_boxes = [target.copy_with_fields([]) for target in targets]
# later cat of bbox requires all fields to be present for all bbox
# so we need to add a dummy for objectness that's missing
for gt_box in gt_boxes:
gt_box.add_field("objectness", torch.ones(len(gt_box), device=device))
proposals = [
cat_boxlist((proposal, gt_box))
for proposal, gt_box in zip(proposals, gt_boxes)
]
return proposals
def forward_for_single_feature_map(self, anchors, objectness, box_regression):
"""
Arguments:
anchors: list[BoxList]
objectness: tensor of size N, A, H, W
box_regression: tensor of size N, A * 4, H, W
"""
device = objectness.device
N, A, H, W = objectness.shape
# put in the same format as anchors
objectness = objectness.permute(0, 2, 3, 1).reshape(N, -1)
objectness = objectness.sigmoid()
box_regression = box_regression.view(N, -1, 4, H, W).permute(0, 3, 4, 1, 2)
box_regression = box_regression.reshape(N, -1, 4)
num_anchors = A * H * W
pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True)
batch_idx = torch.arange(N, device=device)[:, None]
box_regression = box_regression[batch_idx, topk_idx]
image_shapes = [box.size for box in anchors]
concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]
proposals = self.box_coder.decode(
box_regression.view(-1, 4), concat_anchors.view(-1, 4)
)
proposals = proposals.view(N, -1, 4)
result = []
for proposal, score, im_shape in zip(proposals, objectness, image_shapes):
boxlist = BoxList(proposal, im_shape, mode="xyxy")
boxlist.add_field("objectness", score)
boxlist = boxlist.clip_to_image(remove_empty=False)
boxlist = remove_small_boxes(boxlist, self.min_size)
boxlist = boxlist_nms(
boxlist,
self.nms_thresh,
max_proposals=self.post_nms_top_n,
score_field="objectness",
)
result.append(boxlist)
return result
def forward(self, anchors, objectness, box_regression, targets=None):
"""
Arguments:
anchors: list[list[BoxList]]
objectness: list[tensor]
box_regression: list[tensor]
Returns:
boxlists (list[BoxList]): the post-processed anchors, after
applying box decoding and NMS
"""
sampled_boxes = []
num_levels = len(objectness)
anchors = list(zip(*anchors))
for a, o, b in zip(anchors, objectness, box_regression):
sampled_boxes.append(self.forward_for_single_feature_map(a, o, b))
boxlists = list(zip(*sampled_boxes))
boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]
if num_levels > 1:
boxlists = self.select_over_all_levels(boxlists)
# append ground-truth bboxes to proposals
if self.training and targets is not None:
boxlists = self.add_gt_proposals(boxlists, targets)
return boxlists
def select_over_all_levels(self, boxlists):
num_images = len(boxlists)
# different behavior during training and during testing:
# during training, post_nms_top_n is over *all* the proposals combined, while
# during testing, it is over the proposals for each image
# TODO resolve this difference and make it consistent. It should be per image,
# and not per batch
if self.training:
objectness = torch.cat(
[boxlist.get_field("objectness") for boxlist in boxlists], dim=0
)
box_sizes = [len(boxlist) for boxlist in boxlists]
post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))
_, inds_sorted = torch.topk(objectness, post_nms_top_n, dim=0, sorted=True)
inds_mask = torch.zeros_like(objectness, dtype=torch.bool)
inds_mask[inds_sorted] = 1
inds_mask = inds_mask.split(box_sizes)
for i in range(num_images):
boxlists[i] = boxlists[i][inds_mask[i]]
else:
for i in range(num_images):
objectness = boxlists[i].get_field("objectness")
post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))
_, inds_sorted = torch.topk(
objectness, post_nms_top_n, dim=0, sorted=True
)
boxlists[i] = boxlists[i][inds_sorted]
return boxlists
def make_rpn_postprocessor(config, rpn_box_coder, is_train):
fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN
if not is_train:
fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST
pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TRAIN
post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TRAIN
if not is_train:
pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TEST
post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TEST
nms_thresh = config.MODEL.RPN.NMS_THRESH
min_size = config.MODEL.RPN.MIN_SIZE
box_selector = RPNPostProcessor(
pre_nms_top_n=pre_nms_top_n,
post_nms_top_n=post_nms_top_n,
nms_thresh=nms_thresh,
min_size=min_size,
box_coder=rpn_box_coder,
fpn_post_nms_top_n=fpn_post_nms_top_n,
)
return box_selector
| 7,479 | 35.847291 | 87 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/rpn/anchor_generator.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import numpy as np
import torch
from torch import nn
from maskrcnn_benchmark.structures.bounding_box import BoxList
class BufferList(nn.Module):
"""
Similar to nn.ParameterList, but for buffers
"""
def __init__(self, buffers=None):
super(BufferList, self).__init__()
if buffers is not None:
self.extend(buffers)
def extend(self, buffers):
offset = len(self)
for i, buffer in enumerate(buffers):
self.register_buffer(str(offset + i), buffer)
return self
def __len__(self):
return len(self._buffers)
def __iter__(self):
return iter(self._buffers.values())
class AnchorGenerator(nn.Module):
"""
For a set of image sizes and feature maps, computes a set
of anchors
"""
def __init__(
self,
sizes=(128, 256, 512),
aspect_ratios=(0.5, 1.0, 2.0),
anchor_strides=(8, 16, 32),
straddle_thresh=0,
):
super(AnchorGenerator, self).__init__()
if len(anchor_strides) == 1:
anchor_stride = anchor_strides[0]
cell_anchors = [
generate_anchors(anchor_stride, sizes, aspect_ratios).float()
]
else:
if len(anchor_strides) != len(sizes):
raise RuntimeError("FPN should have #anchor_strides == #sizes")
cell_anchors = [
generate_anchors(anchor_stride, (size,), aspect_ratios).float()
for anchor_stride, size in zip(anchor_strides, sizes)
]
self.strides = anchor_strides
self.cell_anchors = BufferList(cell_anchors)
self.straddle_thresh = straddle_thresh
def num_anchors_per_location(self):
return [len(cell_anchors) for cell_anchors in self.cell_anchors]
def grid_anchors(self, grid_sizes):
anchors = []
for size, stride, base_anchors in zip(
grid_sizes, self.strides, self.cell_anchors
):
grid_height, grid_width = size
device = base_anchors.device
shifts_x = torch.arange(
0, grid_width * stride, step=stride, dtype=torch.float32, device=device
)
shifts_y = torch.arange(
0, grid_height * stride, step=stride, dtype=torch.float32, device=device
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
anchors.append(
(shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)
)
return anchors
def add_visibility_to(self, boxlist):
image_width, image_height = boxlist.size
anchors = boxlist.bbox
if self.straddle_thresh >= 0:
inds_inside = (
(anchors[..., 0] >= -self.straddle_thresh)
& (anchors[..., 1] >= -self.straddle_thresh)
& (anchors[..., 2] < image_width + self.straddle_thresh)
& (anchors[..., 3] < image_height + self.straddle_thresh)
)
else:
device = anchors.device
inds_inside = torch.ones(anchors.shape[0], dtype=torch.bool, device=device)
boxlist.add_field("visibility", inds_inside)
def forward(self, image_list, feature_maps):
grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
anchors_over_all_feature_maps = self.grid_anchors(grid_sizes)
anchors = []
for i, (image_height, image_width) in enumerate(image_list.image_sizes):
anchors_in_image = []
for anchors_per_feature_map in anchors_over_all_feature_maps:
boxlist = BoxList(
anchors_per_feature_map, (image_width, image_height), mode="xyxy"
)
self.add_visibility_to(boxlist)
anchors_in_image.append(boxlist)
anchors.append(anchors_in_image)
return anchors
def make_anchor_generator(config):
anchor_sizes = config.MODEL.RPN.ANCHOR_SIZES
aspect_ratios = config.MODEL.RPN.ASPECT_RATIOS
anchor_stride = config.MODEL.RPN.ANCHOR_STRIDE
straddle_thresh = config.MODEL.RPN.STRADDLE_THRESH
if config.MODEL.RPN.USE_FPN:
assert len(anchor_stride) == len(
anchor_sizes
), "FPN should have len(ANCHOR_STRIDE) == len(ANCHOR_SIZES)"
else:
assert len(anchor_stride) == 1, "Non-FPN should have a single ANCHOR_STRIDE"
anchor_generator = AnchorGenerator(
anchor_sizes, aspect_ratios, anchor_stride, straddle_thresh
)
return anchor_generator
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
# Verify that we compute the same anchors as Shaoqing's matlab implementation:
#
# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat
# >> anchors
#
# anchors =
#
# -83 -39 100 56
# -175 -87 192 104
# -359 -183 376 200
# -55 -55 72 72
# -119 -119 136 136
# -247 -247 264 264
# -35 -79 52 96
# -79 -167 96 184
# -167 -343 184 360
# array([[ -83., -39., 100., 56.],
# [-175., -87., 192., 104.],
# [-359., -183., 376., 200.],
# [ -55., -55., 72., 72.],
# [-119., -119., 136., 136.],
# [-247., -247., 264., 264.],
# [ -35., -79., 52., 96.],
# [ -79., -167., 96., 184.],
# [-167., -343., 184., 360.]])
def generate_anchors(
stride=16, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)
):
"""Generates a matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors
are centered on stride / 2, have (approximate) sqrt areas of the specified
sizes, and aspect ratios as given.
"""
    return _generate_anchors(
        stride,
        # np.float was removed in recent NumPy versions; plain float is equivalent
        np.array(sizes, dtype=float) / stride,
        np.array(aspect_ratios, dtype=float),
    )
def _generate_anchors(base_size, scales, aspect_ratios):
"""Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, base_size - 1, base_size - 1) window.
"""
    anchor = np.array([1, 1, base_size, base_size], dtype=float) - 1
anchors = _ratio_enum(anchor, aspect_ratios)
anchors = np.vstack(
[_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]
)
return torch.from_numpy(anchors)
def _whctrs(anchor):
"""Return width, height, x center, and y center for an anchor (window)."""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack(
(
x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1),
)
)
return anchors
def _ratio_enum(anchor, ratios):
"""Enumerate a set of anchors for each aspect ratio wrt an anchor."""
w, h, x_ctr, y_ctr = _whctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _scale_enum(anchor, scales):
"""Enumerate a set of anchors for each scale wrt an anchor."""
w, h, x_ctr, y_ctr = _whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
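# Illustrative sanity check (not part of the original file): square anchors
# (ratio 1.0) come out with side lengths equal to the requested sizes, in the
# inclusive x1 <= x2 convention used above.
def _demo_generate_anchors():
    a = generate_anchors(stride=16, sizes=(32, 64), aspect_ratios=(1.0,))
    widths = a[:, 2] - a[:, 0] + 1
    return widths  # -> tensor([32., 64.], dtype=torch.float64)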
| 8,907 | 32.742424 | 88 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/rpn/loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This file contains specific functions for computing losses on the RPN
file
"""
import torch
from torch.nn import functional as F
from ..balanced_positive_negative_sampler import BalancedPositiveNegativeSampler
from ..utils import cat
from maskrcnn_benchmark.layers import smooth_l1_loss
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
class RPNLossComputation(object):
"""
This class computes the RPN loss.
"""
def __init__(self, proposal_matcher, fg_bg_sampler, box_coder):
"""
Arguments:
proposal_matcher (Matcher)
fg_bg_sampler (BalancedPositiveNegativeSampler)
box_coder (BoxCoder)
"""
# self.target_preparator = target_preparator
self.proposal_matcher = proposal_matcher
self.fg_bg_sampler = fg_bg_sampler
self.box_coder = box_coder
def match_targets_to_anchors(self, anchor, target):
match_quality_matrix = boxlist_iou(target, anchor)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# RPN doesn't need any fields from target
# for creating the labels, so clear them all
target = target.copy_with_fields([])
# get the targets corresponding GT for each anchor
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field("matched_idxs", matched_idxs)
return matched_targets
def prepare_targets(self, anchors, targets):
labels = []
regression_targets = []
for anchors_per_image, targets_per_image in zip(anchors, targets):
matched_targets = self.match_targets_to_anchors(
anchors_per_image, targets_per_image
)
matched_idxs = matched_targets.get_field("matched_idxs")
labels_per_image = matched_idxs >= 0
labels_per_image = labels_per_image.to(dtype=torch.float32)
# discard anchors that go out of the boundaries of the image
labels_per_image[~anchors_per_image.get_field("visibility")] = -1
# discard indices that are between thresholds
inds_to_discard = matched_idxs == Matcher.BETWEEN_THRESHOLDS
labels_per_image[inds_to_discard] = -1
# compute regression targets
regression_targets_per_image = self.box_coder.encode(
matched_targets.bbox, anchors_per_image.bbox
)
labels.append(labels_per_image)
regression_targets.append(regression_targets_per_image)
return labels, regression_targets
def __call__(self, anchors, objectness, box_regression, targets):
"""
Arguments:
anchors (list[BoxList])
objectness (list[Tensor])
box_regression (list[Tensor])
targets (list[BoxList])
Returns:
objectness_loss (Tensor)
            box_loss (Tensor)
"""
anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors]
labels, regression_targets = self.prepare_targets(anchors, targets)
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)
sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)
sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)
objectness_flattened = []
box_regression_flattened = []
# for each feature level, permute the outputs to make them be in the
# same format as the labels. Note that the labels are computed for
# all feature levels concatenated, so we keep the same representation
# for the objectness and the box_regression
for objectness_per_level, box_regression_per_level in zip(
objectness, box_regression
):
N, A, H, W = objectness_per_level.shape
objectness_per_level = objectness_per_level.permute(0, 2, 3, 1).reshape(
N, -1
)
box_regression_per_level = box_regression_per_level.view(N, -1, 4, H, W)
box_regression_per_level = box_regression_per_level.permute(0, 3, 4, 1, 2)
box_regression_per_level = box_regression_per_level.reshape(N, -1, 4)
objectness_flattened.append(objectness_per_level)
box_regression_flattened.append(box_regression_per_level)
# concatenate on the first dimension (representing the feature levels), to
# take into account the way the labels were generated (with all feature maps
# being concatenated as well)
objectness = cat(objectness_flattened, dim=1).reshape(-1)
box_regression = cat(box_regression_flattened, dim=1).reshape(-1, 4)
labels = torch.cat(labels, dim=0)
regression_targets = torch.cat(regression_targets, dim=0)
box_loss = smooth_l1_loss(
box_regression[sampled_pos_inds],
regression_targets[sampled_pos_inds],
beta=1.0 / 9,
size_average=False,
) / (sampled_inds.numel())
objectness_loss = F.binary_cross_entropy_with_logits(
objectness[sampled_inds], labels[sampled_inds]
)
return objectness_loss, box_loss
def make_rpn_loss_evaluator(cfg, box_coder):
matcher = Matcher(
cfg.MODEL.RPN.FG_IOU_THRESHOLD,
cfg.MODEL.RPN.BG_IOU_THRESHOLD,
allow_low_quality_matches=True,
)
fg_bg_sampler = BalancedPositiveNegativeSampler(
cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE, cfg.MODEL.RPN.POSITIVE_FRACTION
)
loss_evaluator = RPNLossComputation(matcher, fg_bg_sampler, box_coder)
return loss_evaluator
| 6,100 | 39.138158 | 87 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/rpn/rpn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from .loss import make_rpn_loss_evaluator
from .anchor_generator import make_anchor_generator
from .inference import make_rpn_postprocessor
class RPNHead(nn.Module):
"""
Adds a simple RPN Head with classification and regression heads
"""
def __init__(self, in_channels, num_anchors):
"""
Arguments:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
"""
super(RPNHead, self).__init__()
self.conv = nn.Conv2d(
in_channels, in_channels, kernel_size=3, stride=1, padding=1
)
self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
self.bbox_pred = nn.Conv2d(
in_channels, num_anchors * 4, kernel_size=1, stride=1
)
for l in [self.conv, self.cls_logits, self.bbox_pred]:
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
def forward(self, x):
logits = []
bbox_reg = []
for feature in x:
t = F.relu(self.conv(feature))
logits.append(self.cls_logits(t))
bbox_reg.append(self.bbox_pred(t))
return logits, bbox_reg
class RPNModule(torch.nn.Module):
"""
    Module for RPN computation. Takes feature maps from the backbone and
    outputs RPN proposals and losses. Works for both FPN and non-FPN.
"""
def __init__(self, cfg):
super(RPNModule, self).__init__()
self.cfg = cfg.clone()
anchor_generator = make_anchor_generator(cfg)
in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS
head = RPNHead(in_channels, anchor_generator.num_anchors_per_location()[0])
rpn_box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
box_selector_train = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=True)
box_selector_test = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=False)
loss_evaluator = make_rpn_loss_evaluator(cfg, rpn_box_coder)
self.anchor_generator = anchor_generator
self.head = head
self.box_selector_train = box_selector_train
self.box_selector_test = box_selector_test
self.loss_evaluator = loss_evaluator
def forward(self, images, features, targets=None):
"""
Arguments:
images (ImageList): images for which we want to compute the predictions
features (list[Tensor]): features computed from the images that are
used for computing the predictions. Each tensor in the list
correspond to different feature levels
            targets (list[BoxList]): ground-truth boxes present in the image (optional)
Returns:
boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per
image.
losses (dict[Tensor]): the losses for the model during training. During
testing, it is an empty dict.
"""
objectness, rpn_box_regression = self.head(features)
anchors = self.anchor_generator(images, features)
if self.training:
return self._forward_train(anchors, objectness, rpn_box_regression, targets)
else:
return self._forward_test(anchors, objectness, rpn_box_regression)
def _forward_train(self, anchors, objectness, rpn_box_regression, targets):
if self.cfg.MODEL.RPN_ONLY:
# When training an RPN-only model, the loss is determined by the
# predicted objectness and rpn_box_regression values and there is
# no need to transform the anchors into predicted boxes; this is an
# optimization that avoids the unnecessary transformation.
boxes = anchors
else:
# For end-to-end models, anchors must be transformed into boxes and
# sampled into a training batch.
with torch.no_grad():
boxes = self.box_selector_train(
anchors, objectness, rpn_box_regression, targets
)
loss_objectness, loss_rpn_box_reg = self.loss_evaluator(
anchors, objectness, rpn_box_regression, targets
)
losses = {
"loss_objectness": loss_objectness,
"loss_rpn_box_reg": loss_rpn_box_reg,
}
return boxes, losses
def _forward_test(self, anchors, objectness, rpn_box_regression):
boxes = self.box_selector_test(anchors, objectness, rpn_box_regression)
if self.cfg.MODEL.RPN_ONLY:
# For end-to-end models, the RPN proposals are an intermediate state
# and don't bother to sort them in decreasing score order. For RPN-only
# models, the proposals are the final output and we return them in
# high-to-low confidence order.
inds = [
box.get_field("objectness").sort(descending=True)[1] for box in boxes
]
boxes = [box[ind] for box, ind in zip(boxes, inds)]
return boxes, {}
def build_rpn(cfg):
"""
    Construct the region proposal network (RPN) module from the config.
"""
return RPNModule(cfg)
| 5,430 | 37.792857 | 88 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/roi_heads/roi_heads.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .box_head.box_head import build_roi_box_head
from .mask_head.mask_head import build_roi_mask_head
class CombinedROIHeads(torch.nn.ModuleDict):
"""
Combines a set of individual heads (for box prediction or masks) into a single
head.
"""
def __init__(self, cfg, heads):
super(CombinedROIHeads, self).__init__(heads)
self.cfg = cfg.clone()
if cfg.MODEL.MASK_ON and cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
self.mask.feature_extractor = self.box.feature_extractor
def forward(self, features, proposals, targets=None):
losses = {}
# TODO rename x to roi_box_features, if it doesn't increase memory consumption
x, detections, loss_box = self.box(features, proposals, targets)
losses.update(loss_box)
if self.cfg.MODEL.MASK_ON:
mask_features = features
# optimization: during training, if we share the feature extractor between
# the box and the mask heads, then we can reuse the features already computed
if (
self.training
and self.cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR
):
mask_features = x
            # During training, self.box() will return the unaltered proposals
            # as "detections"; this makes the API consistent during training
            # and testing.
x, detections, loss_mask = self.mask(mask_features, detections, targets)
losses.update(loss_mask)
return x, detections, losses
def build_roi_heads(cfg):
# individually create the heads, that will be combined together
# afterwards
roi_heads = []
if not cfg.MODEL.RPN_ONLY:
roi_heads.append(("box", build_roi_box_head(cfg)))
if cfg.MODEL.MASK_ON:
roi_heads.append(("mask", build_roi_mask_head(cfg)))
# combine individual heads in a single module
if roi_heads:
roi_heads = CombinedROIHeads(cfg, roi_heads)
return roi_heads
| 2,093 | 37.072727 | 93 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import torch
from PIL import Image
from torch import nn
import cv2
from torch.nn import functional as F
from maskrcnn_benchmark.structures.bounding_box import BoxList
# TODO check if we want to return a single BoxList or a composite object
class MaskPostProcessor(nn.Module):
"""
From the results of the CNN, post process the masks
by taking the mask corresponding to the class with max
probability (which are of fixed size and directly output
by the CNN) and return the masks in the mask field of the BoxList.
If a masker object is passed, it will additionally
project the masks in the image according to the locations in boxes,
"""
def __init__(self, masker=None):
super(MaskPostProcessor, self).__init__()
self.masker = masker
def forward(self, x, boxes):
"""
Arguments:
x (Tensor): the mask logits
boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra field mask
"""
mask_prob = x.sigmoid()
        # select masks corresponding to the predicted classes
num_masks = x.shape[0]
labels = [bbox.get_field("labels") for bbox in boxes]
labels = torch.cat(labels)
index = torch.arange(num_masks, device=labels.device)
mask_prob = mask_prob[index, labels][:, None]
if self.masker:
mask_prob = self.masker(mask_prob, boxes)
boxes_per_image = [len(box) for box in boxes]
mask_prob = mask_prob.split(boxes_per_image, dim=0)
results = []
for prob, box in zip(mask_prob, boxes):
bbox = BoxList(box.bbox, box.size, mode="xyxy")
for field in box.fields():
bbox.add_field(field, box.get_field(field))
bbox.add_field("mask", prob)
results.append(bbox)
return results
# TODO
class CharMaskPostProcessor(nn.Module):
"""
From the results of the CNN, post process the masks
by taking the mask corresponding to the class with max
probability (which are of fixed size and directly output
by the CNN) and return the masks in the mask field of the BoxList.
If a masker object is passed, it will additionally
project the masks in the image according to the locations in boxes,
"""
def __init__(self, cfg, masker=None):
super(CharMaskPostProcessor, self).__init__()
self.masker = masker
self.cfg = cfg
def forward(self, x, char_mask, boxes, seq_outputs=None, seq_scores=None, detailed_seq_scores=None):
"""
Arguments:
x (Tensor): the mask logits
char_mask (Tensor): the char mask logits
boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra field mask
"""
mask_prob = x.sigmoid()
char_mask_softmax = F.softmax(char_mask, dim=1)
image_width, image_height = boxes[0].size
char_results = {'char_mask': char_mask_softmax.cpu().numpy(), 'boxes': boxes[0].bbox.cpu().numpy(), 'seq_outputs': seq_outputs, 'seq_scores': seq_scores, 'detailed_seq_scores': detailed_seq_scores}
        # select masks corresponding to the predicted classes
num_masks = x.shape[0]
mask_prob = mask_prob.squeeze(dim=1)[:, None]
if self.masker:
mask_prob = self.masker(mask_prob, boxes)
boxes_per_image = [len(box) for box in boxes]
mask_prob = mask_prob.split(boxes_per_image, dim=0)
results = []
for prob, box in zip(mask_prob, boxes):
bbox = BoxList(box.bbox, box.size, mode="xyxy")
for field in box.fields():
bbox.add_field(field, box.get_field(field))
bbox.add_field("mask", prob)
results.append(bbox)
return [results, char_results]
class MaskPostProcessorCOCOFormat(MaskPostProcessor):
"""
From the results of the CNN, post process the results
so that the masks are pasted in the image, and
additionally convert the results to COCO format.
"""
def forward(self, x, boxes):
import pycocotools.mask as mask_util
import numpy as np
results = super(MaskPostProcessorCOCOFormat, self).forward(x, boxes)
for result in results:
masks = result.get_field("mask").cpu()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
result.add_field("mask", rles)
return results
# the next two functions should be merged inside Masker
# but are kept here for the moment while we still need them
# temporarily for paste_mask_in_image
def expand_boxes(boxes, scale):
w_half = (boxes[:, 2] - boxes[:, 0]) * .5
h_half = (boxes[:, 3] - boxes[:, 1]) * .5
x_c = (boxes[:, 2] + boxes[:, 0]) * .5
y_c = (boxes[:, 3] + boxes[:, 1]) * .5
w_half *= scale[1]
h_half *= scale[0]
boxes_exp = torch.zeros_like(boxes)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
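# Hedged numeric check of expand_boxes (illustration only): scaling a box's
# half-extents about its center by (h, w) factors keeps the center fixed.
# The concrete numbers below are assumptions chosen for easy verification.
def _demo_expand_boxes():
    import torch
    box = torch.tensor([[10.0, 20.0, 30.0, 60.0]])  # center (20, 40)
    out = expand_boxes(box, scale=(1.5, 2.0))       # scale = (h, w) factors
    # width half-extent 10 -> 20, height half-extent 20 -> 30
    assert torch.allclose(out, torch.tensor([[0.0, 10.0, 40.0, 70.0]]))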
def expand_masks(mask, padding):
N = mask.shape[0]
M_H = mask.shape[-2]
M_W = mask.shape[-1]
pad2 = 2 * padding
scale = (float(M_H + pad2) / M_H, float(M_W + pad2) / M_W)
padded_mask = mask.new_zeros((N, 1, M_H + pad2, M_W + pad2))
padded_mask[:, :, padding:-padding, padding:-padding] = mask
return padded_mask, scale
def paste_mask_in_image(mask, box, im_h, im_w, thresh=0.5, padding=1):
padded_mask, scale = expand_masks(mask[None], padding=padding)
mask = padded_mask[0, 0]
box = expand_boxes(box[None], scale)[0]
box = box.numpy().astype(np.int32)
TO_REMOVE = 1
w = box[2] - box[0] + TO_REMOVE
h = box[3] - box[1] + TO_REMOVE
w = max(w, 1)
h = max(h, 1)
mask = Image.fromarray(mask.cpu().numpy())
mask = mask.resize((w, h), resample=Image.BILINEAR)
mask = np.array(mask, copy=False)
if thresh >= 0:
mask = np.array(mask > thresh, dtype=np.uint8)
mask = torch.from_numpy(mask)
else:
# for visualization and debugging, we also
# allow it to return an unmodified mask
mask = torch.from_numpy(mask * 255).to(torch.bool)
im_mask = torch.zeros((im_h, im_w), dtype=torch.bool)
x_0 = max(box[0], 0)
x_1 = min(box[2] + 1, im_w)
y_0 = max(box[1], 0)
y_1 = min(box[3] + 1, im_h)
im_mask[y_0:y_1, x_0:x_1] = mask[
(y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])
]
return im_mask
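# Hedged usage sketch of paste_mask_in_image (illustration only): a small
# CNN-resolution soft mask is pasted into a 32x32 image canvas at the given
# box; all sizes and the box coordinates below are assumptions.
def _demo_paste_mask_in_image():
    import torch
    mask = torch.rand(14, 14)                   # CNN-resolution soft mask
    box = torch.tensor([4.0, 6.0, 17.0, 19.0])  # xyxy box in image coords
    im_mask = paste_mask_in_image(mask, box, im_h=32, im_w=32)
    assert im_mask.shape == (32, 32) and im_mask.dtype == torch.bool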
class Masker(object):
"""
Projects a set of masks in an image on the locations
specified by the bounding boxes
"""
def __init__(self, threshold=0.5, padding=1):
self.threshold = threshold
self.padding = padding
def forward_single_image(self, masks, boxes):
boxes = boxes.convert("xyxy")
im_w, im_h = boxes.size
res = [
paste_mask_in_image(mask[0], box, im_h, im_w, self.threshold, self.padding)
for mask, box in zip(masks, boxes.bbox)
]
if len(res) > 0:
res = torch.stack(res, dim=0)[:, None]
else:
res = masks.new_empty((0, 1, masks.shape[-2], masks.shape[-1]))
return res
def __call__(self, masks, boxes):
# TODO do this properly
if isinstance(boxes, BoxList):
boxes = [boxes]
assert len(boxes) == 1, "Only single image batch supported"
result = self.forward_single_image(masks, boxes[0])
return result
def make_roi_mask_post_processor(cfg):
masker = None
if cfg.MODEL.CHAR_MASK_ON:
mask_post_processor = CharMaskPostProcessor(cfg, masker)
else:
mask_post_processor = MaskPostProcessor(masker)
return mask_post_processor
| 8,338 | 33.17623 | 205 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn
from torch.nn import functional as F
from ..box_head.roi_box_feature_extractors import ResNet50Conv5ROIFeatureExtractor
from maskrcnn_benchmark.modeling.poolers import Pooler
from maskrcnn_benchmark.layers import Conv2d
class MaskRCNNFPNFeatureExtractor(nn.Module):
"""
    Feature extractor for the FPN mask head
"""
def __init__(self, cfg):
"""
Arguments:
            cfg: the configuration node, from which the pooler resolution,
                scales, sampling ratio and conv layers are read
"""
super(MaskRCNNFPNFeatureExtractor, self).__init__()
# resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
if cfg.MODEL.CHAR_MASK_ON:
resolution_h = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION_H
resolution_w = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION_W
else:
resolution_h = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
resolution_w = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
scales = cfg.MODEL.ROI_MASK_HEAD.POOLER_SCALES
sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO
pooler = Pooler(
output_size=(resolution_h, resolution_w),
scales=scales,
sampling_ratio=sampling_ratio,
)
input_size = cfg.MODEL.BACKBONE.OUT_CHANNELS
self.pooler = pooler
layers = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS
next_feature = input_size
self.blocks = []
for layer_idx, layer_features in enumerate(layers, 1):
layer_name = "mask_fcn{}".format(layer_idx)
module = Conv2d(next_feature, layer_features, 3, stride=1, padding=1)
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
nn.init.constant_(module.bias, 0)
self.add_module(layer_name, module)
next_feature = layer_features
self.blocks.append(layer_name)
def forward(self, x, proposals):
x = self.pooler(x, proposals)
for layer_name in self.blocks:
x = F.relu(getattr(self, layer_name)(x))
return x
_ROI_MASK_FEATURE_EXTRACTORS = {
"ResNet50Conv5ROIFeatureExtractor": ResNet50Conv5ROIFeatureExtractor,
"MaskRCNNFPNFeatureExtractor": MaskRCNNFPNFeatureExtractor,
}
def make_roi_mask_feature_extractor(cfg):
func = _ROI_MASK_FEATURE_EXTRACTORS[cfg.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR]
return func(cfg)
| 2,740 | 36.040541 | 87 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
from maskrcnn_benchmark.layers import smooth_l1_loss
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.modeling.utils import cat
def project_masks_on_boxes(segmentation_masks, proposals, discretization_size):
"""
Given segmentation masks and the bounding boxes corresponding
to the location of the masks in the image, this function
crops and resizes the masks in the position defined by the
boxes. This prepares the masks for them to be fed to the
loss computation as the targets.
Arguments:
segmentation_masks: an instance of SegmentationMask
proposals: an instance of BoxList
"""
masks = []
M = discretization_size
device = proposals.bbox.device
proposals = proposals.convert("xyxy")
assert segmentation_masks.size == proposals.size, "{}, {}".format(
segmentation_masks, proposals
)
# TODO put the proposals on the CPU, as the representation for the
# masks is not efficient GPU-wise (possibly several small tensors for
# representing a single instance mask)
proposals = proposals.bbox.to(torch.device("cpu"))
for segmentation_mask, proposal in zip(segmentation_masks, proposals):
# crop the masks, resize them to the desired resolution and
# then convert them to the tensor representation,
# instead of the list representation that was used
cropped_mask = segmentation_mask.crop(proposal)
scaled_mask = cropped_mask.resize((M, M))
mask = scaled_mask.convert(mode="mask")
masks.append(mask)
if len(masks) == 0:
return torch.empty(0, dtype=torch.float32, device=device)
return torch.stack(masks, dim=0).to(device, dtype=torch.float32)
class MaskRCNNLossComputation(object):
def __init__(self, proposal_matcher, discretization_size):
"""
Arguments:
proposal_matcher (Matcher)
discretization_size (int)
"""
self.proposal_matcher = proposal_matcher
self.discretization_size = discretization_size
def match_targets_to_proposals(self, proposal, target):
match_quality_matrix = boxlist_iou(target, proposal)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# Mask RCNN needs "labels" and "masks "fields for creating the targets
target = target.copy_with_fields(["labels", "masks"])
        # get the corresponding GT target for each proposal
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field("matched_idxs", matched_idxs)
return matched_targets
def prepare_targets(self, proposals, targets):
labels = []
masks = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
matched_targets = self.match_targets_to_proposals(
proposals_per_image, targets_per_image
)
matched_idxs = matched_targets.get_field("matched_idxs")
labels_per_image = matched_targets.get_field("labels")
labels_per_image = labels_per_image.to(dtype=torch.int64)
# this can probably be removed, but is left here for clarity
# and completeness
neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[neg_inds] = 0
# mask scores are only computed on positive samples
positive_inds = torch.nonzero(labels_per_image > 0).squeeze(1)
segmentation_masks = matched_targets.get_field("masks")
segmentation_masks = segmentation_masks[positive_inds]
positive_proposals = proposals_per_image[positive_inds]
masks_per_image = project_masks_on_boxes(
segmentation_masks, positive_proposals, self.discretization_size
)
labels.append(labels_per_image)
masks.append(masks_per_image)
return labels, masks
def __call__(self, proposals, mask_logits, targets):
"""
Arguments:
proposals (list[BoxList])
mask_logits (Tensor)
targets (list[BoxList])
Return:
mask_loss (Tensor): scalar tensor containing the loss
"""
labels, mask_targets = self.prepare_targets(proposals, targets)
labels = cat(labels, dim=0)
mask_targets = cat(mask_targets, dim=0)
positive_inds = torch.nonzero(labels > 0).squeeze(1)
labels_pos = labels[positive_inds]
# torch.mean (in binary_cross_entropy_with_logits) doesn't
# accept empty tensors, so handle it separately
if mask_targets.numel() == 0:
return mask_logits.sum() * 0
mask_loss = F.binary_cross_entropy_with_logits(
mask_logits[positive_inds, labels_pos], mask_targets
)
return mask_loss
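# Hedged sketch of the per-class mask loss above (illustration only):
# BCE-with-logits is computed only on the mask channel of each positive
# sample's ground-truth class. Shapes and values below are assumptions.
def _demo_mask_loss_selection():
    import torch
    from torch.nn import functional as F
    logits = torch.randn(4, 3, 8, 8)        # (num_pos, num_classes, M, M)
    labels_pos = torch.tensor([1, 2, 1, 0])
    targets = torch.rand(4, 8, 8).round()   # binary M x M mask targets
    idx = torch.arange(4)
    loss = F.binary_cross_entropy_with_logits(logits[idx, labels_pos], targets)
    assert loss.dim() == 0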
class CharMaskRCNNLossComputation(object):
def __init__(self, use_weighted_loss=False):
"""
Arguments:
            use_weighted_loss (bool): whether to weight the character-mask
                cross-entropy with per-class weights
"""
self.use_weighted_loss = use_weighted_loss
def __call__(self, proposals, mask_logits, char_mask_logits, mask_targets, char_mask_targets, char_mask_weights):
"""
Arguments:
            proposals (list[BoxList])
            mask_logits (Tensor)
            char_mask_logits (Tensor)
            mask_targets / char_mask_targets / char_mask_weights (list[Tensor])
        Return:
            mask_loss, char_mask_loss (Tensor): scalar loss tensors
"""
mask_targets = cat(mask_targets, dim=0)
char_mask_targets = cat(char_mask_targets, dim=0)
char_mask_weights = cat(char_mask_weights, dim=0)
char_mask_weights = char_mask_weights.mean(dim=0)
# torch.mean (in binary_cross_entropy_with_logits) doesn't
# accept empty tensors, so handle it separately
if mask_targets.numel() == 0 or char_mask_targets.numel() == 0:
            return mask_logits.sum() * 0, char_mask_logits.sum() * 0  # zero losses that keep the graph
mask_loss = F.binary_cross_entropy_with_logits(
mask_logits.squeeze(dim=1), mask_targets
)
if self.use_weighted_loss:
char_mask_loss = F.cross_entropy(
char_mask_logits, char_mask_targets, char_mask_weights, ignore_index=-1
)
else:
char_mask_loss = F.cross_entropy(
char_mask_logits, char_mask_targets, ignore_index=-1
)
return mask_loss, char_mask_loss
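# Hedged sketch of the weighted character-mask loss above (illustration
# only): cross_entropy with per-class weights and ignore_index=-1 skips
# unlabeled pixels. Toy shapes and values below are assumptions.
def _demo_char_mask_loss():
    import torch
    from torch.nn import functional as F
    logits = torch.randn(2, 5, 4, 4)            # (N, num_char_classes, H, W)
    targets = torch.full((2, 4, 4), -1, dtype=torch.long)
    targets[:, :2] = 3                          # label half of the pixels
    weights = torch.ones(5)
    loss = F.cross_entropy(logits, targets, weights, ignore_index=-1)
    assert loss.dim() == 0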
def make_roi_mask_loss_evaluator(cfg):
matcher = Matcher(
cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
allow_low_quality_matches=False,
)
if cfg.MODEL.CHAR_MASK_ON:
loss_evaluator = CharMaskRCNNLossComputation(
use_weighted_loss=cfg.MODEL.ROI_MASK_HEAD.USE_WEIGHTED_CHAR_MASK
)
else:
loss_evaluator = MaskRCNNLossComputation(
matcher, cfg.MODEL.ROI_MASK_HEAD.RESOLUTION
)
return loss_evaluator
| 7,330 | 37.584211 | 117 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_seq_predictors.py | # Written by Minghui Liao
import torch
from torch import nn
from torch.nn import functional as F
import math
import random
import numpy as np
from maskrcnn_benchmark.utils.chars import char2num, num2char
gpu_device = torch.device("cuda")
cpu_device = torch.device("cpu")
def reduce_mul(l):
out = 1.0
for x in l:
out *= x
return out
def check_all_done(seqs):
for seq in seqs:
if not seq[-1]:
return False
return True
# TODO
class SequencePredictor(nn.Module):
def __init__(self, cfg, dim_in):
super(SequencePredictor, self).__init__()
self.cfg = cfg
if cfg.SEQUENCE.TWO_CONV:
self.seq_encoder = nn.Sequential(nn.Conv2d(dim_in, dim_in, 3, padding=1), nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True), nn.Conv2d(dim_in, 256, 3, padding=1), nn.ReLU(inplace=True))
else:
self.seq_encoder = nn.Sequential(nn.Conv2d(dim_in, 256, 3, padding=1), nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True))
self.seq_decoder = BahdanauAttnDecoderRNN(256, cfg.SEQUENCE.NUM_CHAR, cfg.SEQUENCE.NUM_CHAR, n_layers=1, dropout_p=0.1)
# self.criterion_seq_decoder = nn.NLLLoss(ignore_index = -1, reduce=False)
self.criterion_seq_decoder = nn.NLLLoss(ignore_index = -1, reduction='none')
self.rescale = nn.Upsample(size=(16, 64), mode='bilinear', align_corners=False)
self.x_onehot = nn.Embedding(32, 32)
self.x_onehot.weight.data = torch.eye(32)
self.y_onehot = nn.Embedding(8, 8)
self.y_onehot.weight.data = torch.eye(8)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, x, decoder_targets=None, word_targets=None, use_beam_search=False):
rescale_out = self.rescale(x)
seq_decoder_input = self.seq_encoder(rescale_out)
x_t, y_t = np.meshgrid(np.linspace(0, 31, 32), np.linspace(0, 7, 8)) # (h, w)
x_t = torch.LongTensor(x_t, device=cpu_device).cuda()
y_t = torch.LongTensor(y_t, device=cpu_device).cuda()
x_onehot_embedding = self.x_onehot(x_t).transpose(0, 2).transpose(1, 2).repeat(seq_decoder_input.size(0),1,1,1)
y_onehot_embedding = self.y_onehot(y_t).transpose(0, 2).transpose(1, 2).repeat(seq_decoder_input.size(0),1,1,1)
seq_decoder_input_loc = torch.cat([seq_decoder_input, x_onehot_embedding, y_onehot_embedding], 1)
seq_decoder_input_reshape = seq_decoder_input_loc.view(seq_decoder_input_loc.size(0), seq_decoder_input_loc.size(1), -1).transpose(0, 2).transpose(1, 2)
if self.training:
bos_onehot = np.zeros((seq_decoder_input_reshape.size(1), 1), dtype=np.int32)
bos_onehot[:, 0] = self.cfg.SEQUENCE.BOS_TOKEN
decoder_input = torch.tensor(bos_onehot.tolist(), device=gpu_device)
decoder_hidden = torch.zeros((seq_decoder_input_reshape.size(1), 256), device=gpu_device)
            use_teacher_forcing = random.random() < self.cfg.SEQUENCE.TEACHER_FORCE_RATIO
target_length = decoder_targets.size(1)
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = self.seq_decoder(
decoder_input, decoder_hidden, seq_decoder_input_reshape)
if di == 0:
loss_seq_decoder = self.criterion_seq_decoder(decoder_output, word_targets[:,di])
else:
loss_seq_decoder += self.criterion_seq_decoder(decoder_output, word_targets[:,di])
decoder_input = decoder_targets[:, di] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = self.seq_decoder(
decoder_input, decoder_hidden, seq_decoder_input_reshape)
topv, topi = decoder_output.topk(1)
decoder_input = topi.squeeze(1).detach() # detach from history as input
if di == 0:
loss_seq_decoder = self.criterion_seq_decoder(decoder_output, word_targets[:,di])
else:
loss_seq_decoder += self.criterion_seq_decoder(decoder_output, word_targets[:,di])
loss_seq_decoder = loss_seq_decoder.sum() / loss_seq_decoder.size(0)
loss_seq_decoder = 0.2 * loss_seq_decoder
return loss_seq_decoder
else:
words = []
decoded_scores = []
detailed_decoded_scores = []
real_length = 0
if use_beam_search:
for batch_index in range(seq_decoder_input_reshape.size(1)):
decoder_hidden = torch.zeros((1, 256), device=gpu_device)
word = []
char_scores = []
detailed_char_scores = []
top_seqs = self.beam_search(seq_decoder_input_reshape[:,batch_index:batch_index+1,:], decoder_hidden, beam_size=6, max_len=self.cfg.SEQUENCE.MAX_LENGTH)
top_seq = top_seqs[0]
for character in top_seq[1:]:
character_index = character[0]
if character_index == self.cfg.SEQUENCE.NUM_CHAR - 1:
char_scores.append(character[1])
detailed_char_scores.append(character[2])
break
else:
if character_index == 0:
word.append('~')
char_scores.append(0.)
else:
word.append(num2char(character_index))
char_scores.append(character[1])
detailed_char_scores.append(character[2])
words.append(''.join(word))
decoded_scores.append(char_scores)
detailed_decoded_scores.append(detailed_char_scores)
else:
for batch_index in range(seq_decoder_input_reshape.size(1)):
bos_onehot = np.zeros((1, 1), dtype=np.int32)
bos_onehot[:, 0] = self.cfg.SEQUENCE.BOS_TOKEN
decoder_input = torch.tensor(bos_onehot.tolist(), device=gpu_device)
decoder_hidden = torch.zeros((1, 256), device=gpu_device)
word = []
char_scores = []
for di in range(self.cfg.SEQUENCE.MAX_LENGTH):
decoder_output, decoder_hidden, decoder_attention = self.seq_decoder(
decoder_input, decoder_hidden, seq_decoder_input_reshape[:,batch_index:batch_index+1,:])
# decoder_attentions[di] = decoder_attention.data
topv, topi = decoder_output.data.topk(1)
char_scores.append(topv.item())
if topi.item() == self.cfg.SEQUENCE.NUM_CHAR - 1:
break
else:
if topi.item() == 0:
word.append('~')
else:
word.append(num2char(topi.item()))
real_length = di
decoder_input = topi.squeeze(1).detach()
words.append(''.join(word))
decoded_scores.append(char_scores)
return words, decoded_scores, detailed_decoded_scores
def beam_search_step(self, encoder_context, top_seqs, k):
all_seqs = []
for seq in top_seqs:
seq_score = reduce_mul([_score for _,_score,_,_ in seq])
if seq[-1][0] == self.cfg.SEQUENCE.NUM_CHAR - 1:
all_seqs.append((seq, seq_score, seq[-1][2], True))
continue
decoder_hidden = seq[-1][-1][0]
onehot = np.zeros((1, 1), dtype=np.int32)
onehot[:, 0] = seq[-1][0]
decoder_input = torch.tensor(onehot.tolist(), device=gpu_device)
decoder_output, decoder_hidden, decoder_attention = self.seq_decoder(
decoder_input, decoder_hidden, encoder_context)
detailed_char_scores = decoder_output.cpu().numpy()
# print(decoder_output.shape)
scores, candidates = decoder_output.data[:,1:].topk(k)
for i in range(k):
character_score = scores[:, i]
character_index = candidates[:, i]
score = seq_score * character_score.item()
char_score = seq_score * detailed_char_scores
rs_seq = seq + [(character_index.item() + 1, character_score.item(), char_score, [decoder_hidden])]
done = (character_index.item() + 1 == self.cfg.SEQUENCE.NUM_CHAR - 1)
all_seqs.append((rs_seq, score, char_score, done))
all_seqs = sorted(all_seqs, key = lambda seq: seq[1], reverse=True)
topk_seqs = [seq for seq,_,_,_ in all_seqs[:k]]
all_done = check_all_done(all_seqs[:k])
return topk_seqs, all_done
def beam_search(self, encoder_context, decoder_hidden, beam_size=6, max_len=32):
char_score = np.zeros(38,)
top_seqs = [[(self.cfg.SEQUENCE.BOS_TOKEN, 1.0, char_score, [decoder_hidden])]]
        # loop until all beams have emitted EOS or max_len is reached
for _ in range(max_len):
top_seqs, all_done = self.beam_search_step(encoder_context, top_seqs, beam_size)
if all_done:
break
return top_seqs
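# Hedged sketch of the coordinate one-hot encoding used in
# SequencePredictor.forward (illustration only): every pixel of the 8x32
# feature map gets its x index appended as a 32-d one-hot channel (and
# likewise y as 8-d, omitted here). Sizes match the code above by assumption.
def _demo_position_onehot():
    import numpy as np
    import torch
    from torch import nn
    x_onehot = nn.Embedding(32, 32)
    x_onehot.weight.data = torch.eye(32)
    x_t, _ = np.meshgrid(np.arange(32), np.arange(8))     # x index per pixel
    emb = x_onehot(torch.from_numpy(x_t)).transpose(0, 2).transpose(1, 2)
    assert emb.shape == (32, 8, 32)                       # (C, H, W)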
class Attn(nn.Module):
def __init__(self, method, hidden_size, embed_size):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
self.embed_size = embed_size
self.attn = nn.Linear(2 * self.hidden_size + 32 + 8, hidden_size)
# self.attn = nn.Linear(hidden_size, hidden_size)
self.v = nn.Parameter(torch.rand(hidden_size))
stdv = 1. / math.sqrt(self.v.size(0))
self.v.data.normal_(mean=0, std=stdv)
def forward(self, hidden, encoder_outputs):
'''
:param hidden:
previous hidden state of the decoder, in shape (B, hidden_size)
:param encoder_outputs:
encoder outputs from Encoder, in shape (H*W, B, hidden_size)
:return
            normalized attention weights in shape (B, 1, H*W)
'''
max_len = encoder_outputs.size(0)
this_batch_size = encoder_outputs.size(1)
H = hidden.repeat(max_len,1,1).transpose(0,1) # (B, H*W, hidden_size)
encoder_outputs = encoder_outputs.transpose(0,1) # (B, H*W, hidden_size)
attn_energies = self.score(H,encoder_outputs) # compute attention score (B, H*W)
return F.softmax(attn_energies, dim=1).unsqueeze(1) # normalize with softmax (B, 1, H*W)
def score(self, hidden, encoder_outputs):
energy = torch.tanh(self.attn(torch.cat([hidden, encoder_outputs], 2))) # (B, H*W, 2*hidden_size+H+W)->(B, H*W, hidden_size)
energy = energy.transpose(2,1) # (B, hidden_size, H*W)
v = self.v.repeat(encoder_outputs.data.shape[0],1).unsqueeze(1) # (B, 1, hidden_size)
energy = torch.bmm(v,energy) # (B, 1, H*W)
return energy.squeeze(1) # (B, H*W)
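# Hedged shape check for Attn (illustration only): hidden (B, hidden_size)
# attends over encoder outputs (H*W, B, hidden_size + 32 + 8), yielding
# (B, 1, H*W) weights. The extra 32+8 channels mirror the positional
# one-hots used above; all sizes here are assumptions.
def _demo_attn_shapes():
    import torch
    B, HW, hidden_size = 2, 8 * 32, 256
    attn = Attn('concat', hidden_size, embed_size=38)
    hidden = torch.randn(B, hidden_size)
    encoder_outputs = torch.randn(HW, B, hidden_size + 32 + 8)
    weights = attn(hidden, encoder_outputs)
    assert weights.shape == (B, 1, HW)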
class BahdanauAttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, embed_size, output_size, n_layers=1, dropout_p=0, bidirectional=False):
super(BahdanauAttnDecoderRNN, self).__init__()
# Define parameters
self.hidden_size = hidden_size
self.embed_size = embed_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout_p = dropout_p
# Define layers
self.embedding = nn.Embedding(output_size, embed_size)
self.embedding.weight.data = torch.eye(embed_size)
# self.dropout = nn.Dropout(dropout_p)
self.word_linear = nn.Linear(embed_size, hidden_size)
self.attn = Attn('concat', hidden_size, embed_size)
self.rnn = nn.GRUCell(2 * hidden_size + 32 + 8, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
def forward(self, word_input, last_hidden, encoder_outputs):
'''
:param word_input:
word input for current time step, in shape (B)
:param last_hidden:
            last hidden state of the decoder, in shape (layers*direction*B, hidden_size)
:param encoder_outputs:
encoder outputs in shape (H*W, B, C)
:return
            decoder output, new hidden state and attention weights
'''
# Get the embedding of the current input word (last output word)
word_embedded_onehot = self.embedding(word_input).view(1, word_input.size(0), -1) # (1,B,embed_size)
word_embedded = self.word_linear(word_embedded_onehot) #(1, B, hidden_size)
attn_weights = self.attn(last_hidden, encoder_outputs) # (B, 1, H*W)
context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # (B, 1, H*W) * (B, H*W, C) = (B,1,C)
context = context.transpose(0, 1) # (1,B,C)
# Combine embedded input word and attended context, run through RNN
# 2 * hidden_size + W + H: 256 + 256 + 32 + 8 = 552
rnn_input = torch.cat((word_embedded, context), 2)
last_hidden = last_hidden.view(last_hidden.size(0), -1)
rnn_input = rnn_input.view(word_input.size(0), -1)
hidden = self.rnn(rnn_input, last_hidden)
if not self.training:
output = F.softmax(self.out(hidden), dim=1)
else:
output = F.log_softmax(self.out(hidden), dim=1)
# Return final output, hidden state
# print(output.shape)
return output, hidden, attn_weights
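# Hedged single-step usage of BahdanauAttnDecoderRNN (illustration only):
# one decoding step over a toy encoder feature sequence. The 256-d hidden
# state, 32+8 positional channels, 38 output classes and BOS id 0 are all
# assumptions standing in for the config values.
def _demo_decoder_step():
    import torch
    decoder = BahdanauAttnDecoderRNN(hidden_size=256, embed_size=38, output_size=38)
    word_input = torch.zeros(2, dtype=torch.long)       # assumed BOS ids
    last_hidden = torch.zeros(2, 256)
    encoder_outputs = torch.randn(8 * 32, 2, 256 + 32 + 8)
    out, hidden, attn_weights = decoder(word_input, last_hidden, encoder_outputs)
    assert out.shape == (2, 38) and hidden.shape == (2, 256)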
def make_roi_seq_predictor(cfg, dim_in):
return SequencePredictor(cfg, dim_in) | 14,496 | 51.33574 | 172 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_predictors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.layers import Conv2d
from maskrcnn_benchmark.layers import ConvTranspose2d
from .roi_seq_predictors import make_roi_seq_predictor
class MaskRCNNC4Predictor(nn.Module):
def __init__(self, cfg):
super(MaskRCNNC4Predictor, self).__init__()
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
if cfg.MODEL.ROI_HEADS.USE_FPN:
num_inputs = dim_reduced
else:
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, x):
x = F.relu(self.conv5_mask(x))
return self.mask_fcn_logits(x)
class CharMaskRCNNC4Predictor(nn.Module):
def __init__(self, cfg):
super(CharMaskRCNNC4Predictor, self).__init__()
# num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
num_classes = 1
char_num_classes = cfg.MODEL.ROI_MASK_HEAD.CHAR_NUM_CLASSES
dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
if cfg.MODEL.ROI_HEADS.USE_FPN:
num_inputs = dim_reduced
else:
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
if cfg.MODEL.CHAR_MASK_ON:
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
self.char_mask_fcn_logits = Conv2d(dim_reduced, char_num_classes, 1, 1, 0)
else:
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, x):
x = F.relu(self.conv5_mask(x))
return self.mask_fcn_logits(x), self.char_mask_fcn_logits(x)
class SeqCharMaskRCNNC4Predictor(nn.Module):
def __init__(self, cfg):
super(SeqCharMaskRCNNC4Predictor, self).__init__()
# num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
num_classes = 1
char_num_classes = cfg.MODEL.ROI_MASK_HEAD.CHAR_NUM_CLASSES
dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
if cfg.MODEL.ROI_HEADS.USE_FPN:
num_inputs = dim_reduced
else:
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
if cfg.MODEL.CHAR_MASK_ON:
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
self.char_mask_fcn_logits = Conv2d(dim_reduced, char_num_classes, 1, 1, 0)
self.seq = make_roi_seq_predictor(cfg, dim_reduced)
else:
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, x, decoder_targets=None, word_targets=None):
x = F.relu(self.conv5_mask(x))
if self.training:
loss_seq_decoder = self.seq(x, decoder_targets=decoder_targets, word_targets=word_targets)
return self.mask_fcn_logits(x), self.char_mask_fcn_logits(x), loss_seq_decoder
else:
decoded_chars, decoded_scores, detailed_decoded_scores = self.seq(x, use_beam_search=True)
return self.mask_fcn_logits(x), self.char_mask_fcn_logits(x), decoded_chars, decoded_scores, detailed_decoded_scores
_ROI_MASK_PREDICTOR = {"MaskRCNNC4Predictor": MaskRCNNC4Predictor, "CharMaskRCNNC4Predictor": CharMaskRCNNC4Predictor, "SeqCharMaskRCNNC4Predictor": SeqCharMaskRCNNC4Predictor}
def make_roi_mask_predictor(cfg):
func = _ROI_MASK_PREDICTOR[cfg.MODEL.ROI_MASK_HEAD.PREDICTOR]
return func(cfg)
| 5,358 | 43.658333 | 176 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/mask_head.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from maskrcnn_benchmark.structures.bounding_box import BoxList
from .roi_mask_feature_extractors import make_roi_mask_feature_extractor
from .roi_mask_predictors import make_roi_mask_predictor
from .inference import make_roi_mask_post_processor
from .loss import make_roi_mask_loss_evaluator
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.modeling.utils import cat
def keep_only_positive_boxes(boxes, batch_size_per_im):
"""
Given a set of BoxList containing the `labels` field,
return a set of BoxList for which `labels > 0`.
Arguments:
        boxes (list of BoxList)
        batch_size_per_im (int): maximum number of positive boxes kept per image
"""
assert isinstance(boxes, (list, tuple))
assert isinstance(boxes[0], BoxList)
assert boxes[0].has_field("labels")
positive_boxes = []
positive_inds = []
for boxes_per_image in boxes:
labels = boxes_per_image.get_field("labels")
inds_mask = labels > 0
inds = inds_mask.nonzero().squeeze(1)
if len(inds) > batch_size_per_im:
new_inds = inds[:batch_size_per_im]
inds_mask[inds[batch_size_per_im:]] = 0
else:
new_inds = inds
positive_boxes.append(boxes_per_image[new_inds])
positive_inds.append(inds_mask)
return positive_boxes, positive_inds
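# Hedged sketch of the positive-box filtering above (illustration only),
# using plain tensors instead of BoxList: keep at most batch_size_per_im
# boxes whose label is > 0. The toy labels are assumptions.
def _demo_keep_positive():
    import torch
    labels = torch.tensor([0, 2, 1, 0, 3])
    inds = (labels > 0).nonzero().squeeze(1)
    kept = inds[:2]                          # batch_size_per_im = 2
    assert kept.tolist() == [1, 2]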
# TODO
def project_char_masks_on_boxes(segmentation_masks, segmentation_char_masks, proposals, discretization_size):
"""
Given segmentation masks and the bounding boxes corresponding
to the location of the masks in the image, this function
crops and resizes the masks in the position defined by the
boxes. This prepares the masks for them to be fed to the
loss computation as the targets.
Arguments:
        segmentation_masks: an instance of SegmentationMask
        segmentation_char_masks: the character-level mask counterpart
        proposals: an instance of BoxList
        discretization_size: (H, W) resolution of the mask targets
"""
masks = []
char_masks = []
char_mask_weights = []
decoder_targets = []
word_targets = []
M_H, M_W = discretization_size[0], discretization_size[1]
device = proposals.bbox.device
proposals = proposals.convert("xyxy")
assert segmentation_masks.size == proposals.size, "{}, {}".format(
segmentation_masks, proposals
)
assert segmentation_char_masks.size == proposals.size, "{}, {}".format(
segmentation_char_masks, proposals
)
# TODO put the proposals on the CPU, as the representation for the
# masks is not efficient GPU-wise (possibly several small tensors for
# representing a single instance mask)
proposals = proposals.bbox.to(torch.device("cpu"))
for segmentation_mask, segmentation_char_mask, proposal in zip(segmentation_masks, segmentation_char_masks, proposals):
# crop the masks, resize them to the desired resolution and
# then convert them to the tensor representation,
# instead of the list representation that was used
cropped_mask = segmentation_mask.crop(proposal)
scaled_mask = cropped_mask.resize((M_W, M_H))
mask = scaled_mask.convert(mode="mask")
masks.append(mask)
cropped_char_mask = segmentation_char_mask.crop(proposal)
scaled_char_mask = cropped_char_mask.resize((M_W, M_H))
char_mask, char_mask_weight, decoder_target, word_target = scaled_char_mask.convert(mode="seq_char_mask")
char_masks.append(char_mask)
char_mask_weights.append(char_mask_weight)
decoder_targets.append(decoder_target)
word_targets.append(word_target)
if len(masks) == 0:
        # NB: return five empty tensors to match the five stacked tensors below
        return (
            torch.empty(0, dtype=torch.float32, device=device),
            torch.empty(0, dtype=torch.long, device=device),
            torch.empty(0, dtype=torch.float32, device=device),
            torch.empty(0, dtype=torch.long, device=device),
            torch.empty(0, dtype=torch.long, device=device),
        )
    return (
        torch.stack(masks, dim=0).to(device, dtype=torch.float32),
        torch.stack(char_masks, dim=0).to(device, dtype=torch.long),
        torch.stack(char_mask_weights, dim=0).to(device, dtype=torch.float32),
        torch.stack(decoder_targets, dim=0).to(device, dtype=torch.long),
        torch.stack(word_targets, dim=0).to(device, dtype=torch.long),
    )
class ROIMaskHead(torch.nn.Module):
def __init__(self, cfg, proposal_matcher, discretization_size):
super(ROIMaskHead, self).__init__()
self.proposal_matcher = proposal_matcher
self.discretization_size = discretization_size
self.cfg = cfg.clone()
self.feature_extractor = make_roi_mask_feature_extractor(cfg)
self.predictor = make_roi_mask_predictor(cfg)
self.post_processor = make_roi_mask_post_processor(cfg)
self.loss_evaluator = make_roi_mask_loss_evaluator(cfg)
def match_targets_to_proposals(self, proposal, target):
match_quality_matrix = boxlist_iou(target, proposal)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# Mask RCNN needs "labels" and "masks "fields for creating the targets
target = target.copy_with_fields(["labels", "masks", "char_masks"])
        # get the corresponding GT target for each proposal
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field("matched_idxs", matched_idxs)
return matched_targets
def prepare_targets(self, proposals, targets):
masks = []
char_masks = []
char_mask_weights = []
decoder_targets = []
word_targets = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
matched_targets = self.match_targets_to_proposals(
proposals_per_image, targets_per_image
)
matched_idxs = matched_targets.get_field("matched_idxs")
labels_per_image = matched_targets.get_field("labels")
labels_per_image = labels_per_image.to(dtype=torch.int64)
# this can probably be removed, but is left here for clarity
# and completeness
neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[neg_inds] = 0
# mask scores are only computed on positive samples
positive_inds = torch.nonzero(labels_per_image > 0).squeeze(1)
segmentation_masks = matched_targets.get_field("masks")
segmentation_masks = segmentation_masks[positive_inds]
char_segmentation_masks = matched_targets.get_field("char_masks")
char_segmentation_masks = char_segmentation_masks[positive_inds]
positive_proposals = proposals_per_image[positive_inds]
masks_per_image, char_masks_per_image, char_masks_weight_per_image, decoder_targets_per_image, word_targets_per_image = project_char_masks_on_boxes(
segmentation_masks, char_segmentation_masks, positive_proposals, self.discretization_size
)
masks.append(masks_per_image)
char_masks.append(char_masks_per_image)
char_mask_weights.append(char_masks_weight_per_image)
decoder_targets.append(decoder_targets_per_image)
word_targets.append(word_targets_per_image)
return masks, char_masks, char_mask_weights, decoder_targets, word_targets
def forward(self, features, proposals, targets=None):
"""
Arguments:
features (list[Tensor]): feature-maps from possibly several levels
proposals (list[BoxList]): proposal boxes
targets (list[BoxList], optional): the ground-truth targets.
Returns:
x (Tensor): the result of the feature extractor
proposals (list[BoxList]): during training, the original proposals
are returned. During testing, the predicted boxlists are returned
with the `mask` field set
losses (dict[Tensor]): During training, returns the losses for the
head. During testing, returns an empty dict.
"""
if self.training:
# during training, only focus on positive boxes
all_proposals = proposals
proposals, positive_inds = keep_only_positive_boxes(proposals, self.cfg.MODEL.ROI_MASK_HEAD.MASK_BATCH_SIZE_PER_IM)
if self.training and self.cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
x = features
x = x[torch.cat(positive_inds, dim=0)]
else:
x = self.feature_extractor(features, proposals)
if self.training and self.cfg.MODEL.CHAR_MASK_ON:
mask_targets, char_mask_targets, char_mask_weights, decoder_targets, word_targets = self.prepare_targets(proposals, targets)
decoder_targets = cat(decoder_targets, dim=0)
word_targets = cat(word_targets, dim=0)
if self.cfg.MODEL.CHAR_MASK_ON:
if self.cfg.SEQUENCE.SEQ_ON:
if not self.training:
                if x.numel() > 0:
mask_logits, char_mask_logits, seq_outputs, seq_scores, detailed_seq_scores = self.predictor(x)
result = self.post_processor(mask_logits, char_mask_logits, proposals, seq_outputs=seq_outputs, seq_scores=seq_scores, detailed_seq_scores=detailed_seq_scores)
return x, result, {}
else:
return None, None, {}
mask_logits, char_mask_logits, seq_outputs = self.predictor(x, decoder_targets=decoder_targets, word_targets=word_targets)
loss_mask, loss_char_mask = self.loss_evaluator(proposals, mask_logits, char_mask_logits, mask_targets, char_mask_targets, char_mask_weights)
return x, all_proposals, dict(loss_mask=loss_mask, loss_char_mask=loss_char_mask, loss_seq=seq_outputs)
else:
mask_logits, char_mask_logits = self.predictor(x)
if not self.training:
result = self.post_processor(mask_logits, char_mask_logits, proposals)
return x, result, {}
loss_mask, loss_char_mask = self.loss_evaluator(proposals, mask_logits, char_mask_logits, mask_targets, char_mask_targets, char_mask_weights)
return x, all_proposals, dict(loss_mask=loss_mask, loss_char_mask=loss_char_mask)
else:
mask_logits = self.predictor(x)
if not self.training:
result = self.post_processor(mask_logits, proposals)
return x, result, {}
loss_mask = self.loss_evaluator(proposals, mask_logits, targets)
return x, all_proposals, dict(loss_mask=loss_mask)
def build_roi_mask_head(cfg):
matcher = Matcher(
cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
allow_low_quality_matches=False,
)
return ROIMaskHead(cfg, matcher, (cfg.MODEL.ROI_MASK_HEAD.RESOLUTION_H, cfg.MODEL.ROI_MASK_HEAD.RESOLUTION_W))
| 11,105 | 49.027027 | 329 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
class PostProcessor(nn.Module):
"""
From a set of classification scores, box regression and proposals,
computes the post-processed boxes, and applies NMS to obtain the
final results
"""
def __init__(
self, score_thresh=0.05, nms=0.5, detections_per_img=100, box_coder=None
):
"""
Arguments:
score_thresh (float)
nms (float)
detections_per_img (int)
box_coder (BoxCoder)
"""
super(PostProcessor, self).__init__()
self.score_thresh = score_thresh
self.nms = nms
self.detections_per_img = detections_per_img
if box_coder is None:
box_coder = BoxCoder(weights=(10., 10., 5., 5.))
self.box_coder = box_coder
def forward(self, x, boxes):
"""
Arguments:
x (tuple[tensor, tensor]): x contains the class logits
and the box_regression from the model.
boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra fields labels and scores
"""
class_logits, box_regression = x
class_prob = F.softmax(class_logits, -1)
# TODO think about a representation of batch of boxes
image_shapes = [box.size for box in boxes]
boxes_per_image = [len(box) for box in boxes]
concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)
proposals = self.box_coder.decode(
box_regression.view(sum(boxes_per_image), -1), concat_boxes
)
num_classes = class_prob.shape[1]
proposals = proposals.split(boxes_per_image, dim=0)
class_prob = class_prob.split(boxes_per_image, dim=0)
results = []
for prob, boxes_per_img, image_shape in zip(
class_prob, proposals, image_shapes
):
boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)
boxlist = boxlist.clip_to_image(remove_empty=False)
boxlist = self.filter_results(boxlist, num_classes)
results.append(boxlist)
return results
def prepare_boxlist(self, boxes, scores, image_shape):
"""
Returns BoxList from `boxes` and adds probability scores information
as an extra field
`boxes` has shape (#detections, 4 * #classes), where each row represents
a list of predicted bounding boxes for each of the object classes in the
dataset (including the background class). The detections in each row
originate from the same object proposal.
`scores` has shape (#detection, #classes), where each row represents a list
of object detection confidence scores for each of the object classes in the
        dataset (including the background class). `scores[i, j]` corresponds to the
box at `boxes[i, j * 4:(j + 1) * 4]`.
"""
boxes = boxes.reshape(-1, 4)
scores = scores.reshape(-1)
boxlist = BoxList(boxes, image_shape, mode="xyxy")
boxlist.add_field("scores", scores)
return boxlist
def filter_results(self, boxlist, num_classes):
"""Returns bounding-box detection results by thresholding on scores and
applying non-maximum suppression (NMS).
"""
# unwrap the boxlist to avoid additional overhead.
# if we had multi-class NMS, we could perform this directly on the boxlist
boxes = boxlist.bbox.reshape(-1, num_classes * 4)
scores = boxlist.get_field("scores").reshape(-1, num_classes)
device = scores.device
result = []
# Apply threshold on detection probabilities and apply NMS
# Skip j = 0, because it's the background class
inds_all = scores > self.score_thresh
for j in range(1, num_classes):
inds = inds_all[:, j].nonzero().squeeze(1)
scores_j = scores[inds, j]
boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
boxlist_for_class.add_field("scores", scores_j)
boxlist_for_class = boxlist_nms(
boxlist_for_class, self.nms, score_field="scores"
)
num_labels = len(boxlist_for_class)
boxlist_for_class.add_field(
"labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
)
result.append(boxlist_for_class)
result = cat_boxlist(result)
number_of_detections = len(result)
# Limit to max_per_image detections **over all classes**
if number_of_detections > self.detections_per_img > 0:
cls_scores = result.get_field("scores")
image_thresh, _ = torch.kthvalue(
cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
)
keep = cls_scores >= image_thresh.item()
keep = torch.nonzero(keep).squeeze(1)
result = result[keep]
return result
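# Hedged sketch of the detections-per-image cap above (illustration only):
# kthvalue finds the score threshold that keeps exactly the top-k boxes.
# The toy scores and k are assumptions.
def _demo_topk_by_kthvalue():
    import torch
    scores = torch.tensor([0.9, 0.1, 0.8, 0.3, 0.7])
    k = 3                                    # detections_per_img
    thresh, _ = torch.kthvalue(scores, len(scores) - k + 1)
    keep = torch.nonzero(scores >= thresh.item()).squeeze(1)
    assert keep.tolist() == [0, 2, 4]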
def make_roi_box_post_processor(cfg):
use_fpn = cfg.MODEL.ROI_HEADS.USE_FPN
bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
box_coder = BoxCoder(weights=bbox_reg_weights)
score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH
nms_thresh = cfg.MODEL.ROI_HEADS.NMS
detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG
postprocessor = PostProcessor(
score_thresh, nms_thresh, detections_per_img, box_coder
)
return postprocessor
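# Hedged sketch of the class-wise box layout used by prepare_boxlist and
# filter_results (illustration only): each row of `boxes` holds one box per
# class, flattened as 4 * num_classes numbers. Toy sizes are assumptions.
def _demo_classwise_box_layout():
    import torch
    num_det, num_classes = 2, 3
    boxes = torch.arange(num_det * num_classes * 4, dtype=torch.float32)
    boxes = boxes.reshape(num_det, num_classes * 4)
    j = 1                                    # class index
    boxes_j = boxes[:, j * 4:(j + 1) * 4]    # the boxes of class j
    assert boxes_j.shape == (num_det, 4)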
| 6,029 | 38.411765 | 88 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.modeling.backbone import resnet
from maskrcnn_benchmark.modeling.poolers import Pooler
class ResNet50Conv5ROIFeatureExtractor(nn.Module):
def __init__(self, config):
super(ResNet50Conv5ROIFeatureExtractor, self).__init__()
resolution = config.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
scales = config.MODEL.ROI_BOX_HEAD.POOLER_SCALES
sampling_ratio = config.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler = Pooler(
output_size=(resolution, resolution),
scales=scales,
sampling_ratio=sampling_ratio,
)
stage = resnet.StageSpec(index=4, block_count=3, return_features=False)
head = resnet.ResNetHead(
block_module=config.MODEL.RESNETS.TRANS_FUNC,
stages=(stage,),
num_groups=config.MODEL.RESNETS.NUM_GROUPS,
width_per_group=config.MODEL.RESNETS.WIDTH_PER_GROUP,
stride_in_1x1=config.MODEL.RESNETS.STRIDE_IN_1X1,
stride_init=None,
res2_out_channels=config.MODEL.RESNETS.RES2_OUT_CHANNELS,
)
self.pooler = pooler
self.head = head
def forward(self, x, proposals):
x = self.pooler(x, proposals)
x = self.head(x)
return x
class FPN2MLPFeatureExtractor(nn.Module):
"""
    Two-fc MLP feature extractor for the FPN box head
"""
def __init__(self, cfg):
super(FPN2MLPFeatureExtractor, self).__init__()
resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
scales = cfg.MODEL.ROI_BOX_HEAD.POOLER_SCALES
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler = Pooler(
output_size=(resolution, resolution),
scales=scales,
sampling_ratio=sampling_ratio,
)
input_size = cfg.MODEL.BACKBONE.OUT_CHANNELS * resolution ** 2
representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
self.pooler = pooler
self.fc6 = nn.Linear(input_size, representation_size)
self.fc7 = nn.Linear(representation_size, representation_size)
for l in [self.fc6, self.fc7]:
# Caffe2 implementation uses XavierFill, which in fact
# corresponds to kaiming_uniform_ in PyTorch
nn.init.kaiming_uniform_(l.weight, a=1)
nn.init.constant_(l.bias, 0)
def forward(self, x, proposals):
x = self.pooler(x, proposals)
x = x.view(x.size(0), -1)
x = F.relu(self.fc6(x))
x = F.relu(self.fc7(x))
return x
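# Hedged sketch of the pooled-feature flattening above (illustration only):
# ROI features of shape (num_rois, C, R, R) are flattened before the MLP.
# The channel count and resolution below are assumptions.
def _demo_flatten_for_mlp():
    import torch
    from torch import nn
    x = torch.randn(5, 256, 7, 7)            # pooled ROI features
    fc6 = nn.Linear(256 * 7 * 7, 1024)
    out = fc6(x.view(x.size(0), -1))
    assert out.shape == (5, 1024)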
_ROI_BOX_FEATURE_EXTRACTORS = {
"ResNet50Conv5ROIFeatureExtractor": ResNet50Conv5ROIFeatureExtractor,
"FPN2MLPFeatureExtractor": FPN2MLPFeatureExtractor,
}
def make_roi_box_feature_extractor(cfg):
func = _ROI_BOX_FEATURE_EXTRACTORS[cfg.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR]
return func(cfg)
| 2,997 | 32.685393 | 80 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from .roi_box_feature_extractors import make_roi_box_feature_extractor
from .roi_box_predictors import make_roi_box_predictor
from .inference import make_roi_box_post_processor
from .loss import make_roi_box_loss_evaluator
class ROIBoxHead(torch.nn.Module):
"""
Generic Box Head class.
"""
def __init__(self, cfg):
super(ROIBoxHead, self).__init__()
self.feature_extractor = make_roi_box_feature_extractor(cfg)
self.predictor = make_roi_box_predictor(cfg)
self.post_processor = make_roi_box_post_processor(cfg)
self.loss_evaluator = make_roi_box_loss_evaluator(cfg)
def forward(self, features, proposals, targets=None):
"""
Arguments:
features (list[Tensor]): feature-maps from possibly several levels
proposals (list[BoxList]): proposal boxes
targets (list[BoxList], optional): the ground-truth targets.
Returns:
x (Tensor): the result of the feature extractor
proposals (list[BoxList]): during training, the subsampled proposals
are returned. During testing, the predicted boxlists are returned
losses (dict[Tensor]): During training, returns the losses for the
head. During testing, returns an empty dict.
"""
if self.training:
            # during training, Faster R-CNN subsamples the proposals with a
            # fixed positive / negative ratio
with torch.no_grad():
proposals = self.loss_evaluator.subsample(proposals, targets)
# extract features that will be fed to the final classifier. The
# feature_extractor generally corresponds to the pooler + heads
x = self.feature_extractor(features, proposals)
# final classifier that converts the features into predictions
class_logits, box_regression = self.predictor(x)
if not self.training:
result = self.post_processor((class_logits, box_regression), proposals)
return x, result, {}
loss_classifier, loss_box_reg = self.loss_evaluator(
[class_logits], [box_regression]
)
return (
x,
proposals,
dict(loss_classifier=loss_classifier, loss_box_reg=loss_box_reg),
)
def build_roi_box_head(cfg):
"""
Constructs a new box head.
By default, uses ROIBoxHead, but if it turns out not to be enough, just register a new class
and make it a parameter in the config
"""
return ROIBoxHead(cfg)
| 2,663 | 36.521127 | 96 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
from maskrcnn_benchmark.layers import smooth_l1_loss
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.modeling.balanced_positive_negative_sampler import (
BalancedPositiveNegativeSampler
)
from maskrcnn_benchmark.modeling.utils import cat
class FastRCNNLossComputation(object):
"""
Computes the loss for Faster R-CNN.
Also supports FPN
"""
def __init__(self, proposal_matcher, fg_bg_sampler, box_coder):
"""
Arguments:
proposal_matcher (Matcher)
fg_bg_sampler (BalancedPositiveNegativeSampler)
box_coder (BoxCoder)
"""
self.proposal_matcher = proposal_matcher
self.fg_bg_sampler = fg_bg_sampler
self.box_coder = box_coder
def match_targets_to_proposals(self, proposal, target):
match_quality_matrix = boxlist_iou(target, proposal)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# Fast RCNN only need "labels" field for selecting the targets
target = target.copy_with_fields("labels")
        # get the corresponding GT target for each proposal
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field("matched_idxs", matched_idxs)
return matched_targets
def prepare_targets(self, proposals, targets):
labels = []
regression_targets = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
matched_targets = self.match_targets_to_proposals(
proposals_per_image, targets_per_image
)
matched_idxs = matched_targets.get_field("matched_idxs")
labels_per_image = matched_targets.get_field("labels")
labels_per_image = labels_per_image.to(dtype=torch.int64)
# Label background (below the low threshold)
bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[bg_inds] = 0
# Label ignore proposals (between low and high thresholds)
ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
labels_per_image[ignore_inds] = -1 # -1 is ignored by sampler
# compute regression targets
regression_targets_per_image = self.box_coder.encode(
matched_targets.bbox, proposals_per_image.bbox
)
labels.append(labels_per_image)
regression_targets.append(regression_targets_per_image)
return labels, regression_targets
def subsample(self, proposals, targets):
"""
        This method performs the positive/negative sampling and returns
        the sampled proposals.
        Note: this function keeps state (the sampled proposals are cached
        in self._proposals for the subsequent loss computation).
Arguments:
proposals (list[BoxList])
targets (list[BoxList])
"""
labels, regression_targets = self.prepare_targets(proposals, targets)
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
proposals = list(proposals)
# add corresponding label and regression_targets information to the bounding boxes
for labels_per_image, regression_targets_per_image, proposals_per_image in zip(
labels, regression_targets, proposals
):
proposals_per_image.add_field("labels", labels_per_image)
proposals_per_image.add_field(
"regression_targets", regression_targets_per_image
)
        # distribute the sampled proposals, which were obtained over the
        # concatenation of all feature maps by the fg_bg_sampler, back to
        # the individual images
for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
zip(sampled_pos_inds, sampled_neg_inds)
):
img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
proposals_per_image = proposals[img_idx][img_sampled_inds]
proposals[img_idx] = proposals_per_image
self._proposals = proposals
return proposals
def __call__(self, class_logits, box_regression):
"""
Computes the loss for Faster R-CNN.
This requires that the subsample method has been called beforehand.
Arguments:
class_logits (list[Tensor])
box_regression (list[Tensor])
Returns:
classification_loss (Tensor)
box_loss (Tensor)
"""
class_logits = cat(class_logits, dim=0)
box_regression = cat(box_regression, dim=0)
device = class_logits.device
if not hasattr(self, "_proposals"):
raise RuntimeError("subsample needs to be called before")
proposals = self._proposals
labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
regression_targets = cat(
[proposal.get_field("regression_targets") for proposal in proposals], dim=0
)
classification_loss = F.cross_entropy(class_logits, labels)
# get indices that correspond to the regression targets for
# the corresponding ground truth labels, to be used with
# advanced indexing
sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
labels_pos = labels[sampled_pos_inds_subset]
map_inds = 4 * labels_pos[:, None] + torch.tensor([0, 1, 2, 3], device=device)
box_loss = smooth_l1_loss(
box_regression[sampled_pos_inds_subset[:, None], map_inds],
regression_targets[sampled_pos_inds_subset],
size_average=False,
beta=1,
)
box_loss = box_loss / labels.numel()
return classification_loss, box_loss
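# Hedged sketch of the class-specific regression indexing above
# (illustration only): map_inds selects the 4 box deltas belonging to each
# positive sample's ground-truth class. Toy values are assumptions.
def _demo_map_inds():
    import torch
    box_regression = torch.arange(2 * 12, dtype=torch.float32).reshape(2, 12)
    labels_pos = torch.tensor([1, 2])        # classes of two positive samples
    rows = torch.arange(2)
    map_inds = 4 * labels_pos[:, None] + torch.tensor([0, 1, 2, 3])
    picked = box_regression[rows[:, None], map_inds]
    assert picked.shape == (2, 4) and picked[0, 0].item() == 4.0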
def make_roi_box_loss_evaluator(cfg):
matcher = Matcher(
cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
allow_low_quality_matches=False,
)
bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
box_coder = BoxCoder(weights=bbox_reg_weights)
fg_bg_sampler = BalancedPositiveNegativeSampler(
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
)
loss_evaluator = FastRCNNLossComputation(matcher, fg_bg_sampler, box_coder)
return loss_evaluator
| 6,664 | 36.869318 | 90 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn
class FastRCNNPredictor(nn.Module):
def __init__(self, config, pretrained=None):
super(FastRCNNPredictor, self).__init__()
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = config.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES
self.avgpool = nn.AvgPool2d(kernel_size=7, stride=7)
self.cls_score = nn.Linear(num_inputs, num_classes)
self.bbox_pred = nn.Linear(num_inputs, num_classes * 4)
nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
nn.init.constant_(self.cls_score.bias, 0)
nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)
nn.init.constant_(self.bbox_pred.bias, 0)
def forward(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
cls_logit = self.cls_score(x)
bbox_pred = self.bbox_pred(x)
return cls_logit, bbox_pred
class FPNPredictor(nn.Module):
def __init__(self, cfg):
super(FPNPredictor, self).__init__()
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
self.cls_score = nn.Linear(representation_size, num_classes)
self.bbox_pred = nn.Linear(representation_size, num_classes * 4)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for l in [self.cls_score, self.bbox_pred]:
nn.init.constant_(l.bias, 0)
def forward(self, x):
scores = self.cls_score(x)
bbox_deltas = self.bbox_pred(x)
return scores, bbox_deltas
_ROI_BOX_PREDICTOR = {
"FastRCNNPredictor": FastRCNNPredictor,
"FPNPredictor": FPNPredictor,
}
def make_roi_box_predictor(cfg):
func = _ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]
return func(cfg)
| 2,066 | 31.809524 | 72 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/structures/image_list.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
class ImageList(object):
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size,
and storing in a field the original sizes of each image
"""
def __init__(self, tensors, image_sizes):
"""
Arguments:
tensors (tensor)
image_sizes (list[tuple[int, int]])
"""
self.tensors = tensors
self.image_sizes = image_sizes
def to(self, *args, **kwargs):
cast_tensor = self.tensors.to(*args, **kwargs)
return ImageList(cast_tensor, self.image_sizes)
def to_image_list(tensors, size_divisible=0):
"""
tensors can be an ImageList, a torch.Tensor or
an iterable of Tensors. It can't be a numpy array.
When tensors is an iterable of Tensors, it pads
the Tensors with zeros so that they have the same
shape
"""
if isinstance(tensors, torch.Tensor) and size_divisible > 0:
tensors = [tensors]
if isinstance(tensors, ImageList):
return tensors
elif isinstance(tensors, torch.Tensor):
# single tensor shape can be inferred
assert tensors.dim() == 4
image_sizes = [tensor.shape[-2:] for tensor in tensors]
return ImageList(tensors, image_sizes)
elif isinstance(tensors, (tuple, list)):
max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors]))
        # TODO Ideally, just remove this and let the model handle arbitrary
        # input sizes
if size_divisible > 0:
import math
stride = size_divisible
max_size = list(max_size)
max_size[1] = int(math.ceil(max_size[1] / stride) * stride)
max_size[2] = int(math.ceil(max_size[2] / stride) * stride)
max_size = tuple(max_size)
batch_shape = (len(tensors),) + max_size
batched_imgs = tensors[0].new(*batch_shape).zero_()
for img, pad_img in zip(tensors, batched_imgs):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
image_sizes = [im.shape[-2:] for im in tensors]
return ImageList(batched_imgs, image_sizes)
else:
raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors)))
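# Illustrative sketch (not part of the original file): two images of different
# sizes are zero-padded into a single batched tensor, while image_sizes keeps
# each original (height, width).
def _demo_to_image_list():
    imgs = [torch.rand(3, 5, 7), torch.rand(3, 6, 4)]
    image_list = to_image_list(imgs)
    return image_list.tensors.shape, image_list.image_sizes  # (2, 3, 6, 7), [(5, 7), (6, 4)]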
| 2,385 | 33.57971 | 87 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/structures/segmentation_mask.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import pycocotools.mask as mask_utils
import numpy as np
import cv2
from PIL import Image
from maskrcnn_benchmark.utils.chars import num2char, char2num
from shapely.geometry import Polygon
from shapely import affinity
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class Mask(object):
"""
This class is unfinished and not meant for use yet
It is supposed to contain the mask for an object as
a 2d tensor
"""
def __init__(self, masks, size, mode):
self.masks = masks
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
width, height = self.size
if method == FLIP_LEFT_RIGHT:
dim = width
idx = 2
elif method == FLIP_TOP_BOTTOM:
dim = height
idx = 1
        # index_select expects the dimension index and a LongTensor of indices
        flip_idx = torch.arange(dim - 1, -1, -1)
        flipped_masks = self.masks.index_select(idx, flip_idx)
return Mask(flipped_masks, self.size, self.mode)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
cropped_masks = self.masks[:, box[1] : box[3], box[0] : box[2]]
return Mask(cropped_masks, size=(w, h), mode=self.mode)
def resize(self, size, *args, **kwargs):
pass
class SegmentationMask(object):
"""
This class stores the segmentations for all objects in the image
"""
def __init__(self, polygons, size, mode=None):
"""
Arguments:
polygons: a list of list of lists of numbers. The first
level of the list correspond to individual instances,
the second level to all the polygons that compose the
object, and the third level to the polygon coordinates.
"""
assert isinstance(polygons, list)
self.polygons = [Polygons(p, size, mode) for p in polygons]
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped = []
for polygon in self.polygons:
flipped.append(polygon.transpose(method))
return SegmentationMask(flipped, size=self.size, mode=self.mode)
    def crop(self, box, keep_ind=None):
        w, h = box[2] - box[0], box[3] - box[1]
        if keep_ind is not None:
            self.polygons = np.array(self.polygons)
            self.polygons = self.polygons[keep_ind]
cropped = []
for polygon in self.polygons:
cropped.append(polygon.crop(box))
return SegmentationMask(cropped, size=(w, h), mode=self.mode)
    def rotate(self, angle, r_c, start_h, start_w):
        rotated = []
        for polygon in self.polygons:
            rotated.append(polygon.rotate(angle, r_c, start_h, start_w))
        return SegmentationMask(rotated, size=(r_c[0] * 2, r_c[1] * 2), mode=self.mode)
def resize(self, size, *args, **kwargs):
scaled = []
for polygon in self.polygons:
scaled.append(polygon.resize(size, *args, **kwargs))
return SegmentationMask(scaled, size=size, mode=self.mode)
def to(self, *args, **kwargs):
return self
def __getitem__(self, item):
        if isinstance(item, int):
            selected_polygons = [self.polygons[item]]
        elif isinstance(item, slice):
            selected_polygons = self.polygons[item]
else:
# advanced indexing on a single dimension
selected_polygons = []
if isinstance(item, torch.Tensor) and item.dtype == torch.uint8:
item = item.nonzero()
item = item.squeeze(1) if item.numel() > 0 else item
item = item.tolist()
for i in item:
selected_polygons.append(self.polygons[i])
return SegmentationMask(selected_polygons, size=self.size, mode=self.mode)
def __iter__(self):
return iter(self.polygons)
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self.polygons))
s += "image_width={}, ".format(self.size[0])
s += "image_height={})".format(self.size[1])
return s
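# Illustrative sketch (not part of the original file): one instance made of a
# single 4-point polygon on a 10x10 image. The nesting is
# [instances][polygons per instance][flat x0, y0, x1, y1, ... coordinates].
def _demo_segmentation_mask():
    polys = [[[1.0, 1.0, 8.0, 1.0, 8.0, 8.0, 1.0, 8.0]]]
    masks = SegmentationMask(polys, size=(10, 10))
    resized = masks.resize((20, 20))           # every coordinate scales by 2
    flipped = masks.transpose(FLIP_LEFT_RIGHT)
    return resized, flipped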
class Polygons(object):
"""
This class holds a set of polygons that represents a single instance
of an object mask. The object can be represented as a set of
polygons
"""
def __init__(self, polygons, size, mode):
# assert isinstance(polygons, list), '{}'.format(polygons)
if isinstance(polygons, list):
polygons = [torch.as_tensor(p, dtype=torch.float32) for p in polygons]
elif isinstance(polygons, Polygons):
polygons = polygons.polygons
self.polygons = polygons
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped_polygons = []
width, height = self.size
if method == FLIP_LEFT_RIGHT:
dim = width
idx = 0
elif method == FLIP_TOP_BOTTOM:
dim = height
idx = 1
for poly in self.polygons:
p = poly.clone()
TO_REMOVE = 1
p[idx::2] = dim - poly[idx::2] - TO_REMOVE
flipped_polygons.append(p)
return Polygons(flipped_polygons, size=self.size, mode=self.mode)
    def rotate(self, angle, r_c, start_h, start_w):
        poly = self.polygons[0].numpy().reshape(-1, 2)
        poly[:, 0] += start_w
        poly[:, 1] += start_h
        polys = Polygon(poly)
        r_polys = list(affinity.rotate(polys, angle, r_c).boundary.coords[:-1])
        p = []
        for r in r_polys:
            p += list(r)
        return Polygons([p], size=self.size, mode=self.mode)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
        # TODO check if necessary
w = max(w, 1)
h = max(h, 1)
cropped_polygons = []
for poly in self.polygons:
p = poly.clone()
p[0::2] = p[0::2] - box[0] # .clamp(min=0, max=w)
p[1::2] = p[1::2] - box[1] # .clamp(min=0, max=h)
cropped_polygons.append(p)
return Polygons(cropped_polygons, size=(w, h), mode=self.mode)
def resize(self, size, *args, **kwargs):
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_polys = [p * ratio for p in self.polygons]
return Polygons(scaled_polys, size, mode=self.mode)
ratio_w, ratio_h = ratios
scaled_polygons = []
for poly in self.polygons:
p = poly.clone()
p[0::2] *= ratio_w
p[1::2] *= ratio_h
scaled_polygons.append(p)
return Polygons(scaled_polygons, size=size, mode=self.mode)
def convert(self, mode):
width, height = self.size
if mode == "mask":
rles = mask_utils.frPyObjects(
[p.numpy() for p in self.polygons], height, width
)
rle = mask_utils.merge(rles)
mask = mask_utils.decode(rle)
mask = torch.from_numpy(mask)
# TODO add squeeze?
return mask
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_polygons={}, ".format(len(self.polygons))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
class CharPolygons(object):
"""
This class holds a set of polygons that represents a single instance
of an object mask. The object can be represented as a set of
polygons
"""
def __init__(self, char_boxes, word=None, use_char_ann=False, char_classes=None, size=None, mode=None):
if isinstance(char_boxes, CharPolygons):
if char_classes is None:
char_classes = char_boxes.char_classes
self.word = char_boxes.word
char_boxes = char_boxes.char_boxes
else:
if char_classes is None:
char_classes = [torch.as_tensor(p[8], dtype=torch.float32) for p in char_boxes]
char_boxes = [torch.as_tensor(p[:8], dtype=torch.float32) for p in char_boxes]
self.word = word
self.char_boxes = char_boxes
self.char_classes = char_classes
self.size = size
self.mode = mode
self.use_char_ann = use_char_ann
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped_polygons = []
width, height = self.size
if method == FLIP_LEFT_RIGHT:
dim = width
idx = 0
elif method == FLIP_TOP_BOTTOM:
dim = height
idx = 1
for char_box in self.char_boxes:
p = char_box.clone()
TO_REMOVE = 1
p[idx::2] = dim - char_box[idx::2] - TO_REMOVE
flipped_polygons.append(p)
return CharPolygons(flipped_polygons, word=self.word, use_char_ann=self.use_char_ann, char_classes=self.char_classes, size=self.size, mode=self.mode)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
        # TODO check if necessary
w = max(w, 1)
h = max(h, 1)
cropped_polygons = []
for char_box in self.char_boxes:
p = char_box.clone()
p[0::2] = p[0::2] - box[0] # .clamp(min=0, max=w)
p[1::2] = p[1::2] - box[1] # .clamp(min=0, max=h)
cropped_polygons.append(p)
return CharPolygons(cropped_polygons, word=self.word, use_char_ann=self.use_char_ann, char_classes=self.char_classes, size=(w, h), mode=self.mode)
    def rotate(self, angle, r_c, start_h, start_w):
        r_polys = []
        for poly in self.char_boxes:
            poly = poly.numpy()
            poly[0::2] += start_w
            poly[1::2] += start_h
            poly = Polygon(np.array(poly).reshape(4, 2))
            r_poly = np.array(list(affinity.rotate(poly, angle, r_c).boundary.coords[:-1])).reshape(-1, 8)
            r_polys.append(r_poly[0])
        return CharPolygons(r_polys, word=self.word, use_char_ann=self.use_char_ann, char_classes=self.char_classes, size=(r_c[0] * 2, r_c[1] * 2), mode=self.mode)
def resize(self, size, *args, **kwargs):
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_polys = [p * ratio for p in self.char_boxes]
return CharPolygons(scaled_polys, word=self.word, use_char_ann=self.use_char_ann, char_classes=self.char_classes, size=size, mode=self.mode)
ratio_w, ratio_h = ratios
scaled_polygons = []
for poly in self.char_boxes:
p = poly.clone()
p[0::2] *= ratio_w
p[1::2] *= ratio_h
scaled_polygons.append(p)
return CharPolygons(scaled_polygons, word=self.word, use_char_ann=self.use_char_ann, char_classes=self.char_classes, size=size, mode=self.mode)
def convert(self, mode):
width, height = self.size
if mode == "char_mask":
if not self.use_char_ann:
char_map = -np.ones((height, width))
char_map_weight = np.zeros((37,))
else:
char_map = np.zeros((height, width))
char_map_weight = np.ones((37,))
for i, p in enumerate(self.char_boxes):
poly=p.numpy().reshape(4,2)
# x_center = np.mean(poly[:,0], axis = 0).astype(np.int32)
# y_center = np.mean(poly[:,1], axis = 0).astype(np.int32)
poly=shrink_poly(poly,0.25)
cv2.fillPoly(char_map,[poly.astype(np.int32)],int(self.char_classes[i]))
# if is_poly_inbox(poly, height, width) and x_center>=0 and x_center<width and y_center>=0 and y_center<height:
# spoly=shrink_rect(poly,0.25)
# spoly = spoly.astype(np.int32)
# sbox_xmin_shrink = max(0, min(spoly[:,0]))
# sbox_xmax_shrink = min(width - 1, max(spoly[:,0]))
# sbox_ymin_shrink = max(0, min(spoly[:,1]))
# sbox_ymax_shrink = min(height - 1, max(spoly[:,1]))
# ## very small char box
# if sbox_xmax_shrink == sbox_xmin_shrink:
# sbox_xmax_shrink = sbox_xmin_shrink + 1
# if sbox_ymax_shrink == sbox_ymin_shrink:
# sbox_ymax_shrink = sbox_ymin_shrink + 1
# char_map[sbox_ymin_shrink:sbox_ymax_shrink, sbox_xmin_shrink:sbox_xmax_shrink] = int(self.char_classes[i])
pos_index = np.where(char_map > 0)
pos_num = pos_index[0].size
if pos_num > 0:
pos_weight = 1.0 * (height*width - pos_num)/pos_num
char_map_weight[1:] = pos_weight
return torch.from_numpy(char_map), torch.from_numpy(char_map_weight)
elif mode == "seq_char_mask":
decoder_target = (38 - 1) * np.ones((32, ))
word_target = -np.ones((32, ))
if not self.use_char_ann:
char_map = -np.ones((height, width))
char_map_weight = np.zeros((37,))
for i, char in enumerate(self.word):
if i > 31:
break
decoder_target[i] = char2num(char)
word_target[i] = char2num(char)
end_point = min(max(1, len(self.word)), 31)
word_target[end_point] = 37
else:
char_map = np.zeros((height, width))
char_map_weight = np.ones((37,))
word_length = 0
for i, p in enumerate(self.char_boxes):
poly=p.numpy().reshape(4,2)
# x_center = np.mean(poly[:,0], axis = 0).astype(np.int32)
# y_center = np.mean(poly[:,1], axis = 0).astype(np.int32)
# if is_poly_inbox(poly, height, width):
if i < 32:
decoder_target[i] = int(self.char_classes[i])
word_target[i] = int(self.char_classes[i])
word_length += 1
poly=shrink_poly(poly,0.25)
cv2.fillPoly(char_map,[poly.astype(np.int32)],int(self.char_classes[i]))
# if x_center>=0 and x_center<width and y_center>=0 and y_center<height:
# spoly=shrink_rect(poly,0.25)
# spoly = spoly.astype(np.int32)
# sbox_xmin_shrink = max(0, min(spoly[:,0]))
# sbox_xmax_shrink = min(width - 1, max(spoly[:,0]))
# sbox_ymin_shrink = max(0, min(spoly[:,1]))
# sbox_ymax_shrink = min(height - 1, max(spoly[:,1]))
# ## very small char box
# if sbox_xmax_shrink == sbox_xmin_shrink:
# sbox_xmax_shrink = sbox_xmin_shrink + 1
# if sbox_ymax_shrink == sbox_ymin_shrink:
# sbox_ymax_shrink = sbox_ymin_shrink + 1
# char_map[sbox_ymin_shrink:sbox_ymax_shrink, sbox_xmin_shrink:sbox_xmax_shrink] = int(self.char_classes[i])
end_point = min(max(1, word_length), 31)
word_target[end_point] = 37
pos_index = np.where(char_map > 0)
pos_num = pos_index[0].size
if pos_num > 0:
pos_weight = 1.0 * (height*width - pos_num)/pos_num
char_map_weight[1:] = pos_weight
return torch.from_numpy(char_map), torch.from_numpy(char_map_weight), torch.from_numpy(decoder_target), torch.from_numpy(word_target)
def creat_color_map(self, n_class, width):
splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
maps = []
for i in range(splits):
r = int(i * width * 1.0 / (splits-1))
for j in range(splits):
g = int(j * width * 1.0 / (splits-1))
for k in range(splits-1):
b = int(k * width * 1.0 / (splits-1))
maps.append([r, g, b])
return np.array(maps)
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_char_boxes={}, ".format(len(self.char_boxes))
s += "num_char_classes={}, ".format(len(self.char_classes))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
class SegmentationCharMask(object):
    def __init__(self, chars_boxes, words=None, use_char_ann=True, size=None, mode=None):
        # self.chars_boxes=[CharPolygons(char_boxes, word=word, use_char_ann=use_char_ann, size=size, mode=mode) for char_boxes, word in zip(chars_boxes, words)]
        if words is None:
            self.chars_boxes = [CharPolygons(char_boxes, word=None, use_char_ann=use_char_ann, size=size, mode=mode) for char_boxes in chars_boxes]
        else:
            self.chars_boxes = [CharPolygons(char_boxes, word=words[i], use_char_ann=use_char_ann, size=size, mode=mode) for i, char_boxes in enumerate(chars_boxes)]
        self.size = size
        self.mode = mode
        self.use_char_ann = use_char_ann
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped = []
for char_boxes in self.chars_boxes:
flipped.append(char_boxes.transpose(method))
return SegmentationCharMask(flipped, use_char_ann=self.use_char_ann, size=self.size, mode=self.mode)
    def crop(self, box, keep_ind):
        cropped = []
        w, h = box[2] - box[0], box[3] - box[1]
        if keep_ind is not None:
            self.chars_boxes = np.array(self.chars_boxes)
            self.chars_boxes = self.chars_boxes[keep_ind]
        for char_boxes in self.chars_boxes:
            cropped.append(char_boxes.crop(box))
        return SegmentationCharMask(cropped, use_char_ann=self.use_char_ann, size=(w, h), mode=self.mode)
def resize(self, size, *args, **kwargs):
scaled = []
for char_boxes in self.chars_boxes:
scaled.append(char_boxes.resize(size, *args, **kwargs))
return SegmentationCharMask(scaled, use_char_ann=self.use_char_ann, size=size, mode=self.mode)
    def rotate(self, angle, r_c, start_h, start_w):
        rotated = []
        for char_boxes in self.chars_boxes:
            rotated.append(char_boxes.rotate(angle, r_c, start_h, start_w))
        return SegmentationCharMask(rotated, use_char_ann=self.use_char_ann, size=(r_c[0] * 2, r_c[1] * 2), mode=self.mode)
def __iter__(self):
return iter(self.chars_boxes)
def __getitem__(self, item):
        if isinstance(item, int):
            selected_chars_boxes = [self.chars_boxes[item]]
        elif isinstance(item, slice):
            selected_chars_boxes = self.chars_boxes[item]
else:
# advanced indexing on a single dimension
selected_chars_boxes = []
if isinstance(item, torch.Tensor) and item.dtype == torch.uint8:
item = item.nonzero()
item = item.squeeze(1) if item.numel() > 0 else item
item = item.tolist()
for i in item:
                if i >= len(self.chars_boxes):
                    raise IndexError(
                        "index {} out of range for chars_boxes of length {}".format(
                            i, len(self.chars_boxes)))
selected_chars_boxes.append(self.chars_boxes[i])
return SegmentationCharMask(selected_chars_boxes, use_char_ann=self.use_char_ann, size=self.size, mode=self.mode)
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_char_boxes={}, ".format(len(self.chars_boxes))
s += "image_width={}, ".format(self.size[0])
s += "image_height={})".format(self.size[1])
return s
def shrink_poly(poly, shrink):
# shrink ratio
R = shrink
r = [None, None, None, None]
for i in range(4):
r[i] = min(np.linalg.norm(poly[i] - poly[(i + 1) % 4]),
np.linalg.norm(poly[i] - poly[(i - 1) % 4]))
# find the longer pair
if np.linalg.norm(poly[0] - poly[1]) + np.linalg.norm(poly[2] - poly[3]) > \
np.linalg.norm(poly[0] - poly[3]) + np.linalg.norm(poly[1] - poly[2]):
# first move (p0, p1), (p2, p3), then (p0, p3), (p1, p2)
## p0, p1
theta = np.arctan2((poly[1][1] - poly[0][1]), (poly[1][0] - poly[0][0]))
poly[0][0] += R * r[0] * np.cos(theta)
poly[0][1] += R * r[0] * np.sin(theta)
poly[1][0] -= R * r[1] * np.cos(theta)
poly[1][1] -= R * r[1] * np.sin(theta)
## p2, p3
theta = np.arctan2((poly[2][1] - poly[3][1]), (poly[2][0] - poly[3][0]))
poly[3][0] += R * r[3] * np.cos(theta)
poly[3][1] += R * r[3] * np.sin(theta)
poly[2][0] -= R * r[2] * np.cos(theta)
poly[2][1] -= R * r[2] * np.sin(theta)
## p0, p3
theta = np.arctan2((poly[3][0] - poly[0][0]), (poly[3][1] - poly[0][1]))
poly[0][0] += R * r[0] * np.sin(theta)
poly[0][1] += R * r[0] * np.cos(theta)
poly[3][0] -= R * r[3] * np.sin(theta)
poly[3][1] -= R * r[3] * np.cos(theta)
## p1, p2
theta = np.arctan2((poly[2][0] - poly[1][0]), (poly[2][1] - poly[1][1]))
poly[1][0] += R * r[1] * np.sin(theta)
poly[1][1] += R * r[1] * np.cos(theta)
poly[2][0] -= R * r[2] * np.sin(theta)
poly[2][1] -= R * r[2] * np.cos(theta)
else:
## p0, p3
# print poly
theta = np.arctan2((poly[3][0] - poly[0][0]), (poly[3][1] - poly[0][1]))
poly[0][0] += R * r[0] * np.sin(theta)
poly[0][1] += R * r[0] * np.cos(theta)
poly[3][0] -= R * r[3] * np.sin(theta)
poly[3][1] -= R * r[3] * np.cos(theta)
## p1, p2
theta = np.arctan2((poly[2][0] - poly[1][0]), (poly[2][1] - poly[1][1]))
poly[1][0] += R * r[1] * np.sin(theta)
poly[1][1] += R * r[1] * np.cos(theta)
poly[2][0] -= R * r[2] * np.sin(theta)
poly[2][1] -= R * r[2] * np.cos(theta)
## p0, p1
theta = np.arctan2((poly[1][1] - poly[0][1]), (poly[1][0] - poly[0][0]))
poly[0][0] += R * r[0] * np.cos(theta)
poly[0][1] += R * r[0] * np.sin(theta)
poly[1][0] -= R * r[1] * np.cos(theta)
poly[1][1] -= R * r[1] * np.sin(theta)
## p2, p3
theta = np.arctan2((poly[2][1] - poly[3][1]), (poly[2][0] - poly[3][0]))
poly[3][0] += R * r[3] * np.cos(theta)
poly[3][1] += R * r[3] * np.sin(theta)
poly[2][0] -= R * r[2] * np.cos(theta)
poly[2][1] -= R * r[2] * np.sin(theta)
return poly
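# Illustrative sketch (not part of the original file): shrinking a 10x4
# axis-aligned quad with ratio 0.25 moves every vertex inward by
# 0.25 * min(adjacent edge lengths) = 1.0 along both edge directions,
# e.g. (0, 0) -> (1, 1), so adjacent character masks do not bleed together.
def _demo_shrink_poly():
    quad = np.array([[0.0, 0.0], [10.0, 0.0], [10.0, 4.0], [0.0, 4.0]])
    return shrink_poly(quad, 0.25)  # [[1, 1], [9, 1], [9, 3], [1, 3]]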
def shrink_rect(poly, shrink):
xmin = min(poly[:,0])
xmax = max(poly[:,0])
ymin = min(poly[:,1])
ymax = max(poly[:,1])
# assert xmax > xmin and ymax > ymin
xc = (xmax + xmin) / 2
yc = (ymax + ymin) / 2
w = xmax - xmin
h = ymax - ymin
sxmin = xc - w/2*shrink
sxmax = xc + w/2*shrink
symin = yc - h/2*shrink
symax = yc + h/2*shrink
return np.array([sxmin, symin, sxmax, symin, sxmax, symax, sxmin, symax]).reshape((4, 2))
def is_poly_inbox(poly, height, width):
min_x = min(poly[:, 0])
min_y = min(poly[:, 1])
max_x = max(poly[:, 0])
max_y = max(poly[:, 1])
if (max_x < 0 and max_y < 0) or (min_x > width and min_y > height):
return False
else:
return True
| 24,511 | 39.785358 | 163 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/structures/bounding_box.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from shapely.geometry import box
from shapely import affinity
import numpy as np
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class BoxList(object):
"""
This class represents a set of bounding boxes.
The bounding boxes are represented as a Nx4 Tensor.
    In order to uniquely determine the bounding boxes with respect
to an image, we also store the corresponding image dimensions.
They can contain extra information that is specific to each bounding box, such as
labels.
"""
def __init__(self, bbox, image_size, mode="xyxy", use_char_ann=True):
device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device("cpu")
bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
if bbox.ndimension() != 2:
raise ValueError(
"bbox should have 2 dimensions, got {}".format(bbox.ndimension())
)
if bbox.size(-1) != 4:
raise ValueError(
"last dimenion of bbox should have a "
"size of 4, got {}".format(bbox.size(-1))
)
if mode not in ("xyxy", "xywh"):
raise ValueError("mode should be 'xyxy' or 'xywh'")
self.bbox = bbox
self.size = image_size # (image_width, image_height)
self.mode = mode
self.extra_fields = {}
        self.use_char_ann = use_char_ann
def add_field(self, field, field_data):
self.extra_fields[field] = field_data
def get_field(self, field):
return self.extra_fields[field]
def has_field(self, field):
return field in self.extra_fields
def fields(self):
return list(self.extra_fields.keys())
def _copy_extra_fields(self, bbox):
for k, v in bbox.extra_fields.items():
self.extra_fields[k] = v
def convert(self, mode):
if mode not in ("xyxy", "xywh"):
raise ValueError("mode should be 'xyxy' or 'xywh'")
if mode == self.mode:
return self
# we only have two modes, so don't need to check
# self.mode
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if mode == "xyxy":
bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode, use_char_ann=self.use_char_ann)
else:
TO_REMOVE = 1
bbox = torch.cat(
(xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1
)
bbox = BoxList(bbox, self.size, mode=mode, use_char_ann=self.use_char_ann)
bbox._copy_extra_fields(self)
return bbox
def _split_into_xyxy(self):
if self.mode == "xyxy":
xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
return xmin, ymin, xmax, ymax
elif self.mode == "xywh":
TO_REMOVE = 1
xmin, ymin, w, h = self.bbox.split(1, dim=-1)
return (
xmin,
ymin,
xmin + (w - TO_REMOVE).clamp(min=0),
ymin + (h - TO_REMOVE).clamp(min=0),
)
else:
raise RuntimeError("Should not be here")
def resize(self, size, *args, **kwargs):
"""
Returns a resized copy of this bounding box
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
"""
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_box = self.bbox * ratio
bbox = BoxList(scaled_box, size, mode=self.mode, use_char_ann=self.use_char_ann)
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox
ratio_width, ratio_height = ratios
xmin, ymin, xmax, ymax = self._split_into_xyxy()
scaled_xmin = xmin * ratio_width
scaled_xmax = xmax * ratio_width
scaled_ymin = ymin * ratio_height
scaled_ymax = ymax * ratio_height
scaled_box = torch.cat(
(scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1
)
bbox = BoxList(scaled_box, size, mode="xyxy", use_char_ann=self.use_char_ann)
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox.convert(self.mode)
    def poly2box(self, poly):
        xmin = min(poly[0::2])
        xmax = max(poly[0::2])
        ymin = min(poly[1::2])
        ymax = max(poly[1::2])
        return [xmin, ymin, xmax, ymax]
    def rotate(self, angle, r_c, start_h, start_w):
        masks = self.extra_fields['masks']
        masks = masks.rotate(angle, r_c, start_h, start_w)
        polys = masks.polygons
        boxes = []
        for poly in polys:
            box = self.poly2box(poly.polygons[0].numpy())
            boxes.append(box)
        self.size = (r_c[0] * 2, r_c[1] * 2)
        bbox = BoxList(boxes, self.size, mode="xyxy", use_char_ann=self.use_char_ann)
        for k, v in self.extra_fields.items():
            if k == 'masks':
                v = masks
            else:
                if self.use_char_ann:
                    if not isinstance(v, torch.Tensor):
                        v = v.rotate(angle, r_c, start_h, start_w)
                else:
                    if not isinstance(v, torch.Tensor) and k != "char_masks":
                        v = v.rotate(angle, r_c, start_h, start_w)
            bbox.add_field(k, v)
        return bbox.convert(self.mode)
def transpose(self, method):
"""
Transpose bounding box (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
:py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
"""
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
image_width, image_height = self.size
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if method == FLIP_LEFT_RIGHT:
TO_REMOVE = 1
transposed_xmin = image_width - xmax - TO_REMOVE
transposed_xmax = image_width - xmin - TO_REMOVE
transposed_ymin = ymin
transposed_ymax = ymax
elif method == FLIP_TOP_BOTTOM:
transposed_xmin = xmin
transposed_xmax = xmax
transposed_ymin = image_height - ymax
transposed_ymax = image_height - ymin
transposed_boxes = torch.cat(
(transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1
)
        bbox = BoxList(transposed_boxes, self.size, mode="xyxy", use_char_ann=self.use_char_ann)
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.transpose(method)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def crop(self, box):
"""
        Crops a rectangular region from this bounding box. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
"""
xmin, ymin, xmax, ymax = self._split_into_xyxy()
w, h = box[2] - box[0], box[3] - box[1]
cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)
        keep_ind = None
not_empty = np.where((cropped_xmin != cropped_xmax) & (cropped_ymin != cropped_ymax))[0]
if len(not_empty) > 0:
keep_ind = not_empty
cropped_box = torch.cat(
(cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1
)
cropped_box=cropped_box[not_empty]
        bbox = BoxList(cropped_box, (w, h), mode="xyxy", use_char_ann=self.use_char_ann)
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if self.use_char_ann:
if not isinstance(v, torch.Tensor):
v = v.crop(box,keep_ind)
else:
if not isinstance(v, torch.Tensor) and k != "char_masks":
v = v.crop(box,keep_ind)
bbox.add_field(k, v)
return bbox.convert(self.mode)
# Tensor-like methods
def to(self, device):
        bbox = BoxList(self.bbox.to(device), self.size, self.mode, self.use_char_ann)
for k, v in self.extra_fields.items():
if hasattr(v, "to"):
v = v.to(device)
bbox.add_field(k, v)
return bbox
def __getitem__(self, item):
        bbox = BoxList(self.bbox[item], self.size, self.mode, self.use_char_ann)
for k, v in self.extra_fields.items():
bbox.add_field(k, v[item])
return bbox
def __len__(self):
return self.bbox.shape[0]
def clip_to_image(self, remove_empty=True):
TO_REMOVE = 1
self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)
self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)
if remove_empty:
box = self.bbox
keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])
return self[keep]
return self
def area(self):
TO_REMOVE = 1
box = self.bbox
area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)
return area
def copy_with_fields(self, fields):
        bbox = BoxList(self.bbox, self.size, self.mode, self.use_char_ann)
if not isinstance(fields, (list, tuple)):
fields = [fields]
for field in fields:
bbox.add_field(field, self.get_field(field))
return bbox
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_boxes={}, ".format(len(self))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
if __name__ == "__main__":
bbox = BoxList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10))
s_bbox = bbox.resize((5, 5))
print(s_bbox)
print(s_bbox.bbox)
t_bbox = bbox.transpose(0)
print(t_bbox)
print(t_bbox.bbox)
| 11,086 | 36.456081 | 96 | py |
MaskTextSpotter | MaskTextSpotter-master/maskrcnn_benchmark/structures/boxlist_ops.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .bounding_box import BoxList
from maskrcnn_benchmark.layers import nms as _box_nms
def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field="score"):
"""
Performs non-maximum suppression on a boxlist, with scores specified
in a boxlist field via score_field.
Arguments:
boxlist(BoxList)
nms_thresh (float)
max_proposals (int): if > 0, then only the top max_proposals are kept
after non-maxium suppression
score_field (str)
"""
if nms_thresh <= 0:
return boxlist
mode = boxlist.mode
boxlist = boxlist.convert("xyxy")
boxes = boxlist.bbox
score = boxlist.get_field(score_field)
keep = _box_nms(boxes, score, nms_thresh)
if max_proposals > 0:
keep = keep[: max_proposals]
boxlist = boxlist[keep]
return boxlist.convert(mode)
def remove_small_boxes(boxlist, min_size):
"""
Only keep boxes with both sides >= min_size
Arguments:
boxlist (Boxlist)
min_size (int)
"""
# TODO maybe add an API for querying the ws / hs
xywh_boxes = boxlist.convert("xywh").bbox
_, _, ws, hs = xywh_boxes.unbind(dim=1)
keep = (
(ws >= min_size) & (hs >= min_size)
).nonzero().squeeze(1)
return boxlist[keep]
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def boxlist_iou(boxlist1, boxlist2):
"""Compute the intersection over union of two set of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Arguments:
box1: (BoxList) bounding boxes, sized [N,4].
box2: (BoxList) bounding boxes, sized [M,4].
Returns:
(tensor) iou, sized [N,M].
Reference:
https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py
"""
if boxlist1.size != boxlist2.size:
raise RuntimeError(
"boxlists should have same image size, got {}, {}".format(boxlist1, boxlist2))
N = len(boxlist1)
M = len(boxlist2)
area1 = boxlist1.area()
area2 = boxlist2.area()
box1, box2 = boxlist1.bbox, boxlist2.bbox
lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2]
rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2]
TO_REMOVE = 1
wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
iou = inter / (area1[:, None] + area2 - inter)
return iou
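# Illustrative sketch (not part of the original file): with the +1 pixel
# convention above, [0, 0, 4, 4] and [4, 4, 8, 8] each cover 5 * 5 = 25 pixels
# and share exactly the corner pixel, so IoU = 1 / (25 + 25 - 1) = 1 / 49.
def _demo_boxlist_iou():
    a = BoxList(torch.tensor([[0.0, 0.0, 4.0, 4.0]]), (10, 10), mode="xyxy")
    b = BoxList(torch.tensor([[4.0, 4.0, 8.0, 8.0]]), (10, 10), mode="xyxy")
    return boxlist_iou(a, b)  # tensor([[0.0204]])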
# TODO redundant, remove
def _cat(tensors, dim=0):
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def cat_boxlist(bboxes):
"""
Concatenates a list of BoxList (having the same image size) into a
single BoxList
Arguments:
bboxes (list[BoxList])
"""
assert isinstance(bboxes, (list, tuple))
assert all(isinstance(bbox, BoxList) for bbox in bboxes)
size = bboxes[0].size
assert all(bbox.size == size for bbox in bboxes)
mode = bboxes[0].mode
assert all(bbox.mode == mode for bbox in bboxes)
fields = set(bboxes[0].fields())
assert all(set(bbox.fields()) == fields for bbox in bboxes)
cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode)
for field in fields:
data = _cat([bbox.get_field(field) for bbox in bboxes], dim=0)
cat_boxes.add_field(field, data)
return cat_boxes
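# Illustrative sketch (not part of the original file): concatenating two
# BoxLists that share image size, mode and field names into a single BoxList.
def _demo_cat_boxlist():
    a = BoxList(torch.tensor([[0.0, 0.0, 4.0, 4.0]]), (10, 10), mode="xyxy")
    b = BoxList(torch.tensor([[1.0, 1.0, 3.0, 3.0]]), (10, 10), mode="xyxy")
    for boxlist in (a, b):
        boxlist.add_field("score", torch.ones(1))
    merged = cat_boxlist([a, b])
    return len(merged)  # 2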
| 3,635 | 27.186047 | 97 | py |
MaskTextSpotter | MaskTextSpotter-master/tests/checkpoint.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import OrderedDict
import os
from tempfile import TemporaryDirectory
import unittest
import torch
from torch import nn
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
from maskrcnn_benchmark.utils.checkpoint import Checkpointer
class TestCheckpointer(unittest.TestCase):
def create_model(self):
return nn.Sequential(nn.Linear(2, 3), nn.Linear(3, 1))
def create_complex_model(self):
m = nn.Module()
m.block1 = nn.Module()
m.block1.layer1 = nn.Linear(2, 3)
m.layer2 = nn.Linear(3, 2)
m.res = nn.Module()
m.res.layer2 = nn.Linear(3, 2)
state_dict = OrderedDict()
state_dict["layer1.weight"] = torch.rand(3, 2)
state_dict["layer1.bias"] = torch.rand(3)
state_dict["layer2.weight"] = torch.rand(2, 3)
state_dict["layer2.bias"] = torch.rand(2)
state_dict["res.layer2.weight"] = torch.rand(2, 3)
state_dict["res.layer2.bias"] = torch.rand(2)
return m, state_dict
def test_from_last_checkpoint_model(self):
# test that loading works even if they differ by a prefix
for trained_model, fresh_model in [
(self.create_model(), self.create_model()),
(nn.DataParallel(self.create_model()), self.create_model()),
(self.create_model(), nn.DataParallel(self.create_model())),
(
nn.DataParallel(self.create_model()),
nn.DataParallel(self.create_model()),
),
]:
with TemporaryDirectory() as f:
checkpointer = Checkpointer(
trained_model, save_dir=f, save_to_disk=True
)
checkpointer.save("checkpoint_file")
# in the same folder
fresh_checkpointer = Checkpointer(fresh_model, save_dir=f)
self.assertTrue(fresh_checkpointer.has_checkpoint())
self.assertEqual(
fresh_checkpointer.get_checkpoint_file(),
os.path.join(f, "checkpoint_file.pth"),
)
_ = fresh_checkpointer.load()
for trained_p, loaded_p in zip(
trained_model.parameters(), fresh_model.parameters()
):
# different tensor references
self.assertFalse(id(trained_p) == id(loaded_p))
# same content
self.assertTrue(trained_p.equal(loaded_p))
def test_from_name_file_model(self):
# test that loading works even if they differ by a prefix
for trained_model, fresh_model in [
(self.create_model(), self.create_model()),
(nn.DataParallel(self.create_model()), self.create_model()),
(self.create_model(), nn.DataParallel(self.create_model())),
(
nn.DataParallel(self.create_model()),
nn.DataParallel(self.create_model()),
),
]:
with TemporaryDirectory() as f:
checkpointer = Checkpointer(
trained_model, save_dir=f, save_to_disk=True
)
checkpointer.save("checkpoint_file")
# on different folders
with TemporaryDirectory() as g:
fresh_checkpointer = Checkpointer(fresh_model, save_dir=g)
self.assertFalse(fresh_checkpointer.has_checkpoint())
self.assertEqual(fresh_checkpointer.get_checkpoint_file(), "")
_ = fresh_checkpointer.load(os.path.join(f, "checkpoint_file.pth"))
for trained_p, loaded_p in zip(
trained_model.parameters(), fresh_model.parameters()
):
# different tensor references
self.assertFalse(id(trained_p) == id(loaded_p))
# same content
self.assertTrue(trained_p.equal(loaded_p))
def test_complex_model_loaded(self):
for add_data_parallel in [False, True]:
model, state_dict = self.create_complex_model()
if add_data_parallel:
model = nn.DataParallel(model)
load_state_dict(model, state_dict)
for loaded, stored in zip(model.state_dict().values(), state_dict.values()):
# different tensor references
self.assertFalse(id(loaded) == id(stored))
# same content
self.assertTrue(loaded.equal(stored))
if __name__ == "__main__":
unittest.main()
| 4,645 | 38.042017 | 88 | py |
MaskTextSpotter | MaskTextSpotter-master/tests/test_data_samplers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import random
import unittest
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
from torch.utils.data.sampler import SequentialSampler
from torch.utils.data.sampler import RandomSampler
from maskrcnn_benchmark.data.samplers import GroupedBatchSampler
from maskrcnn_benchmark.data.samplers import IterationBasedBatchSampler
class SubsetSampler(Sampler):
def __init__(self, indices):
self.indices = indices
def __iter__(self):
return iter(self.indices)
def __len__(self):
return len(self.indices)
class TestGroupedBatchSampler(unittest.TestCase):
def test_respect_order_simple(self):
drop_uneven = False
dataset = [i for i in range(40)]
group_ids = [i // 10 for i in dataset]
sampler = SequentialSampler(dataset)
for batch_size in [1, 3, 5, 6]:
batch_sampler = GroupedBatchSampler(
sampler, group_ids, batch_size, drop_uneven
)
result = list(batch_sampler)
merged_result = list(itertools.chain.from_iterable(result))
self.assertEqual(merged_result, dataset)
def test_respect_order(self):
drop_uneven = False
dataset = [i for i in range(10)]
group_ids = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
sampler = SequentialSampler(dataset)
expected = [
[[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]],
[[0, 1, 3], [2, 4, 5], [6, 9], [7, 8]],
[[0, 1, 3, 6], [2, 4, 5, 7], [8], [9]],
]
for idx, batch_size in enumerate([1, 3, 4]):
batch_sampler = GroupedBatchSampler(
sampler, group_ids, batch_size, drop_uneven
)
result = list(batch_sampler)
self.assertEqual(result, expected[idx])
def test_respect_order_drop_uneven(self):
batch_size = 3
drop_uneven = True
dataset = [i for i in range(10)]
group_ids = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
sampler = SequentialSampler(dataset)
batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
result = list(batch_sampler)
expected = [[0, 1, 3], [2, 4, 5]]
self.assertEqual(result, expected)
def test_subset_sampler(self):
batch_size = 3
drop_uneven = False
dataset = [i for i in range(10)]
group_ids = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
sampler = SubsetSampler([0, 3, 5, 6, 7, 8])
batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
result = list(batch_sampler)
expected = [[0, 3, 6], [5, 7, 8]]
self.assertEqual(result, expected)
def test_permute_subset_sampler(self):
batch_size = 3
drop_uneven = False
dataset = [i for i in range(10)]
group_ids = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
sampler = SubsetSampler([5, 0, 6, 1, 3, 8])
batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
result = list(batch_sampler)
expected = [[5, 8], [0, 6, 1], [3]]
self.assertEqual(result, expected)
def test_permute_subset_sampler_drop_uneven(self):
batch_size = 3
drop_uneven = True
dataset = [i for i in range(10)]
group_ids = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
sampler = SubsetSampler([5, 0, 6, 1, 3, 8])
batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
result = list(batch_sampler)
expected = [[0, 6, 1]]
self.assertEqual(result, expected)
def test_len(self):
batch_size = 3
drop_uneven = True
dataset = [i for i in range(10)]
group_ids = [random.randint(0, 1) for _ in dataset]
sampler = RandomSampler(dataset)
batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
result = list(batch_sampler)
self.assertEqual(len(result), len(batch_sampler))
self.assertEqual(len(result), len(batch_sampler))
batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size, drop_uneven)
batch_sampler_len = len(batch_sampler)
result = list(batch_sampler)
self.assertEqual(len(result), batch_sampler_len)
self.assertEqual(len(result), len(batch_sampler))
class TestIterationBasedBatchSampler(unittest.TestCase):
def test_number_of_iters_and_elements(self):
for batch_size in [2, 3, 4]:
for num_iterations in [4, 10, 20]:
for drop_last in [False, True]:
dataset = [i for i in range(10)]
sampler = SequentialSampler(dataset)
batch_sampler = BatchSampler(
sampler, batch_size, drop_last=drop_last
)
iter_sampler = IterationBasedBatchSampler(
batch_sampler, num_iterations
)
assert len(iter_sampler) == num_iterations
for i, batch in enumerate(iter_sampler):
start = (i % len(batch_sampler)) * batch_size
end = min(start + batch_size, len(dataset))
expected = [x for x in range(start, end)]
self.assertEqual(batch, expected)
if __name__ == "__main__":
unittest.main()
| 5,532 | 34.928571 | 88 | py |
ti-kd-qat | ti-kd-qat-main/main.py |
# This code is implemented based on "TernaryBERT: Distillation-aware Ultra-low Bit BERT" (Zhang et al., EMNLP 2020)
# https://arxiv.org/abs/2009.12812
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import sys
import pickle
import copy
import collections
import math
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler,TensorDataset
from torch.nn import CrossEntropyLoss, MSELoss, CosineEmbeddingLoss
from transformer import BertForSequenceClassification,WEIGHTS_NAME, CONFIG_NAME
from transformer.modeling_quant import BertForSequenceClassification as QuantBertForSequenceClassification
from transformer import BertTokenizer
from transformer import BertAdam
from transformer import BertConfig
from utils_glue import *
import torch.nn.functional as F
import time
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
logger = logging.getLogger()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
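# Usage sketch (not part of the original file): AverageMeter tracks a running
# mean across batches of different sizes, e.g. for per-batch loss logging.
def _demo_average_meter():
    meter = AverageMeter()
    meter.update(0.9, n=32)  # batch of 32 with mean loss 0.9
    meter.update(0.5, n=16)  # batch of 16 with mean loss 0.5
    return meter.avg         # (0.9 * 32 + 0.5 * 16) / 48 ~= 0.767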
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def get_tensor_data(output_mode, features):
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
all_seq_lengths = torch.tensor([f.seq_length for f in features], dtype=torch.long)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
tensor_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,all_label_ids, all_seq_lengths)
return tensor_data, all_label_ids
def do_eval(model, task_name, eval_dataloader,
device, output_mode, eval_labels, num_labels, teacher_model=None):
eval_loss = 0
nb_eval_steps = 0
preds = []
for batch_ in eval_dataloader:
batch_ = tuple(t.to(device) for t in batch_)
with torch.no_grad():
input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch_
if teacher_model is not None:
teacher_logits, teacher_atts, teacher_reps, teacher_probs, teacher_values = teacher_model(input_ids, segment_ids, input_mask)
logits, student_atts, student_reps, student_probs, student_values = model(input_ids, segment_ids, input_mask, teacher_outputs=(teacher_probs, teacher_values, teacher_reps, teacher_logits, teacher_atts))
else:
logits, _, _, _, _ = model(input_ids, segment_ids, input_mask)
# create eval loss and other metric required by the task
if output_mode == "classification":
loss_fct = CrossEntropyLoss()
tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = preds[0]
if output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(task_name, preds, eval_labels.numpy())
result['eval_loss'] = eval_loss
return result
def soft_cross_entropy(predicts, targets):
student_likelihood = torch.nn.functional.log_softmax(predicts, dim=-1)
targets_prob = torch.nn.functional.softmax(targets, dim=-1)
return torch.sum((- targets_prob * student_likelihood), dim=-1).mean()
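# Illustrative sketch (not part of the original file): soft_cross_entropy is the
# KD loss between the teacher's softened distribution and the student's
# log-probabilities; it is minimized (equal to the teacher's entropy) when the
# student logits match the teacher logits up to a constant shift.
def _demo_soft_cross_entropy():
    teacher_logits = torch.tensor([[2.0, 0.0, -1.0]])
    matched = soft_cross_entropy(teacher_logits.clone(), teacher_logits)
    mismatched = soft_cross_entropy(torch.zeros(1, 3), teacher_logits)
    return matched, mismatched  # matched (~0.52) < mismatched (~1.10)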
def main():
# ================================================================================ #
# ArgParse
# ================================================================================ #
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir",
default='data',
type=str,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_dir",
default='models',
type=str,
help="The model dir.")
parser.add_argument("--teacher_model",
default=None,
type=str,
help="The models directory.")
parser.add_argument("--student_model",
default=None,
type=str,
help="The models directory.")
parser.add_argument("--task_name",
default='sst-2',
type=str,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default='output',
type=str,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--learning_rate",
default=2e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--save_fp_model',
action='store_true',
help="Whether to save fp32 model")
parser.add_argument('--save_quantized_model',
default=False, type=str2bool,
help="Whether to save quantized model")
parser.add_argument("--input_bits",
default=8,
type=int,
help="Quantization bits for activation.")
parser.add_argument("--tc_top_k",
default=3,
type=int,
help="Top-K Coverage")
parser.add_argument("--gpus",
default=1,
type=int,
help="Number of GPUs to use")
parser.add_argument("--clip_val",
default=2.5,
type=float,
help="Initial clip value.")
parser.add_argument('--qk_FP',
default=False, type=str2bool,
)
parser.add_argument('--qkv_FP',
default=False, type=str2bool,
)
parser.add_argument('--neptune',
default=True, type=str2bool,
help="neptune logging option")
#MSKIM Quantization Range Option
parser.add_argument('--quantize',
default =True, type=str2bool,
help="Whether to quantize student model")
parser.add_argument('--ffn_1',
default =True, type=str2bool,
help="Whether to quantize Feed Forward Network")
parser.add_argument('--ffn_2',
default =True, type=str2bool,
help="Whether to quantize Feed Forward Network")
parser.add_argument('--qkv',
default =True, type=str2bool,
help="Whether to quantize Query, Key, Value Mapping Weight Matrix")
parser.add_argument('--emb',
default =True, type=str2bool,
help="Whether to quantize Embedding Layer")
parser.add_argument('--cls',
default =True, type=str2bool,
help="Whether to quantize Classifier Dense Layer")
parser.add_argument('--aug_train',
default =False, type=str2bool,
help="Whether to use augmented data or not")
parser.add_argument('--clipping',
default =False, type=str2bool,
help="Whether to use FP Weight Clipping")
parser.add_argument("--mean_scale",
default=0.7,
type=float,
help="Ternary Clipping Value Scale Value")
parser.add_argument("--exp_name",
default="",
type=str,
help="Output Directory Name")
parser.add_argument("--training_type",
default="qat_normal",
type=str,
help="QAT Method")
parser.add_argument("--aug_N",
default=30,
type=int,
help="Data Augmentation N Number")
parser.add_argument('--pred_distill',
default =False, type=str2bool,
help="prediction distill option")
parser.add_argument('--attn_distill',
default =True, type=str2bool,
help="attention Score Distill Option")
parser.add_argument('--rep_distill',
default =True, type=str2bool,
help="Transformer Layer output Distill Option")
parser.add_argument('--output_distill',
default =False, type=str2bool,
help="Context Value Distill Option")
parser.add_argument('--gt_loss',
default =False, type=str2bool,
help="Ground Truth Option")
# Teacher Intervention Options
parser.add_argument('--teacher_attnmap',
default =False, type=str2bool,
help="Teacher Intervention Option (TI-M)")
parser.add_argument('--teacher_output',
default =False, type=str2bool,
help="Teacher Intervention Option (TI-O)")
parser.add_argument('--teacher_gradual',
default =False, type=str2bool,
help="Teacher Intervention Option (TI-G)")
parser.add_argument('--teacher_stochastic',
default =False, type=str2bool,
help="Teacher Intervention Option (Stochastic Mixed)")
parser.add_argument('--teacher_inverted',
default =False, type=str2bool,
help="Teacher Intervention Option (Stochastic Mixed)")
parser.add_argument('--teacher_context',
default =False, type=str2bool,
help="Teacher Intervention Option (Context)")
parser.add_argument('--step1_option',
default ="GRAD", type=str,
help="Teacher Intervention Step-1 Option (For step-2 model init)")
parser.add_argument('--bert',
default ="base", type=str,
)
args = parser.parse_args()
# ================================================================================ #
# Logging setup
# ================================================================================ #
run = None
# Use Neptune for logging
if args.neptune:
import neptune.new as neptune
run = neptune.init_run(project='niceball0827/' + args.task_name.upper(),
api_token='eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vYXBwLm5lcHR1bmUuYWkiLC\
JhcGlfdXJsIjoiaHR0cHM6Ly9hcHAubmVwdHVuZS5haSIsImFwaV9rZXkiOiI0YjM\
0ZTYwMi1kNjQwLTQ4NGYtOTYxMy03Mjc5ZmVkMzY2YTgifQ==')
# run = neptune.init(project='Neptune_ID/ProjectName',
# api_token='Neptune_API_Token')
# ================================================================================ #
# Load Directory
# ================================================================================ #
# Exp Name
exp_name = args.exp_name
exp_name += f"_{args.bert}"
if args.training_type == "qat_step1":
if args.teacher_attnmap:
exp_name += f"_MI"
if args.teacher_context:
exp_name += f"_CI"
if args.teacher_output:
exp_name += f"_OI"
if args.teacher_gradual:
exp_name += f"_GRAD"
if args.teacher_inverted:
exp_name += f"_INVERTED"
if args.teacher_stochastic:
exp_name += f"_STOCHASTIC"
else:
if args.gt_loss:
exp_name += "_G"
if args.attn_distill:
exp_name += "_S"
if args.rep_distill:
exp_name += "_R"
if args.output_distill:
exp_name += "_O"
exp_name += f"_{args.seed}"
if args.training_type == "qat_step2":
exp_name += f"_{args.step1_option}"
args.exp_name = exp_name
if args.aug_train:
logger.info(f'DA QAT')
logger.info(f'EXP SET: {exp_name}')
logger.info(f'TASK: {args.task_name}')
logger.info(f"SIZE: {args.bert}")
logger.info(f"SEED: {args.seed}")
logger.info(f'EPOCH: {args.num_train_epochs}')
# GLUE Dataset Setting
task_name = args.task_name.lower()
data_dir = os.path.join(args.data_dir,task_name)
processed_data_dir = os.path.join(data_dir,'preprocessed')
if not os.path.exists(processed_data_dir):
os.mkdir(processed_data_dir)
# BERT Large Option
if args.bert == "large":
args.model_dir = os.path.join(args.model_dir, "BERT_large")
args.output_dir = os.path.join(args.output_dir, "BERT_large")
if args.bert == "tiny-4l":
args.model_dir = os.path.join(args.model_dir, "BERT_Tiny_4l")
args.output_dir = os.path.join(args.output_dir, "BERT_Tiny_4l")
if args.bert == "tiny-6l":
args.model_dir = os.path.join(args.model_dir, "BERT_Tiny_6l")
args.output_dir = os.path.join(args.output_dir, "BERT_Tiny_6l")
# Model Save Directory
output_dir = os.path.join(args.output_dir,task_name)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if args.save_quantized_model:
output_quant_dir = os.path.join(output_dir, 'exploration')
if not os.path.exists(output_quant_dir):
os.mkdir(output_quant_dir)
if not os.path.exists(output_quant_dir):
os.makedirs(output_quant_dir)
output_quant_dir = os.path.join(output_quant_dir, args.exp_name)
if not os.path.exists(output_quant_dir):
os.makedirs(output_quant_dir)
# ================================================================================ #
# Load Pths
# ================================================================================ #
# Student Model Pretrained FIle
if args.training_type == "qat_normal":
args.student_model = os.path.join(args.model_dir,task_name)
elif args.training_type == "qat_step1":
args.student_model = os.path.join(args.model_dir, task_name)
elif args.training_type == "qat_step2":
args.student_model = os.path.join(args.output_dir, task_name, "exploration", f"TI_step1_{args.bert}_{args.step1_option}")
else:
raise ValueError("Choose Training Type {downsteam, qat_normal, qat_step1, qat_step2, qat_step3, gradual}")
# Teacher Model Pretrained FIle
args.teacher_model = os.path.join(args.model_dir,task_name)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor
}
output_modes = {
"cola": "classification",
"mnli": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification"
}
default_params = {
"cola": {"max_seq_length": 64,"batch_size":16,"eval_step": 2000 if args.aug_train else 50}, # No Aug : 50 Aug : 400
"mnli": {"max_seq_length": 128,"batch_size":32,"eval_step":8000},
"mrpc": {"max_seq_length": 128,"batch_size":32,"eval_step":1000 if args.aug_train else 50},
"sst-2": {"max_seq_length": 64,"batch_size":32,"eval_step":100},
"sts-b": {"max_seq_length": 128,"batch_size":32,"eval_step":2000 if args.aug_train else 100},
"qqp": {"max_seq_length": 128,"batch_size":32,"eval_step":1000},
"qnli": {"max_seq_length": 128,"batch_size":32,"eval_step":1000},
"rte": {"max_seq_length": 128,"batch_size":32,"eval_step":1000 if args.aug_train else 20}
}
acc_tasks = ["mnli", "mrpc", "sst-2", "qqp", "qnli", "rte"]
corr_tasks = ["sts-b"]
mcc_tasks = ["cola"]
# ================================================================================ #
# prepare devices
# ================================================================================ #
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = args.gpus
# ================================================================================ #
# prepare seed
# ================================================================================ #
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if task_name in default_params:
args.batch_size = default_params[task_name]["batch_size"]
if n_gpu > 0:
args.batch_size = int(args.batch_size*n_gpu)
args.max_seq_length = default_params[task_name]["max_seq_length"]
args.eval_step = default_params[task_name]["eval_step"]
processor = processors[task_name]()
output_mode = output_modes[task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# ================================================================================ #
# Load Vocab FIle -> Tokenization
# ================================================================================ #
tokenizer = BertTokenizer.from_pretrained(args.teacher_model, do_lower_case=True)
# ================================================================================ #
# Dataset Setup (with DA)
# ================================================================================ #
if args.aug_train: # Data Augmentation
try:
train_file = os.path.join(processed_data_dir,f'aug_data_{args.aug_N}.pkl')
train_features = pickle.load(open(train_file,'rb'))
except:
train_examples = processor.get_aug_examples(data_dir, args.aug_N)
train_features = convert_examples_to_features(train_examples, label_list,
args.max_seq_length, tokenizer, output_mode)
with open(train_file, 'wb') as f:
pickle.dump(train_features, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
try:
train_file = os.path.join(processed_data_dir,'data.pkl')
train_features = pickle.load(open(train_file,'rb'))
        except (OSError, pickle.UnpicklingError): # cache miss: build features from the raw data
train_examples = processor.get_train_examples(data_dir)
train_features = convert_examples_to_features(train_examples, label_list,
args.max_seq_length, tokenizer, output_mode)
with open(train_file, 'wb') as f:
pickle.dump(train_features, f, protocol=pickle.HIGHEST_PROTOCOL)
num_train_epochs = args.num_train_epochs
num_train_optimization_steps = math.ceil(len(train_features) / args.batch_size) * num_train_epochs
    # TI step-1 iteration budget (the step-2 budget is derived from it below)
if "tiny-4l" in args.bert or task_name == "cola":
ti_step_1_total_step = 120
else:
ti_step_1_total_step = 60
if args.training_type == "qat_step1":
args.eval_step = 10
num_train_optimization_steps = ti_step_1_total_step
    # We keep the total two-step QAT iteration count identical to the baseline TernaryBERT QAT setting:
    #   total iteration steps     = N
    #   TI step-1 iteration steps = s
    #   TI step-2 iteration steps = N - s
if args.training_type == "qat_step2" :
num_train_optimization_steps = num_train_optimization_steps - ti_step_1_total_step
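    # Worked example of the budget split (made-up numbers): with 10,000 train
    # features, batch_size 32 and 3 epochs, N = ceil(10000 / 32) * 3 = 939;
    # with a step-1 budget of s = 60, step-2 then runs for N - s = 879 steps,
    # so step-1 + step-2 together match the single-stage TernaryBERT budget N.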
train_data, _ = get_tensor_data(output_mode, train_features)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
# Dev Data load
try:
        dev_file = os.path.join(processed_data_dir,'dev.pkl')
eval_features = pickle.load(open(dev_file,'rb'))
    except (OSError, pickle.UnpicklingError): # cache miss: build features from the raw data
eval_examples = processor.get_dev_examples(data_dir)
eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
with open(dev_file, 'wb') as f:
pickle.dump(eval_features, f, protocol=pickle.HIGHEST_PROTOCOL)
eval_data, eval_labels = get_tensor_data(output_mode, eval_features)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
if task_name == "mnli":
processor = processors["mnli-mm"]()
try:
            dev_mm_file = os.path.join(processed_data_dir,'dev-mm_data.pkl')
mm_eval_features = pickle.load(open(dev_mm_file,'rb'))
        except (OSError, pickle.UnpicklingError): # cache miss: build features from the raw data
mm_eval_examples = processor.get_dev_examples(data_dir)
mm_eval_features = convert_examples_to_features(
mm_eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
with open(dev_mm_file, 'wb') as f:
pickle.dump(mm_eval_features, f, protocol=pickle.HIGHEST_PROTOCOL)
mm_eval_data, mm_eval_labels = get_tensor_data(output_mode, mm_eval_features)
# logger.info(" Num examples = %d", len(mm_eval_features))
mm_eval_sampler = SequentialSampler(mm_eval_data)
mm_eval_dataloader = DataLoader(mm_eval_data, sampler=mm_eval_sampler,
batch_size=args.batch_size)
# ================================================================================ #
# Build Teacher Model
# ================================================================================ #
teacher_model = BertForSequenceClassification.from_pretrained(args.teacher_model, num_labels=num_labels)
teacher_model.to(device)
teacher_model.eval()
if n_gpu > 1:
teacher_model = torch.nn.DataParallel(teacher_model)
result = do_eval(teacher_model, task_name, eval_dataloader,
device, output_mode, eval_labels, num_labels)
# ================================================================================ #
    # Save Teacher Model Performance for KD Training
# ================================================================================ #
if task_name in acc_tasks:
if task_name in ['sst-2','mnli','qnli','rte']:
fp32_performance = f"acc:{result['acc']}"
fp32_score = result['acc']
elif task_name in ['mrpc','qqp']:
fp32_performance = f"f1/acc:{result['f1']}/{result['acc']} avg : {(result['f1'] + result['acc'])*50}"
fp32_score = (result['f1'] + result['acc'])*50
if task_name in corr_tasks:
fp32_performance = f"pearson/spearmanr:{result['pearson']}/{result['spearmanr']} corr:{result['corr']}"
fp32_score = result['corr']*100
if task_name in mcc_tasks:
fp32_performance = f"mcc:{result['mcc']}"
fp32_score = result['mcc']
if task_name == "mnli":
result = do_eval(teacher_model, 'mnli-mm', mm_eval_dataloader,
device, output_mode, mm_eval_labels, num_labels)
fp32_performance += f" mm-acc:{result['acc']}"
fp32_score = result['acc']
fp32_performance = task_name +' fp32 ' + fp32_performance
# ================================================================================ #
# Build Student Model
# ================================================================================ #
student_config = BertConfig.from_pretrained(args.student_model,
clip_val = args.clip_val,
quantize = args.quantize,
ffn_q_1 = args.ffn_1,
ffn_q_2 = args.ffn_2,
qkv_q = args.qkv,
emb_q = args.emb,
cls_q = args.cls,
mean_scale = args.mean_scale,
teacher_attnmap = args.teacher_attnmap,
teacher_context = args.teacher_context,
teacher_output = args.teacher_output,
)
student_model = QuantBertForSequenceClassification.from_pretrained(args.student_model, config = student_config, num_labels=num_labels)
student_model.to(device)
# ================================================================================ #
# Training Setting
# ================================================================================ #
if n_gpu > 1:
student_model = torch.nn.DataParallel(student_model)
param_optimizer = list(student_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in (no_decay))], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
]
schedule = 'warmup_linear'
optimizer = BertAdam(optimizer_grouped_parameters,
schedule=schedule,
lr=args.learning_rate,
warmup=0.1,
t_total=num_train_optimization_steps)
norm_func = torch.linalg.norm
loss_cos = torch.nn.CosineSimilarity(dim=-1, eps=1e-6)
global_step = 0
best_dev_acc = 0.0
previous_best = None
# ================================================================================ #
# Training Start
# ================================================================================ #
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_features))
logger.info(" Batch size = %d", args.batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
# Loss Init AverageMeter
l_gt_loss = AverageMeter()
l_att_loss = AverageMeter()
l_rep_loss = AverageMeter()
l_cls_loss = AverageMeter()
l_output_loss = AverageMeter()
l_loss = AverageMeter()
mixed_status = None
ce_loss_func = CrossEntropyLoss()
kl_loss = torch.nn.KLDivLoss(reduction="batchmean")
cos_loss_func = CosineEmbeddingLoss()
loss_mse = MSELoss()
for epoch_ in range(int(num_train_epochs)):
for batch in train_dataloader:
# Gradual TI (You could try other TI options - Stochastic/Inverted)
if args.training_type == "qat_step1" and args.teacher_gradual:
if global_step < num_train_optimization_steps / 6:
student_config.teacher_output = True
mixed_status = "OI"
elif global_step < num_train_optimization_steps / 3:
student_config.teacher_output = False
student_config.teacher_context = True
mixed_status = "CI"
else:
student_config.teacher_output = False
student_config.teacher_context = False
student_config.teacher_attnmap = True
mixed_status = "MI"
if args.training_type == "qat_step1" and args.teacher_stochastic:
rand_int = torch.randint(1,4,(1,))[0].item()
if rand_int == 1 :
student_config.teacher_output = True
student_config.teacher_context = False
student_config.teacher_attnmap = False
mixed_status = "OI"
elif rand_int == 2 :
student_config.teacher_output = False
student_config.teacher_context = True
student_config.teacher_attnmap = False
mixed_status = "CI"
else:
student_config.teacher_output = False
student_config.teacher_context = False
student_config.teacher_attnmap = True
mixed_status = "MI"
if args.training_type == "qat_step1" and args.teacher_inverted:
if global_step < num_train_optimization_steps / 3:
student_config.teacher_attnmap = True
mixed_status = "MI"
elif global_step < num_train_optimization_steps * 2/ 3:
student_config.teacher_attnmap = False
student_config.teacher_context = True
mixed_status = "CI"
else:
student_config.teacher_attnmap = False
student_config.teacher_context = False
student_config.teacher_output = True
mixed_status = "OI"
student_model.train()
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch
# tmp loss init
att_loss = 0.
rep_loss = 0.
cls_loss = 0.
attscore_loss = 0.
output_loss = 0.
loss = 0.
with torch.no_grad():
teacher_logits, teacher_atts, teacher_reps, teacher_probs, teacher_attn_blocks = teacher_model(input_ids, segment_ids, input_mask)
student_logits, student_atts, student_reps, student_probs, student_attn_blocks = student_model(input_ids, segment_ids, input_mask, teacher_outputs=(teacher_probs, teacher_attn_blocks))
            # We do not use the GT loss, for a fair comparison to TernaryBERT QAT (note that the GT loss helps boost the resulting accuracy on some tasks)
if args.gt_loss:
if output_mode == "classification":
loss = ce_loss_func(student_logits, label_ids)
elif output_mode == "regression":
loss = loss_mse(student_logits, teacher_logits)
l_gt_loss.update(loss.item())
# Pred Loss (TernaryBERT Loss)
if args.pred_distill:
if output_mode == "classification":
cls_loss = soft_cross_entropy(student_logits,teacher_logits)
elif output_mode == "regression":
cls_loss = MSELoss()(student_logits, teacher_logits)
else:
cls_loss = soft_cross_entropy(student_logits,teacher_logits)
l_cls_loss.update(cls_loss.item())
# Output Loss
if args.output_distill:
for i, (student_attn_block, teacher_attn_block) in enumerate(zip(student_attn_blocks, teacher_attn_blocks)):
                    tmp_loss = MSELoss()(student_attn_block[1], teacher_attn_block[1]) # index 1: attention output, index 0: layer context
output_loss += tmp_loss
l_output_loss.update(output_loss.item())
# Attention Score Loss (TernaryBERT Loss)
if args.attn_distill:
for i, (student_att, teacher_att) in enumerate(zip(student_atts, teacher_atts)):
                    # zero out masked positions (large negative scores) before the MSE;
                    # torch.zeros_like already matches the source device and dtype
                    student_att = torch.where(student_att <= -1e2, torch.zeros_like(student_att), student_att)
                    teacher_att = torch.where(teacher_att <= -1e2, torch.zeros_like(teacher_att), teacher_att)
tmp_loss = MSELoss()(student_att, teacher_att)
attscore_loss += tmp_loss
l_att_loss.update(attscore_loss.item())
# Rep Distill (TernaryBERT Loss)
if args.rep_distill:
for i, (student_rep, teacher_rep) in enumerate(zip(student_reps, teacher_reps)):
tmp_loss = MSELoss()(student_rep, teacher_rep)
rep_loss += tmp_loss
l_rep_loss.update(rep_loss.item())
loss += cls_loss + rep_loss + output_loss + attscore_loss
l_loss.update(loss.item())
if n_gpu > 1:
loss = loss.mean()
# Zero Step Loss Update
if global_step == 0:
if run is not None:
run["loss/total_loss"].log(value=l_loss.avg, step=global_step)
run["loss/gt_loss_loss"].log(value=l_gt_loss.avg, step=global_step)
run["loss/att_loss_loss"].log(value=l_att_loss.avg, step=global_step)
run["loss/rep_loss_loss"].log(value=l_rep_loss.avg, step=global_step)
run["loss/cls_loss_loss"].log(value=l_cls_loss.avg, step=global_step)
run["loss/output_loss_loss"].log(value=l_output_loss.avg, step=global_step)
run["metrics/lr"].log(value=optimizer.get_lr()[0], step=global_step)
loss.backward()
optimizer.step()
optimizer.zero_grad()
global_step += 1
# ================================================================================ #
# Evaluation
# ================================================================================ #
if global_step % args.eval_step == 0 or global_step == num_train_optimization_steps-1: # period or last step
student_model.eval()
result = do_eval(student_model, task_name, eval_dataloader,
device, output_mode, eval_labels, num_labels, teacher_model=teacher_model)
result['global_step'] = global_step
result['cls_loss'] = l_cls_loss.avg
result['att_loss'] = l_att_loss.avg
result['rep_loss'] = l_rep_loss.avg
result['loss'] = l_loss.avg
# Basic Logging (Training Loss, Clip Val)
if run is not None:
run["loss/total_loss"].log(value=l_loss.avg, step=global_step)
run["loss/gt_loss_loss"].log(value=l_gt_loss.avg, step=global_step)
run["loss/att_loss_loss"].log(value=l_att_loss.avg, step=global_step)
run["loss/rep_loss_loss"].log(value=l_rep_loss.avg, step=global_step)
run["loss/cls_loss_loss"].log(value=l_cls_loss.avg, step=global_step)
run["loss/output_loss_loss"].log(value=l_output_loss.avg, step=global_step)
run["metrics/lr"].log(value=optimizer.get_lr()[0], step=global_step)
if task_name=='cola':
eval_score = result["mcc"]
if run is not None:
run["metrics/mcc"].log(value=result['mcc'], step=global_step)
eval_result = result["mcc"]
# logger.info(f"Eval Result is {result['mcc']}")
elif task_name in ['sst-2','mnli','mnli-mm','qnli','rte','wnli']:
eval_score = result["acc"]
if run is not None:
run["metrics/acc"].log(value=result['acc'],step=global_step)
logger.info(f"Eval Result is {result['acc']}")
eval_result = result["acc"]
elif task_name in ['mrpc','qqp']:
eval_score = result["acc_and_f1"]
if run is not None:
run["metrics/acc_and_f1"].log(value=result['acc_and_f1'],step=global_step)
# logger.info(f"Eval Result is {result['acc']}, {result['f1']}")
eval_result = result["acc_and_f1"]
else:
eval_score = result["corr"]
if run is not None:
run["metrics/corr"].log(value=result['corr'],step=global_step)
# logger.info(f"Eval Result is {result['corr']}")
eval_result = result["corr"]
if args.training_type == "qat_step1":
logger.info(f"Gradual-{mixed_status}-{global_step}-SAVE : {eval_result*100}")
model_to_save = student_model.module if hasattr(student_model, 'module') else student_model
quant_model = copy.deepcopy(model_to_save)
output_model_file = os.path.join(output_quant_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_quant_dir, CONFIG_NAME)
torch.save(quant_model.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(output_quant_dir)
# Save Model
save_model = False
if task_name in acc_tasks and result['acc'] > best_dev_acc:
if task_name in ['sst-2','mnli','qnli','rte']:
previous_best = f"{result['acc']*100}"
elif task_name in ['mrpc','qqp']:
previous_best = f"{(result['f1'] + result['acc'])*50}"
best_dev_acc = result['acc']
save_model = True
if task_name in corr_tasks and result['corr'] > best_dev_acc:
previous_best = f"{result['corr']*100}"
best_dev_acc = result['corr']
save_model = True
if task_name in mcc_tasks and result['mcc'] > best_dev_acc:
previous_best = f"{result['mcc']*100}"
best_dev_acc = result['mcc']
save_model = True
if save_model:
# logger.info("====> Best Score *****")
# Test mnli-mm
if task_name == "mnli":
result = do_eval(student_model, 'mnli-mm', mm_eval_dataloader,
device, output_mode, mm_eval_labels, num_labels, teacher_model=teacher_model)
previous_best+= f"mm-acc:{result['acc']}"
if args.training_type == "qat_step1":
logger.info(fp32_performance)
logger.info(previous_best)
if args.save_fp_model:
# logger.info("***** Save full precision model *****")
model_to_save = student_model.module if hasattr(student_model, 'module') else student_model
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(output_dir)
                    if args.save_quantized_model and args.training_type != "qat_step1":
# logger.info("====> Save quantized model *****")
# output_quant_dir = os.path.join(output_dir, 'quant')
output_quant_dir = os.path.join(output_dir, 'exploration')
                        os.makedirs(output_quant_dir, exist_ok=True)
output_quant_dir = os.path.join(output_quant_dir, args.exp_name)
if not os.path.exists(output_quant_dir):
os.makedirs(output_quant_dir)
model_to_save = student_model.module if hasattr(student_model, 'module') else student_model
quant_model = copy.deepcopy(model_to_save)
output_model_file = os.path.join(output_quant_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_quant_dir, CONFIG_NAME)
torch.save(quant_model.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(output_quant_dir)
# TI QAT Step-1
if global_step >= num_train_optimization_steps and args.training_type == "qat_step1":
if global_step >= ti_step_1_total_step:
logger.info(f"==> TI-step1 Last Result = {eval_result}")
best_txt = os.path.join(output_quant_dir, "best_info.txt")
with open(best_txt, "w") as f_w:
f_w.write(previous_best)
return
logger.info(f"==> Previous Best = {previous_best}")
# Save Best Score
if args.save_quantized_model:
best_txt = os.path.join(output_quant_dir, "best_info.txt")
last_txt = os.path.join(output_quant_dir, "last_info.txt")
with open(best_txt, "w") as f_w:
f_w.write(previous_best)
with open(last_txt, "w") as f_w:
f_w.write(f"{eval_result*100}")
# f_w.write(f"Last Result = {result}")
if __name__ == "__main__":
main()
| 44,506 | 43.11001 | 218 | py |
ti-kd-qat | ti-kd-qat-main/utils.py | #*
# @file Different utility functions
# Copyright (c) Zhewei Yao, Amir Gholami
# All rights reserved.
# This file is part of PyHessian library.
#
# PyHessian is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyHessian is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyHessian. If not, see <http://www.gnu.org/licenses/>.
#*
import torch
import math
from torch.autograd import Variable
import numpy as np
def group_product(xs, ys):
"""
the inner product of two lists of variables xs,ys
:param xs:
:param ys:
:return:
"""
return sum([torch.sum(x * y) for (x, y) in zip(xs, ys)])
def group_add(params, update, alpha=1):
"""
params = params + update*alpha
:param params: list of variable
:param update: list of data
:return:
"""
for i, p in enumerate(params):
params[i].data.add_(update[i] * alpha)
return params
def normalization(v):
"""
normalization of a list of vectors
return: normalized vectors v
"""
s = group_product(v, v)
s = s**0.5
s = s.cpu().item()
v = [vi / (s + 1e-6) for vi in v]
return v
def get_params_grad(model):
"""
get model parameters and corresponding gradients
"""
params = []
grads = []
for param in model.parameters():
if not param.requires_grad:
continue
params.append(param)
grads.append(0. if param.grad is None else param.grad + 0.)
return params, grads
def hessian_vector_product(gradsH, params, v):
"""
compute the hessian vector product of Hv, where
gradsH is the gradient at the current point,
params is the corresponding variables,
v is the vector.
"""
hv = torch.autograd.grad(gradsH,
params,
grad_outputs=v,
only_inputs=True,
retain_graph=True)
return hv
def orthnormal(w, v_list):
"""
make vector w orthogonal to each vector in v_list.
afterwards, normalize the output w
"""
for v in v_list:
w = group_add(w, v, alpha=-group_product(w, v))
return normalization(w)
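if __name__ == "__main__":
    # Minimal sketch (not part of PyHessian): estimate the top Hessian
    # eigenvalue of a toy model by power iteration with the helpers above.
    torch.manual_seed(0)
    model = torch.nn.Linear(4, 2)
    inputs = torch.randn(8, 4)
    targets = torch.randint(0, 2, (8,))
    loss = torch.nn.functional.cross_entropy(model(inputs), targets)
    params = [p for p in model.parameters() if p.requires_grad]
    # create_graph=True keeps the graph so the gradient can be differentiated again
    gradsH = torch.autograd.grad(loss, params, create_graph=True)
    v = normalization([torch.randn_like(p) for p in params])
    for _ in range(20):
        hv = hessian_vector_product(gradsH, params, v)
        eigenvalue = group_product(hv, v).item()  # Rayleigh quotient estimate
        v = normalization(list(hv))
    print(f"estimated top Hessian eigenvalue: {eigenvalue:.4f}")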
| 2,610 | 25.642857 | 70 | py |
ti-kd-qat | ti-kd-qat-main/transformer/utils_quant.py | import torch
import torch.nn as nn
import sys
import logging
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
logger = logging.getLogger()
class SymQuantizer(torch.autograd.Function):
"""
uniform quantization
"""
@staticmethod
def forward(ctx, input, clip_val=2.5, num_bits=2, layerwise=False):
"""
:param ctx:
:param input: tensor to be quantized
:param clip_val: clip the tensor before quantization
        :param num_bits: number of bits
:return: quantized tensor
"""
ctx.save_for_backward(input, clip_val)
# input = torch.clamp(input, clip_val[0], clip_val[1])
input = torch.where(input < clip_val[1], input, clip_val[1])
input = torch.where(input > clip_val[0], input, clip_val[0])
# NOTE: dynamic scaling (max_input).
if layerwise:
max_input = torch.max(torch.abs(input)).expand_as(input)
else:
if input.ndimension() <= 3:
# weight & hidden layer
max_input = torch.max(torch.abs(input), dim=-1, keepdim=True)[0].expand_as(input).detach()
elif input.ndimension() == 4:
# TODO: attention score matrix, calculate alpha / beta per head
tmp = input.view(input.shape[0], input.shape[1], -1)
max_input = torch.max(torch.abs(tmp), dim=-1, keepdim=True)[0].unsqueeze(-1).expand_as(input).detach()
else:
raise ValueError
s = (2 ** (num_bits - 1) - 1) / max_input
output = torch.round(input * s).div(s)
return output
@staticmethod
def backward(ctx, grad_output):
"""
:param ctx: saved non-clipped full-precision tensor and clip_val
        :param grad_output: gradient wrt the quantized tensor
:return: estimated gradient wrt the full-precision tensor
"""
input, clip_val = ctx.saved_tensors # unclipped input
grad_input = grad_output.clone()
grad_input[input.ge(clip_val[1])] = 0
grad_input[input.le(clip_val[0])] = 0
return grad_input, None, None, None
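# Worked example (made-up numbers): with num_bits=8 and a row whose max |x|
# is 2.0, the scale is s = (2**7 - 1) / 2.0 = 63.5, so x = 0.1 quantizes to
# round(0.1 * 63.5) / 63.5 = 6 / 63.5 ≈ 0.0945. The backward pass is a
# straight-through estimator: gradients pass unchanged inside
# [clip_val[0], clip_val[1]] and are zeroed outside it.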
class AsymQuantizer(torch.autograd.Function):
"""
min-max quantization
"""
@staticmethod
def forward(ctx, input, clip_val, num_bits, layerwise):
"""
:param ctx:
:param input: tensor to be quantized
:param clip_val: clip the tensor before quantization
        :param num_bits: number of bits
:return: quantized tensor
"""
ctx.save_for_backward(input, clip_val)
input = torch.where(input < clip_val[1], input, clip_val[1])
input = torch.where(input > clip_val[0], input, clip_val[0])
# input = torch.clamp(input, clip_val[0], clip_val[1])
# NOTE: dynamic scaling gives better performance than static
if layerwise:
alpha = (input.max() - input.min()).detach()
beta = input.min().detach()
else:
if input.ndimension() <= 3:
# weight & hidden layer
alpha = (input.max(dim=-1, keepdim=True)[0] - input.min(dim=-1, keepdim=True)[0]).expand_as(input).detach()
beta = input.min(dim=-1, keepdim=True)[0].expand_as(input).detach()
elif input.ndimension() == 4:
# TODO: attention score matrix, calculate alpha / beta per head
tmp = input.view(input.shape[0], input.shape[1], -1)
alpha = (tmp.max(dim=-1, keepdim=True)[0].unsqueeze(-1) - \
tmp.min(dim=-1, keepdim=True)[0].unsqueeze(-1)).expand_as(input).detach()
beta = tmp.min(dim=-1, keepdim=True)[0].unsqueeze(-1).expand_as(input).detach()
else:
raise ValueError
input_normalized = (input - beta) / (alpha + 1e-8)
s = (2**num_bits - 1)
quant_input = torch.round(input_normalized * s).div(s)
output = quant_input * (alpha + 1e-8) + beta
return output
@staticmethod
def backward(ctx, grad_output):
"""
:param ctx: saved non-clipped full-precision tensor and clip_val
        :param grad_output: gradient wrt the quantized tensor
:return: estimated gradient wrt the full-precision tensor
"""
input, clip_val = ctx.saved_tensors # unclipped input
grad_input = grad_output.clone()
grad_input[input.ge(clip_val[1])] = 0
grad_input[input.le(clip_val[0])] = 0
return grad_input, None, None, None
class TwnQuantizer(torch.autograd.Function):
"""Ternary Weight Networks (TWN)
Ref: https://arxiv.org/abs/1605.04711
"""
@staticmethod
def forward(ctx, input, clip_val, num_bits, layerwise):
"""
:param input: tensor to be ternarized
:return: quantized tensor
"""
mean_scale = 0.7
ctx.save_for_backward(input, clip_val)
input = torch.where(input < clip_val[1], input, clip_val[1])
input = torch.where(input > clip_val[0], input, clip_val[0])
if layerwise:
m = input.norm(p=1).div(input.nelement())
thres = mean_scale * m
pos = (input > thres).float()
neg = (input < -thres).float()
mask = (input.abs() > thres).float()
alpha = (mask * input).abs().sum() / mask.sum()
result = alpha * pos - alpha * neg
else: # row-wise only for embed / weight
n = input[0].nelement()
m = input.data.norm(p=1, dim=1).div(n)
thres = (mean_scale * m).view(-1, 1).expand_as(input)
pos = (input > thres).float()
neg = (input < -thres).float()
mask = (input.abs() > thres).float()
alpha = ((mask * input).abs().sum(dim=1) / mask.sum(dim=1)).view(-1, 1)
result = alpha * pos - alpha * neg
return result
@staticmethod
def backward(ctx, grad_output):
"""
:param ctx: saved non-clipped full-precision tensor and clip_val
        :param grad_output: gradient wrt the quantized tensor
:return: estimated gradient wrt the full-precision tensor
"""
input, clip_val = ctx.saved_tensors # unclipped input
grad_input = grad_output.clone()
grad_input[input.ge(clip_val[1])] = 0
grad_input[input.le(clip_val[0])] = 0
return grad_input, None, None, None
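# Worked example (made-up numbers, row-wise branch): for a row
# w = [0.8, -0.5, 0.1, -0.05] the threshold is
# thres = 0.7 * mean(|w|) = 0.7 * 0.3625 ≈ 0.254, so only 0.8 and -0.5
# survive; alpha = (0.8 + 0.5) / 2 = 0.65 and the ternarized row is
# [0.65, -0.65, 0, 0]. Note that mean_scale is fixed to 0.7 inside this
# quantizer, independent of the config.mean_scale stored by QuantizeLinear.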
class QuantizeLinear(nn.Linear):
    def __init__(self, *kargs, bias=True, config=None, map=False, name=None):
        super(QuantizeLinear, self).__init__(*kargs, bias=bias)  # pass the bias flag through instead of hard-coding True
self.weight_bits = 2
self.input_bits= 8
self.mean_scale = config.mean_scale
self.name = name
self.map = map
self.config = config
self.weight_quantizer = TwnQuantizer
# Weight & Activation Quantization Setting
self.act_quantizer = SymQuantizer
self.register_buffer('act_clip_val', torch.tensor([-config.clip_val, config.clip_val]))
        self.register_buffer('weight_clip_val', torch.tensor([-config.clip_val, config.clip_val]))
def forward(self, input):
# quantize weight
weight = self.weight_quantizer.apply(self.weight, self.weight_clip_val, self.weight_bits, True)
q_input = self.act_quantizer.apply(input, self.act_clip_val, self.input_bits, True)
# nn.Linear w/ Quantized input and output
out = nn.functional.linear(q_input, weight)
if not self.bias is None:
out += self.bias.view(1, -1).expand_as(out)
return out
class QuantizeEmbedding(nn.Embedding):
def __init__(self, *kargs,padding_idx=None, config = None):
super(QuantizeEmbedding, self).__init__(*kargs, padding_idx = padding_idx)
self.weight_bits = 2
self.layerwise = False
self.mean_scale = config.mean_scale
self.config = config
self.weight_quantizer = TwnQuantizer
self.register_buffer('weight_clip_val', torch.tensor([-config.clip_val, config.clip_val]))
def forward(self, input):
weight = self.weight_quantizer.apply(self.weight, self.weight_clip_val, self.weight_bits, self.layerwise)
out = nn.functional.embedding(
input, weight, self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse)
return out
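if __name__ == "__main__":
    # Minimal smoke test (sketch): the real `config` is a BertConfig; a
    # SimpleNamespace carrying the two attributes QuantizeLinear actually
    # reads (clip_val, mean_scale) is enough to exercise the forward pass.
    from types import SimpleNamespace
    cfg = SimpleNamespace(clip_val=2.5, mean_scale=0.7)
    layer = QuantizeLinear(8, 4, config=cfg)
    x = torch.randn(2, 8)
    y = layer(x)  # ternary (2-bit) weights, 8-bit symmetric activations
    print(y.shape)  # torch.Size([2, 4])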
| 8,642 | 37.932432 | 123 | py |
ti-kd-qat | ti-kd-qat-main/transformer/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
import abc
import sys
logger = logging.getLogger(__name__)
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta('ABC', (), {})
class _LRSchedule(ABC):
""" Parent of all LRSchedules here. """
warn_t_total = False # is set to True for schedules where progressing beyond t_total steps doesn't make sense
def __init__(self, warmup=0.002, t_total=-1, **kw):
"""
:param warmup: what fraction of t_total steps will be used for linear warmup
:param t_total: how many training steps (updates) are planned
:param kw:
"""
super(_LRSchedule, self).__init__(**kw)
if t_total < 0:
logger.warning("t_total value of {} results in schedule not being applied".format(t_total))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
warmup = max(warmup, 0.)
self.warmup, self.t_total = float(warmup), float(t_total)
self.warned_for_t_total_at_progress = -1
def get_lr(self, step, nowarn=False):
"""
:param step: which of t_total steps we're on
:param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
:return: learning rate multiplier for current update
"""
if self.t_total < 0:
return 1.
progress = float(step) / self.t_total
ret = self.get_lr_(progress)
        # warning for exceeding t_total (only active for schedules with warn_t_total set, e.g. warmup_linear)
if not nowarn and self.warn_t_total and progress > 1. and progress > self.warned_for_t_total_at_progress:
# logger.warning(
# "Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly."
# .format(ret, self.__class__.__name__))
self.warned_for_t_total_at_progress = progress
# end warning
return ret
@abc.abstractmethod
def get_lr_(self, progress):
"""
:param progress: value between 0 and 1 (unless going beyond t_total steps) specifying training progress
:return: learning rate multiplier for current update
"""
return 1.
class ConstantLR(_LRSchedule):
def get_lr_(self, progress):
return 1.
class WarmupCosineSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Decreases learning rate from 1. to 0. over remaining `1 - warmup` steps following a cosine curve.
If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
"""
warn_t_total = True
def __init__(self, warmup=0.002, t_total=-1, cycles=.5, **kw):
"""
:param warmup: see LRSchedule
:param t_total: see LRSchedule
:param cycles: number of cycles. Default: 0.5, corresponding to cosine decay from 1. at progress==warmup and 0 at progress==1.
:param kw:
"""
super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw)
self.cycles = cycles
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
return 0.5 * (1. + math.cos(math.pi * self.cycles * 2 * progress))
class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
If `cycles` (default=1.) is different from default, learning rate follows `cycles` times a cosine decaying
learning rate (with hard restarts).
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
assert(cycles >= 1.)
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * ((self.cycles * progress) % 1)))
return ret
class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
"""
All training progress is divided in `cycles` (default=1.) parts of equal length.
Every part follows a schedule with the first `warmup` fraction of the training steps linearly increasing from 0. to 1.,
followed by a learning rate decreasing from 1. to 0. following a cosine curve.
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
assert(warmup * cycles < 1.)
warmup = warmup * cycles if warmup >= 0 else warmup
super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
def get_lr_(self, progress):
progress = progress * self.cycles % 1.
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * progress))
return ret
class WarmupConstantSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Keeps learning rate equal to 1. after warmup.
"""
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return 1.
class WarmupLinearSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Linearly decreases learning rate from 1. to 0. over remaining `1 - warmup` steps.
"""
warn_t_total = True
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return max((progress - 1.) / (self.warmup - 1.), 0.)
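# Worked example (made-up numbers): with warmup=0.1 and t_total=1000, step 50
# gives progress 0.05, so the multiplier is 0.05 / 0.1 = 0.5 (warming up);
# step 550 gives progress 0.55, so the multiplier is
# max((0.55 - 1.) / (0.1 - 1.), 0.) = 0.5, decaying linearly to 0 at step 1000.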
SCHEDULES = {
None: ConstantLR,
"none": ConstantLR,
"warmup_cosine": WarmupCosineSchedule,
"warmup_constant": WarmupConstantSchedule,
"warmup_linear": WarmupLinearSchedule
}
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1
schedule: schedule to use for the warmup (see above).
Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`, `'none'`, `None` or a `_LRSchedule` object (see below).
If `None` or `'none'`, learning rate is always kept constant.
Default : `'warmup_linear'`
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
# initialize schedule object
if not isinstance(schedule, _LRSchedule):
schedule_type = SCHEDULES[schedule]
schedule = schedule_type(warmup=warmup, t_total=t_total)
else:
if warmup != -1 or t_total != -1:
logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
"Please specify custom warmup and t_total in _LRSchedule object.")
defaults = dict(lr=lr, schedule=schedule,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
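if __name__ == "__main__":
    # Minimal usage sketch (toy model): mirrors the grouped-parameter setup
    # used by the training scripts, then takes a single optimizer step.
    model = torch.nn.Linear(4, 2)
    no_decay = ['bias']
    grouped_params = [
        {'params': [p for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
    ]
    optimizer = BertAdam(grouped_params, lr=2e-5, warmup=0.1, t_total=100,
                         schedule='warmup_linear')
    loss = model(torch.randn(8, 4)).pow(2).mean()
    loss.backward()
    optimizer.step()
    print(optimizer.get_lr())  # scheduled per-parameter learning rates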
| 13,044 | 42.33887 | 139 | py |
ti-kd-qat | ti-kd-qat-main/transformer/modeling_quant.py | # coding=utf-8
# 2020.04.20 - Add&replace quantization modules
# Huawei Technologies Co., Ltd <zhangwei379@huawei.com>
# Copyright (c) 2020, Huawei Technologies Co., Ltd. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import os
import torch
from torch import nn
from torch.autograd import Variable
from .configuration import BertConfig
from .utils_quant import QuantizeLinear, QuantizeEmbedding, SymQuantizer, TwnQuantizer
logger = logging.getLogger(__name__)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
#WEIGHTS_NAME = "FFN_GT_KD_AUG.bin"
from torch.nn import CrossEntropyLoss, MSELoss
def soft_cross_entropy(predicts, targets):
student_likelihood = torch.nn.functional.log_softmax(predicts, dim=-1)
targets_prob = torch.nn.functional.softmax(targets, dim=-1)
return torch.sum((- targets_prob * student_likelihood), dim=-1).mean()
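# Example (sketch): soft_cross_entropy is the KD prediction loss
# H(softmax(teacher), log_softmax(student)) averaged over the batch. Unlike
# nn.CrossEntropyLoss it takes the teacher's logits as a soft target:
#   cls_loss = soft_cross_entropy(student_logits, teacher_logits)
# for two [batch, num_classes] logit tensors.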
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BertEmbeddings(nn.Module):
def __init__(self, config):
super(BertEmbeddings, self).__init__()
if config.quantize and config.emb_q:
self.word_embeddings = QuantizeEmbedding(config.vocab_size, config.hidden_size, padding_idx = 0,config=config)
else:
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
# position_embeddings and token_type_embeddings are kept in fp32 anyway
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids):
seq_length = input_ids.size(1)
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config, i):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(
config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.i = i
self.config = config
self.input_bits = 8
# ================================================================================ #
# Weight Quant Setting
# ================================================================================ #
if self.config.quantize and config.qkv_q:
self.query = QuantizeLinear(config.hidden_size, self.all_head_size,config=config, name=f"layer_{self.i}_{self.__class__.__name__}_query")
self.key = QuantizeLinear(config.hidden_size, self.all_head_size,config=config, name=f"layer_{self.i}_{self.__class__.__name__}_key")
self.value = QuantizeLinear(config.hidden_size, self.all_head_size,config=config, name=f"layer_{self.i}_{self.__class__.__name__}_value")
else:
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
# ================================================================================ #
# ACT Quant Setting
# ================================================================================ #
self.act_quantizer = SymQuantizer
self.register_buffer('clip_query', torch.Tensor([-config.clip_val, config.clip_val]))
self.register_buffer('clip_key', torch.Tensor([-config.clip_val, config.clip_val]))
self.register_buffer('clip_value', torch.Tensor([-config.clip_val, config.clip_val]))
self.register_buffer('clip_attn', torch.Tensor([-config.clip_val, config.clip_val]))
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[
:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, teacher_probs=None):
# Stop Grad (With TI, stopping gradient is required for internal distillation)
if self.config.teacher_attnmap:
hidden_states_ = hidden_states.clone().detach()
mixed_query_layer = self.query(hidden_states_)
mixed_key_layer = self.key(hidden_states_)
mixed_value_layer = self.value(hidden_states)
elif self.config.teacher_context or self.config.teacher_output:
hidden_states_ = hidden_states.clone().detach()
mixed_query_layer = self.query(hidden_states_)
mixed_key_layer = self.key(hidden_states_)
mixed_value_layer = self.value(hidden_states_)
else:
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
# Batch Size : 16, Max_len_seq : 64
# q, k, v : 16, 64, 768
# transpose for scores : 16, 64, 768 -> 16, 64, 12, 64 -> 16, 12(head), 64, 64
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
query_layer = self.act_quantizer.apply(query_layer, self.clip_query, self.input_bits, True)
key_layer = self.act_quantizer.apply(key_layer, self.clip_key, self.input_bits, True)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_scores = attention_scores + attention_mask
st_attention_probs = nn.Softmax(dim=-1)(attention_scores)
if self.config.teacher_attnmap and teacher_probs is not None:
# Teacher Intervention Map (TI-M)
tc_attention_probs = teacher_probs[0][self.i]
attention_prob = st_attention_probs # attention probs to return (for internal distillation)
attention_probs = self.dropout(tc_attention_probs) # replace student attention map to teacher attention map
else:
attention_prob = st_attention_probs # attention probs to return (for internal distillation)
attention_probs = self.dropout(st_attention_probs)
# quantize both attention probs and value layer for dot product
attention_probs = self.act_quantizer.apply(attention_probs, self.clip_attn, self.input_bits, True)
value_layer = self.act_quantizer.apply(value_layer, self.clip_value, self.input_bits, True)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer_ = context_layer
if self.config.teacher_context and teacher_probs is not None:
            # Teacher Intervention Context (TI-C): the TI-C stage is inserted to give a smoothing effect to gradual teacher intervention
context_layer = teacher_probs[1][self.i][0] # TI/CI - Layer Number - Context
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_scores, attention_prob, context_layer_
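# The three config flags exercised above map to the TI variants (summary,
# matching the code paths in this forward pass): teacher_attnmap (MI) keeps
# the student's attention scores/probs for distillation but runs the value
# path on the teacher's attention map; teacher_context (CI) swaps in the
# teacher's per-layer context tensor; teacher_output (OI) is applied in
# BertSelfOutput below, which swaps in the teacher's self-attention output.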
class BertAttention(nn.Module):
def __init__(self, config, i):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config, i)
self.output = BertSelfOutput(config, i)
self.config = config
self.i = i
def forward(self, input_tensor, attention_mask, teacher_probs=None):
self_output, layer_att, layer_probs, layer_context = self.self(input_tensor, attention_mask, teacher_probs=teacher_probs)
attention_output, self_output_hs = self.output(self_output, input_tensor, teacher_probs=teacher_probs)
return attention_output, layer_att, layer_probs, (layer_context, attention_output, self_output_hs)
class BertSelfOutput(nn.Module):
def __init__(self, config, i):
super(BertSelfOutput, self).__init__()
self.config = config
self.i = i
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(
config.hidden_size / config.num_attention_heads)
if not self.config.qkv_q:
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
else:
self.dense = QuantizeLinear(config.hidden_size, config.hidden_size,config=config)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor, teacher_probs=None):
hidden_states = self.dense(hidden_states)
self_output_hs = hidden_states
if self.config.teacher_output:
hidden_states = teacher_probs[1][self.i][2] # SA-output
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states ,self_output_hs
class BertIntermediate(nn.Module):
def __init__(self, config, i):
super(BertIntermediate, self).__init__()
self.i = i
if config.quantize and config.ffn_q_1:
self.dense = QuantizeLinear(config.hidden_size, config.intermediate_size,config=config, name=f"layer_{self.i}_{self.__class__.__name__}")
else:
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = gelu(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config, i):
super(BertOutput, self).__init__()
self.i = i
if config.quantize and config.ffn_q_2:
self.dense = QuantizeLinear(config.intermediate_size, config.hidden_size,config=config, name=f"layer_{self.i}_{self.__class__.__name__}")
else:
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config, i):
super(BertLayer, self).__init__()
self.attention = BertAttention(config, i)
self.intermediate = BertIntermediate(config, i)
self.output = BertOutput(config, i)
def forward(self, hidden_states, attention_mask, teacher_probs=None):
attention_output, layer_att, layer_probs, layer_value = self.attention(
hidden_states, attention_mask, teacher_probs=teacher_probs)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output, layer_att, layer_probs, layer_value
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.layer = nn.ModuleList([BertLayer(config, i)
for i in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, teacher_probs=None):
all_encoder_layers = [hidden_states]
all_encoder_atts = []
all_encoder_probs = []
all_encoder_values = []
for _, layer_module in enumerate(self.layer):
hidden_states, layer_att, layer_probs, layer_value = layer_module(
hidden_states, attention_mask, teacher_probs=teacher_probs)
all_encoder_layers.append(hidden_states)
all_encoder_atts.append(layer_att)
all_encoder_probs.append(layer_probs)
all_encoder_values.append(layer_value)
return all_encoder_layers, all_encoder_atts, all_encoder_probs, all_encoder_values
class BertPooler(nn.Module):
def __init__(self, config, recurs=None):
super(BertPooler, self).__init__()
if config.quantize and config.cls_q:
self.dense = QuantizeLinear(config.hidden_size, config.hidden_size,config=config, name=f"{self.__class__.__name__}")
else:
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
self.config = config
def forward(self, hidden_states):
pooled_output = hidden_states[-1][:, 0]
pooled_output = self.dense(pooled_output)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(
mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Params:
pretrained_model_name_or_path:
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
config: BertConfig instance
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
config = kwargs.get('config', None)
kwargs.pop('config', None)
if config is None:
# Load config
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
#logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None:
weights_path = os.path.join(
pretrained_model_name_or_path, WEIGHTS_NAME)
# logger.info("Loading model {}".format(weights_path))
state_dict = torch.load(weights_path, map_location='cpu')
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
# logger.info('loading model...')
load(model, prefix=start_prefix)
return model
class BertModel(BertPreTrainedModel):
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, teacher_probs=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# extended_attention_mask = extended_attention_mask.to(
# dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers, attention_scores, attention_probs, attention_values = self.encoder(embedding_output,
extended_attention_mask, teacher_probs=teacher_probs)
pooled_output = self.pooler(encoded_layers)
return encoded_layers, attention_scores, attention_probs, attention_values, pooled_output
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config, num_labels = 2):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
self.config = config
def forward(self, input_ids,
token_type_ids=None,
attention_mask=None,
labels=None,
teacher_outputs=None,
seq_lengths=None):
encoded_layers, student_atts, attention_probs, attention_values, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, teacher_probs=teacher_outputs)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
return logits, student_atts, encoded_layers, attention_probs, attention_values
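# Usage pattern (mirrors the training scripts): the fp32 teacher runs first
# and its attention probs / per-layer attention blocks are threaded into the
# quantized student, e.g.
#   t_logits, t_atts, t_reps, t_probs, t_blocks = teacher_model(ids, seg_ids, mask)
#   s_logits, s_atts, s_reps, s_probs, s_blocks = student_model(
#       ids, seg_ids, mask, teacher_outputs=(t_probs, t_blocks))
# where ids, seg_ids and mask stand for input_ids, token_type_ids and
# attention_mask.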
| 21,718 | 43.966874 | 172 | py |
ti-kd-qat | ti-kd-qat-main/transformer/modeling.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import os
import torch
from torch import nn
from torch.autograd import Variable
from .configuration import BertConfig
from .utils_quant import QuantizeLinear, QuantizeEmbedding, SymQuantizer
logger = logging.getLogger(__name__)
#CONFIG_NAME = "config_bert_base.json"
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
#WEIGHTS_NAME = "FFN_GT_KD_AUG.bin"
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
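# For reference, a minimal sketch of the OpenAI GPT tanh approximation quoted in
# the docstring above (illustrative only, not used by this model; relies on the
# torch and math imports at the top of this file):
def gelu_tanh_approx(x):
    # tanh-based approximation of the exact erf formulation above
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))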
class BertEmbeddings(nn.Module):
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx = 0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids):
seq_length = input_ids.size(1)
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
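# BERT input embeddings are the element-wise sum of token, position, and
# segment (token-type) embeddings, normalized and regularized below.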
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config, i):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(
config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.i = i
self.config = config
def transpose_for_scores(self, x):
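# Reshape [batch, seq_len, hidden] -> [batch, heads, seq_len, head_size]
# so that each attention head attends independently.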
new_x_shape = x.size()[
:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
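# query/key/value are now [batch, heads, seq_len, head_size]; the matmul below
# yields [batch, heads, seq_len, seq_len] raw scores, scaled by sqrt(head_size)
# as in scaled dot-product attention ("Attention Is All You Need").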
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
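# Keep a pre-dropout copy of the attention distribution; it is returned below,
# e.g. so a distillation loss can match attention maps between models.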
attention_prob = attention_probs
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer_ = context_layer
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_scores, attention_prob, context_layer_ # , value_layer
class BertSelfOutput(nn.Module):
def __init__(self, config, i):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(
config.hidden_size / config.num_attention_heads)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
self_output_hs = hidden_states
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states ,self_output_hs
class BertAttention(nn.Module):
def __init__(self, config, i):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config, i)
self.output = BertSelfOutput(config, i)
def forward(self, input_tensor, attention_mask):
self_output, layer_att, layer_probs, layer_context = self.self(input_tensor, attention_mask)
attention_output, self_output_hs = self.output(self_output, input_tensor)
return attention_output, layer_att, layer_probs, (layer_context, attention_output, self_output_hs)
class BertIntermediate(nn.Module):
def __init__(self, config, i):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.i = i
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = gelu(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config, i):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.i = i
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config, i):
super(BertLayer, self).__init__()
self.attention = BertAttention(config, i)
self.intermediate = BertIntermediate(config, i)
self.output = BertOutput(config, i)
def forward(self, hidden_states, attention_mask):
attention_output, layer_att, layer_probs, layer_value = self.attention(
hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output, layer_att, layer_probs, layer_value
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.layer = nn.ModuleList([BertLayer(config, i)
for i in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask):
all_encoder_layers = [hidden_states]
all_encoder_atts = []
all_encoder_probs = []
all_encoder_values = []
for _, layer_module in enumerate(self.layer):
hidden_states, layer_att, layer_probs, layer_value = layer_module(
hidden_states, attention_mask)
all_encoder_layers.append(hidden_states)
all_encoder_atts.append(layer_att)
all_encoder_probs.append(layer_probs)
all_encoder_values.append(layer_value)
return all_encoder_layers, all_encoder_atts, all_encoder_probs, all_encoder_values
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
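# "Pooling" here simply takes the hidden state of the first token
# (the [CLS] token) and passes it through a dense layer with tanh.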
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(
mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
# MSKIM
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Params:
pretrained_model_name_or_path:
- a path or url to a pretrained model archive containing:
. `config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
config: BertConfig instance
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
config = kwargs.get('config', None)
kwargs.pop('config', None)
if config is None:
# Load config
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
#logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None:
weights_path = os.path.join(
pretrained_model_name_or_path, WEIGHTS_NAME)
# logger.info("Loading model {}".format(weights_path))
state_dict = torch.load(weights_path, map_location='cpu')
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
# logger.info('loading model...')
load(model, prefix=start_prefix)
return model
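# Illustrative usage (the checkpoint directory below is hypothetical); the
# directory is expected to contain CONFIG_NAME ("config.json") and
# WEIGHTS_NAME ("pytorch_model.bin"):
#
#   model = BertForSequenceClassification.from_pretrained('checkpoints/sst2', num_labels=2)
#   model.eval()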
class BertModel(BertPreTrainedModel):
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers, attention_scores, attention_probs, attention_values = self.encoder(embedding_output,
extended_attention_mask)
pooled_output = self.pooler(encoded_layers[-1])
return encoded_layers, attention_scores, attention_probs, attention_values, pooled_output
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config, num_labels = 2):
super(BertForSequenceClassification, self).__init__(config)
# MSKIM made exception for MNLI Classifier
if 'num_labels' in config.to_dict():
self.num_labels = config.num_labels
else:
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids,
token_type_ids=None,
attention_mask=None,
labels=None):
encoded_layers, attention_scores, attention_probs, attention_values, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss, attention_scores, encoded_layers
else:
return logits, attention_scores, encoded_layers, attention_probs, attention_values
| 16,313 | 40.093199 | 145 | py |
ti-kd-qat | ti-kd-qat-main/transformer/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
import shutil
import tempfile
import fnmatch
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert'))
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
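# Example sketch (digest values elided): url_to_filename('https://example.com/vocab.txt')
# returns the sha256 hex digest of the URL; when an etag is given, the result
# becomes '<url_hash>.<etag_hash>'.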
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode('utf-8')
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(output_string, 'utf-8') # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| 9,106 | 32.72963 | 112 | py |
gevent | gevent-master/src/gevent/testing/patched_tests_setup.py | # pylint:disable=missing-docstring,invalid-name,too-many-lines
from __future__ import print_function, absolute_import, division
import collections
import contextlib
import functools
import sys
import os
# At least on 3.6+, importing platform
# imports subprocess, which imports selectors. That
# can expose issues with monkey patching. We don't need it
# though.
# import platform
import re
from .sysinfo import RUNNING_ON_APPVEYOR as APPVEYOR
from .sysinfo import RUNNING_ON_TRAVIS as TRAVIS
from .sysinfo import RESOLVER_NOT_SYSTEM as ARES
from .sysinfo import RESOLVER_ARES
from .sysinfo import RESOLVER_DNSPYTHON
from .sysinfo import RUNNING_ON_CI
from .sysinfo import RUNNING_ON_MUSLLINUX
from .sysinfo import RUN_COVERAGE
from .sysinfo import PYPY
from .sysinfo import PYPY3
from .sysinfo import PY38
from .sysinfo import PY39
from .sysinfo import PY310
from .sysinfo import PY311
from .sysinfo import PY312
from .sysinfo import WIN
from .sysinfo import OSX
from .sysinfo import LIBUV
from .sysinfo import CFFI_BACKEND
from . import flaky
CPYTHON = not PYPY
# By default, test cases are expected to switch and emit warnings if there was none
# If a test is found in this list, it's expected not to switch.
no_switch_tests = '''test_patched_select.SelectTestCase.test_error_conditions
test_patched_ftplib.*.test_all_errors
test_patched_ftplib.*.test_getwelcome
test_patched_ftplib.*.test_sanitize
test_patched_ftplib.*.test_set_pasv
#test_patched_ftplib.TestIPv6Environment.test_af
test_patched_socket.TestExceptions.testExceptionTree
test_patched_socket.Urllib2FileobjectTest.testClose
test_patched_socket.TestLinuxAbstractNamespace.testLinuxAbstractNamespace
test_patched_socket.TestLinuxAbstractNamespace.testMaxName
test_patched_socket.TestLinuxAbstractNamespace.testNameOverflow
test_patched_socket.FileObjectInterruptedTestCase.*
test_patched_urllib.*
test_patched_asyncore.HelperFunctionTests.*
test_patched_httplib.BasicTest.*
test_patched_httplib.HTTPSTimeoutTest.test_attributes
test_patched_httplib.HeaderTests.*
test_patched_httplib.OfflineTest.*
test_patched_httplib.HTTPSTimeoutTest.test_host_port
test_patched_httplib.SourceAddressTest.testHTTPSConnectionSourceAddress
test_patched_select.SelectTestCase.test_error_conditions
test_patched_smtplib.NonConnectingTests.*
test_patched_urllib2net.OtherNetworkTests.*
test_patched_wsgiref.*
test_patched_subprocess.HelperFunctionTests.*
'''
ignore_switch_tests = '''
test_patched_socket.GeneralModuleTests.*
test_patched_httpservers.BaseHTTPRequestHandlerTestCase.*
test_patched_queue.*
test_patched_signal.SiginterruptTest.*
test_patched_urllib2.*
test_patched_ssl.*
test_patched_signal.BasicSignalTests.*
test_patched_threading_local.*
test_patched_threading.*
'''
def make_re(tests):
tests = [x.strip().replace(r'\.', r'\\.').replace('*', '.*?')
for x in tests.split('\n') if x.strip()]
return re.compile('^%s$' % '|'.join(tests))
no_switch_tests = make_re(no_switch_tests)
ignore_switch_tests = make_re(ignore_switch_tests)
def get_switch_expected(fullname):
"""
>>> get_switch_expected('test_patched_select.SelectTestCase.test_error_conditions')
False
>>> get_switch_expected('test_patched_socket.GeneralModuleTests.testCrucialConstants')
False
>>> get_switch_expected('test_patched_socket.SomeOtherTest.testHello')
True
>>> get_switch_expected("test_patched_httplib.BasicTest.test_bad_status_repr")
False
"""
# certain pylint versions mistype these globals as
# str rather than compiled regexes.
# pylint:disable=no-member
if ignore_switch_tests.match(fullname) is not None:
return None
if no_switch_tests.match(fullname) is not None:
return False
return True
disabled_tests = [
# Wants __module__ to be 'signal', which of course it isn't once
# monkey-patched.
'test_signal.GenericTests.test_functions_module_attr',
# XXX: This is leaking while we debug the latest updates.
'test_threading.ThreadTests.test_no_refcycle_through_target',
# The server side takes awhile to shut down
'test_httplib.HTTPSTest.test_local_bad_hostname',
# These were previously 3.5+ issues (same as above)
# but have been backported.
'test_httplib.HTTPSTest.test_local_good_hostname',
'test_httplib.HTTPSTest.test_local_unknown_cert',
'test_threading.ThreadTests.test_PyThreadState_SetAsyncExc',
# uses some internal C API of threads not available when threads are emulated with greenlets
'test_threading.ThreadTests.test_join_nondaemon_on_shutdown',
# asserts that repr(sleep) is '<built-in function sleep>'
'test_urllib2net.TimeoutTest.test_ftp_no_timeout',
'test_urllib2net.TimeoutTest.test_ftp_timeout',
'test_urllib2net.TimeoutTest.test_http_no_timeout',
'test_urllib2net.TimeoutTest.test_http_timeout',
# accesses _sock.gettimeout() which is always in non-blocking mode
'test_urllib2net.OtherNetworkTests.test_ftp',
# too slow
'test_urllib2net.OtherNetworkTests.test_urlwithfrag',
# fails due to some changes on python.org
'test_urllib2net.OtherNetworkTests.test_sites_no_connection_close',
# flaky
'test_socket.UDPTimeoutTest.testUDPTimeout',
# has a bug which makes it fail with error: (107, 'Transport endpoint is not connected')
# (it creates a TCP socket, not UDP)
'test_socket.GeneralModuleTests.testRefCountGetNameInfo',
# fails with "socket.getnameinfo loses a reference" while the reference is only "lost"
# because it is referenced by the traceback - any Python function would lose a reference like that.
# the original getnameinfo does not "lose" it because it's in C.
'test_socket.NetworkConnectionNoServer.test_create_connection_timeout',
# replaces socket.socket with MockSocket and then calls create_connection.
# this unfortunately does not work with monkey patching, because gevent.socket.create_connection
# is bound to gevent.socket.socket and updating socket.socket does not affect it.
# this issue also manifests itself when not monkey patching DNS: http://code.google.com/p/gevent/issues/detail?id=54
# create_connection still uses gevent.socket.getaddrinfo while it should be using socket.getaddrinfo
'test_asyncore.BaseTestAPI.test_handle_expt',
# sends some OOB data and expect it to be detected as such; gevent.select.select does not support that
# This one likes to check its own filename, but we rewrite
# the file to a temp location during patching.
'test_asyncore.HelperFunctionTests.test_compact_traceback',
# expects time.sleep() to return prematurely in case of a signal;
# gevent.sleep() is better than that and does not get interrupted
# (unless signal handler raises an error)
'test_signal.WakeupSignalTests.test_wakeup_fd_early',
# expects select.select() to raise select.error(EINTR'interrupted
# system call') gevent.select.select() does not get interrupted
# (unless signal handler raises an error) maybe it should?
'test_signal.WakeupSignalTests.test_wakeup_fd_during',
# these rely on os.read raising EINTR which never happens with gevent.os.read
'test_signal.SiginterruptTest.test_without_siginterrupt',
'test_signal.SiginterruptTest.test_siginterrupt_on',
'test_signal.SiginterruptTest.test_siginterrupt_off',
# This one takes forever and relies on threading details
'test_signal.StressTest.test_stress_modifying_handlers',
# This uses an external file, and launches it. This means that it's not
# actually testing gevent because there's no monkey-patch.
'test_signal.PosixTests.test_interprocess_signal',
'test_subprocess.ProcessTestCase.test_leak_fast_process_del_killed',
'test_subprocess.ProcessTestCase.test_zombie_fast_process_del',
# relies on subprocess._active which we don't use
# Very slow, tries to open lots and lots of subprocesses and files,
# tends to time out on CI.
'test_subprocess.ProcessTestCase.test_no_leaking',
# This test is also very slow, and has been timing out on Travis
# since November of 2016 on Python 3, but now also seen on Python 2/Pypy.
'test_subprocess.ProcessTestCase.test_leaking_fds_on_error',
# Added between 3.6.0 and 3.6.3, uses _testcapi and internals
# of the subprocess module. Backported to Python 2.7.16.
'test_subprocess.POSIXProcessTestCase.test_stopped',
'test_ssl.ThreadedTests.test_default_ciphers',
'test_ssl.ThreadedTests.test_empty_cert',
'test_ssl.ThreadedTests.test_malformed_cert',
'test_ssl.ThreadedTests.test_malformed_key',
'test_ssl.NetworkedTests.test_non_blocking_connect_ex',
# XXX needs investigating
'test_ssl.NetworkedTests.test_algorithms',
# The host this wants to use, sha256.tbs-internet.com, is not resolvable
# right now (2015-10-10), and we need to get Windows wheels
# This started timing out randomly on Travis in oct/nov 2018. It appears
# to be something with random number generation taking too long.
'test_ssl.BasicSocketTests.test_random_fork',
# Relies on the repr of objects (Py3)
'test_ssl.BasicSocketTests.test_dealloc_warn',
# Takes forever
'test_ssl.BasicSocketTests.test_connect_ex_error',
'test_urllib2.HandlerTests.test_cookie_redirect',
# this uses cookielib which we don't care about
'test_thread.ThreadRunningTests.test__count',
'test_thread.TestForkInThread.test_forkinthread',
# XXX needs investigating
'test_subprocess.POSIXProcessTestCase.test_preexec_errpipe_does_not_double_close_pipes',
# Does not exist in the test suite until 2.7.4+. Subclasses Popen, and overrides
# _execute_child. But our version has a different parameter list than the
# version that comes with PyPy/CPython, so fails with a TypeError.
# This one crashes the interpreter if it has a bug parsing the
# invalid data.
'test_ssl.BasicSocketTests.test_parse_cert_CVE_2019_5010',
# We had to copy in a newer version of the test file for SSL fixes
# and this doesn't work reliably on all versions.
'test_httplib.HeaderTests.test_headers_debuglevel',
# On Appveyor with Python 3.8.0 and 3.7.5, this test
# for __class_getitem__ fails. Presumably this was added
# in a patch release (it's not in the PEP.) Sigh.
# https://bugs.python.org/issue38979
'test_context.ContextTest.test_contextvar_getitem',
# The same patch that fixed that removed this test,
# because it would now fail.
'test_context.ContextTest.test_context_var_new_2',
]
if OSX:
disabled_tests += [
# These are timing dependent, and sometimes run into the OS X
# kernel bug leading to 'Protocol wrong type for socket'.
# See discussion at https://github.com/benoitc/gunicorn/issues/1487
'test_ssl.SimpleBackgroundTests.test_connect_capath',
'test_ssl.SimpleBackgroundTests.test_connect_with_context',
]
if PYPY:
disabled_tests += [
# The exact error message the code code checks for is different
# (possibly just on macOS?). Plain PyPy3 fails as well.
'test_signal.WakeupSignalTests.test_wakeup_write_error',
]
if 'thread' in os.getenv('GEVENT_FILE', ''):
disabled_tests += [
'test_subprocess.ProcessTestCase.test_double_close_on_error'
# Fails with "OSError: 9 invalid file descriptor"; expect GC/lifetime issues
]
if LIBUV:
# epoll appears to work with these just fine in some cases;
# kqueue (at least on OS X, the only tested kqueue system)
# never does (failing with abort())
# (epoll on Raspbian 8.0/Debian Jessie/Linux 4.1.20 works;
# on a VirtualBox image of Ubuntu 15.10/Linux 4.2.0 both tests fail;
# Travis CI Ubuntu 12.04 precise/Linux 3.13 causes one of these tests to hang forever)
# XXX: Retry this with libuv 1.12+
disabled_tests += [
# A 2.7 test. Tries to fork, and libuv cannot fork
'test_signal.InterProcessSignalTests.test_main',
# Likewise, a forking problem
'test_signal.SiginterruptTest.test_siginterrupt_off',
]
disabled_tests += [
# This test wants to pass an arbitrary fileno
# to a socket and do things with it. libuv doesn't like this,
# it raises EPERM. It is disabled on windows already.
# It depends on whether we had a fd already open and multiplexed with the event loop.
'test_socket.GeneralModuleTests.test_unknown_socket_family_repr',
# And yes, there's a typo in some versions.
'test_socket.GeneralModuleTests.test_uknown_socket_family_repr',
# This test sometimes fails at line 358. It's apparently
# extremely sensitive to timing.
'test_selectors.PollSelectorTestCase.test_timeout',
]
if OSX:
disabled_tests += [
# XXX: Starting when we upgraded from libuv 1.18.0
# to 1.19.2, this sometimes (usually) started having
# a series of calls ('select.poll(0)', 'select.poll(-1)')
# take longer than the allowed 0.5 seconds. Debugging showed that
# it was the second call that took longer, for no apparent reason.
# There doesn't seem to be a change in the source code to libuv that
# would affect this.
# XXX-XXX: This actually disables too many tests :(
'test_selectors.PollSelectorTestCase.test_timeout',
]
if RUN_COVERAGE:
disabled_tests += [
# Starting with #1145 this test (actually
# TestTLS_FTPClassMixin) becomes sensitive to timings
# under coverage.
'test_ftplib.TestFTPClass.test_storlines',
]
if sys.platform.startswith('linux'):
disabled_tests += [
# crashes with EPERM, which aborts the epoll loop, even
# though it was allowed in the first place.
'test_asyncore.FileWrapperTest.test_dispatcher',
]
if PYPY:
disabled_tests += [
# appears to timeout?
'test_threading.ThreadTests.test_finalize_with_trace',
'test_asyncore.DispatcherWithSendTests_UsePoll.test_send',
'test_asyncore.DispatcherWithSendTests.test_send',
# More unexpected timeouts
'test_ssl.ContextTests.test__https_verify_envvar',
'test_subprocess.ProcessTestCase.test_check_output',
'test_telnetlib.ReadTests.test_read_eager_A',
# But on Windows, our gc fix for that doesn't work anyway
# so we have to disable it.
'test_urllib2_localnet.TestUrlopen.test_https_with_cafile',
# These tests hang. see above.
'test_threading.ThreadJoinOnShutdown.test_1_join_on_shutdown',
'test_threading.ThreadingExceptionTests.test_print_exception',
# Our copy of these in test__subprocess.py also hangs.
# Anything that uses Popen.communicate or directly uses
# Popen.stdXXX.read hangs. It's not clear why.
'test_subprocess.ProcessTestCase.test_communicate',
'test_subprocess.ProcessTestCase.test_cwd',
'test_subprocess.ProcessTestCase.test_env',
'test_subprocess.ProcessTestCase.test_stderr_pipe',
'test_subprocess.ProcessTestCase.test_stdout_pipe',
'test_subprocess.ProcessTestCase.test_stdout_stderr_pipe',
'test_subprocess.ProcessTestCase.test_stderr_redirect_with_no_stdout_redirect',
'test_subprocess.ProcessTestCase.test_stdout_filedes_of_stdout',
'test_subprocess.ProcessTestCase.test_stdout_none',
'test_subprocess.ProcessTestCase.test_universal_newlines',
'test_subprocess.ProcessTestCase.test_writes_before_communicate',
'test_subprocess.Win32ProcessTestCase._kill_process',
'test_subprocess.Win32ProcessTestCase._kill_dead_process',
'test_subprocess.Win32ProcessTestCase.test_shell_sequence',
'test_subprocess.Win32ProcessTestCase.test_shell_string',
'test_subprocess.CommandsWithSpaces.with_spaces',
]
if WIN:
disabled_tests += [
# This test winds up hanging a long time.
# Inserting GCs doesn't fix it.
'test_ssl.ThreadedTests.test_handshake_timeout',
# These sometimes raise LoopExit, for no apparent reason,
# mostly but not exclusively on Python 2. Sometimes (often?)
# this happens in the setUp() method when we attempt to get a client
# connection
'test_socket.BufferIOTest.testRecvFromIntoBytearray',
'test_socket.BufferIOTest.testRecvFromIntoArray',
'test_socket.BufferIOTest.testRecvIntoArray',
'test_socket.BufferIOTest.testRecvIntoMemoryview',
'test_socket.BufferIOTest.testRecvFromIntoEmptyBuffer',
'test_socket.BufferIOTest.testRecvFromIntoMemoryview',
'test_socket.BufferIOTest.testRecvFromIntoSmallBuffer',
'test_socket.BufferIOTest.testRecvIntoBytearray',
]
if PYPY:
if TRAVIS:
disabled_tests += [
# This sometimes causes a segfault for no apparent reason.
# See https://travis-ci.org/gevent/gevent/jobs/327328704
# Can't reproduce locally.
'test_subprocess.ProcessTestCase.test_universal_newlines_communicate',
]
if RUN_COVERAGE and CFFI_BACKEND:
disabled_tests += [
# This test hangs in this combo for some reason
'test_socket.GeneralModuleTests.test_sendall_interrupted',
# This can get a timeout exception instead of the Alarm
'test_socket.TCPTimeoutTest.testInterruptedTimeout',
# This test sometimes gets the wrong answer (due to changed timing?)
'test_socketserver.SocketServerTest.test_ForkingUDPServer',
# Timing and signals are off, so a handler exception doesn't get raised.
# Seen under libev
'test_signal.InterProcessSignalTests.test_main',
]
if PYPY and sys.pypy_version_info[:2] == (7, 3): # pylint:disable=no-member
if OSX:
disabled_tests += [
# This is expected to produce an SSLError, but instead it appears to
# actually work. See above for when it started failing the same on
# Travis.
'test_ssl.ThreadedTests.test_alpn_protocols',
# This fails, presumably due to the OpenSSL it's compiled with.
'test_ssl.ThreadedTests.test_default_ecdh_curve',
]
if PYPY3 and TRAVIS:
disabled_tests += [
# If socket.SOCK_CLOEXEC is defined, this creates a socket
# and tests its type with ``sock.type & socket.SOCK_CLOEXEC``
# We have a ``@property`` for ``type`` that takes care of
# ``SOCK_NONBLOCK`` on Linux, but otherwise it's just a pass-through.
# This started failing with PyPy 7.3.1 and it's not clear why.
'test_socket.InheritanceTest.test_SOCK_CLOEXEC',
]
if PYPY3 and WIN:
disabled_tests += [
# test_httpservers.CGIHTTPServerTestCase all seem to hang.
# There seem to be some general subprocess issues. This is
# ignored entirely from known_failures.py
# This produces:
#
# OSError: [Errno 10014] The system detected an invalid
# pointer address in attempting to use a pointer argument in
# a call
#
# When calling socket.socket(fileno=fd) when we actually
# call ``self._socket = self._gevent_sock_class()``.
'test_socket.GeneralModuleTests.test_socket_fileno',
# This doesn't respect the scope properly
#
# self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
# AssertionError: Tuples differ: ('ff02::1de:c0:face:8d%42', 1234, 0, 42) != ('ff02::1de:c0:face:8d', 1234, 0, 42)
#
'test_socket.GeneralModuleTests.test_getaddrinfo_ipv6_scopeid_numeric',
# self.assertEqual(newsock.get_inheritable(), False)
# AssertionError: True != False
'test_socket.InheritanceTest.test_dup',
]
def _make_run_with_original(mod_name, func_name):
@contextlib.contextmanager
def with_orig():
mod = __import__(mod_name)
now = getattr(mod, func_name)
from gevent.monkey import get_original
orig = get_original(mod_name, func_name)
try:
setattr(mod, func_name, orig)
yield
finally:
setattr(mod, func_name, now)
return with_orig
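# Hypothetical usage sketch: run one stdlib test with the original, unpatched
# time.sleep restored for its duration, by registering the returned context
# manager factory in the wrapped_tests mapping defined below, e.g.
#
#   wrapped_tests['test_signal.SomeTests.test_sleep'] = _make_run_with_original('time', 'sleep')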
@contextlib.contextmanager
def _gc_at_end():
try:
yield
finally:
import gc
gc.collect()
gc.collect()
@contextlib.contextmanager
def _flaky_socket_timeout():
import socket
try:
yield
except socket.timeout:
flaky.reraiseFlakyTestTimeout()
# Map from FQN to a context manager that will be wrapped around
# that test.
wrapped_tests = {
}
class _PatchedTest(object):
def __init__(self, test_fqn):
self._patcher = wrapped_tests[test_fqn]
def __call__(self, orig_test_fn):
@functools.wraps(orig_test_fn)
def test(*args, **kwargs):
with self._patcher():
return orig_test_fn(*args, **kwargs)
return test
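# A test runner can then rebind a known-problematic test through its wrapper,
# e.g. (hypothetical name, assuming it is registered in wrapped_tests):
#
#   Case.test_foo = _PatchedTest('test_mod.Case.test_foo')(Case.test_foo)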
if OSX:
disabled_tests += [
'test_subprocess.POSIXProcessTestCase.test_run_abort',
# causes Mac OS X to show "Python crashes" dialog box which is annoying
]
if WIN:
disabled_tests += [
# Issue with Unix vs DOS newlines in the file vs from the server
'test_ssl.ThreadedTests.test_socketserver',
# This sometimes hangs (only on appveyor)
'test_ssl.ThreadedTests.test_asyncore_server',
# On appveyor, this sometimes produces 'A non-blocking socket
# operation could not be completed immediately', followed by
# 'No connection could be made because the target machine
# actively refused it'
'test_socket.NonBlockingTCPTests.testAccept',
# On appveyor, this test has been seen to fail on 3.9 and 3.8
]
if sys.version_info[:2] <= (3, 9):
disabled_tests += [
'test_context.HamtTest.test_hamt_collision_3',
# Sometimes fails::
#
# self.assertIn('got more than ', str(cm.exception))
# AssertionError: 'got more than ' not found in
# 'Remote end closed connection without response'
#
'test_httplib.BasicTest.test_overflowing_header_limit_after_100',
]
# These are a problem on 3.5; on 3.6+ they wind up getting (accidentally) disabled.
wrapped_tests.update({
'test_socket.SendfileUsingSendTest.testWithTimeout': _flaky_socket_timeout,
'test_socket.SendfileUsingSendTest.testOffset': _flaky_socket_timeout,
'test_socket.SendfileUsingSendTest.testRegularFile': _flaky_socket_timeout,
'test_socket.SendfileUsingSendTest.testCount': _flaky_socket_timeout,
})
if PYPY:
disabled_tests += [
# Does not exist in the CPython test suite, tests for a specific bug
# in PyPy's forking. Only runs on linux and is specific to the PyPy
# implementation of subprocess (possibly explains the extra parameter to
# _execute_child)
'test_subprocess.ProcessTestCase.test_failed_child_execute_fd_leak',
# On some platforms, this returns "zlib_compression", but the test is looking for
# "ZLIB"
'test_ssl.ThreadedTests.test_compression',
# These are flaky, apparently a race condition? Began with PyPy 2.7-7 and 3.6-7
'test_asyncore.TestAPI_UsePoll.test_handle_error',
'test_asyncore.TestAPI_UsePoll.test_handle_read',
]
if WIN:
disabled_tests += [
# Starting in 7.3.1 on Windows, this stopped raising ValueError; it appears to
# be a bug in PyPy.
'test_signal.WakeupFDTests.test_invalid_fd',
# Likewise for 7.3.1. See the comments for PY35
'test_socket.GeneralModuleTests.test_sock_ioctl',
]
disabled_tests += [
# These are flaky, beginning in 3.6-alpha 7.0, not finding some flag
# set, apparently a race condition
'test_asyncore.TestAPI_UseIPv6Poll.test_handle_accept',
'test_asyncore.TestAPI_UseIPv6Poll.test_handle_accepted',
'test_asyncore.TestAPI_UseIPv6Poll.test_handle_close',
'test_asyncore.TestAPI_UseIPv6Poll.test_handle_write',
'test_asyncore.TestAPI_UseIPv6Select.test_handle_read',
# These are reporting 'ssl has no attribute ...'
# This could just be an OSX thing
'test_ssl.ContextTests.test__create_stdlib_context',
'test_ssl.ContextTests.test_create_default_context',
'test_ssl.ContextTests.test_get_ciphers',
'test_ssl.ContextTests.test_options',
'test_ssl.ContextTests.test_constants',
# These tend to hang for some reason, probably not properly
# closed sockets.
'test_socketserver.SocketServerTest.test_write',
# This uses ctypes to do funky things including using ptrace,
# it hangs
'test_subprocess.ProcessTestCase.test_child_terminated_in_stopped_state',
# Certificate errors; need updated test
'test_urllib2_localnet.TestUrlopen.test_https',
]
# Generic Python 3
disabled_tests += [
# Triggers the crash reporter
'test_threading.SubinterpThreadingTests.test_daemon_threads_fatal_error',
# Relies on an implementation detail, Thread._tstate_lock
'test_threading.ThreadTests.test_tstate_lock',
# Relies on an implementation detail (reprs); we have our own version
'test_threading.ThreadTests.test_various_ops',
'test_threading.ThreadTests.test_various_ops_large_stack',
'test_threading.ThreadTests.test_various_ops_small_stack',
# Relies on Event having a _cond and a _reset_internal_locks()
# XXX: These are commented out in the source code of test_threading because
# this doesn't work.
# 'lock_tests.EventTests.test_reset_internal_locks',
# Python bug 13502. We may or may not suffer from this as its
# basically a timing race condition.
# XXX Same as above
# 'lock_tests.EventTests.test_set_and_clear',
# These tests want to assert on the type of the class that implements
# `Popen.stdin`; we use a FileObject, but they expect different subclasses
# from the `io` module
'test_subprocess.ProcessTestCase.test_io_buffered_by_default',
'test_subprocess.ProcessTestCase.test_io_unbuffered_works',
# 3.3 exposed the `endtime` argument to wait accidentally.
# It is documented as deprecated and not to be used since 3.4
# This test in 3.6.3 wants to use it though, and we don't have it.
'test_subprocess.ProcessTestCase.test_wait_endtime',
# These all want to inspect the string value of an exception raised
# by the exec() call in the child. The _posixsubprocess module arranges
# for better exception handling and printing than we do.
'test_subprocess.POSIXProcessTestCase.test_exception_bad_args_0',
'test_subprocess.POSIXProcessTestCase.test_exception_bad_executable',
'test_subprocess.POSIXProcessTestCase.test_exception_cwd',
# Relies on a 'fork_exec' attribute that we don't provide
'test_subprocess.POSIXProcessTestCase.test_exception_errpipe_bad_data',
'test_subprocess.POSIXProcessTestCase.test_exception_errpipe_normal',
# Python 3 fixed a bug if the stdio file descriptors were closed;
# we still have that bug
'test_subprocess.POSIXProcessTestCase.test_small_errpipe_write_fd',
# Relies on implementation details (some of these tests were added in 3.4,
# but PyPy3 is also shipping them.)
'test_socket.GeneralModuleTests.test_SocketType_is_socketobject',
'test_socket.GeneralModuleTests.test_dealloc_warn',
'test_socket.GeneralModuleTests.test_repr',
'test_socket.GeneralModuleTests.test_str_for_enums',
'test_socket.GeneralModuleTests.testGetaddrinfo',
]
if TRAVIS:
disabled_tests += [
# test_cwd_with_relative_executable tends to fail
# on Travis...it looks like the test processes are stepping
# on each other and messing up their temp directories. We tend to get things like
# saved_dir = os.getcwd()
# FileNotFoundError: [Errno 2] No such file or directory
'test_subprocess.ProcessTestCase.test_cwd_with_relative_arg',
'test_subprocess.ProcessTestCaseNoPoll.test_cwd_with_relative_arg',
'test_subprocess.ProcessTestCase.test_cwd_with_relative_executable',
# In 3.7 and 3.8 on Travis CI, this appears to take the full 3 seconds.
# Can't reproduce it locally. We have our own copy of this that takes
# timing on CI into account.
'test_subprocess.RunFuncTestCase.test_run_with_shell_timeout_and_capture_output',
]
disabled_tests += [
# XXX: BUG: We simply don't handle this correctly. On CPython,
# we wind up raising a BlockingIOError and then
# BrokenPipeError and then some random TypeErrors, all on the
# server. CPython 3.5 goes directly to socket.send() (via
# socket.makefile), whereas CPython 3.6 uses socket.sendall().
# On PyPy, the behaviour is much worse: we hang indefinitely, perhaps exposing a problem
# with our signal handling.
# In actuality, though, this test doesn't fully test the EINTR it expects
# to under gevent (because of its EWOULDBLOCK retry behaviour.)
# Instead, the failures were all due to `pthread_kill` trying to send a signal
# to a greenlet instead of a real thread. The solution is to deliver the signal
# to the real thread by letting it get the correct ID, and we previously
# used make_run_with_original to make it do that.
#
# But now that we have disabled our wrappers around Thread.join() in favor
# of the original implementation, that causes problems:
# background.join() thinks that it is the current thread, and won't let it
# be joined.
'test_wsgiref.IntegrationTests.test_interrupted_write',
]
# PyPy3 3.5.5 v5.8-beta
if PYPY3:
disabled_tests += [
# This raises 'RuntimeError: reentrant call' when exiting the
# process tries to close the stdout stream; no other platform does this.
# Seen in both 3.3 and 3.5 (5.7 and 5.8)
'test_signal.SiginterruptTest.test_siginterrupt_off',
]
disabled_tests += [
# This fails to close all the FDs, at least on CI. On OS X, many of the
# POSIXProcessTestCase fd tests have issues.
'test_subprocess.POSIXProcessTestCase.test_close_fds_when_max_fd_is_lowered',
# This has the wrong constants in 5.8 (but worked in 5.7), at least on
# OS X. It finds "zlib compression" but expects "ZLIB".
'test_ssl.ThreadedTests.test_compression',
# The below are new with 5.10.1
# This gets an EOF in violation of protocol; again, even without gevent
# (at least on OS X; it's less consistent about that on travis)
'test_ssl.NetworkedBIOTests.test_handshake',
# This passes various "invalid" strings and expects a ValueError. not sure why
# we don't see errors on CPython.
'test_subprocess.ProcessTestCase.test_invalid_env',
]
if OSX:
disabled_tests += [
# These all fail with "invalid literal for int() with base 10: b''"
'test_subprocess.POSIXProcessTestCase.test_close_fds',
'test_subprocess.POSIXProcessTestCase.test_close_fds_after_preexec',
'test_subprocess.POSIXProcessTestCase.test_pass_fds',
'test_subprocess.POSIXProcessTestCase.test_pass_fds_inheritable',
'test_subprocess.POSIXProcessTestCase.test_pipe_cloexec',
# The below are new with 5.10.1
# These fail with 'OSError: received malformed or improperly truncated ancillary data'
'test_socket.RecvmsgSCMRightsStreamTest.testCmsgTruncLen0',
'test_socket.RecvmsgSCMRightsStreamTest.testCmsgTruncLen0Plus1',
'test_socket.RecvmsgSCMRightsStreamTest.testCmsgTruncLen1',
'test_socket.RecvmsgSCMRightsStreamTest.testCmsgTruncLen2Minus1',
# Using the provided High Sierra binary, these fail with
# 'ValueError: invalid protocol version _SSLMethod.PROTOCOL_SSLv3'.
# gevent code isn't involved and running them unpatched has the same issue.
'test_ssl.ContextTests.test_constructor',
'test_ssl.ContextTests.test_protocol',
'test_ssl.ContextTests.test_session_stats',
'test_ssl.ThreadedTests.test_echo',
'test_ssl.ThreadedTests.test_protocol_sslv23',
'test_ssl.ThreadedTests.test_protocol_sslv3',
'test_ssl.ThreadedTests.test_protocol_tlsv1',
'test_ssl.ThreadedTests.test_protocol_tlsv1_1',
# Similar, they fail without monkey-patching.
'test_ssl.TestPostHandshakeAuth.test_pha_no_pha_client',
'test_ssl.TestPostHandshakeAuth.test_pha_optional',
'test_ssl.TestPostHandshakeAuth.test_pha_required',
# This gets None instead of http1.1, even without gevent
'test_ssl.ThreadedTests.test_npn_protocols',
# This fails to decode a filename even without gevent,
# at least on High Sierra. Newer versions of the tests actually skip this.
'test_httpservers.SimpleHTTPServerTestCase.test_undecodable_filename',
]
disabled_tests += [
# This seems to be a buffering issue? Something isn't
# getting flushed. (The output is wrong). Under PyPy3 5.7,
# I couldn't reproduce locally in Ubuntu 16 in a VM
# or a laptop with OS X. Under 5.8.0, I can reproduce it, but only
# when run by the testrunner, not when run manually on the command line,
# so something is changing in stdout buffering in those situations.
'test_threading.ThreadJoinOnShutdown.test_2_join_in_forked_process',
'test_threading.ThreadJoinOnShutdown.test_1_join_in_forked_process',
]
if TRAVIS:
disabled_tests += [
# Likewise, but I haven't produced it locally.
'test_threading.ThreadJoinOnShutdown.test_1_join_on_shutdown',
]
if PYPY:
wrapped_tests.update({
# XXX: gevent: The error that was raised by that last call
# left a socket open on the server or client. The server gets
# to http/server.py(390)handle_one_request and blocks on
# self.rfile.readline which apparently is where the SSL
# handshake is done. That results in the exception being
# raised on the client above, but apparently *not* on the
# server. Consequently it sits trying to read from that
# socket. On CPython, when the client socket goes out of scope
# it is closed and the server raises an exception, closing the
# socket. On PyPy, we need a GC cycle for that to happen.
# Without the socket being closed and exception being raised,
# the server cannot be stopped (it runs each request in the
# same thread that would notice it had been stopped), and so
# the cleanup method added by start_https_server to stop the
# server blocks "forever".
# This is an important test, so rather than skip it in patched_tests_setup,
# we do the gc before we return.
'test_urllib2_localnet.TestUrlopen.test_https_with_cafile': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_command': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_handler': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_head_keep_alive': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_head_via_send_error': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_header_close': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_internal_key_error': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_request_line_trimming': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_return_custom_status': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_return_header_keep_alive': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_send_blank': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_send_error': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_version_bogus': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_version_digits': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_version_invalid': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_version_none': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_version_none_get': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_get': _gc_at_end,
'test_httpservers.SimpleHTTPServerTestCase.test_get': _gc_at_end,
'test_httpservers.SimpleHTTPServerTestCase.test_head': _gc_at_end,
'test_httpservers.SimpleHTTPServerTestCase.test_invalid_requests': _gc_at_end,
'test_httpservers.SimpleHTTPServerTestCase.test_path_without_leading_slash': _gc_at_end,
'test_httpservers.CGIHTTPServerTestCase.test_invaliduri': _gc_at_end,
'test_httpservers.CGIHTTPServerTestCase.test_issue19435': _gc_at_end,
'test_httplib.TunnelTests.test_connect': _gc_at_end,
'test_httplib.SourceAddressTest.testHTTPConnectionSourceAddress': _gc_at_end,
# Unclear
'test_urllib2_localnet.ProxyAuthTests.test_proxy_with_bad_password_raises_httperror': _gc_at_end,
'test_urllib2_localnet.ProxyAuthTests.test_proxy_with_no_password_raises_httperror': _gc_at_end,
})
disabled_tests += [
'test_subprocess.ProcessTestCase.test_threadsafe_wait',
# XXX: It seems that threading.Timer is not being greened properly, possibly
# due to a similar issue to what gevent.threading documents for normal threads.
# In any event, this test hangs forever
'test_subprocess.POSIXProcessTestCase.test_preexec_errpipe_does_not_double_close_pipes',
# Subclasses Popen, and overrides _execute_child. Expects things to be done
# in a particular order in an exception case, but we don't follow that
# exact order
'test_selectors.PollSelectorTestCase.test_above_fd_setsize',
# This test attempts to open many many file descriptors and
# poll on them, expecting them all to be ready at once. But
# libev limits the number of events it will return at once. Specifically,
# on linux with epoll, it returns a max of 64 (ev_epoll.c).
# XXX: Hangs (Linux only)
'test_socket.NonBlockingTCPTests.testInitNonBlocking',
# We don't handle the Linux-only SOCK_NONBLOCK option
'test_socket.NonblockConstantTest.test_SOCK_NONBLOCK',
# Tries to use multiprocessing which doesn't quite work in
# monkey_test module (Windows only)
'test_socket.TestSocketSharing.testShare',
# Windows-only: Sockets have a 'ioctl' method in Python 3
# implemented in the C code. This test tries to check
# for the presence of the method in the class, which we don't
# have because we don't inherit the C implementation. But
# it should be found at runtime.
'test_socket.GeneralModuleTests.test_sock_ioctl',
# XXX This fails for an unknown reason
'test_httplib.HeaderTests.test_parse_all_octets',
]
if OSX:
disabled_tests += [
# These raise "OSError: 12 Cannot allocate memory" on both
# patched and unpatched runs
'test_socket.RecvmsgSCMRightsStreamTest.testFDPassEmpty',
]
if TRAVIS:
# This has been seen to produce "Inconsistency detected by
# ld.so: dl-open.c: 231: dl_open_worker: Assertion
# `_dl_debug_initialize (0, args->nsid)->r_state ==
# RT_CONSISTENT' failed!" and fail.
disabled_tests += [
'test_threading.ThreadTests.test_is_alive_after_fork',
# This has timing constraints that are strict and do not always
# hold.
'test_selectors.PollSelectorTestCase.test_timeout',
]
if TRAVIS:
disabled_tests += [
'test_subprocess.ProcessTestCase.test_double_close_on_error',
# This test is racy or OS-dependent. It passes locally (sufficiently fast machine)
# but fails under Travis
]
disabled_tests += [
# XXX: Hangs
'test_ssl.ThreadedTests.test_nonblocking_send',
'test_ssl.ThreadedTests.test_socketserver',
# Uses direct sendfile, doesn't properly check for it being enabled
'test_socket.GeneralModuleTests.test__sendfile_use_sendfile',
# Relies on the regex of the repr having the locked state (TODO: it'd be nice if
# we did that).
# XXX: These are commented out in the source code of test_threading because
# this doesn't work.
# 'lock_tests.LockTests.lest_locked_repr',
# 'lock_tests.LockTests.lest_repr',
# This test opens a socket, creates a new socket with the same fileno,
# closes the original socket (and hence fileno) and then
# expects that the calling setblocking() on the duplicate socket
# will raise an error. Our implementation doesn't work that way because
# setblocking() doesn't actually touch the file descriptor.
# That's probably OK because this was a GIL state error in CPython
# see https://github.com/python/cpython/commit/fa22b29960b4e683f4e5d7e308f674df2620473c
'test_socket.TestExceptions.test_setblocking_invalidfd',
]
if ARES:
disabled_tests += [
# These raise different errors or can't resolve
# the IP address correctly
'test_socket.GeneralModuleTests.test_host_resolution',
'test_socket.GeneralModuleTests.test_getnameinfo',
]
disabled_tests += [
'test_threading.MiscTestCase.test__all__',
]
# We don't actually implement socket._sendfile_use_sendfile,
# so these tests, which think they're using that and os.sendfile,
# fail.
disabled_tests += [
'test_socket.SendfileUsingSendfileTest.testCount',
'test_socket.SendfileUsingSendfileTest.testCountSmall',
'test_socket.SendfileUsingSendfileTest.testCountWithOffset',
'test_socket.SendfileUsingSendfileTest.testOffset',
'test_socket.SendfileUsingSendfileTest.testRegularFile',
'test_socket.SendfileUsingSendfileTest.testWithTimeout',
'test_socket.SendfileUsingSendfileTest.testEmptyFileSend',
'test_socket.SendfileUsingSendfileTest.testNonBlocking',
'test_socket.SendfileUsingSendfileTest.test_errors',
]
# Ditto
disabled_tests += [
'test_socket.GeneralModuleTests.test__sendfile_use_sendfile',
]
disabled_tests += [
# This test requires Linux >= 4.3. When we were running 'dist:
# trusty' on the 4.4 kernel, it passed (~July 2017). But when
# trusty became the default dist in September 2017 and updated
    # the kernel to 4.11.6, it began failing. It fails on `res =
# op.recv(assoclen + len(plain) + taglen)` (where 'op' is the
# client socket) with 'OSError: [Errno 22] Invalid argument'
# for unknown reasons. This is *after* having successfully
    # called `op.sendmsg_afalg`. Post 3.6.0 (the version we test with),
# the test was changed to require Linux 4.9 and the data was changed,
# so this is not our fault. We should eventually update this when we
# update our 3.6 version.
# See https://bugs.python.org/issue29324
'test_socket.LinuxKernelCryptoAPI.test_aead_aes_gcm',
]
disabled_tests += [
# These want to use the private '_communicate' method, which
# our Popen doesn't have.
'test_subprocess.MiscTests.test_call_keyboardinterrupt_no_kill',
'test_subprocess.MiscTests.test_context_manager_keyboardinterrupt_no_kill',
'test_subprocess.MiscTests.test_run_keyboardinterrupt_no_kill',
# This wants to check that the underlying fileno is blocking,
# but it isn't.
'test_socket.NonBlockingTCPTests.testSetBlocking',
# 3.7b2 made it impossible to instantiate SSLSocket objects
# directly, and this tests for that, but we don't follow that change.
'test_ssl.BasicSocketTests.test_private_init',
# 3.7b2 made a change to this test that on the surface looks incorrect,
# but it passes when they run it and fails when we do. It's not
# clear why.
'test_ssl.ThreadedTests.test_check_hostname_idn',
    # These appear to hang; we haven't investigated why.
'test_ssl.SimpleBackgroundTests.test_get_server_certificate',
# Probably the same as NetworkConnectionNoServer.test_create_connection_timeout
'test_socket.NetworkConnectionNoServer.test_create_connection',
# Internals of the threading module that change.
'test_threading.ThreadTests.test_finalization_shutdown',
'test_threading.ThreadTests.test_shutdown_locks',
# Expects a deprecation warning we don't raise
'test_threading.ThreadTests.test_old_threading_api',
# This tries to use threading.interrupt_main() from a new Thread;
# but of course that's actually the same thread and things don't
# work as expected.
'test_threading.InterruptMainTests.test_interrupt_main_subthread',
'test_threading.InterruptMainTests.test_interrupt_main_noerror',
# TLS1.3 seems flaky
'test_ssl.ThreadedTests.test_wrong_cert_tls13',
]
if APPVEYOR:
disabled_tests += [
# This sometimes produces ``self.assertEqual(1, len(s.select(0))): 1 != 0``.
# Probably needs to spin the loop once.
'test_selectors.BaseSelectorTestCase.test_timeout',
]
disabled_tests += [
# This one seems very strict: doesn't want a pathlike
# first argument when shell is true.
'test_subprocess.RunFuncTestCase.test_run_with_pathlike_path',
# This tests for a warning we don't raise.
'test_subprocess.RunFuncTestCase.test_bufsize_equal_one_binary_mode',
# This compares the output of threading.excepthook with
# data constructed in Python. But excepthook is implemented in C
# and can't see the patched threading.get_ident() we use, so the
# output doesn't match.
'test_threading.ExceptHookTests.test_excepthook_thread_None',
]
if sys.version_info[:3] < (3, 8, 1):
disabled_tests += [
# Earlier versions parse differently so the newer test breaks
'test_ssl.BasicSocketTests.test_parse_all_sans',
'test_ssl.BasicSocketTests.test_parse_cert_CVE_2013_4238',
]
if sys.version_info[:3] < (3, 8, 10):
disabled_tests += [
# These were added for fixes sometime between 3.8.1 and 3.8.10
'test_ftplib.TestFTPClass.test_makepasv_issue43285_security_disabled',
'test_ftplib.TestFTPClass.test_makepasv_issue43285_security_enabled_default',
'test_httplib.BasicTest.test_dir_with_added_behavior_on_status',
'test_httplib.TunnelTests.test_tunnel_connect_single_send_connection_setup',
'test_ssl.TestSSLDebug.test_msg_callback_deadlock_bpo43577',
# This one fails with the updated certs
'test_ssl.ContextTests.test_load_verify_cadata',
# This one times out on 3.7.1 on Appveyor
'test_ftplib.TestTLS_FTPClassMixin.test_retrbinary_rest',
]
if RESOLVER_DNSPYTHON:
disabled_tests += [
# This does two things DNS python doesn't. First, it sends it
# capital letters and expects them to be returned lowercase.
# Second, it expects the symbolic scopeid to be stripped from the end.
'test_socket.GeneralModuleTests.test_getaddrinfo_ipv6_scopeid_symbolic',
]
# if 'signalfd' in os.environ.get('GEVENT_BACKEND', ''):
# # tests that don't interact well with signalfd
# disabled_tests.extend([
# 'test_signal.SiginterruptTest.test_siginterrupt_off',
# 'test_socketserver.SocketServerTest.test_ForkingTCPServer',
# 'test_socketserver.SocketServerTest.test_ForkingUDPServer',
# 'test_socketserver.SocketServerTest.test_ForkingUnixStreamServer'])
# LibreSSL reports OPENSSL_VERSION_INFO (2, 0, 0, 0, 0) regardless of its version,
# so this is known to fail on some distros. We don't want to detect this because we
# don't want to trigger the side-effects of importing ssl prematurely if we will
# be monkey-patching, so we skip this test everywhere. It doesn't do much for us
# anyway.
disabled_tests += [
'test_ssl.BasicSocketTests.test_openssl_version'
]
if OSX:
disabled_tests += [
# This sometimes produces OSError: Errno 40: Message too long
'test_socket.RecvmsgIntoTCPTest.testRecvmsgIntoGenerator',
        # These sometimes time out. Cannot reproduce locally.
        'test_ftplib.TestTLS_FTPClassMixin.test_mlsd',
        'test_ftplib.TestTLS_FTPClassMixin.test_retrlines_too_long',
        'test_ftplib.TestTLS_FTPClassMixin.test_storlines',
        'test_ftplib.TestTLS_FTPClassMixin.test_retrbinary_rest',
]
if RESOLVER_ARES and PY38 and not RUNNING_ON_CI:
disabled_tests += [
# When updating to 1.16.0 this was seen locally, but not on CI.
# Tuples differ: ('ff02::1de:c0:face:8d', 1234, 0, 0)
# != ('ff02::1de:c0:face:8d', 1234, 0, 1)
'test_socket.GeneralModuleTests.test_getaddrinfo_ipv6_scopeid_symbolic',
]
if PY39:
disabled_tests += [
# Depends on exact details of the repr. Eww.
'test_subprocess.ProcessTestCase.test_repr',
# Tries to wait for the process without using Popen APIs, and expects the
# ``returncode`` attribute to stay None. But we have already hooked SIGCHLD, so
# we see and set the ``returncode``; there is no way to wait that doesn't do that.
        'test_subprocess.POSIXProcessTestCase.test_send_signal_race',
]
if sys.version_info[:3] < (3, 9, 5):
disabled_tests += [
# These were added for fixes sometime between 3.9.1 and 3.9.5
'test_ftplib.TestFTPClass.test_makepasv_issue43285_security_disabled',
'test_ftplib.TestFTPClass.test_makepasv_issue43285_security_enabled_default',
'test_httplib.BasicTest.test_dir_with_added_behavior_on_status',
'test_httplib.TunnelTests.test_tunnel_connect_single_send_connection_setup',
'test_ssl.TestSSLDebug.test_msg_callback_deadlock_bpo43577',
# This one fails with the updated certs
'test_ssl.ContextTests.test_load_verify_cadata',
# These time out on 3.9.1 on Appveyor
'test_ftplib.TestTLS_FTPClassMixin.test_retrbinary_rest',
'test_ftplib.TestTLS_FTPClassMixin.test_retrlines_too_long',
]
if PY310:
disabled_tests += [
# They arbitrarily made some types so that they can't be created;
# that's an implementation detail we're not going to follow (
# it would require them to be factory functions).
'test_select.SelectTestCase.test_disallow_instantiation',
'test_threading.ThreadTests.test_disallow_instantiation',
# This wants two true threads to work, but a CPU bound loop
# in a greenlet can't be interrupted.
'test_threading.InterruptMainTests.test_can_interrupt_tight_loops',
# We don't currently implement pipesize.
'test_subprocess.ProcessTestCase.test_pipesize_default',
'test_subprocess.ProcessTestCase.test_pipesizes',
# Unknown
'test_signal.SiginterruptTest.test_siginterrupt_off',
]
if TRAVIS:
disabled_tests += [
# The mixing of subinterpreters (with threads) and gevent apparently
# leads to a segfault on Ubuntu/GitHubActions/3.10rc1. Not clear why.
# But that's not a great use case for gevent.
'test_threading.SubinterpThreadingTests.test_threads_join',
'test_threading.SubinterpThreadingTests.test_threads_join_2',
]
if PY311:
disabled_tests += [
# CPython issue #27718: This wants to require all objects to
        # have a __module__ of 'signal' because of pydoc. Obviously our patches don't.
'test_signal.GenericTests.test_functions_module_attr',
# 3.11 added subprocess._USE_VFORK and subprocess._USE_POSIX_SPAWN.
# We don't support either of those (although USE_VFORK might be possible?)
'test_subprocess.ProcessTestCase.test__use_vfork',
]
if PY312:
if RUN_COVERAGE:
disabled_tests += [
# This test wants to look for installed tracing functions, and
# having a coverage tracer function installed breaks it.
'test_threading.ThreadTests.test_gettrace_all_threads',
]
if WIN:
disabled_tests += [
# These three are looking for an error string that matches,
# and ours differs very slightly
'test_socket.BasicHyperVTest.testCreateHyperVSocketAddrNotTupleFailure',
'test_socket.BasicHyperVTest.testCreateHyperVSocketAddrServiceIdNotValidUUIDFailure',
'test_socket.BasicHyperVTest.testCreateHyperVSocketAddrVmIdNotValidUUIDFailure',
]
if TRAVIS:
disabled_tests += [
# These tests frequently break when we try to use newer Travis CI images,
# due to different versions of OpenSSL being available. See above for some
# specific examples. Usually the tests catch up, eventually (e.g., at this writing,
# the 3.9b1 tests are fine on Ubuntu Bionic, but all other versions fail).
'test_ssl.ContextTests.test_options',
'test_ssl.ThreadedTests.test_alpn_protocols',
'test_ssl.ThreadedTests.test_default_ecdh_curve',
'test_ssl.ThreadedTests.test_shared_ciphers',
]
if RUNNING_ON_MUSLLINUX:
disabled_tests += [
        # This is supposed to *not* crash, but on the musllinux image, it
        # does crash (exit code -11, i.e., SIGSEGV)
'test_threading.ThreadingExceptionTests.test_recursion_limit',
]
# Now build up the data structure we'll use to actually find disabled tests
# to avoid a linear scan for every file (it seems the list could get quite large)
# (First, freeze the source list to make sure it isn't modified anywhere)
def _build_test_structure(sequence_of_tests):
_disabled_tests = frozenset(sequence_of_tests)
disabled_tests_by_file = collections.defaultdict(set)
for file_case_meth in _disabled_tests:
file_name, _case, _meth = file_case_meth.split('.')
by_file = disabled_tests_by_file[file_name]
by_file.add(file_case_meth)
return disabled_tests_by_file
_disabled_tests_by_file = _build_test_structure(disabled_tests)
_wrapped_tests_by_file = _build_test_structure(wrapped_tests)
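# For illustration (not part of the original module): given the dotted names
# above, _build_test_structure produces a mapping keyed by file name, e.g.
#
#   _build_test_structure(['test_socket.GeneralModuleTests.test_sock_ioctl'])
#   # -> {'test_socket': {'test_socket.GeneralModuleTests.test_sock_ioctl'}}
#
# so disable_tests_in_source() below can look up the tests for a file
# directly instead of scanning the whole list for every file.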
def disable_tests_in_source(source, filename):
# Source and filename are both native strings.
if filename.startswith('./'):
# turn "./test_socket.py" (used for auto-complete) into "test_socket.py"
filename = filename[2:]
if filename.endswith('.py'):
filename = filename[:-3]
# XXX ignoring TestCase class name (just using function name).
# Maybe we should do this with the AST, or even after the test is
# imported.
my_disabled_tests = _disabled_tests_by_file.get(filename, ())
my_wrapped_tests = _wrapped_tests_by_file.get(filename, {})
if my_disabled_tests or my_wrapped_tests:
# Insert our imports early in the file.
# If we do it on a def-by-def basis, we can break syntax
# if the function is already decorated
pattern = r'^import .*'
replacement = r'from gevent.testing import patched_tests_setup as _GEVENT_PTS;'
replacement += r'import unittest as _GEVENT_UTS;'
replacement += r'\g<0>'
source, n = re.subn(pattern, replacement, source, 1, re.MULTILINE)
print("Added imports", n)
# Test cases will always be indented some,
# so use [ \t]+. Without indentation, test_main, commonly used as the
# __main__ function at the top level, could get matched. \s matches
# newlines even in MULTILINE mode so it would still match that.
my_disabled_testcases = set()
for test in my_disabled_tests:
testcase = test.split('.')[-1]
my_disabled_testcases.add(testcase)
# def foo_bar(self)
# ->
# @_GEVENT_UTS.skip('Removed by patched_tests_setup')
# def foo_bar(self)
pattern = r"^([ \t]+)def " + testcase
replacement = r"\1@_GEVENT_UTS.skip('Removed by patched_tests_setup: %s')\n" % (test,)
replacement += r"\g<0>"
source, n = re.subn(pattern, replacement, source, 0, re.MULTILINE)
print('Skipped %s (%d)' % (testcase, n), file=sys.stderr)
for test in my_wrapped_tests:
testcase = test.split('.')[-1]
if testcase in my_disabled_testcases:
print("Not wrapping %s because it is skipped" % (test,))
continue
# def foo_bar(self)
# ->
# @_GEVENT_PTS._PatchedTest('file.Case.name')
# def foo_bar(self)
pattern = r"^([ \t]+)def " + testcase
replacement = r"\1@_GEVENT_PTS._PatchedTest('%s')\n" % (test,)
replacement += r"\g<0>"
source, n = re.subn(pattern, replacement, source, 0, re.MULTILINE)
print('Wrapped %s (%d)' % (testcase, n), file=sys.stderr)
return source
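# A sketch of the rewriting performed above, using one of the entries in the
# disabled list; the source snippet itself is hypothetical. Input such as:
#
#     import socket
#     class GeneralModuleTests(unittest.TestCase):
#         def test_sock_ioctl(self):
#             ...
#
# passed through disable_tests_in_source(source, 'test_socket.py') becomes:
#
#     from gevent.testing import patched_tests_setup as _GEVENT_PTS;import unittest as _GEVENT_UTS;import socket
#     class GeneralModuleTests(unittest.TestCase):
#         @_GEVENT_UTS.skip('Removed by patched_tests_setup: test_socket.GeneralModuleTests.test_sock_ioctl')
#         def test_sock_ioctl(self):
#             ...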
| 57,621 | 41.873512 | 123 | py |
gevent | gevent-master/src/gevent/libuv/_corecffi_build.py | # pylint: disable=no-member
# This module is only used to create and compile the gevent.libuv._corecffi module;
# nothing should be directly imported from it except `ffi`, which should only be
# used for `ffi.compile()`; programs should import gevent.libuv._corecffi.
# However, because we are using "out-of-line" mode, it is necessary to examine
# this file to know what functions are created and available on the generated
# module.
from __future__ import absolute_import, print_function
import os
import os.path # pylint:disable=no-name-in-module
import platform
import sys
from cffi import FFI
sys.path.append(".")
try:
import _setuputils
except ImportError:
print("This file must be imported with setup.py in the current working dir.")
raise
__all__ = []
WIN = sys.platform.startswith('win32')
LIBUV_EMBED = _setuputils.should_embed('libuv')
PY2 = sys.version_info[0] == 2
ffi = FFI()
thisdir = os.path.dirname(os.path.abspath(__file__))
parentdir = os.path.abspath(os.path.join(thisdir, '..'))
setup_py_dir = os.path.abspath(os.path.join(thisdir, '..', '..', '..'))
libuv_dir = os.path.abspath(os.path.join(setup_py_dir, 'deps', 'libuv'))
def read_source(name):
# pylint:disable=unspecified-encoding
with open(os.path.join(thisdir, name), 'r') as f:
return f.read()
_cdef = read_source('_corecffi_cdef.c')
_source = read_source('_corecffi_source.c')
# These defines and uses help keep the C file readable and lintable by
# C tools.
_cdef = _cdef.replace('#define GEVENT_STRUCT_DONE int', '')
_cdef = _cdef.replace("GEVENT_STRUCT_DONE _;", '...;')
# nlink_t is not used in libuv.
_cdef = _cdef.replace('#define GEVENT_ST_NLINK_T int',
'')
_cdef = _cdef.replace('GEVENT_ST_NLINK_T', 'nlink_t')
_cdef = _cdef.replace('#define GEVENT_UV_OS_SOCK_T int', '')
# uv_os_sock_t is int on POSIX and SOCKET on Win32, but socket is
# just another name for handle, which is just another name for 'void*'
# which we will treat as an 'unsigned long' or 'unsigned long long'
# since it comes through 'fileno()' where it has been cast as an int.
# See class watcher.io
_void_pointer_as_integer = 'intptr_t'
_cdef = _cdef.replace("GEVENT_UV_OS_SOCK_T", 'int' if not WIN else _void_pointer_as_integer)
LIBUV_INCLUDE_DIRS = [
os.path.join(libuv_dir, 'include'),
os.path.join(libuv_dir, 'src'),
]
# Initially based on https://github.com/saghul/pyuv/blob/v1.x/setup_libuv.py
def _libuv_source(rel_path):
    # Certain versions of setuptools, notably on Windows, are *very*
    # picky about what we feed to ``sources=``: "setup() arguments must
    # *always* be /-separated paths relative to the setup.py
    # directory, *never* absolute paths." POSIX doesn't have that issue.
path = os.path.join('deps', 'libuv', 'src', rel_path)
return path
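# For example, _libuv_source('unix/core.c') returns 'deps/libuv/src/unix/core.c'.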
LIBUV_SOURCES = [
_libuv_source('fs-poll.c'),
_libuv_source('inet.c'),
_libuv_source('threadpool.c'),
_libuv_source('uv-common.c'),
_libuv_source('version.c'),
_libuv_source('uv-data-getter-setters.c'),
_libuv_source('timer.c'),
_libuv_source('idna.c'),
_libuv_source('strscpy.c'),
# Added between 1.42.0 and 1.44.2; only used
# on unix in that release, but generic
_libuv_source('strtok.c'),
]
if WIN:
LIBUV_SOURCES += [
_libuv_source('win/async.c'),
_libuv_source('win/core.c'),
_libuv_source('win/detect-wakeup.c'),
_libuv_source('win/dl.c'),
_libuv_source('win/error.c'),
_libuv_source('win/fs-event.c'),
_libuv_source('win/fs.c'),
# getaddrinfo.c refers to ConvertInterfaceIndexToLuid
# and ConvertInterfaceLuidToNameA, which are supposedly in iphlpapi.h
# and iphlpapi.lib/dll. But on Windows 10 with Python 3.5 and VC 14 (Visual Studio 2015),
        # I get a warning from the compiler that those functions are undefined,
        # and a link error from the linker, so this file can't be included.
        # This is possibly because the functions are defined for Windows Vista, and
        # Python 3.5 builds with an earlier SDK?
# Fortunately we don't use those functions.
#_libuv_source('win/getaddrinfo.c'),
# getnameinfo.c refers to uv__getaddrinfo_translate_error from
# getaddrinfo.c, which we don't have.
#_libuv_source('win/getnameinfo.c'),
_libuv_source('win/handle.c'),
_libuv_source('win/loop-watcher.c'),
_libuv_source('win/pipe.c'),
_libuv_source('win/poll.c'),
_libuv_source('win/process-stdio.c'),
_libuv_source('win/process.c'),
_libuv_source('win/signal.c'),
_libuv_source('win/snprintf.c'),
_libuv_source('win/stream.c'),
_libuv_source('win/tcp.c'),
_libuv_source('win/thread.c'),
_libuv_source('win/tty.c'),
_libuv_source('win/udp.c'),
_libuv_source('win/util.c'),
_libuv_source('win/winapi.c'),
_libuv_source('win/winsock.c'),
]
else:
LIBUV_SOURCES += [
_libuv_source('unix/async.c'),
_libuv_source('unix/core.c'),
_libuv_source('unix/dl.c'),
_libuv_source('unix/fs.c'),
_libuv_source('unix/getaddrinfo.c'),
_libuv_source('unix/getnameinfo.c'),
_libuv_source('unix/loop-watcher.c'),
_libuv_source('unix/loop.c'),
_libuv_source('unix/pipe.c'),
_libuv_source('unix/poll.c'),
_libuv_source('unix/process.c'),
_libuv_source('unix/signal.c'),
_libuv_source('unix/stream.c'),
_libuv_source('unix/tcp.c'),
_libuv_source('unix/thread.c'),
_libuv_source('unix/tty.c'),
_libuv_source('unix/udp.c'),
]
if sys.platform.startswith('linux'):
LIBUV_SOURCES += [
_libuv_source('unix/linux-core.c'),
_libuv_source('unix/linux-inotify.c'),
_libuv_source('unix/linux-syscalls.c'),
_libuv_source('unix/procfs-exepath.c'),
_libuv_source('unix/proctitle.c'),
_libuv_source('unix/random-sysctl-linux.c'),
_libuv_source('unix/epoll.c'),
]
elif sys.platform == 'darwin':
LIBUV_SOURCES += [
_libuv_source('unix/bsd-ifaddrs.c'),
_libuv_source('unix/darwin.c'),
_libuv_source('unix/darwin-proctitle.c'),
_libuv_source('unix/fsevents.c'),
_libuv_source('unix/kqueue.c'),
_libuv_source('unix/proctitle.c'),
]
elif sys.platform.startswith(('freebsd', 'dragonfly')): # pragma: no cover
# Not tested
LIBUV_SOURCES += [
_libuv_source('unix/bsd-ifaddrs.c'),
_libuv_source('unix/freebsd.c'),
_libuv_source('unix/kqueue.c'),
_libuv_source('unix/posix-hrtime.c'),
_libuv_source('unix/bsd-proctitle.c'),
]
elif sys.platform.startswith('openbsd'): # pragma: no cover
# Not tested
LIBUV_SOURCES += [
_libuv_source('unix/bsd-ifaddrs.c'),
_libuv_source('unix/kqueue.c'),
_libuv_source('unix/openbsd.c'),
_libuv_source('unix/posix-hrtime.c'),
_libuv_source('unix/bsd-proctitle.c'),
]
elif sys.platform.startswith('netbsd'): # pragma: no cover
# Not tested
LIBUV_SOURCES += [
_libuv_source('unix/bsd-ifaddrs.c'),
_libuv_source('unix/kqueue.c'),
_libuv_source('unix/netbsd.c'),
_libuv_source('unix/posix-hrtime.c'),
_libuv_source('unix/bsd-proctitle.c'),
]
elif sys.platform.startswith('sunos'): # pragma: no cover
# Not tested.
LIBUV_SOURCES += [
_libuv_source('unix/no-proctitle.c'),
_libuv_source('unix/sunos.c'),
]
elif sys.platform.startswith('aix'): # pragma: no cover
# Not tested.
LIBUV_SOURCES += [
_libuv_source('unix/aix.c'),
_libuv_source('unix/aix-common.c'),
]
elif sys.platform.startswith('haiku'): # pragma: no cover
# Not tested
LIBUV_SOURCES += [
_libuv_source('unix/haiku.c')
]
elif sys.platform.startswith('cygwin'): # pragma: no cover
# Not tested.
# Based on Cygwin package sources /usr/src/libuv-1.32.0-1.src/libuv-1.32.0/Makefile.am
# Apparently the same upstream at https://github.com/libuv/libuv/blob/v1.x/Makefile.am
LIBUV_SOURCES += [
_libuv_source('unix/cygwin.c'),
_libuv_source('unix/bsd-ifaddrs.c'),
_libuv_source('unix/no-fsevents.c'),
_libuv_source('unix/no-proctitle.c'),
_libuv_source('unix/posix-hrtime.c'),
_libuv_source('unix/posix-poll.c'),
_libuv_source('unix/procfs-exepath.c'),
_libuv_source('unix/sysinfo-loadavg.c'),
_libuv_source('unix/sysinfo-memory.c'),
]
LIBUV_MACROS = [
('LIBUV_EMBED', int(LIBUV_EMBED)),
]
def _define_macro(name, value):
LIBUV_MACROS.append((name, value))
LIBUV_LIBRARIES = []
def _add_library(name):
LIBUV_LIBRARIES.append(name)
if sys.platform != 'win32':
_define_macro('_LARGEFILE_SOURCE', 1)
_define_macro('_FILE_OFFSET_BITS', 64)
if sys.platform.startswith('linux'):
_add_library('dl')
_add_library('rt')
_define_macro('_GNU_SOURCE', 1)
_define_macro('_POSIX_C_SOURCE', '200112')
elif sys.platform == 'darwin':
_define_macro('_DARWIN_USE_64_BIT_INODE', 1)
_define_macro('_DARWIN_UNLIMITED_SELECT', 1)
elif sys.platform.startswith('netbsd'): # pragma: no cover
_add_library('kvm')
elif sys.platform.startswith('sunos'): # pragma: no cover
_define_macro('__EXTENSIONS__', 1)
_define_macro('_XOPEN_SOURCE', 500)
_define_macro('_REENTRANT', 1)
_add_library('kstat')
_add_library('nsl')
_add_library('sendfile')
_add_library('socket')
if platform.release() == '5.10':
# https://github.com/libuv/libuv/issues/1458
# https://github.com/giampaolo/psutil/blob/4d6a086411c77b7909cce8f4f141bbdecfc0d354/setup.py#L298-L300
_define_macro('SUNOS_NO_IFADDRS', '')
elif sys.platform.startswith('aix'): # pragma: no cover
_define_macro('_LINUX_SOURCE_COMPAT', 1)
if os.uname().sysname != 'OS400':
_add_library('perfstat')
elif WIN:
# All other gevent .pyd files link to the specific minor-version Python
# DLL, so we should do the same here. In virtual environments that don't
# contain the major-version python?.dll stub, _corecffi.pyd would otherwise
# cause the Windows DLL loader to search the entire PATH for a DLL with
# that name. This might end up bringing a second, ABI-incompatible Python
# version into the process, which can easily lead to crashes.
# See https://github.com/gevent/gevent/pull/1814/files
_define_macro('_CFFI_NO_LIMITED_API', 1)
_define_macro('_GNU_SOURCE', 1)
_define_macro('WIN32', 1)
_define_macro('_CRT_SECURE_NO_DEPRECATE', 1)
_define_macro('_CRT_NONSTDC_NO_DEPRECATE', 1)
_define_macro('_CRT_SECURE_NO_WARNINGS', 1)
_define_macro('_WIN32_WINNT', '0x0602')
_define_macro('WIN32_LEAN_AND_MEAN', 1)
# This value isn't available on the platform that we build and
# test Python 2.7 on. It's used for getting power management
# suspend/resume notifications, maybe for keeping timers accurate?
#
# TODO: This should be a more targeted check based on the platform
# version, but that's complicated because it depends on having a
# particular patch installed to the OS, and I don't know how to
# check for that...but we're dropping Python 2 support soon, so
# I suspect it really doesn't matter.
if PY2:
_define_macro('LOAD_LIBRARY_SEARCH_SYSTEM32', 0)
_add_library('advapi32')
_add_library('iphlpapi')
_add_library('psapi')
_add_library('shell32')
_add_library('user32')
_add_library('userenv')
_add_library('ws2_32')
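# When not embedding libuv, compile none of its sources and instead link
# against a system-provided libuv (``-luv``); the gevent package directory
# becomes the only entry on the include path.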
if not LIBUV_EMBED:
del LIBUV_SOURCES[:]
del LIBUV_INCLUDE_DIRS[:]
_add_library('uv')
LIBUV_INCLUDE_DIRS.append(parentdir)
ffi.cdef(_cdef)
ffi.set_source(
'gevent.libuv._corecffi',
_source,
sources=LIBUV_SOURCES,
depends=LIBUV_SOURCES,
include_dirs=LIBUV_INCLUDE_DIRS,
libraries=list(LIBUV_LIBRARIES),
define_macros=list(LIBUV_MACROS),
extra_compile_args=list(_setuputils.IGNORE_THIRD_PARTY_WARNINGS),
)
if __name__ == '__main__':
# See notes in libev/_corecffi_build.py for how to test this.
#
# Other than the obvious directory changes, the changes are:
#
# CPPFLAGS=-Ideps/libuv/include/ -Isrc/gevent/
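    #
    # A hypothetical invocation (assuming, per the _setuputils import above,
    # that the current working directory is the one containing setup.py):
    #
    #   CPPFLAGS="-Ideps/libuv/include/ -Isrc/gevent/" python src/gevent/libuv/_corecffi_build.py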
ffi.compile(verbose=True)
| 12,349 | 34.693642 | 110 | py |