repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/byol.py | import torch
from torch.utils.data import Dataset
from openselfsup.utils import build_from_cfg
from torchvision.transforms import Compose
from .registry import DATASETS, PIPELINES
from .builder import build_datasource
from .utils import to_numpy
@DATASETS.register_module
class BYOLDataset(Dataset):
"""Dataset for BYOL.
"""
def __init__(self, data_source, pipeline1, pipeline2, prefetch=False):
self.data_source = build_datasource(data_source)
pipeline1 = [build_from_cfg(p, PIPELINES) for p in pipeline1]
self.pipeline1 = Compose(pipeline1)
pipeline2 = [build_from_cfg(p, PIPELINES) for p in pipeline2]
self.pipeline2 = Compose(pipeline2)
self.prefetch = prefetch
def __len__(self):
return self.data_source.get_length()
def __getitem__(self, idx):
img = self.data_source.get_sample(idx)
img1 = self.pipeline1(img)
img2 = self.pipeline2(img)
if self.prefetch:
img1 = torch.from_numpy(to_numpy(img1))
img2 = torch.from_numpy(to_numpy(img2))
img_cat = torch.cat((img1.unsqueeze(0), img2.unsqueeze(0)), dim=0)
return dict(img=img_cat)
def evaluate(self, scores, keyword, logger=None, **kwargs):
raise NotImplementedError
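# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the upstream file). It
# assumes an 'ImageList' data source and the torchvision transforms below are
# registered under DATASOURCES/PIPELINES, and that 'data/train.txt' exists;
# adjust the names and paths to your own configuration.
if __name__ == '__main__':
    data_source_cfg = dict(
        type='ImageList', root='data/images', list_file='data/train.txt')
    pipeline = [
        dict(type='RandomResizedCrop', size=224),
        dict(type='RandomHorizontalFlip'),
        dict(type='ToTensor'),
    ]
    dataset = BYOLDataset(data_source_cfg, pipeline1=pipeline, pipeline2=pipeline)
    sample = dataset[0]
    # The two augmented views are stacked along dim 0: (2, 3, 224, 224).
    print(sample['img'].shape)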
| 1,284 | 29.595238 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/contrastive.py | import torch
from PIL import Image
from .registry import DATASETS
from .base import BaseDataset
from .utils import to_numpy
@DATASETS.register_module
class ContrastiveDataset(BaseDataset):
"""Dataset for contrastive learning methods that forward
two views of the image at a time (MoCo, SimCLR).
"""
def __init__(self, data_source, pipeline, prefetch=False):
data_source['return_label'] = False
super(ContrastiveDataset, self).__init__(data_source, pipeline, prefetch)
def __getitem__(self, idx):
img = self.data_source.get_sample(idx)
assert isinstance(img, Image.Image), \
'The output from the data source must be an Image, got: {}. \
Please ensure that the list file does not contain labels.'.format(
type(img))
img1 = self.pipeline(img)
img2 = self.pipeline(img)
if self.prefetch:
img1 = torch.from_numpy(to_numpy(img1))
img2 = torch.from_numpy(to_numpy(img2))
img_cat = torch.cat((img1.unsqueeze(0), img2.unsqueeze(0)), dim=0)
return dict(img=img_cat)
def evaluate(self, scores, keyword, logger=None, **kwargs):
raise NotImplementedError
| 1,210 | 34.617647 | 81 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/data_sources/cifar.py | from abc import ABCMeta, abstractmethod
from PIL import Image
from torchvision.datasets import CIFAR10, CIFAR100
from ..registry import DATASOURCES
class Cifar(metaclass=ABCMeta):
CLASSES = None
def __init__(self, root, split, return_label=True):
assert split in ['train', 'test']
self.root = root
self.split = split
self.return_label = return_label
self.cifar = None
self.set_cifar()
self.labels = self.cifar.targets
@abstractmethod
def set_cifar(self):
pass
def get_length(self):
return len(self.cifar)
def get_sample(self, idx):
img = Image.fromarray(self.cifar.data[idx])
if self.return_label:
target = self.labels[idx] # img: HWC, RGB
return img, target
else:
return img
@DATASOURCES.register_module
class Cifar10(Cifar):
CLASSES = [
'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog',
'horse', 'ship', 'truck'
]
def __init__(self, root, split, return_label=True):
super().__init__(root, split, return_label)
def set_cifar(self):
try:
self.cifar = CIFAR10(
root=self.root, train=self.split == 'train', download=False)
except Exception:
raise Exception("Please download CIFAR10 manually; "
"downloading it in parallel from multiple "
"processes may corrupt the dataset.")
@DATASOURCES.register_module
class Cifar100(Cifar):
def __init__(self, root, split, return_label=True):
super().__init__(root, split, return_label)
def set_cifar(self):
try:
self.cifar = CIFAR100(
root=self.root, train=self.split == 'train', download=False)
except Exception:
raise Exception("Please download CIFAR100 manually; "
"downloading it in parallel from multiple "
"processes may corrupt the dataset.")
| 1,990 | 26.273973 | 76 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/loader/sampler.py | from __future__ import division
import math
import numpy as np
import torch
from mmcv.runner import get_dist_info
from torch.utils.data import DistributedSampler as _DistributedSampler
from torch.utils.data import Sampler
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
replace=False):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
self.replace = replace
self.unif_sampling_flag = False
def __iter__(self):
# deterministically shuffle based on epoch
if not self.unif_sampling_flag:
self.generate_new_list()
else:
self.unif_sampling_flag = False
return iter(self.indices[self.rank * self.num_samples:(self.rank + 1) *
self.num_samples])
def generate_new_list(self):
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
if self.replace:
indices = torch.randint(
low=0,
high=len(self.dataset),
size=(len(self.dataset), ),
generator=g).tolist()
else:
indices = torch.randperm(
len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
self.indices = indices
def set_uniform_indices(self, labels, num_classes):
self.unif_sampling_flag = True
assert self.shuffle, "Using uniform sampling, the indices must be shuffled."
np.random.seed(self.epoch)
assert (len(labels) == len(self.dataset))
N = len(labels)
size_per_label = int(N / num_classes) + 1
indices = []
images_lists = [[] for i in range(num_classes)]
for i, l in enumerate(labels):
images_lists[l].append(i)
for i, l in enumerate(images_lists):
if len(l) == 0:
continue
indices.extend(
np.random.choice(
l, size_per_label, replace=(len(l) <= size_per_label)))
indices = np.array(indices)
np.random.shuffle(indices)
indices = indices[:N].astype(int).tolist()
# add extra samples to make it evenly divisible
assert len(indices) <= self.total_size, \
"{} vs {}".format(len(indices), self.total_size)
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size, \
"{} vs {}".format(len(indices), self.total_size)
self.indices = indices
class GroupSampler(Sampler):
def __init__(self, dataset, samples_per_gpu=1):
assert hasattr(dataset, 'flag')
self.dataset = dataset
self.samples_per_gpu = samples_per_gpu
self.flag = dataset.flag.astype(np.int64)
self.group_sizes = np.bincount(self.flag)
self.num_samples = 0
for i, size in enumerate(self.group_sizes):
self.num_samples += int(np.ceil(
size / self.samples_per_gpu)) * self.samples_per_gpu
def __iter__(self):
indices = []
for i, size in enumerate(self.group_sizes):
if size == 0:
continue
indice = np.where(self.flag == i)[0]
assert len(indice) == size
np.random.shuffle(indice)
num_extra = int(np.ceil(size / self.samples_per_gpu)
) * self.samples_per_gpu - len(indice)
indice = np.concatenate(
[indice, np.random.choice(indice, num_extra)])
indices.append(indice)
indices = np.concatenate(indices)
indices = [
indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
for i in np.random.permutation(
range(len(indices) // self.samples_per_gpu))
]
indices = np.concatenate(indices)
indices = indices.astype(np.int64).tolist()
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
class DistributedGroupSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self,
dataset,
samples_per_gpu=1,
num_replicas=None,
rank=None):
_rank, _num_replicas = get_dist_info()
if num_replicas is None:
num_replicas = _num_replicas
if rank is None:
rank = _rank
self.dataset = dataset
self.samples_per_gpu = samples_per_gpu
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
assert hasattr(self.dataset, 'flag')
self.flag = self.dataset.flag
self.group_sizes = np.bincount(self.flag)
self.num_samples = 0
for i, j in enumerate(self.group_sizes):
self.num_samples += int(
math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
self.num_replicas)) * self.samples_per_gpu
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = []
for i, size in enumerate(self.group_sizes):
if size > 0:
indice = np.where(self.flag == i)[0]
assert len(indice) == size
indice = indice[list(torch.randperm(int(size),
generator=g))].tolist()
extra = int(
math.ceil(
size * 1.0 / self.samples_per_gpu / self.num_replicas)
) * self.samples_per_gpu * self.num_replicas - len(indice)
# pad indice
tmp = indice.copy()
for _ in range(extra // size):
indice.extend(tmp)
indice.extend(tmp[:extra % size])
indices.extend(indice)
assert len(indices) == self.total_size
indices = [
indices[j] for i in list(
torch.randperm(
len(indices) // self.samples_per_gpu, generator=g))
for j in range(i * self.samples_per_gpu, (i + 1) *
self.samples_per_gpu)
]
# subsample
offset = self.num_samples * self.rank
indices = indices[offset:offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
class DistributedGivenIterationSampler(Sampler):
def __init__(self,
dataset,
total_iter,
batch_size,
num_replicas=None,
rank=None,
last_iter=-1):
rank, world_size = get_dist_info()
assert rank < world_size
self.dataset = dataset
self.total_iter = total_iter
self.batch_size = batch_size
self.world_size = world_size
self.rank = rank
self.last_iter = last_iter
self.total_size = self.total_iter * self.batch_size
self.indices = self.gen_new_list()
def __iter__(self):
return iter(self.indices[(self.last_iter + 1) * self.batch_size:])
def set_uniform_indices(self, labels, num_classes):
np.random.seed(0)
assert (len(labels) == len(self.dataset))
N = len(labels)
size_per_label = int(N / num_classes) + 1
indices = []
images_lists = [[] for i in range(num_classes)]
for i, l in enumerate(labels):
images_lists[l].append(i)
for i, l in enumerate(images_lists):
if len(l) == 0:
continue
indices.extend(
np.random.choice(
l, size_per_label, replace=(len(l) <= size_per_label)))
indices = np.array(indices)
np.random.shuffle(indices)
indices = indices[:N].astype(int)
# repeat
all_size = self.total_size * self.world_size
indices = indices[:all_size]
num_repeat = (all_size - 1) // indices.shape[0] + 1
indices = np.tile(indices, num_repeat)
indices = indices[:all_size]
np.random.shuffle(indices)
# slice
beg = self.total_size * self.rank
indices = indices[beg:beg + self.total_size]
assert len(indices) == self.total_size
# set
self.indices = indices
def gen_new_list(self):
# each process shuffle all list with same seed, and pick one piece according to rank
np.random.seed(0)
all_size = self.total_size * self.world_size
indices = np.arange(len(self.dataset))
indices = indices[:all_size]
num_repeat = (all_size - 1) // indices.shape[0] + 1
indices = np.tile(indices, num_repeat)
indices = indices[:all_size]
np.random.shuffle(indices)
beg = self.total_size * self.rank
indices = indices[beg:beg + self.total_size]
assert len(indices) == self.total_size
return indices
def __len__(self):
# note here we do not take last iter into consideration, since __len__
# should only be used for displaying, the correct remaining size is
# handled by dataloader
#return self.total_size - (self.last_iter+1)*self.batch_size
return self.total_size
def set_epoch(self, epoch):
pass
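# ---------------------------------------------------------------------------
# Hedged illustration (editor addition): the pad-then-slice scheme used by
# DistributedSampler above, reproduced on plain Python lists so it can be
# inspected without initialising torch.distributed. Sizes are made up.
def _slice_for_rank(indices, num_replicas, rank):
    num_samples = int(math.ceil(len(indices) / num_replicas))
    total_size = num_samples * num_replicas
    # pad with the leading indices so every rank receives the same count
    padded = indices + indices[:total_size - len(indices)]
    return padded[rank * num_samples:(rank + 1) * num_samples]

if __name__ == '__main__':
    idx = list(range(10))   # a toy dataset of 10 samples
    for r in range(4):      # 4 replicas -> 3 indices each, 2 of them duplicates
        print(r, _slice_for_rank(idx, num_replicas=4, rank=r))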
| 10,628 | 34.079208 | 92 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/loader/build_loader.py | import platform
import random
import torch
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from torch.utils.data import DataLoader
#from .sampler import DistributedGroupSampler, DistributedSampler, GroupSampler
from .sampler import DistributedSampler, DistributedGivenIterationSampler
from torch.utils.data import RandomSampler
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
def build_dataloader(dataset,
imgs_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
replace=False,
seed=None,
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (Dataset): A PyTorch dataset.
imgs_per_gpu (int): Number of images on each GPU, i.e., batch size of
each GPU.
workers_per_gpu (int): How many subprocesses to use for data loading
for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed training.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
replace (bool): Replace or not in random shuffle.
It works on when shuffle is True.
kwargs: any keyword argument to be used to initialize DataLoader
Returns:
DataLoader: A PyTorch dataloader.
"""
if dist:
rank, world_size = get_dist_info()
sampler = DistributedSampler(
dataset, world_size, rank, shuffle=shuffle, replace=replace)
batch_size = imgs_per_gpu
num_workers = workers_per_gpu
else:
if replace:
raise NotImplementedError
sampler = RandomSampler(
dataset) if shuffle else None # TODO: set replace
batch_size = num_gpus * imgs_per_gpu
num_workers = num_gpus * workers_per_gpu
if kwargs.get('prefetch') is not None:
prefetch = kwargs.pop('prefetch')
img_norm_cfg = kwargs.pop('img_norm_cfg')
else:
prefetch = False
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
pin_memory=False,
worker_init_fn=worker_init_fn if seed is not None else None,
**kwargs)
if prefetch:
data_loader = PrefetchLoader(data_loader, img_norm_cfg['mean'], img_norm_cfg['std'])
return data_loader
def worker_init_fn(seed):
# DataLoader calls this with the worker id, which is used directly as the seed.
np.random.seed(seed)
random.seed(seed)
class PrefetchLoader:
"""
A data loader wrapper for prefetching data
"""
def __init__(self, loader, mean, std):
self.loader = loader
self._mean = mean
self._std = std
def __iter__(self):
stream = torch.cuda.Stream()
first = True
self.mean = torch.tensor([x * 255 for x in self._mean]).cuda().view(1, 3, 1, 1)
self.std = torch.tensor([x * 255 for x in self._std]).cuda().view(1, 3, 1, 1)
for next_input_dict in self.loader:
with torch.cuda.stream(stream):
data = next_input_dict['img'].cuda(non_blocking=True)
next_input_dict['img'] = data.float().sub_(self.mean).div_(self.std)
if not first:
yield input
else:
first = False
torch.cuda.current_stream().wait_stream(stream)
input = next_input_dict
yield input
def __len__(self):
return len(self.loader)
@property
def sampler(self):
return self.loader.sampler
@property
def dataset(self):
return self.loader.dataset | 4,179 | 30.428571 | 92 | py |
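# ---------------------------------------------------------------------------
# Hedged illustration (editor addition, appended after build_loader.py): the
# normalisation PrefetchLoader applies on the GPU, reproduced here on CPU
# tensors. It assumes the raw batch is uint8 in [0, 255] and that mean/std are
# given in [0, 1], which is why both are scaled by 255. The statistics are the
# usual ImageNet values, chosen only for illustration.
import torch

mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
batch = torch.randint(0, 256, (2, 3, 8, 8), dtype=torch.uint8)
m = torch.tensor([x * 255 for x in mean]).view(1, 3, 1, 1)
s = torch.tensor([x * 255 for x in std]).view(1, 3, 1, 1)
normalised = batch.float().sub_(m).div_(s)  # same arithmetic as PrefetchLoader.__iter__
print(normalised.mean().item(), normalised.std().item())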
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/pipelines/transforms.py | import cv2
import inspect
import numpy as np
from PIL import Image, ImageFilter
import torch
from torchvision import transforms as _transforms
from openselfsup.utils import build_from_cfg
from ..registry import PIPELINES
# register all existing transforms in torchvision
_EXCLUDED_TRANSFORMS = ['GaussianBlur']
for m in inspect.getmembers(_transforms, inspect.isclass):
if m[0] not in _EXCLUDED_TRANSFORMS:
PIPELINES.register_module(m[1])
@PIPELINES.register_module
class RandomAppliedTrans(object):
"""Randomly applied transformations.
Args:
transforms (list[dict]): List of transformations in dictionaries.
p (float): Probability.
"""
def __init__(self, transforms, p=0.5):
t = [build_from_cfg(t, PIPELINES) for t in transforms]
self.trans = _transforms.RandomApply(t, p=p)
def __call__(self, img):
return self.trans(img)
def __repr__(self):
repr_str = self.__class__.__name__
return repr_str
# custom transforms
@PIPELINES.register_module
class Lighting(object):
"""Lighting noise(AlexNet - style PCA - based noise)."""
_IMAGENET_PCA = {
'eigval':
torch.Tensor([0.2175, 0.0188, 0.0045]),
'eigvec':
torch.Tensor([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
])
}
def __init__(self):
self.alphastd = 0.1
self.eigval = self._IMAGENET_PCA['eigval']
self.eigvec = self._IMAGENET_PCA['eigvec']
def __call__(self, img):
assert isinstance(img, torch.Tensor), \
"Expect torch.Tensor, got {}".format(type(img))
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(img).clone()\
.mul(alpha.view(1, 3).expand(3, 3))\
.mul(self.eigval.view(1, 3).expand(3, 3))\
.sum(1).squeeze()
return img.add(rgb.view(3, 1, 1).expand_as(img))
def __repr__(self):
repr_str = self.__class__.__name__
return repr_str
@PIPELINES.register_module
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709."""
def __init__(self, sigma_min, sigma_max):
self.sigma_min = sigma_min
self.sigma_max = sigma_max
def __call__(self, img):
sigma = np.random.uniform(self.sigma_min, self.sigma_max)
img = img.filter(ImageFilter.GaussianBlur(radius=sigma))
return img
def __repr__(self):
repr_str = self.__class__.__name__
return repr_str
@PIPELINES.register_module
class Solarization(object):
"""Solarization augmentation in BYOL https://arxiv.org/abs/2006.07733."""
def __init__(self, threshold=128):
self.threshold = threshold
def __call__(self, img):
img = np.array(img)
img = np.where(img < self.threshold, img, 255 - img)
return Image.fromarray(img.astype(np.uint8))
def __repr__(self):
repr_str = self.__class__.__name__
return repr_str
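# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): composing the custom transforms above
# with torchvision ones through the PIPELINES registry. The augmentation
# parameters are illustrative choices, not the repository's training configs.
if __name__ == '__main__':
    pipeline_cfg = [
        dict(type='RandomAppliedTrans',
             transforms=[dict(type='GaussianBlur', sigma_min=0.1, sigma_max=2.0)],
             p=0.5),
        dict(type='RandomAppliedTrans',
             transforms=[dict(type='Solarization')], p=0.2),
        dict(type='ToTensor'),
    ]
    pipeline = _transforms.Compose(
        [build_from_cfg(p, PIPELINES) for p in pipeline_cfg])
    img = Image.new('RGB', (64, 64), color=(128, 64, 32))
    out = pipeline(img)
    print(out.shape)  # torch.Size([3, 64, 64])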
| 3,142 | 26.330435 | 80 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/hooks/extractor.py | import torch.nn as nn
from torch.utils.data import Dataset
from openselfsup.utils import nondist_forward_collect, dist_forward_collect
class Extractor(object):
"""Feature extractor.
Args:
dataset (Dataset | dict): A PyTorch dataset or dict that indicates
the dataset.
imgs_per_gpu (int): Number of images on each GPU, i.e., batch size of
each GPU.
workers_per_gpu (int): How many subprocesses to use for data loading
for each GPU.
dist_mode (bool): Use distributed extraction or not. Default: False.
"""
def __init__(self,
dataset,
imgs_per_gpu,
workers_per_gpu,
dist_mode=False):
from openselfsup import datasets
if isinstance(dataset, Dataset):
self.dataset = dataset
elif isinstance(dataset, dict):
self.dataset = datasets.build_dataset(dataset)
else:
raise TypeError(
'dataset must be a Dataset object or a dict, not {}'.format(
type(dataset)))
self.data_loader = datasets.build_dataloader(
self.dataset,
imgs_per_gpu,
workers_per_gpu,
dist=dist_mode,
shuffle=False)
self.dist_mode = dist_mode
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
def _forward_func(self, runner, **x):
backbone_feat = runner.model(mode='extract', **x)
last_layer_feat = runner.model.module.neck([backbone_feat[-1]])[0]
last_layer_feat = last_layer_feat.view(last_layer_feat.size(0), -1)
return dict(feature=last_layer_feat.cpu())
def __call__(self, runner):
func = lambda **x: self._forward_func(runner, **x)
if self.dist_mode:
feats = dist_forward_collect(
func,
self.data_loader,
runner.rank,
len(self.dataset),
ret_rank=-1)['feature'] # NxD
else:
feats = nondist_forward_collect(func, self.data_loader,
len(self.dataset))['feature']
return feats
| 2,196 | 34.435484 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/hooks/validate_hook.py | from mmcv.runner import Hook
import torch
from torch.utils.data import Dataset
from openselfsup.utils import nondist_forward_collect, dist_forward_collect
from .registry import HOOKS
@HOOKS.register_module
class ValidateHook(Hook):
"""Validation hook.
Args:
dataset (Dataset | dict): A PyTorch dataset or dict that indicates
the dataset.
dist_mode (bool): Use distributed evaluation or not. Default: True.
initial (bool): Whether to evaluate before the training starts.
Default: True.
interval (int): Evaluation interval (by epochs). Default: 1.
**eval_kwargs: Evaluation arguments fed into the evaluate function of
the dataset.
"""
def __init__(self,
dataset,
dist_mode=True,
initial=True,
interval=1,
**eval_kwargs):
from openselfsup import datasets
if isinstance(dataset, Dataset):
self.dataset = dataset
elif isinstance(dataset, dict):
self.dataset = datasets.build_dataset(dataset)
else:
raise TypeError(
'dataset must be a Dataset object or a dict, not {}'.format(
type(dataset)))
self.data_loader = datasets.build_dataloader(
self.dataset,
eval_kwargs['imgs_per_gpu'],
eval_kwargs['workers_per_gpu'],
dist=dist_mode,
shuffle=False,
prefetch=eval_kwargs.get('prefetch', False),
img_norm_cfg=eval_kwargs.get('img_norm_cfg', dict()),
)
self.dist_mode = dist_mode
self.initial = initial
self.interval = interval
self.eval_kwargs = eval_kwargs
def before_run(self, runner):
if self.initial:
self._run_validate(runner)
def after_train_epoch(self, runner):
if not self.every_n_epochs(runner, self.interval):
return
self._run_validate(runner)
def _run_validate(self, runner):
runner.model.eval()
func = lambda **x: runner.model(mode='test', **x)
if self.dist_mode:
results = dist_forward_collect(
func, self.data_loader, runner.rank,
len(self.dataset)) # dict{key: np.ndarray}
else:
results = nondist_forward_collect(func, self.data_loader,
len(self.dataset))
if runner.rank == 0:
for name, val in results.items():
self._evaluate(runner, torch.from_numpy(val), name)
runner.model.train()
def _evaluate(self, runner, results, keyword):
eval_res = self.dataset.evaluate(
results,
keyword=keyword,
logger=runner.logger,
**self.eval_kwargs['eval_param'])
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
| 3,003 | 33.528736 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/hooks/deepcluster_hook.py | import numpy as np
from mmcv.runner import Hook
import torch
import torch.distributed as dist
from openselfsup.third_party import clustering as _clustering
from openselfsup.utils import print_log
from .registry import HOOKS
from .extractor import Extractor
@HOOKS.register_module
class DeepClusterHook(Hook):
"""Hook for DeepCluster.
Args:
extractor (dict): Config dict for feature extraction.
clustering (dict): Config dict that specifies the clustering algorithm.
unif_sampling (bool): Whether to apply uniform sampling.
reweight (bool): Whether to apply loss re-weighting.
reweight_pow (float): The power of re-weighting.
init_memory (bool): Whether to initialize memory banks for ODC.
Default: False.
initial (bool): Whether to call the hook initially. Default: True.
interval (int): Frequency of epochs to call the hook. Default: 1.
dist_mode (bool): Use distributed training or not. Default: True.
data_loaders (DataLoader): A PyTorch dataloader. Default: None.
"""
def __init__(
self,
extractor,
clustering,
unif_sampling,
reweight,
reweight_pow,
init_memory=False, # for ODC
initial=True,
interval=1,
dist_mode=True,
data_loaders=None):
self.extractor = Extractor(dist_mode=dist_mode, **extractor)
self.clustering_type = clustering.pop('type')
self.clustering_cfg = clustering
self.unif_sampling = unif_sampling
self.reweight = reweight
self.reweight_pow = reweight_pow
self.init_memory = init_memory
self.initial = initial
self.interval = interval
self.dist_mode = dist_mode
self.data_loaders = data_loaders
def before_run(self, runner):
if self.initial:
self.deepcluster(runner)
def after_train_epoch(self, runner):
if not self.every_n_epochs(runner, self.interval):
return
self.deepcluster(runner)
def deepcluster(self, runner):
# step 1: get features
runner.model.eval()
features = self.extractor(runner)
runner.model.train()
# step 2: get labels
if not self.dist_mode or (self.dist_mode and runner.rank == 0):
clustering_algo = _clustering.__dict__[self.clustering_type](
**self.clustering_cfg)
# Features are normalized during clustering
clustering_algo.cluster(features, verbose=True)
assert isinstance(clustering_algo.labels, np.ndarray)
new_labels = clustering_algo.labels.astype(np.int64)
np.save(
"{}/cluster_epoch_{}.npy".format(runner.work_dir,
runner.epoch), new_labels)
self.evaluate(runner, new_labels)
else:
new_labels = np.zeros((len(self.data_loaders[0].dataset), ),
dtype=np.int64)
if self.dist_mode:
new_labels_tensor = torch.from_numpy(new_labels).cuda()
dist.broadcast(new_labels_tensor, 0)
new_labels = new_labels_tensor.cpu().numpy()
new_labels_list = list(new_labels)
# step 3: assign new labels
self.data_loaders[0].dataset.assign_labels(new_labels_list)
# step 4 (a): set uniform sampler
if self.unif_sampling:
self.data_loaders[0].sampler.set_uniform_indices(
new_labels_list, self.clustering_cfg.k)
# step 4 (b): set loss reweight
if self.reweight:
runner.model.module.set_reweight(new_labels, self.reweight_pow)
# step 5: randomize classifier
runner.model.module.head.init_weights(init_linear='normal')
if self.dist_mode:
for p in runner.model.module.head.state_dict().values():
dist.broadcast(p, 0)
# step 6: init memory for ODC
if self.init_memory:
runner.model.module.memory_bank.init_memory(features, new_labels)
def evaluate(self, runner, new_labels):
hist = np.bincount(new_labels, minlength=self.clustering_cfg.k)
empty_cls = (hist == 0).sum()
minimal_cls_size, maximal_cls_size = hist.min(), hist.max()
if runner.rank == 0:
print_log(
"empty_num: {}\tmin_cluster: {}\tmax_cluster:{}".format(
empty_cls.item(), minimal_cls_size.item(),
maximal_cls_size.item()),
logger='root')
| 4,637 | 36.104 | 79 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/contextmanagers.py | # coding: utf-8
import asyncio
import contextlib
import logging
import os
import time
from typing import List
import torch
logger = logging.getLogger(__name__)
DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))
@contextlib.asynccontextmanager
async def completed(trace_name='',
name='',
sleep_interval=0.05,
streams: List[torch.cuda.Stream] = None):
"""Async context manager that waits for work to complete on
given CUDA streams."""
if not torch.cuda.is_available():
yield
return
stream_before_context_switch = torch.cuda.current_stream()
if not streams:
streams = [stream_before_context_switch]
else:
streams = [s if s else stream_before_context_switch for s in streams]
end_events = [
torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
]
if DEBUG_COMPLETED_TIME:
start = torch.cuda.Event(enable_timing=True)
stream_before_context_switch.record_event(start)
cpu_start = time.monotonic()
logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
grad_enabled_before = torch.is_grad_enabled()
try:
yield
finally:
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_end = time.monotonic()
for i, stream in enumerate(streams):
event = end_events[i]
stream.record_event(event)
grad_enabled_after = torch.is_grad_enabled()
# observed change of torch.is_grad_enabled() during concurrent run of
# async_test_bboxes code
assert (grad_enabled_before == grad_enabled_after
), 'Unexpected is_grad_enabled() value change'
are_done = [e.query() for e in end_events]
logger.debug('%s %s completed: %s streams: %s', trace_name, name,
are_done, streams)
with torch.cuda.stream(stream_before_context_switch):
while not all(are_done):
await asyncio.sleep(sleep_interval)
are_done = [e.query() for e in end_events]
logger.debug(
'%s %s completed: %s streams: %s',
trace_name,
name,
are_done,
streams,
)
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_time = (cpu_end - cpu_start) * 1000
stream_times_ms = ''
for i, stream in enumerate(streams):
elapsed_time = start.elapsed_time(end_events[i])
stream_times_ms += ' {} {:.2f} ms'.format(stream, elapsed_time)
logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
stream_times_ms)
@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
trace_name='concurrent',
name='stream'):
"""Run code concurrently in different streams.
:param streamqueue: asyncio.Queue instance.
Queue tasks define the pool of streams used for concurrent execution.
"""
if not torch.cuda.is_available():
yield
return
initial_stream = torch.cuda.current_stream()
with torch.cuda.stream(initial_stream):
stream = await streamqueue.get()
assert isinstance(stream, torch.cuda.Stream)
try:
with torch.cuda.stream(stream):
logger.debug('%s %s is starting, stream: %s', trace_name, name,
stream)
yield
current = torch.cuda.current_stream()
assert current == stream
logger.debug('%s %s has finished, stream: %s', trace_name,
name, stream)
finally:
streamqueue.task_done()
streamqueue.put_nowait(stream)
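# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): running two matmuls on separate CUDA
# streams through the helpers above. Requires a CUDA device and Python >= 3.7;
# the matrix sizes and the two-stream pool are arbitrary illustrative choices.
async def _demo():
    streamqueue = asyncio.Queue()
    for _ in range(2):
        streamqueue.put_nowait(torch.cuda.Stream())

    async def job(name):
        async with concurrent(streamqueue, name=name):
            x = torch.randn(512, 512, device='cuda')
            return x @ x

    async with completed('demo', 'both_jobs'):
        await asyncio.gather(job('a'), job('b'))

if __name__ == '__main__':
    if torch.cuda.is_available():
        asyncio.run(_demo())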
| 4,103 | 32.365854 | 79 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/optimizers.py | import torch
from torch.optim.optimizer import Optimizer, required
from torch.optim import *
class LARS(Optimizer):
r"""Implements layer-wise adaptive rate scaling for SGD.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): base learning rate (\gamma_0)
momentum (float, optional): momentum factor (default: 0) ("m")
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
("\beta")
dampening (float, optional): dampening for momentum (default: 0)
eta (float, optional): LARS coefficient
nesterov (bool, optional): enables Nesterov momentum (default: False)
Based on Algorithm 1 of the following paper by You, Gitman, and Ginsburg.
Large Batch Training of Convolutional Networks:
https://arxiv.org/abs/1708.03888
Example:
>>> optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9,
>>> weight_decay=1e-4, eta=1e-3)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
"""
def __init__(self,
params,
lr=required,
momentum=0,
dampening=0,
weight_decay=0,
eta=0.001,
nesterov=False):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay))
if eta < 0.0:
raise ValueError("Invalid LARS coefficient value: {}".format(eta))
defaults = dict(
lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov, eta=eta)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(LARS, self).__init__(params, defaults)
def __setstate__(self, state):
super(LARS, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
eta = group['eta']
nesterov = group['nesterov']
lr = group['lr']
lars_exclude = group.get('lars_exclude', False)
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad
if lars_exclude:
local_lr = 1.
else:
weight_norm = torch.norm(p).item()
grad_norm = torch.norm(d_p).item()
# Compute local learning rate for this layer
local_lr = eta * weight_norm / \
(grad_norm + weight_decay * weight_norm)
actual_lr = local_lr * lr
d_p = d_p.add(p, alpha=weight_decay).mul(actual_lr)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = \
torch.clone(d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
p.add_(-d_p)
return loss
| 4,327 | 35.991453 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/profiling.py | import contextlib
import sys
import time
import torch
if sys.version_info >= (3, 7):
@contextlib.contextmanager
def profile_time(trace_name,
name,
enabled=True,
stream=None,
end_stream=None):
"""Print time spent by CPU and GPU.
Useful as a temporary context manager to find sweet spots of
code suitable for async implementation.
"""
if (not enabled) or not torch.cuda.is_available():
yield
return
stream = stream if stream else torch.cuda.current_stream()
end_stream = end_stream if end_stream else stream
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
stream.record_event(start)
try:
cpu_start = time.monotonic()
yield
finally:
cpu_end = time.monotonic()
end_stream.record_event(end)
end.synchronize()
cpu_time = (cpu_end - cpu_start) * 1000
gpu_time = start.elapsed_time(end)
msg = "{} {} cpu_time {:.2f} ms ".format(trace_name, name,
cpu_time)
msg += "gpu_time {:.2f} ms stream {}".format(gpu_time, stream)
print(msg, end_stream)
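# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): timing one CUDA matmul with the
# context manager above. It only does real work when CUDA is available and
# Python is >= 3.7; the matrix size is arbitrary.
if __name__ == '__main__':
    if torch.cuda.is_available():
        x = torch.randn(1024, 1024, device='cuda')
        with profile_time('demo', 'matmul'):
            y = x @ x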
| 1,363 | 32.268293 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/collect.py | import numpy as np
import mmcv
import torch
from .gather import gather_tensors_batch
def nondist_forward_collect(func, data_loader, length):
"""Forward and collect network outputs.
This function performs forward propagation and collects outputs.
It can be used to collect results, features, losses, etc.
Args:
func (function): The function to process data. The output must be
a dictionary of CPU tensors.
length (int): Expected length of output arrays.
Returns:
results_all (dict(np.ndarray)): The concatenated outputs.
"""
results = []
prog_bar = mmcv.ProgressBar(len(data_loader))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = func(**data)
results.append(result)
prog_bar.update()
results_all = {}
for k in results[0].keys():
results_all[k] = np.concatenate(
[batch[k].numpy() for batch in results], axis=0)
assert results_all[k].shape[0] == length
return results_all
def dist_forward_collect(func, data_loader, rank, length, ret_rank=-1):
"""Forward and collect network outputs in a distributed manner.
This function performs forward propagation and collects outputs.
It can be used to collect results, features, losses, etc.
Args:
func (function): The function to process data. The output must be
a dictionary of CPU tensors.
rank (int): This process id.
length (int): Expected length of output arrays.
ret_rank (int): The process that returns.
Other processes will return None.
Returns:
results_all (dict(np.ndarray)): The concatenated outputs.
"""
results = []
if rank == 0:
prog_bar = mmcv.ProgressBar(len(data_loader))
for idx, data in enumerate(data_loader):
with torch.no_grad():
result = func(**data) # dict{key: tensor}
results.append(result)
if rank == 0:
prog_bar.update()
results_all = {}
for k in results[0].keys():
results_cat = np.concatenate([batch[k].numpy() for batch in results],
axis=0)
if ret_rank == -1:
results_gathered = gather_tensors_batch(results_cat, part_size=20)
results_strip = np.concatenate(results_gathered, axis=0)[:length]
else:
results_gathered = gather_tensors_batch(
results_cat, part_size=20, ret_rank=ret_rank)
if rank == ret_rank:
results_strip = np.concatenate(
results_gathered, axis=0)[:length]
else:
results_strip = None
results_all[k] = results_strip
return results_all
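# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): nondist_forward_collect only needs an
# iterable of keyword-argument dicts and a callable that returns CPU tensors,
# so a plain list stands in for a DataLoader here. Shapes are arbitrary.
if __name__ == '__main__':
    fake_loader = [dict(img=torch.randn(4, 3, 8, 8)) for _ in range(3)]
    extract = lambda img: dict(feature=img.flatten(1).mean(dim=1, keepdim=True))
    out = nondist_forward_collect(extract, fake_loader, length=12)
    print(out['feature'].shape)  # (12, 1) numpy array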
| 2,773 | 32.02381 | 78 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/alias_multinomial.py | import torch
import numpy as np
class AliasMethod(object):
"""The alias method for sampling.
From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
Args:
probs (Tensor): Sampling probabilities.
"""
def __init__(self, probs):
if probs.sum() > 1:
probs.div_(probs.sum())
K = len(probs)
self.prob = torch.zeros(K)
self.alias = torch.LongTensor([0] * K)
# Sort the data into the outcomes with probabilities
# that are larger and smaller than 1/K.
smaller = []
larger = []
for kk, prob in enumerate(probs):
self.prob[kk] = K * prob
if self.prob[kk] < 1.0:
smaller.append(kk)
else:
larger.append(kk)
# Loop though and create little binary mixtures that
# appropriately allocate the larger outcomes over the
# overall uniform mixture.
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
self.alias[small] = large
self.prob[large] = (self.prob[large] - 1.0) + self.prob[small]
if self.prob[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
for last_one in smaller + larger:
self.prob[last_one] = 1
def cuda(self):
self.prob = self.prob.cuda()
self.alias = self.alias.cuda()
def draw(self, N):
"""Draw N samples from multinomial.
Args:
N (int): Number of samples.
Returns:
Tensor: Samples.
"""
K = self.alias.size(0)
kk = torch.zeros(
N, dtype=torch.long, device=self.prob.device).random_(0, K)
prob = self.prob.index_select(0, kk)
alias = self.alias.index_select(0, kk)
# b is whether a random number is greater than q
b = torch.bernoulli(prob)
oq = kk.mul(b.long())
oj = alias.mul((1 - b).long())
return oq + oj
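# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): drawing from a small categorical
# distribution and checking the empirical frequencies. The probabilities are
# arbitrary; call .cuda() on the sampler first if drawing on the GPU.
if __name__ == '__main__':
    probs = torch.tensor([0.1, 0.3, 0.6])
    sampler = AliasMethod(probs)
    draws = sampler.draw(100000)
    print(torch.bincount(draws).float() / draws.numel())  # ~[0.1, 0.3, 0.6]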
| 2,132 | 27.065789 | 120 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/gather.py | import numpy as np
import torch
import torch.distributed as dist
def gather_tensors(input_array):
world_size = dist.get_world_size()
## gather shapes first
myshape = input_array.shape
mycount = input_array.size
shape_tensor = torch.Tensor(np.array(myshape)).cuda()
all_shape = [
torch.Tensor(np.array(myshape)).cuda() for i in range(world_size)
]
dist.all_gather(all_shape, shape_tensor)
## compute largest shapes
all_shape = [x.cpu().numpy() for x in all_shape]
all_count = [int(x.prod()) for x in all_shape]
all_shape = [list(map(int, x)) for x in all_shape]
max_count = max(all_count)
## padding tensors and gather them
output_tensors = [
torch.Tensor(max_count).cuda() for i in range(world_size)
]
padded_input_array = np.zeros(max_count)
padded_input_array[:mycount] = input_array.reshape(-1)
input_tensor = torch.Tensor(padded_input_array).cuda()
dist.all_gather(output_tensors, input_tensor)
## unpadding gathered tensors
padded_output = [x.cpu().numpy() for x in output_tensors]
output = [
x[:all_count[i]].reshape(all_shape[i])
for i, x in enumerate(padded_output)
]
return output
def gather_tensors_batch(input_array, part_size=100, ret_rank=-1):
# batch-wize gathering to avoid CUDA out of memory
rank = dist.get_rank()
all_features = []
part_num = input_array.shape[0] // part_size + 1 if input_array.shape[
0] % part_size != 0 else input_array.shape[0] // part_size
for i in range(part_num):
part_feat = input_array[i *
part_size:min((i + 1) *
part_size, input_array.shape[0]),
...]
assert part_feat.shape[
0] > 0, "rank: {}, length of part features should > 0".format(rank)
#print("rank: {}, gather part: {}/{}, length: {}".format(rank, i, part_num, len(part_feat)))
gather_part_feat = gather_tensors(part_feat)
all_features.append(gather_part_feat)
if ret_rank == -1:
all_features = [
np.concatenate([all_features[i][j] for i in range(part_num)],
axis=0) for j in range(len(all_features[0]))
]
return all_features
else:
if rank == ret_rank:
all_features = [
np.concatenate([all_features[i][j] for i in range(part_num)],
axis=0) for j in range(len(all_features[0]))
]
return all_features
else:
return None
| 2,629 | 36.571429 | 100 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/collect_env.py | import os.path as osp
import subprocess
import sys
from collections import defaultdict
import cv2
import mmcv
import torch
import torchvision
import openselfsup
def collect_env():
"""Collect the information of the running environments."""
env_info = {}
env_info['sys.platform'] = sys.platform
env_info['Python'] = sys.version.replace('\n', '')
cuda_available = torch.cuda.is_available()
env_info['CUDA available'] = cuda_available
if cuda_available:
from torch.utils.cpp_extension import CUDA_HOME
env_info['CUDA_HOME'] = CUDA_HOME
if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
try:
nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
nvcc = subprocess.check_output(
'"{}" -V | tail -n1'.format(nvcc), shell=True)
nvcc = nvcc.decode('utf-8').strip()
except subprocess.SubprocessError:
nvcc = 'Not Available'
env_info['NVCC'] = nvcc
devices = defaultdict(list)
for k in range(torch.cuda.device_count()):
devices[torch.cuda.get_device_name(k)].append(str(k))
for name, devids in devices.items():
env_info['GPU ' + ','.join(devids)] = name
gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
gcc = gcc.decode('utf-8').strip()
env_info['GCC'] = gcc
env_info['PyTorch'] = torch.__version__
env_info['PyTorch compiling details'] = torch.__config__.show()
env_info['TorchVision'] = torchvision.__version__
env_info['OpenCV'] = cv2.__version__
env_info['MMCV'] = mmcv.__version__
env_info['OpenSelfSup'] = openselfsup.__version__
#from openselfsup.ops import get_compiler_version, get_compiling_cuda_version
#env_info['OpenSelfSup Compiler'] = get_compiler_version()
#env_info['OpenSelfSup CUDA Compiler'] = get_compiling_cuda_version()
return env_info
if __name__ == "__main__":
for name, val in collect_env().items():
print('{}: {}'.format(name, val))
| 2,055 | 30.630769 | 81 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/flops_counter.py | # Modified from flops-counter.pytorch by Vladislav Sovrasov
# original repo: https://github.com/sovrasov/flops-counter.pytorch
# MIT License
# Copyright (c) 2018 Vladislav Sovrasov
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import numpy as np
import torch
import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
_AvgPoolNd, _MaxPoolNd)
def get_model_complexity_info(model,
input_res,
print_per_layer_stat=True,
as_strings=True,
input_constructor=None,
ost=sys.stdout):
assert type(input_res) is tuple
assert len(input_res) >= 2
flops_model = add_flops_counting_methods(model)
flops_model.eval().start_flops_count()
if input_constructor:
input = input_constructor(input_res)
_ = flops_model(**input)
else:
batch = torch.ones(()).new_empty(
(1, *input_res),
dtype=next(flops_model.parameters()).dtype,
device=next(flops_model.parameters()).device)
flops_model(batch)
if print_per_layer_stat:
print_model_with_flops(flops_model, ost=ost)
flops_count = flops_model.compute_average_flops_cost()
params_count = get_model_parameters_number(flops_model)
flops_model.stop_flops_count()
if as_strings:
return flops_to_string(flops_count), params_to_string(params_count)
return flops_count, params_count
def flops_to_string(flops, units='GMac', precision=2):
if units is None:
if flops // 10**9 > 0:
return str(round(flops / 10.**9, precision)) + ' GMac'
elif flops // 10**6 > 0:
return str(round(flops / 10.**6, precision)) + ' MMac'
elif flops // 10**3 > 0:
return str(round(flops / 10.**3, precision)) + ' KMac'
else:
return str(flops) + ' Mac'
else:
if units == 'GMac':
return str(round(flops / 10.**9, precision)) + ' ' + units
elif units == 'MMac':
return str(round(flops / 10.**6, precision)) + ' ' + units
elif units == 'KMac':
return str(round(flops / 10.**3, precision)) + ' ' + units
else:
return str(flops) + ' Mac'
def params_to_string(params_num):
"""converting number to string
:param float params_num: number
:returns str: number
>>> params_to_string(1e9)
'1000.0 M'
>>> params_to_string(2e5)
'200.0 k'
>>> params_to_string(3e-9)
'3e-09'
"""
if params_num // 10**6 > 0:
return str(round(params_num / 10**6, 2)) + ' M'
elif params_num // 10**3:
return str(round(params_num / 10**3, 2)) + ' k'
else:
return str(params_num)
def print_model_with_flops(model, units='GMac', precision=3, ost=sys.stdout):
total_flops = model.compute_average_flops_cost()
def accumulate_flops(self):
if is_supported_instance(self):
return self.__flops__ / model.__batch_counter__
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
return sum
def flops_repr(self):
accumulated_flops_cost = self.accumulate_flops()
return ', '.join([
flops_to_string(
accumulated_flops_cost, units=units, precision=precision),
'{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
self.original_extra_repr()
])
def add_extra_repr(m):
m.accumulate_flops = accumulate_flops.__get__(m)
flops_extra_repr = flops_repr.__get__(m)
if m.extra_repr != flops_extra_repr:
m.original_extra_repr = m.extra_repr
m.extra_repr = flops_extra_repr
assert m.extra_repr != m.original_extra_repr
def del_extra_repr(m):
if hasattr(m, 'original_extra_repr'):
m.extra_repr = m.original_extra_repr
del m.original_extra_repr
if hasattr(m, 'accumulate_flops'):
del m.accumulate_flops
model.apply(add_extra_repr)
print(model, file=ost)
model.apply(del_extra_repr)
def get_model_parameters_number(model):
params_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params_num
def add_flops_counting_methods(net_main_module):
# adding additional methods to the existing module object,
# this is done this way so that each function has access to self object
net_main_module.start_flops_count = start_flops_count.__get__(
net_main_module)
net_main_module.stop_flops_count = stop_flops_count.__get__(
net_main_module)
net_main_module.reset_flops_count = reset_flops_count.__get__(
net_main_module)
net_main_module.compute_average_flops_cost = \
compute_average_flops_cost.__get__(net_main_module)
net_main_module.reset_flops_count()
# Adding variables necessary for masked flops computation
net_main_module.apply(add_flops_mask_variable_or_reset)
return net_main_module
def compute_average_flops_cost(self):
"""
A method that will be available after add_flops_counting_methods() is
called on a desired net object.
Returns current mean flops consumption per image.
"""
batches_count = self.__batch_counter__
flops_sum = 0
for module in self.modules():
if is_supported_instance(module):
flops_sum += module.__flops__
return flops_sum / batches_count
def start_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is
called on a desired net object.
Activates the computation of mean flops consumption per image.
Call it before you run the network.
"""
add_batch_counter_hook_function(self)
self.apply(add_flops_counter_hook_function)
def stop_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is
called on a desired net object.
Stops computing the mean flops consumption per image.
Call whenever you want to pause the computation.
"""
remove_batch_counter_hook_function(self)
self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is
called on a desired net object.
Resets statistics computed so far.
"""
add_batch_counter_variables_or_reset(self)
self.apply(add_flops_counter_variable_or_reset)
def add_flops_mask(module, mask):
def add_flops_mask_func(module):
if isinstance(module, torch.nn.Conv2d):
module.__mask__ = mask
module.apply(add_flops_mask_func)
def remove_flops_mask(module):
module.apply(add_flops_mask_variable_or_reset)
def is_supported_instance(module):
for mod in hook_mapping:
if issubclass(type(module), mod):
return True
return False
def empty_flops_counter_hook(module, input, output):
module.__flops__ += 0
def upsample_flops_counter_hook(module, input, output):
output_size = output[0]
batch_size = output_size.shape[0]
output_elements_count = batch_size
for val in output_size.shape[1:]:
output_elements_count *= val
module.__flops__ += int(output_elements_count)
def relu_flops_counter_hook(module, input, output):
active_elements_count = output.numel()
module.__flops__ += int(active_elements_count)
def linear_flops_counter_hook(module, input, output):
input = input[0]
batch_size = input.shape[0]
module.__flops__ += int(batch_size * input.shape[1] * output.shape[1])
def pool_flops_counter_hook(module, input, output):
input = input[0]
module.__flops__ += int(np.prod(input.shape))
def bn_flops_counter_hook(module, input, output):
input = input[0]
batch_flops = np.prod(input.shape)
if module.affine:
batch_flops *= 2
module.__flops__ += int(batch_flops)
def gn_flops_counter_hook(module, input, output):
elems = np.prod(input[0].shape)
# there is no precise FLOPs estimation of computing mean and variance,
# and we just set it 2 * elems: half muladds for computing
# means and half for computing vars
batch_flops = 3 * elems
if module.affine:
batch_flops += elems
module.__flops__ += int(batch_flops)
def deconv_flops_counter_hook(conv_module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = input.shape[0]
input_height, input_width = input.shape[2:]
kernel_height, kernel_width = conv_module.kernel_size
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups = conv_module.groups
filters_per_channel = out_channels // groups
conv_per_position_flops = (
kernel_height * kernel_width * in_channels * filters_per_channel)
active_elements_count = batch_size * input_height * input_width
overall_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if conv_module.bias is not None:
output_height, output_width = output.shape[2:]
bias_flops = out_channels * batch_size * output_height * output_width
overall_flops = overall_conv_flops + bias_flops
conv_module.__flops__ += int(overall_flops)
def conv_flops_counter_hook(conv_module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = input.shape[0]
output_dims = list(output.shape[2:])
kernel_dims = list(conv_module.kernel_size)
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups = conv_module.groups
filters_per_channel = out_channels // groups
conv_per_position_flops = np.prod(
kernel_dims) * in_channels * filters_per_channel
active_elements_count = batch_size * np.prod(output_dims)
if conv_module.__mask__ is not None:
# (b, 1, h, w)
output_height, output_width = output.shape[2:]
flops_mask = conv_module.__mask__.expand(batch_size, 1, output_height,
output_width)
active_elements_count = flops_mask.sum()
overall_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if conv_module.bias is not None:
bias_flops = out_channels * active_elements_count
overall_flops = overall_conv_flops + bias_flops
conv_module.__flops__ += int(overall_flops)
hook_mapping = {
# conv
_ConvNd: conv_flops_counter_hook,
# deconv
_ConvTransposeMixin: deconv_flops_counter_hook,
# fc
nn.Linear: linear_flops_counter_hook,
# pooling
_AvgPoolNd: pool_flops_counter_hook,
_MaxPoolNd: pool_flops_counter_hook,
_AdaptiveAvgPoolNd: pool_flops_counter_hook,
_AdaptiveMaxPoolNd: pool_flops_counter_hook,
# activation
nn.ReLU: relu_flops_counter_hook,
nn.PReLU: relu_flops_counter_hook,
nn.ELU: relu_flops_counter_hook,
nn.LeakyReLU: relu_flops_counter_hook,
nn.ReLU6: relu_flops_counter_hook,
# normalization
_BatchNorm: bn_flops_counter_hook,
nn.GroupNorm: gn_flops_counter_hook,
# upsample
nn.Upsample: upsample_flops_counter_hook,
}
def batch_counter_hook(module, input, output):
batch_size = 1
if len(input) > 0:
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = len(input)
else:
print('Warning! No positional inputs found for a module, '
'assuming batch size is 1.')
module.__batch_counter__ += batch_size
def add_batch_counter_variables_or_reset(module):
module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
return
handle = module.register_forward_hook(batch_counter_hook)
module.__batch_counter_handle__ = handle
def remove_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
module.__batch_counter_handle__.remove()
del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module):
if is_supported_instance(module):
module.__flops__ = 0
def add_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
return
for mod_type, counter_hook in hook_mapping.items():
if issubclass(type(module), mod_type):
handle = module.register_forward_hook(counter_hook)
break
module.__flops_handle__ = handle
def remove_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
module.__flops_handle__.remove()
del module.__flops_handle__
# --- Masked flops counting
# Also being run in the initialization
def add_flops_mask_variable_or_reset(module):
if is_supported_instance(module):
module.__mask__ = None
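# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): counting MACs and parameters of a tiny
# CNN. The architecture is an arbitrary example; real backbones are passed in
# the same way.
if __name__ == '__main__':
    net = nn.Sequential(
        nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(),
        nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(8, 10))
    flops, params = get_model_complexity_info(
        net, (3, 32, 32), print_per_layer_stat=False, as_strings=True)
    print(flops, params)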
| 14,304 | 31.146067 | 79 | py |
rivuletpy | rivuletpy-master/filtering/riveal.py | import numpy as np
import math
import skfmm
from tqdm import tqdm
from scipy.ndimage.morphology import binary_dilation
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.noise import GaussianDropout, GaussianNoise
from keras.layers.advanced_activations import SReLU
def riveal(img, swc, K=9, nsample=8e4, epoch=20):
print('-- original image size: ', img.shape)
K = math.floor(K)  # Make sure K is an integer to avoid warnings
nsample = int(nsample)  # nsample may be passed as a float (e.g. 8e4)
# Pad the image and swc
margin = 3 * K
img = padimg(img, margin)
swc = padswc(swc, margin)
# Make the skeleton distance transform
print('-- Distance transform for swc...')
dt, foreground_region = make_skdt(img.shape, swc, K)
# Normalise data
# img = standardise(img)
# dt = standardise(dt)
img /= img.max()
# dt /= dt.max()
# Make the confident region
print('==swc shape:', swc.shape)
print('-- Making the confidence regions...(1/4)')
high_conf_region = make_conf_region(
img.shape, swc, K, low_conf=0.5, high_conf=1.)
# print('-- Making the confidence regions...(2/4)')
# mid_conf_region = make_conf_region(img.shape, swc, K,
# low_conf=0.25, high_conf=0.5)
print('-- Making the confidence regions...(3/4)')
low_conf_region = make_conf_region(
img.shape, swc, K, low_conf=0., high_conf=0.25)
# Fill only the central part of the background region
print('-- Making the confidence regions...(4/4)')
background_region = np.zeros(img.shape)
bg = np.logical_not(foreground_region)
bg = np.logical_and(bg, img > 0)  # bright voxels outside the skeleton region
for i in range(3):
bg = binary_dilation(bg)
background_region[margin:-margin, margin:-margin, margin:-margin] = bg[
margin:-margin, margin:-margin, margin:-margin]
from matplotlib import pyplot as plt
plt.subplot(3, 1, 1)
plt.imshow(high_conf_region.max(axis=-1))
plt.title('high conf')
plt.subplot(3, 1, 2)
plt.imshow(low_conf_region.max(axis=-1))
plt.title('low conf')
plt.subplot(3, 1, 3)
plt.imshow(background_region.max(axis=-1))
plt.title('bg')
plt.show()
# Randomly sample 2.5D blocks from the include region
print('-- Sampling blocks')
x1, y1 = sample_block(img, dt, high_conf_region, K,
math.ceil(nsample * 0.75))
# x2, y2 = sample_block(img, dt, mid_conf_region,
# K, math.ceil(nsample * 0.2))
x3, y3 = sample_block(img, dt, low_conf_region, K,
math.ceil(nsample * 0.1))
y3.fill(0.)
x4, y4 = sample_block(img, dt, background_region, K,
math.ceil(nsample * 0.15))
y4.fill(0.)
train_x = np.vstack((x1, x3, x4))
train_y = np.vstack((y1, y3, y4))
# Build the CNN with keras+tensorflow
print('--Training CNN...')
model = traincnn(train_x, train_y, K, epoch)
# Make the prediction within an area larger than
# the segmentation of the image
print('-- Predicting...')
bimg = img > 0
for i in range(6):
bimg = binary_dilation(bimg)
include_region = bimg > 0
include_idx = np.argwhere(include_region)
nidx = include_idx.shape[0]
    predict_x = np.zeros((int(nsample), 2 * K + 1, 2 * K + 1, 3))
rest = nidx
resultimg = np.zeros(img.shape)
pbar = tqdm(total=nidx)
# Predict every batch of blocks
while rest > 0:
startidx = -rest
endidx = -rest + nsample if -rest + nsample < nidx else nidx
rest -= nsample
# Write the value to each include voxel
for i, gidx in enumerate(range(int(startidx), int(endidx))):
bx, by, bz = include_idx[gidx, :]
predict_x[i, :, :, 0] = img[bx - K:bx + K + 1, by - K:by + K + 1,
bz]
predict_x[i, :, :, 1] = img[bx - K:bx + K + 1, by, bz - K:bz + K +
1]
predict_x[i, :, :, 2] = img[bx, by - K:by + K + 1, bz - K:bz + K +
1]
pd = model.predict(predict_x, batch_size=64, verbose=0).flatten()
for i, gidx in enumerate(range(int(startidx), int(endidx))):
bx, by, bz = include_idx[gidx, :]
resultimg[bx, by, bz] = pd[i]
pbar.update(nsample)
resultimg = unpadimg(resultimg, margin)
return resultimg
def standardise(img, zeromean=True):
img = (img - img.mean()) / img.std()
return img
def constrain_range(min, max, minlimit, maxlimit):
return list(
range(min if min > minlimit else minlimit, max
if max < maxlimit else maxlimit))
def sample_block(img, dt, include_region, K, nsample):
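    """Draw up to `nsample` random voxels from `include_region` and return their
    2.5D blocks (x) and the distance-transform value at each centre voxel (y)."""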
include_idx = np.argwhere(include_region)
nidx = include_idx.shape[0]
nsample = nidx if nsample > nidx else nsample
idx2train = include_idx[np.random.choice(nidx, nsample), :]
# Claim the memory for 2.5D blocks
x = np.zeros((nsample, 2 * K + 1, 2 * K + 1, 3))
y = np.zeros((nsample, 1)) # Claim the memory for 2.5D blocks
for i in range(idx2train.shape[0]):
bx, by, bz = idx2train[i, :]
x[i, :, :, 0] = img[bx - K:bx + K + 1, by - K:by + K + 1, bz]
x[i, :, :, 1] = img[bx - K:bx + K + 1, by, bz - K:bz + K + 1]
x[i, :, :, 2] = img[bx, by - K:by + K + 1, bz - K:bz + K + 1]
y[i] = dt[bx, by, bz]
return x, y
def make_conf_region(imshape, swc, K, low_conf=0.0, high_conf=1.0):
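    """Mark a cube of radius ceil(0.75*K) around every swc node whose confidence
    value (column 7) lies within [low_conf, high_conf]."""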
    if low_conf != 0.0 or high_conf != 1.0:
        confswc = np.vstack((swc[np.logical_and(swc[:, 7] >= low_conf,
                                                swc[:, 7] <= high_conf), :]))
    else:
        confswc = swc  # no confidence filtering requested; keep every node
region = np.zeros(imshape)
r = math.ceil(K * 0.75)
for i in range(confswc.shape[0]):
node = confswc[i, :]
n = [math.floor(n) for n in node[2:5]]
rg1 = constrain_range(n[0] - r, n[0] + r + 1, 0, imshape[0])
rg2 = constrain_range(n[1] - r, n[1] + r + 1, 0, imshape[1])
rg3 = constrain_range(n[2] - r, n[2] + r + 1, 0, imshape[2])
X, Y, Z = np.meshgrid(rg1, rg2, rg3)
# Skip if any node has empty box
if len(X) == 0 or len(Y) == 0 or len(Z) == 0:
continue
region[X, Y, Z] = 1
# _, region = make_skdt(imshape, confswc, K)
return region
def make_skdt(imshape, swc, K, a=6):
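    """Build the regression target: an exponentially decaying distance transform of
    the swc skeleton (zeroed beyond floor(K/2)), together with the foreground mask
    of voxels within 1.5*floor(K/2) of the skeleton."""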
skimg = make_sk_img(imshape, swc)
dm = math.floor(K / 2)
dt = skfmm.distance(skimg, dx=1)
include_region = dt <= 1.5 * dm
zeromask = dt >= dm
dt = np.exp(a * (1 - dt / dm)) - 1
dt[zeromask] = 0
return dt, include_region
def makecnn(in_shape, K):
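    """Two conv/pool stages with SReLU activations and Gaussian noise/dropout,
    followed by two dense layers and a linear unit that regresses a single
    distance value per 2.5D block (Keras 1.x API)."""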
model = Sequential()
model.add(
Convolution2D(
32, 3, 3, border_mode='same', input_shape=in_shape[1:]))
model.add(SReLU())
model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='tf'))
model.add(GaussianNoise(1))
model.add(GaussianDropout(0.4))
model.add(Convolution2D(32, 3, 3, border_mode='same'))
model.add(SReLU())
model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='tf'))
model.add(GaussianNoise(1))
model.add(GaussianDropout(0.4))
model.add(Flatten())
model.add(Dense(64))
model.add(SReLU())
model.add(Dense(64))
# model.add(SReLU())
model.add(Dense(1))
model.add(Activation('linear'))
return model
def traincnn(x, y, K, epoch):
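    """Scale the sampled blocks and targets to [0, 1], then fit the CNN with MSE
    loss and RMSprop, holding out 15% of the samples for validation."""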
x = x.astype('float32')
y = y.astype('float32')
x /= x.max()
y /= y.max()
model = makecnn(x.shape, K)
model.compile(loss='mse', optimizer='rmsprop')
model.fit(x,
y,
batch_size=64,
nb_epoch=epoch,
validation_split=0.15,
shuffle=True)
return model
def make_sk_img(imshape, swc):
skimg = np.ones(imshape)
for i in range(swc.shape[0]):
node = [math.floor(n) for n in swc[i, 2:5]]
skimg[node[0], node[1], node[2]] = 0
return skimg
def padimg(img, margin):
pimg = np.zeros((img.shape[0] + 2 * margin, img.shape[1] + 2 * margin,
img.shape[2] + 2 * margin))
pimg[margin:margin + img.shape[0], margin:margin + img.shape[1], margin:
margin + img.shape[2]] = img
return pimg
def unpadimg(img, margin):
    # Inverse of padimg: crop the zero padding of width `margin` from every axis
    return img[margin:-margin, margin:-margin, margin:-margin]
def padswc(swc, margin):
swc[:, 2:5] = swc[:, 2:5] + margin
return swc
| 8,673 | 32.233716 | 78 | py |
Mr.Right | Mr.Right-main/main.py | import yaml
import os
import utils
import warnings
from argparse import ArgumentParser
from torch import nn
from pytorch_lightning import Trainer,seed_everything
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import ModelCheckpoint,LearningRateMonitor
from data.data_module import TextToMultiDataModule
from pltrainer import TextToMultiTrainer
from functools import partial
from models.model import TextToMultiModel
from transformers import (
BertTokenizer, RobertaTokenizerFast
)
warnings.filterwarnings("ignore")
def main(args,config):
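    # Pipeline: build the tokenizer matching the chosen pretrained backbone,
    # prepare the retrieval DataModule, wrap the multimodal model in a
    # LightningModule and hand everything to a pytorch-lightning Trainer for
    # fit (train) or test.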
seed_everything(config.seed)
# tokenizer
if args.pretrain == "ALBEF" or args.pretrain == "ViLT":
tokenizer = BertTokenizer.from_pretrained(
config.text_encoder,
cache_dir= args.cache_dir,
)
elif args.pretrain == "METER":
tokenizer = RobertaTokenizerFast.from_pretrained(
config.text_encoder,
cache_dir= args.cache_dir,
)
# dataset
print("Create Dataset")
data_module = TextToMultiDataModule(args,config,tokenizer)
if args.mode == "test":
data_module.prepare_data(test=config['test_file'],document=config['document'])
else:
data_module.prepare_data(train=config['train_file'],val=config['val_file'],test=config['test_file'],document=config['document'])
data_module.setup()
# mutli model
print("Create multi modal")
model = TextToMultiModel(tokenizer=tokenizer,config=config,args=args)
pltrainer = TextToMultiTrainer(args,config,model,tokenizer)
# logger
wandb_logger = WandbLogger(name=args.wandb_task_name,project="multimodalembedding", entity=args.wandb_entity)
checkpoint_callback = ModelCheckpoint(
filename= '{loss:.2f}-{val_loss:.2f}-{multi_r1:.2f}',
save_top_k=3,
verbose=False,
monitor='multi_r1',
mode='max'
)
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer_config = {
"default_root_dir": args.save_checkpoint,
"fast_dev_run": False,
# "gradient_clip_val": config.gradient_clip_value,
# "replace_sampler_ddp":False,
"strategy": "ddp",
"logger": wandb_logger,
"gpus": args.num_gpus,
"max_epochs": config["schedular"]["epochs"],
# "max_steps": config["schedular"]["max_steps"],
"auto_scale_batch_size": 'binsearch',
"progress_bar_refresh_rate": 1,
"precision": 16,
"check_val_every_n_epoch": 10,
"log_every_n_steps": 1,
"flush_logs_every_n_steps": 1,
"callbacks":[checkpoint_callback, lr_monitor],
}
trainer = Trainer(**trainer_config)
if args.mode == "train":
trainer.fit(pltrainer, data_module,ckpt_path=args.pl_checkpoint)
elif args.mode == "test":
trainer.test(pltrainer, data_module,ckpt_path=args.pl_checkpoint)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--wandb_task_name', default='testing')
parser.add_argument('--wandb_entity', default='multimodalembedding')
parser.add_argument('--num_gpus', default=1, type=int)
parser.add_argument("--config", default="configs/ALBEF.yaml", type=str)
parser.add_argument("--cache_dir", default="cache/", type=str)
parser.add_argument('--log_dir', default='logs/',type=str)
parser.add_argument("--mode", default="train", type=str,choices=['train','test'],help='choose your mode')
parser.add_argument("--pretrain", default="ALBEF", type=str,choices=['ALBEF','ViLT','MDETR','METER'],help='choose pretrain work')
parser.add_argument("--embeds_feats", default="avg", type=str,choices=['cls','avg','iavg_tcls'],help='how to deal with text and image embeddings')
parser.add_argument("--pickle_output", default="./", type=str,help='directory of testing pickle files')
parser.add_argument("--test_output", default="output.json", type=str,help='json files of testing result')
parser.add_argument("--save_checkpoint", default="checkpoints", type=str)
parser.add_argument('--pl_checkpoint', default=None,type=str,help='Load pytorch lightning checkpoint')
parser.add_argument('--batch_size', type=int, default=32,help='The batch size of each dataloader')
parser.add_argument('--num_workers', type=int, default=8, help='The number of workers in the DataLoader')
parser.add_argument('--shuffle', type=bool, default=True,help='Whether shuffle dataloader')
parser.add_argument('--ctx_prediction', action='store_true', help='Whether do context prediction')
parser.add_argument('--neg_matching', action='store_true', help='Whether do negative matching')
parser.add_argument('--neg_matchingv2', action='store_true', help='Whether do negative matching version2')
parser.add_argument('--test_rank', default=10, type=int, help='Step1. Contrastive -> rank -> Step2. Matching')
parser.add_argument('--re_ranking', action='store_true', help='Whether do re ranking for matching')
args = parser.parse_args()
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir, exist_ok=True)
if not os.path.exists(args.cache_dir):
os.makedirs(args.cache_dir, exist_ok=True)
if not os.path.exists(args.save_checkpoint):
os.makedirs(args.save_checkpoint, exist_ok=True)
if args.pretrain == "ALBEF":
args.config = "configs/ALBEF.yaml"
elif args.pretrain == "ViLT":
args.config = "configs/ViLT.yaml"
elif args.pretrain == "MDETR":
args.config = "configs/MDETR.yaml"
elif args.pretrain == "METER":
args.config = "configs/METER.yaml"
with open(args.config) as f:
config = yaml.safe_load(f)
config = utils.AttrDict(config)
print(args)
print(config)
main(args,config) | 5,871 | 42.496296 | 150 | py |
Mr.Right | Mr.Right-main/pltrainer.py | import pdb
import utils
import json
import pickle
import torch
import os
import torch.nn.functional as F
import torch.distributed as dist
import pytorch_lightning as pl
from metric import score
from scheduler import CosineLRScheduler
from tqdm import tqdm
class TextToMultiTrainer(pl.LightningModule):
def __init__(self,
args,
config,
model,
tokenizer,
):
super().__init__()
self.args = args
self.config = config
self.arg_opt = utils.AttrDict(config['optimizer'])
self.arg_sche = utils.AttrDict(config['schedular'])
self.model = model
self.tokenizer = tokenizer
self.automatic_optimization = False # pytorch lightning turn off Optimize
self.step_size = 100
self.warmup_iterations = self.arg_sche.warmup_epochs*self.step_size
self.save_hyperparameters()
def training_step(self, train_batch, idx):
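        # Manual optimization: sum the contrastive loss with the optional
        # context-prediction and matching losses, backprop with gradient
        # clipping, then advance the cosine LR schedule (every `step_size`
        # steps during warm-up, once per epoch afterwards).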
opt = self.optimizers()
opt.zero_grad()
query = train_batch['query_str_tensor']
doc_text = train_batch['doc_str_tensor']
doc_image = train_batch['doc_image_tensor']
doc_id = train_batch['doc_id']
context_labels = train_batch.get('context_labels', None)
loss_ita, loss_ctx_labels, loss_itm = self.model.forward(
query, doc_text, doc_image, doc_id, context_labels, self.args.neg_matching, self.args.neg_matchingv2)
loss = loss_ita + loss_ctx_labels + loss_itm
self.manual_backward(loss)
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.gradient_clip_value)
opt.step()
self.log("loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("loss_ita", loss_ita, on_step=True, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
if self.args.ctx_prediction:
self.log("loss_ctx", loss_ctx_labels, on_step=True, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
if self.args.neg_matching:
self.log("loss_itm", loss_itm, on_step=True, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
sch = self.lr_schedulers()
# step every `n` epochs
if self.current_epoch==0 and self.global_step%self.step_size==0 and self.global_step<=self.warmup_iterations:
sch.step(self.global_step//self.step_size)
if self.trainer.is_last_batch:
sch.step(self.current_epoch+self.arg_sche.warmup_epochs+1)
return loss
def validation_step(self, val_batch, idx, dataloader_idx=0):
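        # Two validation dataloaders: idx 0 yields the image-/text-/multimodal-
        # oriented query variants, idx 1 yields the multimodal documents. Each
        # branch returns pooled retrieval features (plus token-level embeddings
        # and masks when re-ranking is enabled).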
if dataloader_idx == 0: # query
img_query = val_batch['img_query_str_tensor']
txt_query = val_batch['txt_query_str_tensor']
multi_query = val_batch['multi_query_str_tensor']
doc_id = val_batch['doc_id']
img_query['input_ids'] = img_query['input_ids'].view(img_query['input_ids'].shape[0],-1)
img_query['attention_mask'] = img_query['attention_mask'].view(img_query['input_ids'].shape[0],-1)
if "token_type_ids" in img_query.keys():
img_query["token_type_ids"] = img_query["token_type_ids"].view(img_query['input_ids'].shape[0],-1)
txt_query['input_ids'] = txt_query['input_ids'].view(txt_query['input_ids'].shape[0],-1)
txt_query['attention_mask'] = txt_query['attention_mask'].view(txt_query['input_ids'].shape[0],-1)
if "token_type_ids" in txt_query.keys():
txt_query["token_type_ids"] = txt_query["token_type_ids"].view(txt_query['input_ids'].shape[0],-1)
multi_query['input_ids'] = multi_query['input_ids'].view(multi_query['input_ids'].shape[0],-1)
multi_query['attention_mask'] = multi_query['attention_mask'].view(multi_query['input_ids'].shape[0],-1)
if "token_type_ids" in multi_query.keys():
multi_query["token_type_ids"] = multi_query["token_type_ids"].view(multi_query['input_ids'].shape[0],-1)
img_query_embeds, img_query_feats = self.model.output_query_feats(img_query)
txt_query_embeds, txt_query_feats = self.model.output_query_feats(txt_query)
multi_query_embeds, multi_query_feats = self.model.output_query_feats(multi_query)
result = {
"img_query_text": val_batch['img_query_str'],
"img_query_feats": img_query_feats,
"txt_query_text": val_batch['txt_query_str'],
"txt_query_feats": txt_query_feats,
"multi_query_text": val_batch['multi_query_str'],
"multi_query_feats": multi_query_feats,
"docs_id": doc_id,
}
if self.args.re_ranking:
result["img_query_embeds"] = img_query_embeds
result["img_query_att"] = img_query['attention_mask']
result["txt_query_embeds"] = txt_query_embeds
result["txt_query_att"] = txt_query['attention_mask']
result["multi_query_embeds"] = multi_query_embeds
result["multi_query_att"] = multi_query['attention_mask']
elif dataloader_idx == 1: # document
doc_text = val_batch['doc_str_tensor']
doc_id = val_batch['doc_id']
doc_image = val_batch['doc_image_tensor']
doc_text['input_ids'] = doc_text['input_ids'].view(doc_text['input_ids'].shape[0],-1)
doc_text['attention_mask'] = doc_text['attention_mask'].view(doc_text['input_ids'].shape[0],-1)
if "token_type_ids" in doc_text.keys():
doc_text["token_type_ids"] = doc_text["token_type_ids"].view(doc_text['input_ids'].shape[0],-1)
docs_embeds,docs_feats,doc_masks = self.model.output_doc_feats(doc_text,doc_image)
result = {
"docs_text": val_batch['doc_str'],
"docs_image": val_batch['image_path'],
"docs_feats": docs_feats,
"docs_id": doc_id
}
# ===== Context Prediction =====
if self.args.ctx_prediction:
prediction_scores = self.model.context(docs_embeds)
mean_prediction_scores = torch.mean(prediction_scores,1)
top_context = torch.topk(mean_prediction_scores,20,dim=1)
result["top_context"] = top_context
# ===== Re-rank =====
if self.args.re_ranking:
result["doc_embeds"] = docs_embeds
result["doc_att"] = doc_masks
return result
def validation_epoch_end(self, validation_step_outputs):
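        # Gather query/document features from all ranks, drop duplicates
        # introduced by the distributed sampler, build the query-document
        # similarity matrices (optionally re-ranked with the matching head)
        # and log recall@k / MRR@10 for the three query types.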
queries = validation_step_outputs[0]
all_queries_doc_ids = torch.stack([feat for output in queries for feat in output["docs_id"]])
all_img_queries = torch.stack([feat for output in queries for feat in output["img_query_feats"]])
all_img_queries_text = [str(feat) for output in queries for feat in output["img_query_text"]]
all_txt_queries = torch.stack([feat for output in queries for feat in output["txt_query_feats"]])
all_txt_queries_text = [str(feat) for output in queries for feat in output["txt_query_text"]]
all_multi_queries = torch.stack([feat for output in queries for feat in output["multi_query_feats"]])
all_multi_queries_text = [str(feat) for output in queries for feat in output["multi_query_text"]]
docs = validation_step_outputs[1]
all_docs_ids = torch.stack([feat for output in docs for feat in output["docs_id"]])
all_docs = torch.stack([feat for output in docs for feat in output["docs_feats"]])
all_docs_captions = [str(feat) for output in docs for feat in output["docs_text"]]
all_docs_images = [str(feat) for output in docs for feat in output["docs_image"]]
if self.local_rank != None:
# id
all_queries_doc_ids_list = [torch.zeros_like(all_queries_doc_ids) for _ in range(dist.get_world_size())]
dist.all_gather(all_queries_doc_ids_list, all_queries_doc_ids)
all_queries_doc_ids = torch.cat(all_queries_doc_ids_list, dim=0)
temp_all_queries_doc_ids = all_queries_doc_ids
all_queries_doc_ids, rm_repeat_indices = self.unique(all_queries_doc_ids)
# image_query
all_img_queries_list = [torch.zeros_like(all_img_queries) for _ in range(dist.get_world_size())]
dist.all_gather(all_img_queries_list, all_img_queries)
all_img_queries = torch.cat(all_img_queries_list, dim=0)
all_img_queries = all_img_queries[rm_repeat_indices]
all_img_queries_text_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(all_img_queries_text_list, all_img_queries_text)
all_img_queries_text = [text for queries_text in all_img_queries_text_list for text in queries_text]
all_img_queries_text = [all_img_queries_text[i] for i in rm_repeat_indices.tolist()]
# text_query
all_txt_queries_list = [torch.zeros_like(all_txt_queries) for _ in range(dist.get_world_size())]
dist.all_gather(all_txt_queries_list, all_txt_queries)
all_txt_queries = torch.cat(all_txt_queries_list, dim=0)
all_txt_queries = all_txt_queries[rm_repeat_indices]
all_txt_queries_text_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(all_txt_queries_text_list, all_txt_queries_text)
all_txt_queries_text = [text for queries_text in all_txt_queries_text_list for text in queries_text]
all_txt_queries_text = [all_txt_queries_text[i] for i in rm_repeat_indices.tolist()]
# multi_query
all_multi_queries_list = [torch.zeros_like(all_multi_queries) for _ in range(dist.get_world_size())]
dist.all_gather(all_multi_queries_list, all_multi_queries)
all_multi_queries = torch.cat(all_multi_queries_list, dim=0)
all_multi_queries = all_multi_queries[rm_repeat_indices]
all_multi_queries_text_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(all_multi_queries_text_list, all_multi_queries_text)
all_multi_queries_text = [text for queries_text in all_multi_queries_text_list for text in queries_text]
all_multi_queries_text = [all_multi_queries_text[i] for i in rm_repeat_indices.tolist()]
# multimodal_doc
all_doc_ids_list = [torch.zeros_like(all_docs_ids) for _ in range(dist.get_world_size())]
dist.all_gather(all_doc_ids_list, all_docs_ids)
all_docs_ids = torch.cat(all_doc_ids_list, dim=0)
all_docs_ids, rm_repeat_doc_indices = self.unique(all_docs_ids)
all_docs_list = [torch.zeros_like(all_docs) for _ in range(dist.get_world_size())]
dist.all_gather(all_docs_list, all_docs)
all_docs = torch.cat(all_docs_list, dim=0)
all_docs = all_docs[rm_repeat_doc_indices]
all_docs_captions_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(all_docs_captions_list, all_docs_captions)
all_docs_captions = [caption for docs_captions in all_docs_captions_list for caption in docs_captions]
all_docs_captions = [all_docs_captions[i] for i in rm_repeat_doc_indices.tolist()]
all_docs_image_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(all_docs_image_list, all_docs_images)
all_docs_images = [image for docs_images in all_docs_image_list for image in docs_images]
all_docs_images = [all_docs_images[i] for i in rm_repeat_doc_indices.tolist()]
if self.args.re_ranking:
all_docs_embeds = torch.stack([feat for output in docs for feat in output["doc_embeds"]])
all_docs_masks = torch.stack([feat for output in docs for feat in output["doc_att"]])
all_docs_embeds_list = [torch.zeros_like(all_docs_embeds) for _ in range(dist.get_world_size())]
dist.all_gather(all_docs_embeds_list, all_docs_embeds)
all_docs_embeds = torch.cat(all_docs_embeds_list, dim=0)
all_docs_embeds = all_docs_embeds[all_docs_ids]
all_docs_masks_list = [torch.zeros_like(all_docs_masks) for _ in range(dist.get_world_size())]
dist.all_gather(all_docs_masks_list, all_docs_masks)
all_docs_masks = torch.cat(all_docs_masks_list, dim=0)
all_docs_masks = all_docs_masks[all_docs_ids]
img_sims_matrix = all_img_queries @ all_docs.t()
txt_sims_matrix = all_txt_queries @ all_docs.t()
multi_sims_matrix = all_multi_queries @ all_docs.t()
matrix_list = [img_sims_matrix,txt_sims_matrix,multi_sims_matrix]
labels = F.one_hot(all_queries_doc_ids, len(all_docs)).to(self.device)
if self.args.re_ranking:
all_img_queries_embeds = torch.stack([feat for output in queries for feat in output["img_query_embeds"]])
all_img_queries_masks = torch.stack([feat for output in queries for feat in output["img_query_att"]])
all_txt_queries_embeds = torch.stack([feat for output in queries for feat in output["txt_query_embeds"]])
all_txt_queries_masks = torch.stack([feat for output in queries for feat in output["txt_query_att"]])
all_multi_queries_embeds = torch.stack([feat for output in queries for feat in output["multi_query_embeds"]])
all_multi_queries_masks = torch.stack([feat for output in queries for feat in output["multi_query_att"]])
score_matrix_i2m = torch.full((len(all_img_queries),len(all_docs)),-100.0).to(self.device)
score_matrix_t2m = torch.full((len(all_txt_queries),len(all_docs)),-100.0).to(self.device)
score_matrix_m2m = torch.full((len(all_multi_queries),len(all_docs)),-100.0).to(self.device)
for type in range(3):
temp_matrix = matrix_list[type]
for i,sims in enumerate(tqdm(temp_matrix)):
topk_sim, topk_idx = sims.topk(k=self.args.test_rank, dim=0)
if type == 0:
queries = all_img_queries_embeds[i].unsqueeze(0).repeat(self.args.test_rank,1,1)
queries_mask = all_img_queries_masks[i].unsqueeze(0).repeat(self.args.test_rank,1)
elif type == 1:
queries = all_txt_queries_embeds[i].unsqueeze(0).repeat(self.args.test_rank,1,1)
queries_mask = all_txt_queries_masks[i].unsqueeze(0).repeat(self.args.test_rank,1)
else:
queries = all_multi_queries_embeds[i].unsqueeze(0).repeat(self.args.test_rank,1,1)
queries_mask = all_multi_queries_masks[i].unsqueeze(0).repeat(self.args.test_rank,1)
docs = all_docs_embeds[topk_idx]
docs_masks = all_docs_masks[topk_idx]
itm_logits = self.model.matching_classifier(
query_embeds=queries,
query_attns=queries_mask,
multi_embeds=docs,
multi_attns=docs_masks
)
if type == 0:
score_matrix_i2m[i,topk_idx] = itm_logits[:,1]
elif type == 1:
score_matrix_t2m[i,topk_idx] = itm_logits[:,1]
else:
score_matrix_m2m[i,topk_idx] = itm_logits[:,1]
if type == 0:
img_sims_matrix = score_matrix_i2m
img_sims_matrix = img_sims_matrix.cpu()
elif type == 1:
txt_sims_matrix = score_matrix_t2m
txt_sims_matrix = txt_sims_matrix.cpu()
else:
multi_sims_matrix = score_matrix_m2m
multi_sims_matrix = multi_sims_matrix.cpu()
matrix_list = [img_sims_matrix,txt_sims_matrix,multi_sims_matrix]
loss_i2m = -torch.sum(F.log_softmax(img_sims_matrix / self.model.temp, dim=1)*labels,dim=1).mean()
loss_t2m = -torch.sum(F.log_softmax(txt_sims_matrix / self.model.temp, dim=1)*labels,dim=1).mean()
loss_m2m = -torch.sum(F.log_softmax(multi_sims_matrix / self.model.temp, dim=1)*labels,dim=1).mean()
img_output_score = score(img_sims_matrix,all_queries_doc_ids)
txt_output_score = score(txt_sims_matrix,all_queries_doc_ids)
multi_output_score = score(multi_sims_matrix,all_queries_doc_ids)
val_loss = (loss_i2m + loss_t2m + loss_m2m) / 3
if self.args.ctx_prediction:
loss_ctx_pred = torch.stack([output["loss_ctx_pred"] for output in validation_step_outputs]).mean()
val_loss = (val_loss+loss_ctx_pred) / 2
self.log("val_loss_t2m", loss_t2m, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("val_loss_ctx_pred", loss_ctx_pred, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("val_loss", val_loss, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("img_r1", img_output_score['r1'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
self.log("img_r5", img_output_score['r5'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
self.log("img_r10", img_output_score['r10'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
self.log("img_r_mean", img_output_score['r_mean'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
self.log("img_mrr10", img_output_score['mrr10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("txt_r1", txt_output_score['r1'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("txt_r5", txt_output_score['r5'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
self.log("txt_r10", txt_output_score['r10'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
self.log("txt_r_mean", txt_output_score['r_mean'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
self.log("txt_mrr10", txt_output_score['mrr10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("multi_r1", multi_output_score['r1'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
self.log("multi_r5", multi_output_score['r5'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
self.log("multi_r10", multi_output_score['r10'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
self.log("multi_r_mean", multi_output_score['r_mean'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
self.log("multi_mrr10", multi_output_score['mrr10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
return {
"val_loss":val_loss,
"loss_t2m":loss_t2m,
"loss_i2m":loss_i2m,
"loss_m2m":loss_m2m,
"img_r1":img_output_score['r1'],
"img_r5":img_output_score['r5'],
"img_r10":img_output_score['r10'],
"img_r_mean":img_output_score['r_mean'],
"img_mrr10":img_output_score['mrr10'],
"txt_r1":txt_output_score['r1'],
"txt_r5":txt_output_score['r5'],
"txt_r10":txt_output_score['r10'],
"txt_r_mean":txt_output_score['r_mean'],
"txt_mrr10":txt_output_score['mrr10'],
"multi_r1":multi_output_score['r1'],
"multi_r5":multi_output_score['r5'],
"multi_r10":multi_output_score['r10'],
"multi_r_mean":multi_output_score['r_mean'],
"multi_mrr10":multi_output_score['mrr10']
}
def test_step(self, test_batch, idx, dataloader_idx=0):
if dataloader_idx == 0: # query
img_query = test_batch['img_query_str_tensor']
txt_query = test_batch['txt_query_str_tensor']
multi_query = test_batch['multi_query_str_tensor']
doc_id = test_batch['doc_id']
img_query['input_ids'] = img_query['input_ids'].view(img_query['input_ids'].shape[0],-1)
img_query['attention_mask'] = img_query['attention_mask'].view(img_query['input_ids'].shape[0],-1)
if "token_type_ids" in img_query.keys():
img_query["token_type_ids"] = img_query["token_type_ids"].view(img_query['input_ids'].shape[0],-1)
txt_query['input_ids'] = txt_query['input_ids'].view(txt_query['input_ids'].shape[0],-1)
txt_query['attention_mask'] = txt_query['attention_mask'].view(txt_query['input_ids'].shape[0],-1)
if "token_type_ids" in txt_query.keys():
txt_query["token_type_ids"] = txt_query["token_type_ids"].view(txt_query['input_ids'].shape[0],-1)
multi_query['input_ids'] = multi_query['input_ids'].view(multi_query['input_ids'].shape[0],-1)
multi_query['attention_mask'] = multi_query['attention_mask'].view(multi_query['input_ids'].shape[0],-1)
if "token_type_ids" in multi_query.keys():
multi_query["token_type_ids"] = multi_query["token_type_ids"].view(multi_query['input_ids'].shape[0],-1)
img_query_embeds, img_query_feats = self.model.output_query_feats(img_query)
txt_query_embeds, txt_query_feats = self.model.output_query_feats(txt_query)
multi_query_embeds, multi_query_feats = self.model.output_query_feats(multi_query)
result = {
"img_query_text": test_batch['img_query_str'],
"img_query_feats": img_query_feats,
"txt_query_text": test_batch['txt_query_str'],
"txt_query_feats": txt_query_feats,
"multi_query_text": test_batch['multi_query_str'],
"multi_query_feats": multi_query_feats,
"docs_id": doc_id,
}
if self.args.re_ranking:
result["img_query_embeds"] = img_query_embeds
result["img_query_att"] = img_query['attention_mask']
result["txt_query_embeds"] = txt_query_embeds
result["txt_query_att"] = txt_query['attention_mask']
result["multi_query_embeds"] = multi_query_embeds
result["multi_query_att"] = multi_query['attention_mask']
elif dataloader_idx == 1: # document
doc_text = test_batch['doc_str_tensor']
doc_id = test_batch['doc_id']
doc_image = test_batch['doc_image_tensor']
doc_text['input_ids'] = doc_text['input_ids'].view(doc_text['input_ids'].shape[0],-1)
doc_text['attention_mask'] = doc_text['attention_mask'].view(doc_text['input_ids'].shape[0],-1)
if "token_type_ids" in doc_text.keys():
doc_text["token_type_ids"] = doc_text["token_type_ids"].view(doc_text['input_ids'].shape[0],-1)
docs_embeds,docs_feats,doc_masks = self.model.output_doc_feats(doc_text,doc_image)
result = {
"docs_text": test_batch['doc_str'],
"docs_image": test_batch['image_path'],
"docs_feats": docs_feats,
"docs_id": doc_id
}
# ===== Context Prediction =====
if self.args.ctx_prediction:
prediction_scores = self.model.context(docs_embeds)
mean_prediction_scores = torch.mean(prediction_scores,1)
top_context = torch.topk(mean_prediction_scores,20,dim=1)
result["top_context"] = top_context
# ===== Re-rank =====
if self.args.re_ranking:
result["doc_embeds"] = docs_embeds
result["doc_att"] = doc_masks
return result
def test_epoch_end(self, test_step_outputs):
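        # Same gather/dedup/scoring flow as validation_epoch_end, except that
        # rank 0 dumps the gathered features to pickle files and returns early,
        # while the scoring path also writes a per-query top-10 retrieval
        # report to `args.test_output`.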
queries = test_step_outputs[0]
all_queries_doc_ids = torch.stack([feat for output in queries for feat in output["docs_id"]])
all_img_queries = torch.stack([feat for output in queries for feat in output["img_query_feats"]])
all_img_queries_text = [str(feat) for output in queries for feat in output["img_query_text"]]
all_txt_queries = torch.stack([feat for output in queries for feat in output["txt_query_feats"]])
all_txt_queries_text = [str(feat) for output in queries for feat in output["txt_query_text"]]
all_multi_queries = torch.stack([feat for output in queries for feat in output["multi_query_feats"]])
all_multi_queries_text = [str(feat) for output in queries for feat in output["multi_query_text"]]
docs = test_step_outputs[1]
all_docs_ids = torch.stack([feat for output in docs for feat in output["docs_id"]])
all_docs = torch.stack([feat for output in docs for feat in output["docs_feats"]])
all_docs_captions = [str(feat) for output in docs for feat in output["docs_text"]]
all_docs_images = [str(feat) for output in docs for feat in output["docs_image"]]
if self.local_rank != None:
# id
all_queries_doc_ids_list = [torch.zeros_like(all_queries_doc_ids) for _ in range(dist.get_world_size())]
dist.all_gather(all_queries_doc_ids_list, all_queries_doc_ids)
all_queries_doc_ids = torch.cat(all_queries_doc_ids_list, dim=0)
temp_all_queries_doc_ids = all_queries_doc_ids
all_queries_doc_ids, rm_repeat_indices = self.unique(all_queries_doc_ids)
# image_query
all_img_queries_list = [torch.zeros_like(all_img_queries) for _ in range(dist.get_world_size())]
dist.all_gather(all_img_queries_list, all_img_queries)
all_img_queries = torch.cat(all_img_queries_list, dim=0)
all_img_queries = all_img_queries[rm_repeat_indices]
all_img_queries_text_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(all_img_queries_text_list, all_img_queries_text)
all_img_queries_text = [text for queries_text in all_img_queries_text_list for text in queries_text]
all_img_queries_text = [all_img_queries_text[i] for i in rm_repeat_indices.tolist()]
# text_query
all_txt_queries_list = [torch.zeros_like(all_txt_queries) for _ in range(dist.get_world_size())]
dist.all_gather(all_txt_queries_list, all_txt_queries)
all_txt_queries = torch.cat(all_txt_queries_list, dim=0)
all_txt_queries = all_txt_queries[rm_repeat_indices]
all_txt_queries_text_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(all_txt_queries_text_list, all_txt_queries_text)
all_txt_queries_text = [text for queries_text in all_txt_queries_text_list for text in queries_text]
all_txt_queries_text = [all_txt_queries_text[i] for i in rm_repeat_indices.tolist()]
# multi_query
all_multi_queries_list = [torch.zeros_like(all_multi_queries) for _ in range(dist.get_world_size())]
dist.all_gather(all_multi_queries_list, all_multi_queries)
all_multi_queries = torch.cat(all_multi_queries_list, dim=0)
all_multi_queries = all_multi_queries[rm_repeat_indices]
all_multi_queries_text_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(all_multi_queries_text_list, all_multi_queries_text)
all_multi_queries_text = [text for queries_text in all_multi_queries_text_list for text in queries_text]
all_multi_queries_text = [all_multi_queries_text[i] for i in rm_repeat_indices.tolist()]
# multimodal_doc
all_doc_ids_list = [torch.zeros_like(all_docs_ids) for _ in range(dist.get_world_size())]
dist.all_gather(all_doc_ids_list, all_docs_ids)
temp_all_queries_list = all_doc_ids_list
all_docs_ids = torch.cat(all_doc_ids_list, dim=0)
temp_all_queries_list_fix = all_docs_ids
all_docs_ids, rm_repeat_doc_indices = self.unique(all_docs_ids)
all_docs_list = [torch.zeros_like(all_docs) for _ in range(dist.get_world_size())]
dist.all_gather(all_docs_list, all_docs)
all_docs = torch.cat(all_docs_list, dim=0)
temp_all_docs = all_docs
all_docs = all_docs[rm_repeat_doc_indices]
all_docs_captions_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(all_docs_captions_list, all_docs_captions)
all_docs_captions = [caption for docs_captions in all_docs_captions_list for caption in docs_captions]
temp_all_docs_captions = all_docs_captions
all_docs_captions = [all_docs_captions[i] for i in rm_repeat_doc_indices.tolist()]
all_docs_image_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(all_docs_image_list, all_docs_images)
all_docs_images = [image for docs_images in all_docs_image_list for image in docs_images]
all_docs_images = [all_docs_images[i] for i in rm_repeat_doc_indices.tolist()]
if self.args.re_ranking:
all_docs_embeds = torch.stack([feat for output in docs for feat in output["doc_embeds"]])
all_docs_masks = torch.stack([feat for output in docs for feat in output["doc_att"]])
all_docs_embeds_list = [torch.zeros_like(all_docs_embeds) for _ in range(dist.get_world_size())]
dist.all_gather(all_docs_embeds_list, all_docs_embeds)
all_docs_embeds = torch.cat(all_docs_embeds_list, dim=0)
all_docs_embeds = all_docs_embeds[all_docs_ids]
all_docs_masks_list = [torch.zeros_like(all_docs_masks) for _ in range(dist.get_world_size())]
dist.all_gather(all_docs_masks_list, all_docs_masks)
all_docs_masks = torch.cat(all_docs_masks_list, dim=0)
all_docs_masks = all_docs_masks[all_docs_ids]
if self.local_rank == 0:
output_dir = self.args.pickle_output
doc_path = os.path.join(output_dir,"multimodal_documents.pickle")
img_path = os.path.join(output_dir,"img_query.pickle")
txt_path = os.path.join(output_dir,"txt_query.pickle")
multi_path = os.path.join(output_dir,"multi_query.pickle")
labels_path = os.path.join(output_dir,"labels.pickle")
with open(doc_path, 'wb') as handle:
pickle.dump(all_docs, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(img_path, 'wb') as handle:
pickle.dump(all_img_queries, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(txt_path, 'wb') as handle:
pickle.dump(all_txt_queries, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(multi_path, 'wb') as handle:
pickle.dump(all_multi_queries, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(labels_path, 'wb') as handle:
pickle.dump(all_queries_doc_ids, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("Finish saving pickle. Now you can compute the score.")
return {"img":True}
img_sims_matrix = all_img_queries @ all_docs.t()
txt_sims_matrix = all_txt_queries @ all_docs.t()
multi_sims_matrix = all_multi_queries @ all_docs.t()
matrix_list = [img_sims_matrix,txt_sims_matrix,multi_sims_matrix]
if self.args.re_ranking:
all_img_queries_embeds = torch.stack([feat for output in queries for feat in output["img_query_embeds"]])
all_img_queries_masks = torch.stack([feat for output in queries for feat in output["img_query_att"]])
all_txt_queries_embeds = torch.stack([feat for output in queries for feat in output["txt_query_embeds"]])
all_txt_queries_masks = torch.stack([feat for output in queries for feat in output["txt_query_att"]])
all_multi_queries_embeds = torch.stack([feat for output in queries for feat in output["multi_query_embeds"]])
all_multi_queries_masks = torch.stack([feat for output in queries for feat in output["multi_query_att"]])
score_matrix_i2m = torch.full((len(all_img_queries),len(all_docs)),-100.0).to(self.device)
score_matrix_t2m = torch.full((len(all_txt_queries),len(all_docs)),-100.0).to(self.device)
score_matrix_m2m = torch.full((len(all_multi_queries),len(all_docs)),-100.0).to(self.device)
for type in range(3):
temp_matrix = matrix_list[type]
for i,sims in enumerate(tqdm(temp_matrix)):
topk_sim, topk_idx = sims.topk(k=self.args.test_rank, dim=0)
if type == 0:
queries = all_img_queries_embeds[i].unsqueeze(0).repeat(self.args.test_rank,1,1)
queries_mask = all_img_queries_masks[i].unsqueeze(0).repeat(self.args.test_rank,1)
elif type == 1:
queries = all_txt_queries_embeds[i].unsqueeze(0).repeat(self.args.test_rank,1,1)
queries_mask = all_txt_queries_masks[i].unsqueeze(0).repeat(self.args.test_rank,1)
else:
queries = all_multi_queries_embeds[i].unsqueeze(0).repeat(self.args.test_rank,1,1)
queries_mask = all_multi_queries_masks[i].unsqueeze(0).repeat(self.args.test_rank,1)
docs = all_docs_embeds[topk_idx]
docs_masks = all_docs_masks[topk_idx]
itm_logits = self.model.matching_classifier(
query_embeds=queries,
query_attns=queries_mask,
multi_embeds=docs,
multi_attns=docs_masks
)
if type == 0:
score_matrix_i2m[i,topk_idx] = itm_logits[:,1]
elif type == 1:
score_matrix_t2m[i,topk_idx] = itm_logits[:,1]
else:
score_matrix_m2m[i,topk_idx] = itm_logits[:,1]
if type == 0:
img_sims_matrix = score_matrix_i2m
img_sims_matrix = img_sims_matrix.cpu()
elif type == 1:
txt_sims_matrix = score_matrix_t2m
txt_sims_matrix = txt_sims_matrix.cpu()
else:
multi_sims_matrix = score_matrix_m2m
multi_sims_matrix = multi_sims_matrix.cpu()
matrix_list = [img_sims_matrix,txt_sims_matrix,multi_sims_matrix]
# img_output_score = score(img_sims_matrix,labels)
# txt_output_score = score(txt_sims_matrix,labels)
# multi_output_score = score(multi_sims_matrix,labels)
        img_output_score = score(img_sims_matrix,all_queries_doc_ids)
        txt_output_score = score(txt_sims_matrix,all_queries_doc_ids)
        multi_output_score = score(multi_sims_matrix,all_queries_doc_ids)
# output context
if self.args.ctx_prediction:
all_topk_context_values = torch.stack([feat for output in docs for feat in output["top_context"].values])
all_topk_context_indices = torch.stack([feat for output in docs for feat in output["top_context"].indices])
output_data = {"img_score":img_output_score,"txt_score":txt_output_score,"multi_score":multi_output_score,"result":[]}
# output result
for index,doc_scores in enumerate(img_sims_matrix):
temp_dict = {"img":{},"txt":{},"multi":{}}
for type in range(3):
if type == 0:
inds = torch.argsort(img_sims_matrix[index], descending=True)[:10].tolist()
temp_all_queries_text = all_img_queries_text
elif type == 1:
inds = torch.argsort(txt_sims_matrix[index], descending=True)[:10].tolist()
temp_all_queries_text = all_txt_queries_text
else:
inds = torch.argsort(multi_sims_matrix[index], descending=True)[:10].tolist()
temp_all_queries_text = all_multi_queries_text
temp = {"id": all_queries_doc_ids[index].item(),
"query":temp_all_queries_text[index],
"true_doc": all_docs_captions[all_queries_doc_ids[index]],
"true_image":all_docs_images[all_queries_doc_ids[index]],
}
rank = 1e20
true_id = all_queries_doc_ids[index]
if inds[0] == true_id:
temp["correct"] = True
else:
temp["correct"] = False
for rank,top_id in enumerate(inds):
dic_key_cap = str(rank)+"_doc"
dic_key_img = str(rank)+"_img"
temp[dic_key_cap] = all_docs_captions[top_id]
temp[dic_key_img] = all_docs_images[top_id]
if self.args.ctx_prediction:
context = self.tokenizer.decode(all_topk_context_indices[top_id])
dic_key_ctx = str(rank)+"_ctx"
temp[dic_key_ctx] = context
if type == 0:
temp_dict["img"] = temp
elif type == 1:
temp_dict["txt"] = temp
else:
temp_dict["multi"] = temp
output_data["result"].append(temp_dict)
with open(self.args.test_output, "w") as outfile:
json.dump(output_data, outfile, indent = 4)
self.log("img_r1", img_output_score['r1'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("img_r5", img_output_score['r5'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("img_r10", img_output_score['r10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("img_r_mean", img_output_score['r_mean'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("img_mrr10", img_output_score['mrr10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("txt_r1", txt_output_score['r1'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("txt_r5", txt_output_score['r5'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("txt_r10", txt_output_score['r10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("txt_r_mean", txt_output_score['r_mean'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("txt_mrr10", txt_output_score['mrr10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("multi_r1", multi_output_score['r1'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("multi_r5", multi_output_score['r5'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("multi_r10", multi_output_score['r10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("multi_r_mean", multi_output_score['r_mean'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
self.log("multi_mrr10", multi_output_score['mrr10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
return {
"img_r1":img_output_score['r1'],
"img_r5":img_output_score['r5'],
"img_r10":img_output_score['r10'],
"img_r_mean":img_output_score['r_mean'],
"img_mrr10":img_output_score['mrr10'],
"txt_r1":txt_output_score['r1'],
"txt_r5":txt_output_score['r5'],
"txt_r10":txt_output_score['r10'],
"txt_r_mean":txt_output_score['r_mean'],
"txt_mrr10":txt_output_score['mrr10'],
"multi_r1":multi_output_score['r1'],
"multi_r5":multi_output_score['r5'],
"multi_r10":multi_output_score['r10'],
"multi_r_mean":multi_output_score['r_mean'],
"multi_mrr10":multi_output_score['mrr10']
}
def configure_optimizers(self):
# optimizer
opt_args = dict(lr=float(self.arg_opt.lr), weight_decay=float(self.arg_opt.weight_decay))
        if hasattr(self.arg_opt, 'opt_eps') and self.arg_opt.opt_eps is not None:
            opt_args['eps'] = self.arg_opt.opt_eps
        if hasattr(self.arg_opt, 'opt_betas') and self.arg_opt.opt_betas is not None:
            opt_args['betas'] = self.arg_opt.opt_betas
        if hasattr(self.arg_opt, 'opt_args') and self.arg_opt.opt_args is not None:
            opt_args.update(self.arg_opt.opt_args)
if self.arg_opt.opt == "adamW":
optimizer = torch.optim.AdamW(self.model.parameters(),**opt_args)
# scheduler
lr_scheduler = None
if self.arg_sche.sched == 'cosine':
lr_scheduler = CosineLRScheduler(
optimizer,
t_initial=self.trainer.max_epochs,
t_mul=getattr(self.arg_sche, 'lr_cycle_mul', 1.),
lr_min=float(self.arg_sche.min_lr),
decay_rate=self.arg_sche.decay_rate,
warmup_lr_init=float(self.arg_sche.warmup_lr),
warmup_t=self.arg_sche.warmup_epochs,
cycle_limit=getattr(self.arg_sche, 'lr_cycle_limit', 1),
)
# num_epochs = lr_scheduler.get_cycle_length() + self.arg_sche.cooldown_epochs
# self.trainer.max_epochs = num_epochs
return {'optimizer': optimizer, 'lr_scheduler': lr_scheduler}
# https://github.com/pytorch/pytorch/issues/36748
def unique(self,x, dim=-1):
unique, inverse = torch.unique(x, return_inverse=True, dim=dim)
perm = torch.arange(inverse.size(dim), dtype=inverse.dtype, device=inverse.device)
inverse, perm = inverse.flip([dim]), perm.flip([dim])
return unique, inverse.new_empty(unique.size(dim)).scatter_(dim, inverse, perm) | 43,280 | 57.095302 | 126 | py |
Mr.Right | Mr.Right-main/metric.py | import numpy as np
import torch
import pdb
from torchmetrics.functional import retrieval_recall,retrieval_reciprocal_rank
@torch.no_grad()
def score(scores_t2m, query_doc_id):
"""
scores_t2m: (q_size, d_size)
query_doc_id: (q_size)
"""
ids = query_doc_id.unsqueeze(1)
top1_i = torch.topk(scores_t2m, k=1, dim=1).indices
top5_i = torch.topk(scores_t2m, k=5, dim=1).indices
top10_v, top10_i = torch.topk(scores_t2m, k=10, dim=1)
r1 = torch.mean(torch.sum(top1_i == ids, dim=1).float()).item() * 100
r5 = torch.mean(torch.sum(top5_i == ids, dim=1).float()).item() * 100
r10 = torch.mean(torch.sum(top10_i == ids, dim=1).float()).item() * 100
rmean = np.mean([r1, r5, r10])
top10_m = (top10_i==ids)
mrr10 = np.mean([retrieval_reciprocal_rank(v, m).item() for v, m in zip(top10_v, top10_m)]) * 100
eval_result = {'r1': r1,
'r5': r5,
'r10': r10,
'mrr10': mrr10,
'r_mean': rmean,
}
return eval_result | 1,097 | 30.371429 | 103 | py |
Mr.Right | Mr.Right-main/scheduler/cosine_lr.py | """ Cosine Scheduler
Cosine LR schedule with warmup, cycle/restarts, noise.
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import math
import numpy as np
import torch
from .scheduler import Scheduler
from pdb import set_trace as breakpoint
_logger = logging.getLogger(__name__)
class CosineLRScheduler(Scheduler):
"""
Cosine decay with restarts.
This is described in the paper https://arxiv.org/abs/1608.03983.
Inspiration from
https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py
"""
def __init__(self,
optimizer: torch.optim.Optimizer,
t_initial: int,
t_mul: float = 1.,
lr_min: float = 0.,
decay_rate: float = 1.,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=True,
cycle_limit=0,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True) -> None:
super().__init__(
optimizer, param_group_field="lr",
noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
initialize=initialize)
assert t_initial > 0
assert lr_min >= 0
if t_initial == 1 and t_mul == 1 and decay_rate == 1:
_logger.warning("Cosine annealing scheduler will have no effect on the learning "
"rate since t_initial = t_mul = eta_mul = 1.")
self.t_initial = t_initial
self.t_mul = t_mul
self.lr_min = lr_min
self.decay_rate = decay_rate
self.cycle_limit = cycle_limit
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.warmup_prefix = warmup_prefix
self.t_in_epochs = t_in_epochs
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t):
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
if self.warmup_prefix:
t = t - self.warmup_t
if self.t_mul != 1:
i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul))
t_i = self.t_mul ** i * self.t_initial
t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial
else:
i = t // self.t_initial
t_i = self.t_initial
t_curr = t - (self.t_initial * i)
gamma = self.decay_rate ** i
lr_min = self.lr_min * gamma
lr_max_values = [v * gamma for v in self.base_values]
if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit):
lrs = [
lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i)) for lr_max in lr_max_values
]
else:
lrs = [self.lr_min for _ in self.base_values]
return lrs
def get_epoch_values(self, epoch: int):
if self.t_in_epochs:
return self._get_lr(epoch)
else:
return None
def get_update_values(self, num_updates: int):
if not self.t_in_epochs:
return self._get_lr(num_updates)
else:
return None
def get_cycle_length(self, cycles=0):
if not cycles:
cycles = self.cycle_limit
cycles = max(1, cycles)
if self.t_mul == 1.0:
return self.t_initial * cycles
else:
return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul)))
| 4,027 | 33.135593 | 121 | py |
Mr.Right | Mr.Right-main/scheduler/scheduler.py | from typing import Dict, Any
import torch
class Scheduler:
""" Parameter Scheduler Base Class
A scheduler base class that can be used to schedule any optimizer parameter groups.
Unlike the builtin PyTorch schedulers, this is intended to be consistently called
* At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value
* At the END of each optimizer update, after incrementing the update count, to calculate next update's value
The schedulers built on this should try to remain as stateless as possible (for simplicity).
This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch'
and -1 values for special behaviour. All epoch and update counts must be tracked in the training
code and explicitly passed in to the schedulers on the corresponding step or step_update call.
Based on ideas from:
* https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler
* https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers
"""
def __init__(self,
optimizer: torch.optim.Optimizer,
param_group_field: str,
noise_range_t=None,
noise_type='normal',
noise_pct=0.67,
noise_std=1.0,
noise_seed=None,
initialize: bool = True) -> None:
self.optimizer = optimizer
self.param_group_field = param_group_field
self._initial_param_group_field = f"initial_{param_group_field}"
if initialize:
for i, group in enumerate(self.optimizer.param_groups):
if param_group_field not in group:
raise KeyError(f"{param_group_field} missing from param_groups[{i}]")
group.setdefault(self._initial_param_group_field, group[param_group_field])
else:
for i, group in enumerate(self.optimizer.param_groups):
if self._initial_param_group_field not in group:
raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]")
self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups]
self.metric = None # any point to having this for all?
self.noise_range_t = noise_range_t
self.noise_pct = noise_pct
self.noise_type = noise_type
self.noise_std = noise_std
self.noise_seed = noise_seed if noise_seed is not None else 42
self.update_groups(self.base_values)
def state_dict(self) -> Dict[str, Any]:
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
self.__dict__.update(state_dict)
def get_epoch_values(self, epoch: int):
return None
def get_update_values(self, num_updates: int):
return None
def step(self, epoch: int, metric: float = None) -> None:
self.metric = metric
values = self.get_epoch_values(epoch)
if values is not None:
values = self._add_noise(values, epoch)
self.update_groups(values)
def step_update(self, num_updates: int, metric: float = None):
self.metric = metric
values = self.get_update_values(num_updates)
if values is not None:
values = self._add_noise(values, num_updates)
self.update_groups(values)
def update_groups(self, values):
if not isinstance(values, (list, tuple)):
values = [values] * len(self.optimizer.param_groups)
for param_group, value in zip(self.optimizer.param_groups, values):
param_group[self.param_group_field] = value
def _add_noise(self, lrs, t):
if self.noise_range_t is not None:
if isinstance(self.noise_range_t, (list, tuple)):
apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1]
else:
apply_noise = t >= self.noise_range_t
if apply_noise:
g = torch.Generator()
g.manual_seed(self.noise_seed + t)
if self.noise_type == 'normal':
while True:
# resample if noise out of percent limit, brute force but shouldn't spin much
noise = torch.randn(1, generator=g).item()
if abs(noise) < self.noise_pct:
break
else:
noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct
lrs = [v + v * noise for v in lrs]
return lrs
| 4,750 | 43.820755 | 112 | py |
Mr.Right | Mr.Right-main/models/matching.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class MatchingModel(nn.Module):
def __init__(self, args, config, text_width, n_layers):
super().__init__()
self.config = config
from models.ALBEF.models.xbert import BertModel
self.config.num_hidden_layers = 4
self.config.fusion_layer = 0
self.itm_transformer = BertModel(self.config, add_pooling_layer=False)
self.itm_head = nn.Linear(text_width, 2)
def forward(self, query_embeds, query_attns, multi_embeds, multi_attns):
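        # Fuse the query tokens with the multimodal document tokens through a
        # 4-layer cross-attention BERT stack and classify match / no-match from
        # the [CLS] position.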
output = self.itm_transformer(
encoder_embeds = query_embeds,
attention_mask = query_attns,
encoder_hidden_states = multi_embeds,
encoder_attention_mask = multi_attns,
return_dict = True,
mode = 'fusion',
)
embeddings = output.last_hidden_state[:, 0, :]
logits = self.itm_head(embeddings)
return logits
if __name__ == "__main__":
from models.ALBEF.models.xbert import BertConfig
import yaml
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
with open('configs/model_retrieval.yaml') as f:
config = yaml.safe_load(f)
config = AttrDict(config)
bert_config = BertConfig.from_json_file(config['bert_config'])
    m = MatchingModel(args=None, config=bert_config, text_width=bert_config.hidden_size, n_layers=1)
e1 = torch.rand(5, 10, 768)
e2 = torch.rand(5, 7, 768)
a1 = torch.ones(e1.size()[:-1],dtype=torch.long)
a2 = torch.ones(e2.size()[:-1],dtype=torch.long)
o = m(e1, a1, e2, a2)
print(o.shape)
| 1,817 | 29.3 | 89 | py |
Mr.Right | Mr.Right-main/models/model.py | import pdb
import torch
import torch.nn.functional as F
from torch import nn
from models.ALBEF.models.model_retrieval import ALBEF
from models.ALBEF.models.vit import interpolate_pos_embed
from models.ALBEF.models.xbert import BertOnlyMLMHead,BertConfig
from models.ViLT.vilt.modules import ViLTransformerSS
from models.METER.meter.modules import METERTransformerSS
from models.matching import MatchingModel
class TextToMultiModel(nn.Module):
def __init__(self,
args = None,
config = None,
tokenizer = None,
):
super().__init__()
self.args = args
self.tokenizer = tokenizer
embed_dim = config.embed_dim
# Choose pretrain
if args.pretrain == "ALBEF":
self.model = ALBEF(config.text_encoder,tokenizer,config)
text_width = self.model.text_encoder.config.hidden_size
bert_config = self.model.bert_config
if config.checkpoint!="":
checkpoint = torch.load(config.checkpoint, map_location='cpu')
state_dict = checkpoint['model']
# reshape positional embedding to accomodate for image resolution change
pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],self.model.visual_encoder)
state_dict['visual_encoder.pos_embed'] = pos_embed_reshaped
for key in list(state_dict.keys()):
if 'bert' in key:
encoder_key = key.replace('bert.','')
state_dict[encoder_key] = state_dict[key]
del state_dict[key]
msg = self.model.load_state_dict(state_dict,strict=False)
print('load checkpoint from %s'%config.checkpoint)
print(msg)
elif args.pretrain == "ViLT":
self.model = ViLTransformerSS(config)
text_width = config.hidden_size
bert_config = BertConfig.from_json_file(config.bert_config)
elif args.pretrain == "METER":
self.model = METERTransformerSS(config)
text_width = config.hidden_size
bert_config = BertConfig.from_json_file(config.bert_config)
if args.pretrain == "METER" and (self.args.embeds_feats == "cls" or self.args.embeds_feats == "iavg_tcls"):
self.multi_proj = nn.Linear(config.hidden_size*2, embed_dim)
else:
self.multi_proj = nn.Linear(config.hidden_size, embed_dim)
self.query_proj = nn.Linear(text_width, embed_dim)
if 'vocab_size' in config:
bert_config.vocab_size = config.vocab_size
self.context = BertOnlyMLMHead(bert_config)
self.temp = nn.Parameter(torch.ones([]) * config['temp'])
self.queue_size = config['queue_size']
# Matching
self.matching_classifier = MatchingModel(args = args, config=bert_config, text_width=text_width, n_layers=1)
# create the queue
self.register_buffer("multi_queue", torch.randn(embed_dim, self.queue_size))
# self.register_buffer("query_queue", torch.randn(embed_dim, self.queue_size))
self.register_buffer("idx_queue", torch.full((1,self.queue_size),-100))
self.register_buffer("queue_num", torch.zeros(1, dtype=torch.long))
self.multi_queue = nn.functional.normalize(self.multi_queue, dim=0)
# self.query_queue = nn.functional.normalize(self.query_queue, dim=0)
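        # MoCo/ALBEF-style memory bank: multi_queue caches L2-normalized document
        # features from earlier batches as extra contrastive negatives, and
        # idx_queue keeps their doc ids so duplicates of the current document in
        # the queue count as positives rather than negatives.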
def forward_feats(self, doc_image, doc_text, query):
result = {}
if self.args.pretrain == "ALBEF":
output = self.model(doc_image,doc_text,query)
elif self.args.pretrain == "ViLT":
output = self.model(batch={
"image":doc_image,
"text_ids":doc_text['input_ids'],
"text_masks":doc_text['attention_mask'],
"query_ids":query['input_ids'],
"query_masks":query['attention_mask'],
})
elif self.args.pretrain == "METER":
output = self.model(batch={
"image":doc_image,
"text_ids":doc_text['input_ids'],
"text_masks":doc_text['attention_mask'],
"query_ids":query['input_ids'],
"query_masks":query['attention_mask'],
})
result["query_embeds"] = output["query_embeds"]
result["query_atts"] = output["query_atts"]
result["multi_embeds"] = output["multi_embeds"]
result["multi_atts"] = output["multi_atts"]
# How to get feature
if self.args.embeds_feats == "avg":
avg_query_embeds = (result["query_embeds"] * result["query_atts"].unsqueeze(-1)).sum(dim=1) / result["query_atts"].sum(dim=1).unsqueeze(-1)
result["query_feat"] = F.normalize(self.query_proj(avg_query_embeds),dim=-1)
avg_multi_embeds = (result["multi_embeds"] * result["multi_atts"].unsqueeze(-1)).sum(dim=1) / result["multi_atts"].sum(dim=1).unsqueeze(-1)
result["multi_feat"] = F.normalize(self.multi_proj(avg_multi_embeds),dim=-1)
elif self.args.embeds_feats == "cls":
result["query_feat"] = output["query_cls"].float()
            if self.args.pretrain == "METER": # METER has two CLS tokens (text and image)
multi_embeds = torch.cat([output["text_cls"], output["img_cls"]], dim=-1).float()
result["multi_feat"] = F.normalize(self.multi_proj(multi_embeds),dim=-1)
else:
result["multi_feat"] = output["multi_cls"].float()
elif self.args.embeds_feats == "iavg_tcls":
result["query_feat"] = output["query_cls"].float()
text_cls = output["text_cls"]
avg_img_embeds = (output["image_feats"] * output["image_masks"].unsqueeze(-1)).sum(dim=1) / output["image_masks"].sum(dim=1).unsqueeze(-1)
concat_embeds = torch.cat([text_cls, avg_img_embeds], dim=-1).float()
avg_multi_feat = F.normalize(self.multi_proj(concat_embeds),dim=-1)
result["multi_feat"] = avg_multi_feat.float()
return result
def forward(self, query, doc_text, doc_image, doc_id, context_labels=None, matching=None, matchingv2=None):
query['input_ids'] = query['input_ids'].view(query['input_ids'].shape[0],-1)
query['attention_mask'] = query['attention_mask'].view(query['input_ids'].shape[0],-1)
if "token_type_ids" in query:
query['token_type_ids'] = query['token_type_ids'].view(query['input_ids'].shape[0],-1)
doc_text['input_ids'] = doc_text['input_ids'].view(doc_text['input_ids'].shape[0],-1)
doc_text['attention_mask'] = doc_text['attention_mask'].view(doc_text['input_ids'].shape[0],-1)
if "token_type_ids" in doc_text:
doc_text['token_type_ids'] = doc_text['token_type_ids'].view(doc_text['input_ids'].shape[0],-1)
result = self.forward_feats(doc_image, doc_text, query)
multi_feat = result['multi_feat'] # B, 1, H
query_feat = result['query_feat'] # B, 1, H
multi_embeds = result['multi_embeds'] # B, L, H
multi_atts = result['multi_atts'] # B, L, H
query_embeds = result['query_embeds']
query_atts = result['query_atts']
# [TODO]
# why only query with doc similarity
# why not add doc with query similarity?
with torch.no_grad():
multi_feat_all = torch.cat([multi_feat.t(),self.multi_queue.clone().detach()],dim=1)
# query_feat_all = torch.cat([query_feat.t(),self.query_queue.clone().detach()],dim=1)
sim_q2m = query_feat @ multi_feat_all / self.temp
# sim_m2q = multi_feat @ query_feat_all / self.temp
idx = doc_id.view(-1,1) # batch_size, id [0,1,2,0]
idx_all = torch.cat([idx.t(), self.idx_queue.clone().detach()],dim=1) # 1, queue_size [[0,1,2,0,12,13,1,0,2]]
pos_idx = torch.eq(idx, idx_all).float() # 1,queue_size [[1,0,0,1,0,0,0,1,0],...]
        sim_targets = pos_idx / pos_idx.sum(1,keepdim=True) # normalize [[0.33,0,0,0.33,0,0,0,0.33,0],...]
loss_q2m = -torch.sum(F.log_softmax(sim_q2m, dim=1)*sim_targets,dim=1).mean()
# loss_m2q = -torch.sum(F.log_softmax(sim_m2q, dim=1)*sim_targets,dim=1).mean()
# loss_ita = (loss_q2m + loss_m2q)/2
loss_ita = loss_q2m
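        # ITA: soft-label cross-entropy of the query against (batch + queue)
        # document candidates; only the query-to-multimodal direction is used
        # (the symmetric m2q term is left commented out above).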
self._dequeue_and_enqueue(multi_feat, None, idx) # text_feat_m
# self._dequeue_and_enqueue(multi_feat, query_feat, idx) # text_feat_m
# ===== Matching Loss =====
# Matching Classification
# Pos (multi_embeds, query_embeds)
# Neg (multi_embeds_neg, query_embeds)
# Neg (multi_embeds_image+multi_embeds_neg_text ,query_embeds)
# Neg (multi_embeds_text+multi_embeds_neg_image ,query_embeds)
# Neg (multi_embeds, query_embeds_neg)
loss_itm = 0.0
if matching:
with torch.no_grad():
bs = doc_image.size(0)
mask = torch.eq(idx, idx.T)
weights_q2m = F.softmax(sim_q2m[:, :bs]+1e-4, dim=1)
weights_q2m.masked_fill_(mask, 1e-10)
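                # Push the weight of every in-batch document that shares the
                # query's doc_id down to ~1e-10 so the multinomial sampling below
                # only draws genuinely non-matching documents as hard negatives.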
# weights_m2q = F.softmax(sim_m2q[:, :bs]+1e-4, dim=1)
# weights_m2q.masked_fill_(mask, 1e-10)
# [New]
if matchingv2:
neg_doc_text_inps = []
neg_doc_text_atts = []
neg_doc_image = []
multi_embeds_neg = []
multi_atts_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_q2m[b], 1).item()
multi_embeds_neg.append(multi_embeds[neg_idx])
multi_atts_neg.append(multi_atts[neg_idx])
#[New]
if matchingv2:
neg_doc_text_inps.append(doc_text['input_ids'][neg_idx])
neg_doc_text_atts.append(doc_text['attention_mask'][neg_idx])
neg_doc_image.append(doc_image[neg_idx])
multi_embeds_neg = torch.stack(multi_embeds_neg, dim=0)
multi_atts_neg = torch.stack(multi_atts_neg, dim=0)
# [New]
if matchingv2:
neg_doc_text_inps = torch.stack(neg_doc_text_inps, dim=0)
neg_doc_text_atts = torch.stack(neg_doc_text_atts, dim=0)
neg_doc_text = {'input_ids': neg_doc_text_inps, 'attention_mask': neg_doc_text_atts}
neg_doc_image = torch.stack(neg_doc_image, dim=0)
result_neg_image = self.forward_feats(neg_doc_image, doc_text ,query)
multi_embeds_image_neg = result_neg_image['multi_embeds']
multi_atts_image_neg = result_neg_image['multi_atts']
result_neg_text = self.forward_feats(doc_image, neg_doc_text ,query)
multi_embeds_text_neg = result_neg_text['multi_embeds']
multi_atts_text_neg = result_neg_text['multi_atts']
# Quadra
if matchingv2:
query_embeds_matching = torch.cat([query_embeds, query_embeds, query_embeds, query_embeds], dim=0)
query_attn_matching = torch.cat([query_atts, query_atts, query_atts, query_atts], dim=0)
multi_embeds_matching = torch.cat([multi_embeds, multi_embeds_neg, multi_embeds_image_neg, multi_embeds_text_neg], dim=0)
multi_attn_matching = torch.cat([multi_atts, multi_atts_neg, multi_atts_image_neg, multi_atts_text_neg], dim=0)
else:
# Binary
query_embeds_matching = torch.cat([query_embeds, query_embeds], dim=0)
query_attn_matching = torch.cat([query_atts, query_atts], dim=0)
multi_embeds_matching = torch.cat([multi_embeds, multi_embeds_neg], dim=0)
multi_attn_matching = torch.cat([multi_atts, multi_atts_neg], dim=0)
# Triple
# query_embeds_matching = torch.cat([query_embeds, query_embeds, query_embeds_neg], dim=0)
# query_attn_matching = torch.cat([query_atts, query_atts, query_atts_neg], dim=0)
# multi_embeds_matching = torch.cat([multi_embeds, multi_embeds_neg, multi_embeds], dim=0)
# multi_attn_matching = torch.cat([multi_atts, multi_atts_neg, multi_atts], dim=0)
itm_logits = self.matching_classifier(
query_embeds=query_embeds_matching,
query_attns=query_attn_matching,
multi_embeds=multi_embeds_matching,
multi_attns=multi_attn_matching
)
itm_labels = torch.cat([torch.ones(bs,dtype=torch.long),
torch.zeros(bs * 3 if matchingv2 else bs * 1,dtype=torch.long)],
dim=0).to(doc_image.device)
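            # Label layout: the first bs rows are the true (query, doc) pairs; the
            # remaining bs (binary) or 3*bs (matchingv2) rows are the shuffled-doc,
            # swapped-image and swapped-text negatives constructed above.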
# [TODO] Binary_CE
loss_itm = F.cross_entropy(itm_logits, itm_labels)
loss_ctx_pred = 0.0
# ===== Context Prediction Loss =====
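        # Score every vocabulary token from the fused document embeddings with the
        # MLM head, mean-pool the scores over positions, and match them
        # (soft-label cross-entropy) to the normalized context_labels distribution.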
if context_labels is not None:
ctx_targets = context_labels / context_labels.sum(1,keepdim=True)
prediction_scores = self.context(multi_embeds)
mean_prediction_scores = torch.mean(prediction_scores,1)
loss_ctx_pred = -torch.sum(F.log_softmax(mean_prediction_scores, dim=1)*ctx_targets,dim=1).mean()
return loss_ita, loss_ctx_pred, loss_itm
def output_itm_logits(self, query, doc_text, doc_image):
query['input_ids'] = query['input_ids'].view(query['input_ids'].shape[0],-1)
query['attention_mask'] = query['attention_mask'].view(query['input_ids'].shape[0],-1)
if "token_type_ids" in query:
query['token_type_ids'] = query['token_type_ids'].view(query['input_ids'].shape[0],-1)
doc_text['input_ids'] = doc_text['input_ids'].view(doc_text['input_ids'].shape[0],-1)
doc_text['attention_mask'] = doc_text['attention_mask'].view(doc_text['input_ids'].shape[0],-1)
if "token_type_ids" in doc_text:
doc_text['token_type_ids'] = doc_text['token_type_ids'].view(doc_text['input_ids'].shape[0],-1)
result = self.forward_feats(doc_image, doc_text, query)
multi_feat = result['multi_feat'] # B, 1, H
query_feat = result['query_feat'] # B, 1, H
multi_embeds = result['multi_embeds'] # B, L, H
multi_atts = result['multi_atts'] # B, L, H
query_embeds = result['query_embeds']
query_atts = result['query_atts']
itm_logits = self.matching_classifier(
query_embeds=query_embeds,
query_attns=query_atts,
multi_embeds=multi_embeds,
multi_attns=multi_atts
)
return itm_logits
@torch.no_grad()
def output_query_feats(self,query):
if self.args.pretrain == "ALBEF":
query_output = self.model.text_encoder(query['input_ids'], attention_mask = query["attention_mask"], mode='text')
query_embeds = query_output.last_hidden_state
query_masks = query["attention_mask"]
query_cls = F.normalize(self.model.text_proj(query_embeds[:,0,:]),dim=-1)
elif self.args.pretrain == "ViLT":
query_embeds = self.model.text_embeddings(query['input_ids'])
query_masks = query['attention_mask']
for i, blk in enumerate(self.model.transformer.blocks):
query_embeds, _ = blk(query_embeds, mask=query_masks)
query_embeds = self.model.transformer.norm(query_embeds)
query_cls = F.normalize(self.query_proj(query_embeds[:,0,:]),dim=-1)
elif self.args.pretrain == "METER":
query_embeds = self.model.text_transformer.embeddings(input_ids=query['input_ids'])
query_masks = query['attention_mask']
device = query_embeds.device
input_shape = query_masks.size()
extend_query_masks = self.model.text_transformer.get_extended_attention_mask(query_masks, input_shape, device)
for layer in self.model.text_transformer.encoder.layer:
query_embeds = layer(query_embeds, extend_query_masks)[0]
query_embeds = self.model.cross_modal_text_transform(query_embeds)
query_cls = self.model.cross_modal_text_pooler(query_embeds)
if self.args.embeds_feats == "avg":
avg_query_embeds = (query_embeds * query_masks.unsqueeze(-1)).sum(dim=1) / query_masks.sum(dim=1).unsqueeze(-1)
query_feat = F.normalize(self.query_proj(avg_query_embeds),dim=-1)
elif self.args.embeds_feats == "cls":
query_feat = query_cls.float()
elif self.args.embeds_feats == "iavg_tcls":
query_feat = query_cls.float()
return query_embeds,query_feat
@torch.no_grad()
def output_doc_feats(self,doc_text,doc_image):
if self.args.pretrain == "ALBEF":
image_embeds = self.model.visual_encoder(doc_image)
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(doc_image.device)
doc_text_output = self.model.text_encoder(doc_text['input_ids'], attention_mask = doc_text["attention_mask"], mode='text')
doc_text_embeds = doc_text_output.last_hidden_state
output = self.model.text_encoder(encoder_embeds = doc_text_embeds,
attention_mask = doc_text["attention_mask"],
encoder_hidden_states = image_embeds,
encoder_attention_mask = image_atts,
return_dict = True,
mode = 'fusion',
)
multi_embeds = output.last_hidden_state
multi_atts = doc_text["attention_mask"]
multi_cls = F.normalize(self.model.text_proj(multi_embeds[:,0,:]),dim=-1)
elif self.args.pretrain == "ViLT":
output = self.model.output_multi(batch={
"image":doc_image,
"text_ids":doc_text['input_ids'],
"text_masks":doc_text['attention_mask'],
})
multi_embeds = output["multi_embeds"]
multi_atts = output["multi_atts"]
multi_cls = output["multi_cls"]
elif self.args.pretrain == "METER":
output = self.model.output_multi(batch={
"image":doc_image,
"text_ids":doc_text['input_ids'],
"text_masks":doc_text['attention_mask'],
})
multi_embeds = output["multi_embeds"]
multi_atts = output["multi_atts"]
text_cls = output["text_cls"]
img_cls = output["cls_feats_image"]
if self.args.embeds_feats == "avg":
avg_multi_embeds = (multi_embeds * multi_atts.unsqueeze(-1)).sum(dim=1) / multi_atts.sum(dim=1).unsqueeze(-1)
avg_multi_feat = F.normalize(self.multi_proj(avg_multi_embeds),dim=-1)
elif self.args.embeds_feats == "cls":
            if self.args.pretrain == "METER": # METER has two CLS tokens (text and image)
multi_embeds = torch.cat([text_cls, img_cls], dim=-1).float()
avg_multi_feat = F.normalize(self.multi_proj(multi_embeds),dim=-1)
else:
avg_multi_feat = multi_cls.float()
elif self.args.embeds_feats == "iavg_tcls":
text_cls = output["text_cls"]
img_embeds = output["image_feats"]
avg_img_embeds = (output["image_feats"] * output["image_masks"].unsqueeze(-1)).sum(dim=1) / output["image_masks"].sum(dim=1).unsqueeze(-1)
concat_embeds = torch.cat([text_cls, avg_img_embeds], dim=-1).float()
avg_multi_feat = F.normalize(self.multi_proj(concat_embeds),dim=-1)
return multi_embeds,avg_multi_feat, multi_atts
@torch.no_grad()
def _dequeue_and_enqueue(self, multi_feat, query_feat, idx):
# gather keys before updating queue
idxs = concat_all_gather(idx)
if multi_feat is not None:
multi_feats = concat_all_gather(multi_feat)
batch_size = multi_feats.shape[0]
ptr = int(self.queue_num)
assert self.queue_size % batch_size == 0 # for simplicity
self.multi_queue[:, ptr:ptr + batch_size] = multi_feats.T
if query_feat is not None:
query_feats = concat_all_gather(query_feat)
batch_size = query_feats.shape[0]
ptr = int(self.queue_num)
assert self.queue_size % batch_size == 0 # for simplicity
self.query_queue[:, ptr:ptr + batch_size] = query_feats.T
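            # Note: the query_queue buffer registration is commented out in
            # __init__, so this branch would fail if a query_feat were ever
            # passed; forward() currently calls this with query_feat=None.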
# replace the keys at ptr (dequeue and enqueue)
self.idx_queue[:, ptr:ptr + batch_size] = idxs.T
ptr = (ptr + batch_size) % self.queue_size # move pointer
self.queue_num[0] = ptr
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
group = torch.distributed.group.WORLD
world_size = torch.distributed.get_world_size(group)
tensors_gather = [torch.ones_like(tensor)
for _ in range(world_size)]
torch.distributed.all_gather(tensors_gather, tensor, group, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
| 21,799 | 50.294118 | 151 | py |
Mr.Right | Mr.Right-main/models/METER/azure_distributed_run.py | import os
import copy
import pytorch_lightning as pl
os.environ["NCCL_DEBUG"] = "INFO"
from meter.config import ex
from meter.modules import METERTransformerSS
from meter.datamodules.multitask_datamodule import MTDataModule
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (20480, rlimit[1]))
from pytorch_lightning.plugins.environments import ClusterEnvironment
from pytorch_lightning.plugins.training_type import DDPPlugin
import torch.distributed as dist
class MyCluster(ClusterEnvironment):
def creates_children(self) -> bool:
# return True if the cluster is managed (you don't launch processes yourself)
return True
def master_address(self):
return os.environ['MASTER_ADDR']
def master_port(self) -> int:
return int(os.environ["MASTER_PORT"])
def world_size(self):
return int(os.environ['OMPI_COMM_WORLD_SIZE'])
def global_rank(self) -> int:
return int(os.environ['OMPI_COMM_WORLD_RANK'])
def local_rank(self) -> int:
return int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
def node_rank(self) -> int:
return int(os.environ["OMPI_COMM_WORLD_NODE_RANK"])
def set_global_rank(self, rank: int) -> None:
pass
def set_world_size(self, size: int) -> None:
pass
class MyDDPPlugin(DDPPlugin):
def init_ddp_connection(self, global_rank = None, world_size = None) -> None:
master_uri = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
dist.init_process_group(
backend=self.torch_distributed_backend,
init_method=master_uri,
world_size=int(os.environ['OMPI_COMM_WORLD_SIZE']),
rank=int(os.environ['OMPI_COMM_WORLD_RANK']),
)
@ex.automain
def main(_config):
os.environ["NCCL_DEBUG"] = "INFO"
world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
local_size = int(os.environ['OMPI_COMM_WORLD_LOCAL_SIZE'])
global_rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
master_addr = os.environ['MASTER_ADDR']
master_port = os.environ['MASTER_PORT']
# set environment variables for 'env://'
os.environ['WORLD_SIZE'] = str(world_size)
os.environ['NODE_RANK'] = str(os.environ["OMPI_COMM_WORLD_NODE_RANK"])
_config = copy.deepcopy(_config)
pl.seed_everything(_config["seed"])
dm = MTDataModule(_config, dist=True)
model = METERTransformerSS(_config)
exp_name = f'{_config["exp_name"]}'
os.makedirs(_config["log_dir"], exist_ok=True)
checkpoint_callback = pl.callbacks.ModelCheckpoint(
save_top_k=1,
verbose=True,
monitor="val/the_metric",
mode="max",
save_last=True,
)
logger = pl.loggers.TensorBoardLogger(
_config["log_dir"],
name=f'{exp_name}_seed{_config["seed"]}_from_{_config["load_path"].split("/")[-1][:-5]}',
)
lr_callback = pl.callbacks.LearningRateMonitor(logging_interval="step")
callbacks = [checkpoint_callback, lr_callback]
num_gpus = (
_config["num_gpus"]
if isinstance(_config["num_gpus"], int)
else len(_config["num_gpus"])
)
grad_steps = max(_config["batch_size"] // (
_config["per_gpu_batchsize"] * num_gpus * _config["num_nodes"]
), 1)
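    # Gradient accumulation keeps the effective batch size at _config["batch_size"]:
    # e.g. batch_size=4096 with per_gpu_batchsize=32 on 8 GPUs x 4 nodes gives
    # 4096 // (32 * 8 * 4) = 4 accumulation steps.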
max_steps = _config["max_steps"] if _config["max_steps"] is not None else None
trainer = pl.Trainer(
plugins=[MyCluster(), MyDDPPlugin()],
gpus=_config["num_gpus"],
num_nodes=_config["num_nodes"],
precision=_config["precision"],
accelerator="ddp",
benchmark=True,
deterministic=True,
max_epochs=_config["max_epoch"] if max_steps is None else 1000,
max_steps=max_steps,
callbacks=callbacks,
logger=logger,
prepare_data_per_node=False,
replace_sampler_ddp=False,
accumulate_grad_batches=grad_steps,
log_every_n_steps=10,
flush_logs_every_n_steps=10,
resume_from_checkpoint=_config["resume_from"],
weights_summary="top",
fast_dev_run=_config["fast_dev_run"],
val_check_interval=_config["val_check_interval"],
)
if not _config["test_only"]:
trainer.fit(model, datamodule=dm)
else:
trainer.test(model, datamodule=dm)
| 4,388 | 31.272059 | 97 | py |
Mr.Right | Mr.Right-main/models/METER/setup.py | from setuptools import setup, find_packages
setup(
name="meter",
packages=find_packages(
exclude=[".dfc", ".vscode", "dataset", "notebooks", "result", "scripts"]
),
version="0.1.0",
license="MIT",
description="METER: Multimodal End-to-end TransformER",
author="Microsoft Corporation",
author_email="zdou0830@gmail.com",
url="https://github.com/zdou0830/METER",
keywords=["vision and language pretraining"],
install_requires=["torch", "pytorch_lightning"],
)
| 511 | 29.117647 | 80 | py |
Mr.Right | Mr.Right-main/models/METER/run.py | import os
import copy
import pytorch_lightning as pl
os.environ["NCCL_DEBUG"] = "INFO"
from meter.config import ex
from meter.modules import METERTransformerSS
from meter.datamodules.multitask_datamodule import MTDataModule
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (20480, rlimit[1]))
@ex.automain
def main(_config):
_config = copy.deepcopy(_config)
pl.seed_everything(_config["seed"])
dm = MTDataModule(_config, dist=True)
model = METERTransformerSS(_config)
exp_name = f'{_config["exp_name"]}'
os.makedirs(_config["log_dir"], exist_ok=True)
checkpoint_callback = pl.callbacks.ModelCheckpoint(
save_top_k=1,
verbose=True,
monitor="val/the_metric",
mode="max",
save_last=True,
)
logger = pl.loggers.TensorBoardLogger(
_config["log_dir"],
name=f'{exp_name}_seed{_config["seed"]}_from_{_config["load_path"].split("/")[-1][:-5]}',
)
lr_callback = pl.callbacks.LearningRateMonitor(logging_interval="step")
callbacks = [checkpoint_callback, lr_callback]
num_gpus = (
_config["num_gpus"]
if isinstance(_config["num_gpus"], int)
else len(_config["num_gpus"])
)
grad_steps = max(_config["batch_size"] // (
_config["per_gpu_batchsize"] * num_gpus * _config["num_nodes"]
), 1)
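    # grad_steps is chosen so that per_gpu_batchsize * num_gpus * num_nodes *
    # grad_steps approximately matches the configured batch_size (floored at 1).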
max_steps = _config["max_steps"] if _config["max_steps"] is not None else None
trainer = pl.Trainer(
gpus=_config["num_gpus"],
num_nodes=_config["num_nodes"],
precision=_config["precision"],
accelerator="ddp",
benchmark=True,
deterministic=True,
max_epochs=_config["max_epoch"] if max_steps is None else 1000,
max_steps=max_steps,
callbacks=callbacks,
logger=logger,
prepare_data_per_node=False,
replace_sampler_ddp=False,
accumulate_grad_batches=grad_steps,
log_every_n_steps=10,
flush_logs_every_n_steps=10,
resume_from_checkpoint=_config["resume_from"],
weights_summary="top",
fast_dev_run=_config["fast_dev_run"],
val_check_interval=_config["val_check_interval"],
)
if not _config["test_only"]:
trainer.fit(model, datamodule=dm)
else:
trainer.test(model, datamodule=dm)
| 2,373 | 29.050633 | 97 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/clip_model.py | from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
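        # CLIP's fast GELU approximation: x * sigmoid(1.702 * x) ~= GELU(x).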
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor, x_mask:torch.Tensor):
if x_mask is not None:
x_mask = x_mask.to(dtype=torch.bool, device=x.device)
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, key_padding_mask=x_mask)[0]
def forward(self, x: torch.Tensor, x_mask:torch.Tensor=None):
x = x + self.attention(self.ln_1(x), x_mask)
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers-1)])
def forward(self, x: torch.Tensor, x_mask: torch.Tensor=None):
for block in self.resblocks:
x = block(x, x_mask)
return x
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int, resolution_after: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((resolution_after // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
def forward(self, x: torch.Tensor, x_mask):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
t=self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device)
x = torch.cat([t, x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x, x_mask)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x)
return x
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int,
resolution_after=224,
):
super().__init__()
self.context_length = context_length
vision_heads = vision_width // 64
self.visual = VisualTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim,
resolution_after=resolution_after,
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
proj_std = (self.visual.transformer.width ** -0.5) * ((2 * self.visual.transformer.layers) ** -0.5)
attn_std = self.visual.transformer.width ** -0.5
fc_std = (2 * self.visual.transformer.width) ** -0.5
for block in self.visual.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def forward(self, image, image_mask=None):
return self.visual(image.type(self.dtype), image_mask)
_MODELS = {
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
}
import os
import hashlib
import urllib
from tqdm import tqdm
import warnings
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def adapt_position_encoding(model, patch_size=32, after=384,
suffix='visual.positional_embedding'):
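    # Adapts CLIP's learned position embeddings to a new input resolution: the
    # patch-grid portion is reshaped to 2D, bicubically interpolated to the new
    # grid size, and re-concatenated behind the untouched CLS position embedding.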
keys = [k for k in model if k.endswith(suffix)]
assert len(keys) == 1
key = keys[0]
origin_pos_embed = model[key]
origin_dim2 = False
if len(origin_pos_embed.shape) == 2:
origin_dim2 = True
origin_pos_embed = origin_pos_embed.unsqueeze(0)
grid_before = int(np.sqrt(origin_pos_embed.shape[1] - 1))
before = int(grid_before*patch_size)
assert (before % patch_size) == 0
grid_after = after // patch_size
assert (after % patch_size) == 0
embed_dim = origin_pos_embed.shape[-1]
pos_embed = origin_pos_embed[0, 1:, :].reshape((grid_before, grid_before, embed_dim))
new_size = (grid_after, grid_after)
pos_embed = torch.nn.functional.interpolate(pos_embed.permute((2, 0, 1)).unsqueeze(0), size=new_size, mode='bicubic')
pos_embed = pos_embed.squeeze(0).permute((1, 2, 0)).reshape((-1, embed_dim))
pos_embed = torch.cat((origin_pos_embed[0, 0:1, :], pos_embed), dim=0).unsqueeze(0)
assert pos_embed.shape == (1, grid_after * grid_after + 1, embed_dim)
if origin_dim2:
assert pos_embed.shape[0] == 1
pos_embed = pos_embed.squeeze(0)
model[key] = pos_embed
return model
def build_model(name, resolution_after=224):
if name in _MODELS:
model_path = _download(_MODELS[name])
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}"
)
try:
model = torch.jit.load(model_path, map_location="cpu")
state_dict = None
except RuntimeError:
        # Not a TorchScript archive: fall back to loading a plain state dict.
        warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
        state_dict = torch.load(model_path, map_location="cpu")
state_dict = state_dict or model.state_dict()
vit = "visual.proj" in state_dict
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers,
resolution_after,
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
model_dict = model.state_dict()
pretrained_dict = state_dict
if resolution_after != image_resolution:
pretrained_dict = adapt_position_encoding(pretrained_dict, after=resolution_after, patch_size=vision_patch_size)
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
model.load_state_dict(model_dict)
return model
| 11,209 | 39.179211 | 142 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/meter_utils.py | import torch
import random
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from .dist_utils import all_gather
from .objectives import compute_irtr_recall
from ..gadgets.my_metrics import Accuracy, VQAScore, Scalar
def set_metrics(pl_module):
for split in ["train", "val"]:
for k, v in pl_module.hparams.config["loss_names"].items():
if v <= 0:
continue
if k == "vqa":
setattr(pl_module, f"{split}_vqa_score", VQAScore())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
elif k == "nlvr2":
if split == "train":
setattr(pl_module, f"train_{k}_accuracy", Accuracy())
setattr(pl_module, f"train_{k}_loss", Scalar())
else:
setattr(pl_module, f"dev_{k}_accuracy", Accuracy())
setattr(pl_module, f"dev_{k}_loss", Scalar())
setattr(pl_module, f"test_{k}_accuracy", Accuracy())
setattr(pl_module, f"test_{k}_loss", Scalar())
elif k == "snli":
if split == "train":
setattr(pl_module, f"train_{k}_accuracy", Accuracy())
setattr(pl_module, f"train_{k}_loss", Scalar())
else:
setattr(pl_module, f"dev_{k}_accuracy", Accuracy())
setattr(pl_module, f"dev_{k}_loss", Scalar())
setattr(pl_module, f"test_{k}_accuracy", Accuracy())
setattr(pl_module, f"test_{k}_loss", Scalar())
elif k == "irtr":
setattr(pl_module, f"{split}_irtr_loss", Scalar())
elif k == "mppd" or k == "mpfr":
setattr(pl_module, f"{split}_{k}_loss", Scalar())
elif k == "itm":
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
else:
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
def epoch_wrapup(pl_module):
phase = "train" if pl_module.training else "val"
the_metric = 0
if pl_module.hparams.config["get_recall_metric"] and not pl_module.training:
(ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) = compute_irtr_recall(pl_module)
print((ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10), pl_module.global_step)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r1", ir_r1, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r5", ir_r5, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r10", ir_r10, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r1", tr_r1, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r5", tr_r5, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r10", tr_r10, pl_module.global_step
)
the_metric += ir_r1.item() + tr_r1.item()
for loss_name, v in pl_module.hparams.config["loss_names"].items():
if v <= 0:
continue
value = 0
if loss_name == "vqa":
value = getattr(pl_module, f"{phase}_{loss_name}_score").compute()
pl_module.log(f"{loss_name}/{phase}/score_epoch", value)
getattr(pl_module, f"{phase}_{loss_name}_score").reset()
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
elif loss_name == "nlvr2" or loss_name == 'snli':
if phase == "train":
value = getattr(pl_module, f"train_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/train/accuracy_epoch", value)
getattr(pl_module, f"train_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/train/loss_epoch",
getattr(pl_module, f"train_{loss_name}_loss").compute(),
)
getattr(pl_module, f"train_{loss_name}_loss").reset()
else:
value = getattr(pl_module, f"test_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/test/accuracy_epoch", value)
getattr(pl_module, f"test_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/test/loss_epoch",
getattr(pl_module, f"test_{loss_name}_loss").compute(),
)
getattr(pl_module, f"test_{loss_name}_loss").reset()
value = getattr(pl_module, f"dev_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/dev/accuracy_epoch", value)
getattr(pl_module, f"dev_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/dev/loss_epoch",
getattr(pl_module, f"dev_{loss_name}_loss").compute(),
)
getattr(pl_module, f"dev_{loss_name}_loss").reset()
elif loss_name == "irtr":
pl_module.log(
f"{loss_name}/{phase}/irtr_loss_epoch",
getattr(pl_module, f"{phase}_irtr_loss").compute(),
)
getattr(pl_module, f"{phase}_irtr_loss").reset()
elif loss_name == "mppd" or loss_name == "mpfr":
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
elif loss_name == "itm":
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
else:
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
the_metric += value
pl_module.log(f"{phase}/the_metric", the_metric)
def check_non_acc_grad(pl_module):
if pl_module.token_type_embeddings.weight.grad is None:
return True
else:
grad = pl_module.token_type_embeddings.weight.grad
return (grad.sum() == 0).item()
def set_task(pl_module):
pl_module.current_tasks = [
k for k, v in pl_module.hparams.config["loss_names"].items() if v > 0
]
return
def set_schedule(pl_module):
lr = pl_module.hparams.config["learning_rate"]
wd = pl_module.hparams.config["weight_decay"]
no_decay = [
"bias",
"LayerNorm.bias",
"LayerNorm.weight",
"norm.bias",
"norm.weight",
"norm1.bias",
"norm1.weight",
"norm2.bias",
"norm2.weight",
]
head_names = ["vqa_classifier", "nlvr2_classifier", "mlm_score", "itm_score", "snli_classifier"]
cross_modal_names = ['cross_modal']
lr_mult_head = pl_module.hparams.config["lr_mult_head"]
lr_mult_cross_modal = pl_module.hparams.config["lr_mult_cross_modal"]
end_lr = pl_module.hparams.config["end_lr"]
decay_power = pl_module.hparams.config["decay_power"]
optim_type = pl_module.hparams.config["optim_type"]
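    # Six parameter groups below: {weight-decay, no-decay} x {backbone, task
    # heads, cross-modal layers}, with separate lr multipliers for the heads
    # (lr_mult_head) and the cross-modal layers (lr_mult_cross_modal).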
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in pl_module.named_parameters()
if not any(nd in n for nd in no_decay)
and not any(bb in n for bb in head_names)
and not any(ht in n for ht in cross_modal_names)
],
"weight_decay": wd,
"lr": lr,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if any(nd in n for nd in no_decay)
and not any(bb in n for bb in head_names)
and not any(ht in n for ht in cross_modal_names)
],
"weight_decay": 0.0,
"lr": lr,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if not any(nd in n for nd in no_decay)
and any(bb in n for bb in head_names)
and not any(ht in n for ht in cross_modal_names)
],
"weight_decay": wd,
"lr": lr * lr_mult_head,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if any(nd in n for nd in no_decay) and any(bb in n for bb in head_names)
and not any(ht in n for ht in cross_modal_names)
],
"weight_decay": 0.0,
"lr": lr * lr_mult_head,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if not any(nd in n for nd in no_decay)
and not any(bb in n for bb in head_names)
and any(ht in n for ht in cross_modal_names)
],
"weight_decay": wd,
"lr": lr * lr_mult_cross_modal,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if any(nd in n for nd in no_decay)
and not any(bb in n for bb in head_names)
and any(ht in n for ht in cross_modal_names)
],
"weight_decay": 0.0,
"lr": lr * lr_mult_cross_modal,
},
]
if optim_type == "adamw":
optimizer = AdamW(
optimizer_grouped_parameters, lr=lr, eps=1e-8, betas=(0.9, 0.98)
)
elif optim_type == "adam":
optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=lr)
elif optim_type == "sgd":
optimizer = torch.optim.SGD(optimizer_grouped_parameters, lr=lr, momentum=0.9)
if pl_module.trainer.max_steps is None:
max_steps = (
len(pl_module.trainer.datamodule.train_dataloader())
* pl_module.trainer.max_epochs
// pl_module.trainer.accumulate_grad_batches
)
else:
max_steps = pl_module.trainer.max_steps
warmup_steps = pl_module.hparams.config["warmup_steps"]
if isinstance(pl_module.hparams.config["warmup_steps"], float):
warmup_steps = int(max_steps * warmup_steps)
if decay_power == "cosine":
scheduler = get_cosine_schedule_with_warmup(
optimizer, num_warmup_steps=warmup_steps, num_training_steps=max_steps,
)
else:
scheduler = get_polynomial_decay_schedule_with_warmup(
optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=max_steps,
lr_end=end_lr,
power=decay_power,
)
sched = {"scheduler": scheduler, "interval": "step"}
return (
[optimizer],
[sched],
)
| 11,926 | 38.363036 | 100 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/swin_transformer.py | """ Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`
- https://arxiv.org/pdf/2103.14030
Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below
"""
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import logging
import math
from copy import deepcopy
from typing import Optional
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg, overlay_external_default_cfg
from .swin_helpers import swin_build_model_with_cfg
from timm.models.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from timm.models.vision_transformer import checkpoint_filter_fn, _init_vit_weights
_logger = logging.getLogger(__name__)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# patch models (my experiments)
'swin_base_patch4_window12_384': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'swin_base_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth',
),
'swin_large_patch4_window12_384': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'swin_large_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth',
),
'swin_small_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth',
),
'swin_tiny_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth',
),
'swin_base_patch4_window12_384_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
'swin_base_patch4_window7_224_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth',
num_classes=21841),
'swin_large_patch4_window12_384_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
'swin_large_patch4_window7_224_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth',
num_classes=21841),
}
def window_partition(x, window_size: int):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size: int, H: int, W: int):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask: Optional[torch.Tensor] = None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
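            # After the cyclic shift a window may mix patches from opposite image
            # borders; the -100 entries suppress attention between sub-regions
            # that were not adjacent before the shift.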
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(
dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2, mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if not torch.jit.is_scripting() and self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
class SwinTransformer(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24),
window_size=7, mlp_ratio=4., qkv_bias=True,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, weight_init='', **kwargs):
super().__init__()
window_size=int(img_size/32)
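        # METER ties the window size to the input resolution (the final stage has
        # a stride of 32), silently overriding the window_size argument above:
        # e.g. img_size=384 -> window_size=12.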
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
self.patch_grid = self.patch_embed.grid_size
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
else:
self.absolute_pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
layers = []
for i_layer in range(self.num_layers):
layers += [BasicLayer(
dim=int(embed_dim * 2 ** i_layer),
input_resolution=(self.patch_grid[0] // (2 ** i_layer), self.patch_grid[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
]
self.layers = nn.Sequential(*layers)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
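# note: avgpool is kept here, but forward() below returns un-pooled token features of shape (B, L, num_features)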
assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.
if weight_init.startswith('jax'):
for n, m in self.named_modules():
_init_vit_weights(m, n, head_bias=head_bias, jax_impl=True)
else:
self.apply(_init_vit_weights)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def get_classifier(self):
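# note: self.head is not created in __init__ above; it only exists after reset_classifier() has been called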
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
if self.absolute_pos_embed is not None:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
x = self.layers(x)
x = self.norm(x) # B L C
return x
def forward(self, x):
x = self.forward_features(x)
return x
def _create_swin_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
if default_cfg is None:
default_cfg = deepcopy(default_cfgs[variant])
overlay_external_default_cfg(default_cfg, kwargs)
default_num_classes = default_cfg['num_classes']
default_img_size = default_cfg['input_size'][-2:]
num_classes = kwargs.pop('num_classes', default_num_classes)
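# note: img_size is read from the caller-supplied config dict rather than from default_img_size above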
img_size = kwargs['config']['image_size']
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = swin_build_model_with_cfg(
SwinTransformer, variant, pretrained,
default_cfg=default_cfg,
img_size=img_size,
num_classes=num_classes,
pretrained_filter_fn=checkpoint_filter_fn,
pretrained_strict=False,
**kwargs)
return model
@register_model
def swin_base_patch4_window12_384(pretrained=False, **kwargs):
""" Swin-B @ 384x384, pretrained ImageNet-22k, fine tune 1k
"""
model_kwargs = dict(
patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)
return _create_swin_transformer('swin_base_patch4_window12_384', pretrained=pretrained, **model_kwargs)
@register_model
def swin_base_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-B @ 224x224, pretrained ImageNet-22k, fine tune 1k
"""
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)
return _create_swin_transformer('swin_base_patch4_window7_224', pretrained=pretrained, **model_kwargs)
@register_model
def swin_large_patch4_window12_384(pretrained=False, **kwargs):
""" Swin-L @ 384x384, pretrained ImageNet-22k, fine tune 1k
"""
model_kwargs = dict(
patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)
return _create_swin_transformer('swin_large_patch4_window12_384', pretrained=pretrained, **model_kwargs)
@register_model
def swin_large_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-L @ 224x224, pretrained ImageNet-22k, fine tune 1k
"""
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)
return _create_swin_transformer('swin_large_patch4_window7_224', pretrained=pretrained, **model_kwargs)
@register_model
def swin_small_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-S @ 224x224, trained ImageNet-1k
"""
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), **kwargs)
return _create_swin_transformer('swin_small_patch4_window7_224', pretrained=pretrained, **model_kwargs)
@register_model
def swin_tiny_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-T @ 224x224, trained ImageNet-1k
"""
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), **kwargs)
return _create_swin_transformer('swin_tiny_patch4_window7_224', pretrained=pretrained, **model_kwargs)
@register_model
def swin_base_patch4_window12_384_in22k(pretrained=False, **kwargs):
""" Swin-B @ 384x384, trained ImageNet-22k
"""
model_kwargs = dict(
patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)
return _create_swin_transformer('swin_base_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs)
@register_model
def swin_base_patch4_window7_224_in22k(pretrained=False, **kwargs):
""" Swin-B @ 224x224, trained ImageNet-22k
"""
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)
return _create_swin_transformer('swin_base_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs)
@register_model
def swin_large_patch4_window12_384_in22k(pretrained=False, **kwargs):
""" Swin-L @ 384x384, trained ImageNet-22k
"""
model_kwargs = dict(
patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)
return _create_swin_transformer('swin_large_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs)
@register_model
def swin_large_patch4_window7_224_in22k(pretrained=False, **kwargs):
""" Swin-L @ 224x224, trained ImageNet-22k
"""
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)
return _create_swin_transformer('swin_large_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs)
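# Minimal usage sketch (not part of the original file): build the SwinTransformer defined above
# directly. This assumes the helpers it references (PatchEmbed, PatchMerging, trunc_normal_,
# _init_vit_weights) are defined earlier in this module.
if __name__ == "__main__":
    import torch
    # Swin-T sized configuration; window_size is derived internally as int(img_size / 32) = 7
    model = SwinTransformer(img_size=224, patch_size=4, embed_dim=96,
                            depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
    feats = model(torch.randn(1, 3, 224, 224))
    print(feats.shape)  # (1, 49, 768): 7x7 tokens, num_features = 96 * 2 ** 3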
| 27,086 | 41.191589 | 125 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/bert_model.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
"bert-base-chinese",
"bert-base-german-cased",
"bert-large-uncased-whole-word-masking",
"bert-large-cased-whole-word-masking",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"bert-large-cased-whole-word-masking-finetuned-squad",
"bert-base-cased-finetuned-mrpc",
"bert-base-german-dbmdz-cased",
"bert-base-german-dbmdz-uncased",
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
"TurkuNLP/bert-base-finnish-cased-v1",
"TurkuNLP/bert-base-finnish-uncased-v1",
"wietsedv/bert-base-dutch-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using a pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
self.save_attention = False
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def transpose_for_scores(self, x):
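# reshape (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, attention_head_size) so scores are computed per head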
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# if True:
if self.save_attention:
self.save_attention_map(attention_probs)
attention_probs.register_hook(self.save_attn_gradients)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
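# Note (an assumption, not part of the original file): BertEncoder below instantiates BertLayer,
# which does not appear in this excerpt. The following is a minimal sketch matching the standard
# transformers BertLayer so the encoder is self-contained; it mirrors BertCrossLayer further down
# but only adds cross-attention when the config requests it.
class BertLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
            self.crossattention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # decoder self-attention cached key/values (if any) are the first two entries
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]
        if self.is_decoder and encoder_hidden_states is not None:
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]
            present_key_value = present_key_value + cross_attention_outputs[-1]
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        if self.is_decoder:
            outputs = outputs + (present_key_value,)
        return outputs
    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output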
class BertCrossLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
self.crossattention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
encoder_hidden_states,
attention_mask=None,
encoder_attention_mask=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = None #past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask=None,
output_attentions=output_attentions,
past_key_value=None,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
None,
encoder_hidden_states,
encoder_attention_mask,
None,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
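# Usage sketch for BertCrossLayer (illustrative, an assumption rather than part of the original file):
# each call self-attends over one modality and then cross-attends into the other, e.g.
#   layer = BertCrossLayer(config)
#   fused_text = layer(text_states, image_states, extended_text_mask, extended_image_mask)[0]
# where the masks are additive extended masks of shape (batch, 1, 1, seq_len):
# 0 for visible positions, a large negative value for padded ones.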
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class BertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.BertForPreTraining`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
seq_relationship_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
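# the resulting additive mask broadcasts over heads: 0.0 where attention is allowed, a large negative value where it is masked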
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
sentence prediction (classification)` head.
""",
BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Example::
>>> from transformers import BertTokenizer, BertForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return BertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> config.is_decoder = True
>>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if the model is used as a decoder in an encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
"""Bert Model with a `next sentence prediction (classification)` head on top. """,
BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see ``input_ids`` docstring). Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Returns:
Example::
>>> from transformers import BertTokenizer, BertForNextSentencePrediction
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
if not return_dict:
output = (seq_relationship_scores,) + outputs[2:]
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss).
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
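# Illustrative sketch (not called anywhere): the loss choice above depends only
# on `num_labels` and the dtype of `labels` -- one output unit means MSE
# regression, integer labels mean single-label cross-entropy, float (multi-hot)
# labels mean BCE-with-logits. The helper below mirrors that dispatch for
# hypothetical inputs.
def _example_problem_type(num_labels, labels):
    import torch
    if num_labels == 1:
        return "regression"                           # MSELoss on a single logit
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"          # CrossEntropyLoss
    return "multi_label_classification"               # BCEWithLogitsLoss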
@add_start_docstrings(
"""
Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
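# Illustrative sketch (not called anywhere): multiple-choice inputs arrive as
# (batch, num_choices, seq_len); the forward above flattens them, scores every
# choice with a single-unit classifier, and reshapes back so cross-entropy runs
# over the choice dimension. Shape walk-through with hypothetical sizes.
def _example_multiple_choice_shapes():
    import torch
    batch, num_choices, seq_len = 2, 4, 16
    input_ids = torch.zeros(batch, num_choices, seq_len, dtype=torch.long)
    flat_ids = input_ids.view(-1, input_ids.size(-1))        # (8, 16)
    flat_logits = torch.zeros(flat_ids.size(0), 1)            # one score per choice
    reshaped_logits = flat_logits.view(-1, num_choices)       # (2, 4)
    return flat_ids.shape, reshaped_logits.shape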
@add_start_docstrings(
"""
Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
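# Illustrative sketch (not called anywhere): with an attention mask, the token
# classification loss above only counts real tokens -- padded positions get the
# loss function's ignore_index (-100) as their label. Standalone version with
# hypothetical shapes and values.
def _example_active_loss_masking():
    import torch
    from torch.nn import CrossEntropyLoss
    loss_fct = CrossEntropyLoss()
    num_labels = 3
    logits = torch.randn(1, 4, num_labels)               # (batch, seq, num_labels)
    labels = torch.tensor([[0, 1, 2, 2]])
    attention_mask = torch.tensor([[1, 1, 1, 0]])        # last position is padding
    active_loss = attention_mask.view(-1) == 1
    active_labels = torch.where(
        active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index)
    )
    return loss_fct(logits.view(-1, num_labels), active_labels)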
@add_start_docstrings(
"""
Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the gather may have added an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
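# Illustrative sketch (not called anywhere): the QA head above emits one start
# logit and one end logit per token; a common (greedy, no validity checks) way
# to read off an answer span at inference time is an argmax over each, as
# below. The logit values are hypothetical.
def _example_span_decoding():
    import torch
    start_logits = torch.tensor([[0.1, 2.0, 0.3, 0.2]])
    end_logits = torch.tensor([[0.0, 0.1, 0.2, 3.0]])
    start_index = int(start_logits.argmax(dim=-1))
    end_index = int(end_logits.argmax(dim=-1))
    return start_index, end_index      # answer spans tokens start..end inclusive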
| 76,774 | 41.915036 | 213 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/meter_module.py | import torch
import torch.nn as nn
import pytorch_lightning as pl
import numpy as np
import pdb
from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings, BertModel, BertEncoder, BertLayer
from .bert_model import BertCrossLayer, BertAttention
from . import swin_transformer as swin
from . import heads, objectives
# meter_utils
from .clip_model import build_model, adapt_position_encoding
from .swin_helpers import swin_adapt_position_encoding
from transformers import RobertaConfig, RobertaModel
class METERTransformerSS(pl.LightningModule):
def __init__(self, config):
super().__init__()
self.save_hyperparameters()
        self.is_clip = ('swin' not in config['vit'])
if 'roberta' in config['tokenizer']:
bert_config = RobertaConfig(
vocab_size=config["vocab_size"],
hidden_size=config["hidden_size"],
num_hidden_layers=config["num_layers"],
num_attention_heads=config["num_heads"],
intermediate_size=config["hidden_size"] * config["mlp_ratio"],
max_position_embeddings=config["max_text_len"],
hidden_dropout_prob=config["drop_rate"],
attention_probs_dropout_prob=config["drop_rate"],
)
else:
bert_config = BertConfig(
vocab_size=config["vocab_size"],
hidden_size=config["hidden_size"],
num_hidden_layers=config["num_layers"],
num_attention_heads=config["num_heads"],
intermediate_size=config["hidden_size"] * config["mlp_ratio"],
max_position_embeddings=config["max_text_len"],
hidden_dropout_prob=config["drop_rate"],
attention_probs_dropout_prob=config["drop_rate"],
)
resolution_after=config['image_size']
self.cross_modal_text_transform = nn.Linear(config['input_text_embed_size'], config['hidden_size'])
self.cross_modal_text_transform.apply(objectives.init_weights)
self.cross_modal_image_transform = nn.Linear(config['input_image_embed_size'], config['hidden_size'])
self.cross_modal_image_transform.apply(objectives.init_weights)
self.token_type_embeddings = nn.Embedding(2, config["hidden_size"])
self.token_type_embeddings.apply(objectives.init_weights)
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
if self.is_clip:
build_model(config['vit'], resolution_after=resolution_after)
else:
getattr(swin, self.hparams.config["vit"])(
pretrained=True, config=self.hparams.config,
)
if 'roberta' in config['tokenizer']:
RobertaModel.from_pretrained(config['tokenizer'])
else:
BertModel.from_pretrained(config['tokenizer'])
torch.distributed.barrier()
if self.is_clip:
self.vit_model = build_model(config['vit'], resolution_after=resolution_after)
else:
self.vit_model = getattr(swin, self.hparams.config["vit"])(
pretrained=True, config=self.hparams.config,
)
self.avgpool = nn.AdaptiveAvgPool1d(1)
if 'roberta' in config['tokenizer']:
self.text_transformer = RobertaModel.from_pretrained(config['tokenizer'])
else:
self.text_transformer = BertModel.from_pretrained(config['tokenizer'])
self.cross_modal_image_layers = nn.ModuleList([BertCrossLayer(bert_config) for _ in range(config['num_top_layer'])])
self.cross_modal_image_layers.apply(objectives.init_weights)
self.cross_modal_text_layers = nn.ModuleList([BertCrossLayer(bert_config) for _ in range(config['num_top_layer'])])
self.cross_modal_text_layers.apply(objectives.init_weights)
self.cross_modal_image_pooler = heads.Pooler(config["hidden_size"])
self.cross_modal_image_pooler.apply(objectives.init_weights)
self.cross_modal_text_pooler = heads.Pooler(config["hidden_size"])
self.cross_modal_text_pooler.apply(objectives.init_weights)
if config["loss_names"]["mlm"] > 0:
self.mlm_score = heads.MLMHead(bert_config)
self.mlm_score.apply(objectives.init_weights)
if config["loss_names"]["itm"] > 0:
self.itm_score = heads.ITMHead(config["hidden_size"]*2)
self.itm_score.apply(objectives.init_weights)
hs = self.hparams.config["hidden_size"]
if self.hparams.config["loss_names"]["vqa"] > 0:
vs = self.hparams.config["vqav2_label_size"]
self.vqa_classifier = nn.Sequential(
nn.Linear(hs * 2, hs * 2),
nn.LayerNorm(hs * 2),
nn.GELU(),
nn.Linear(hs * 2, vs),
)
self.vqa_classifier.apply(objectives.init_weights)
# ===================== Downstream ===================== #
if self.hparams.config["loss_names"]["nlvr2"] > 0:
self.nlvr2_classifier = nn.Sequential(
nn.Linear(hs * 4, hs * 2),
nn.LayerNorm(hs * 2),
nn.GELU(),
nn.Linear(hs * 2, 2),
)
self.nlvr2_classifier.apply(objectives.init_weights)
emb_data = self.token_type_embeddings.weight.data
self.token_type_embeddings = nn.Embedding(3, hs)
self.token_type_embeddings.apply(objectives.init_weights)
self.token_type_embeddings.weight.data[0, :] = emb_data[0, :]
self.token_type_embeddings.weight.data[1, :] = emb_data[1, :]
self.token_type_embeddings.weight.data[2, :] = emb_data[1, :]
if self.hparams.config["loss_names"]["snli"] > 0:
self.snli_classifier = nn.Sequential(
nn.Linear(hs * 2, hs * 2),
nn.LayerNorm(hs * 2),
nn.GELU(),
nn.Linear(hs * 2, 3),
)
self.snli_classifier.apply(objectives.init_weights)
if self.hparams.config["loss_names"]["irtr"] > 0:
self.rank_output = nn.Linear(hs, 1)
self.rank_output.weight.data = self.itm_score.fc.weight.data[1:, :]
self.rank_output.bias.data = self.itm_score.fc.bias.data[1:]
self.margin = 0.2
for p in self.itm_score.parameters():
p.requires_grad = False
# meter_utils.set_metrics(self)
self.current_tasks = list()
# ===================== load downstream (test_only) ======================
if self.hparams.config["checkpoint"] != "":
ckpt = torch.load(self.hparams.config["checkpoint"], map_location="cpu")
state_dict = ckpt["state_dict"]
if self.is_clip:
state_dict = adapt_position_encoding(state_dict, after=resolution_after, patch_size=self.hparams.config['patch_size'])
else:
state_dict = swin_adapt_position_encoding(state_dict, after=resolution_after, before=config['resolution_before'])
msg = self.load_state_dict(state_dict, strict=False)
print(msg)
def infer(
self,
batch,
mask_text=False,
mask_image=False,
image_token_type_idx=1,
img=None,
):
do_mlm = "_mlm" if mask_text else ""
# query text
query_ids = batch[f"query_ids"]
query_masks = batch[f"query_masks"]
query_embeds = self.text_transformer.embeddings(input_ids=query_ids)
device = query_embeds.device
input_shape = query_masks.size()
extend_query_masks = self.text_transformer.get_extended_attention_mask(query_masks, input_shape, device)
for layer in self.text_transformer.encoder.layer:
query_embeds = layer(query_embeds, extend_query_masks)[0]
query_embeds = self.cross_modal_text_transform(query_embeds)
query_cls = self.cross_modal_text_pooler(query_embeds)
# doc text
text_ids = batch[f"text_ids{do_mlm}"]
# text_labels = batch[f"text_labels{do_mlm}"]
text_masks = batch[f"text_masks"]
text_embeds = self.text_transformer.embeddings(input_ids=text_ids)
device = text_embeds.device
input_shape = text_masks.size()
extend_text_masks = self.text_transformer.get_extended_attention_mask(text_masks, input_shape, device)
for layer in self.text_transformer.encoder.layer:
text_embeds = layer(text_embeds, extend_text_masks)[0]
text_embeds = self.cross_modal_text_transform(text_embeds)
img = batch["image"]
image_embeds = self.vit_model(img)
image_embeds = self.cross_modal_image_transform(image_embeds)
image_masks = torch.ones((image_embeds.size(0), image_embeds.size(1)), dtype=torch.long, device=device)
extend_image_masks = self.text_transformer.get_extended_attention_mask(image_masks, image_masks.size(), device)
text_embeds, image_embeds = (
text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks)),
image_embeds
+ self.token_type_embeddings(
torch.full_like(image_masks, image_token_type_idx)
),
)
x, y = text_embeds, image_embeds
for text_layer, image_layer in zip(self.cross_modal_text_layers, self.cross_modal_image_layers):
x1 = text_layer(x, y, extend_text_masks, extend_image_masks)
y1 = image_layer(y, x, extend_image_masks, extend_text_masks)
x, y = x1[0], y1[0]
text_feats, image_feats = x, y
cls_feats_text = self.cross_modal_text_pooler(x)
if self.is_clip:
cls_feats_image = self.cross_modal_image_pooler(y)
else:
avg_image_feats = self.avgpool(image_feats.transpose(1, 2)).view(image_feats.size(0), 1, -1)
cls_feats_image = self.cross_modal_image_pooler(avg_image_feats)
cls_feats = torch.cat([cls_feats_text, cls_feats_image], dim=-1)
ret = {
"query_embeds": query_embeds,
"query_atts": query_masks,
"query_cls":query_cls,
"text_feats": text_feats,
"text_masks": text_masks,
"text_cls":cls_feats_text,
"multi_embeds": torch.cat([image_feats,text_feats],dim=1),
"multi_atts":torch.cat([image_masks,text_masks],dim=-1),
"cls_feats_text": cls_feats_text,
"image_feats": image_feats,
"image_masks": image_masks,
"cls_feats_image": cls_feats_image,
"img_cls":cls_feats_image,
"cls_feats": cls_feats,
# "text_labels": text_labels,
# "text_ids": text_ids,
}
return ret
@torch.no_grad()
def output_multi( self,
batch,
mask_text=False,
mask_image=False,
image_token_type_idx=1,
img=None,):
img = batch["image"]
do_mlm = "_mlm" if mask_text else ""
# doc text
text_ids = batch[f"text_ids{do_mlm}"]
text_masks = batch[f"text_masks"]
text_embeds = self.text_transformer.embeddings(input_ids=text_ids)
device = text_embeds.device
input_shape = text_masks.size()
extend_text_masks = self.text_transformer.get_extended_attention_mask(text_masks, input_shape, device)
for layer in self.text_transformer.encoder.layer:
text_embeds = layer(text_embeds, extend_text_masks)[0]
text_embeds = self.cross_modal_text_transform(text_embeds)
img = batch["image"]
image_embeds = self.vit_model(img)
image_embeds = self.cross_modal_image_transform(image_embeds)
image_masks = torch.ones((image_embeds.size(0), image_embeds.size(1)), dtype=torch.long, device=device)
extend_image_masks = self.text_transformer.get_extended_attention_mask(image_masks, image_masks.size(), device)
text_embeds, image_embeds = (
text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks)),
image_embeds
+ self.token_type_embeddings(
torch.full_like(image_masks, image_token_type_idx)
),
)
x, y = text_embeds, image_embeds
for text_layer, image_layer in zip(self.cross_modal_text_layers, self.cross_modal_image_layers):
x1 = text_layer(x, y, extend_text_masks, extend_image_masks)
y1 = image_layer(y, x, extend_image_masks, extend_text_masks)
x, y = x1[0], y1[0]
text_feats, image_feats = x, y
cls_feats_text = self.cross_modal_text_pooler(x)
if self.is_clip:
cls_feats_image = self.cross_modal_image_pooler(y)
else:
avg_image_feats = self.avgpool(image_feats.transpose(1, 2)).view(image_feats.size(0), 1, -1)
cls_feats_image = self.cross_modal_image_pooler(avg_image_feats)
cls_feats = torch.cat([cls_feats_text, cls_feats_image], dim=-1)
ret = {
"text_feats": text_feats,
"text_masks": text_masks,
"text_cls":cls_feats_text,
"cls_feats_text": cls_feats_text,
"image_feats": image_feats,
"image_masks": image_masks,
"cls_feats_image": cls_feats_image,
"cls_feats": cls_feats,
"img_cls":cls_feats_image,
"multi_embeds": torch.cat([image_feats,text_feats],dim=1),
"multi_atts":torch.cat([image_masks,text_masks],dim=-1),
}
return ret
def forward(self, batch):
ret = dict()
if len(self.current_tasks) == 0:
ret.update(self.infer(batch))
return ret
# Masked Language Modeling
if "mlm" in self.current_tasks:
ret.update(objectives.compute_mlm(self, batch))
# Image Text Matching
if "itm" in self.current_tasks:
ret.update(objectives.compute_itm(self, batch))
# Visual Question Answering
if "vqa" in self.current_tasks:
ret.update(objectives.compute_vqa(self, batch))
# Natural Language for Visual Reasoning 2
if "nlvr2" in self.current_tasks:
ret.update(objectives.compute_nlvr2(self, batch))
# SNLI Visual Entailment
if "snli" in self.current_tasks:
ret.update(objectives.compute_snli(self, batch))
# Image Retrieval and Text Retrieval
if "irtr" in self.current_tasks:
ret.update(objectives.compute_irtr(self, batch))
return ret
def training_step(self, batch, batch_idx):
# meter_utils.set_task(self)
output = self(batch)
total_loss = sum([v for k, v in output.items() if "loss" in k])
return total_loss
def training_epoch_end(self, outs):
# meter_utils.epoch_wrapup(self)
pass
def validation_step(self, batch, batch_idx):
# meter_utils.set_task(self)
output = self(batch)
def validation_epoch_end(self, outs):
pass
# meter_utils.epoch_wrapup(self)
def test_step(self, batch, batch_idx):
        # meter_utils.set_task(self)
output = self(batch)
ret = dict()
if self.hparams.config["loss_names"]["vqa"] > 0:
ret.update(objectives.vqa_test_step(self, batch, output))
return ret
def test_epoch_end(self, outs):
model_name = self.hparams.config["load_path"].split("/")[-1][:-5]
if self.hparams.config["loss_names"]["vqa"] > 0:
objectives.vqa_test_wrapup(outs, model_name)
# meter_utils.epoch_wrapup(self)
def configure_optimizers(self):
pass
# return meter_utils.set_schedule(self)
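# Illustrative sketch (not called anywhere): the fusion loop inside `infer`
# above runs paired cross-attention layers in lockstep -- text tokens attend to
# image tokens while image tokens attend to text tokens, and both streams are
# updated before the next depth. The standalone mirror below takes the layer
# stacks and extended masks as arguments (all hypothetical inputs).
def _example_coattention_fusion(text_embeds, image_embeds,
                                text_layers, image_layers,
                                extend_text_masks, extend_image_masks):
    x, y = text_embeds, image_embeds
    for text_layer, image_layer in zip(text_layers, image_layers):
        x1 = text_layer(x, y, extend_text_masks, extend_image_masks)    # text attends to image
        y1 = image_layer(y, x, extend_image_masks, extend_text_masks)   # image attends to text
        x, y = x1[0], y1[0]
    return x, y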
| 15,962 | 40.141753 | 134 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/dist_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank() -> int:
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def get_local_rank() -> int:
"""
Returns:
The rank of the current process within the local (per-machine) process group.
"""
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
assert _LOCAL_PROCESS_GROUP is not None
return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
"""
Returns:
The size of the per-machine process group,
i.e. the number of processes per machine.
"""
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
"""
Return a process group based on gloo backend, containing all the ranks
The result is cached.
"""
if dist.get_backend() == "nccl":
return dist.new_group(backend="gloo")
else:
return dist.group.WORLD
def _serialize_to_tensor(data, group):
backend = dist.get_backend(group)
assert backend in ["gloo", "nccl"]
device = torch.device("cpu" if backend == "gloo" else "cuda")
buffer = pickle.dumps(data)
if len(buffer) > 1024 ** 3:
logger = logging.getLogger(__name__)
logger.warning(
"Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
get_rank(), len(buffer) / (1024 ** 3), device
)
)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device=device)
return tensor
def _pad_to_largest_tensor(tensor, group):
"""
Returns:
list[int]: size of the tensor, on each rank
Tensor: padded tensor that has the max size
"""
world_size = dist.get_world_size(group=group)
assert (
world_size >= 1
), "comm.gather/all_gather must be called from ranks within the given group!"
local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
size_list = [
torch.zeros([1], dtype=torch.int64, device=tensor.device)
for _ in range(world_size)
]
dist.all_gather(size_list, local_size, group=group)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
if local_size != max_size:
padding = torch.zeros(
(max_size - local_size,), dtype=torch.uint8, device=tensor.device
)
tensor = torch.cat((tensor, padding), dim=0)
return size_list, tensor
def all_gather(data, group=None):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: list of data gathered from each rank
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group) == 1:
return [data]
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
max_size = max(size_list)
# receiving Tensor from all ranks
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
for _ in size_list
]
dist.all_gather(tensor_list, tensor, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
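# Illustrative usage sketch (not called anywhere): `all_gather` pickles an
# arbitrary Python object, pads the byte tensors to a common length, and
# returns the de-pickled object from every rank. With a single process it just
# wraps the input in a list, which is what this hypothetical example relies on.
def _example_all_gather_single_process():
    data = {"rank": 0, "scores": [0.1, 0.2]}
    gathered = all_gather(data)           # world_size == 1 -> [data]
    assert gathered == [data]
    return gathered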
def gather(data, dst=0, group=None):
"""
Run gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
dst (int): destination rank
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: on dst, a list of data gathered from each rank. Otherwise,
an empty list.
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group=group) == 1:
return [data]
rank = dist.get_rank(group=group)
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
# receiving Tensor from all ranks
if rank == dst:
max_size = max(size_list)
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
for _ in size_list
]
dist.gather(tensor, tensor_list, dst=dst, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
else:
dist.gather(tensor, [], dst=dst, group=group)
return []
def shared_random_seed():
"""
Returns:
int: a random number that is the same across all workers.
If workers need a shared RNG, they can use this shared seed to
create one.
All workers must call this function, otherwise it will deadlock.
"""
ints = np.random.randint(2 ** 31)
all_ints = all_gather(ints)
return all_ints[0]
def reduce_dict(input_dict, average=True):
"""
Reduce the values in the dictionary from all processes so that process with rank
0 has the reduced results.
Args:
input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
average (bool): whether to do average or sum
Returns:
a dict with the same keys as input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
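# Illustrative usage sketch (not called anywhere): `reduce_dict` lets every
# worker pass the same keys while only rank 0 ends up with the (optionally
# averaged) reduced values. With a single process it returns the dict
# unchanged, which is what this hypothetical example relies on.
def _example_reduce_dict_single_process():
    losses = {"loss_cls": torch.tensor(0.5), "loss_box": torch.tensor(0.2)}
    return reduce_dict(losses)            # world_size < 2 -> returned unchanged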
| 7,814 | 27.837638 | 100 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/objectives.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from .dist_utils import all_gather
def compute_mlm(pl_module, batch):
infer = pl_module.infer(batch, mask_text=True, mask_image=False)
mlm_logits = pl_module.mlm_score(infer["text_feats"])
mlm_labels = infer["text_labels"]
mlm_loss = F.cross_entropy(
mlm_logits.view(-1, pl_module.hparams.config["vocab_size"]),
mlm_labels.view(-1),
ignore_index=-100,
)
ret = {
"mlm_loss": mlm_loss,
"mlm_logits": mlm_logits,
"mlm_labels": mlm_labels,
"mlm_ids": infer["text_ids"],
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_mlm_loss")(ret["mlm_loss"])
acc = getattr(pl_module, f"{phase}_mlm_accuracy")(
ret["mlm_logits"], ret["mlm_labels"]
)
pl_module.log(f"mlm/{phase}/loss", loss)
pl_module.log(f"mlm/{phase}/accuracy", acc)
return ret
def compute_itm(pl_module, batch):
pos_len = len(batch["text"]) // 2
neg_len = len(batch["text"]) - pos_len
itm_labels = torch.cat([torch.ones(pos_len), torch.zeros(neg_len)]).to(
pl_module.device
)
itm_labels = itm_labels[torch.randperm(itm_labels.size(0))]
itm_images = [
torch.stack(
[
ti if itm_labels[i] == 1 else fi
for i, (ti, fi) in enumerate(zip(bti, bfi))
]
)
for bti, bfi in zip(batch["image"], batch["false_image_0"])
]
batch = {k: v for k, v in batch.items()}
batch["image"] = itm_images
infer = pl_module.infer(batch, mask_text=False, mask_image=False)
itm_logits = pl_module.itm_score(infer["cls_feats"])
itm_loss = F.cross_entropy(itm_logits, itm_labels.long())
ret = {
"itm_loss": itm_loss,
"itm_logits": itm_logits,
"itm_labels": itm_labels,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_itm_loss")(ret["itm_loss"])
acc = getattr(pl_module, f"{phase}_itm_accuracy")(
ret["itm_logits"], ret["itm_labels"]
)
pl_module.log(f"itm/{phase}/loss", loss)
pl_module.log(f"itm/{phase}/accuracy", acc)
return ret
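# Illustrative sketch (not called anywhere): `compute_itm` above builds its
# binary targets by keeping the true image for a random half of the batch
# (label 1) and swapping in a pre-drawn false image for the rest (label 0).
# The label construction in isolation, with a hypothetical batch size:
def _example_itm_labels(batch_size=8):
    pos_len = batch_size // 2
    neg_len = batch_size - pos_len
    itm_labels = torch.cat([torch.ones(pos_len), torch.zeros(neg_len)])
    return itm_labels[torch.randperm(itm_labels.size(0))]    # shuffled 0/1 targets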
def compute_snli(pl_module, batch):
infer = pl_module.infer(
batch, mask_text=False, mask_image=False,
)
snli_logits = pl_module.snli_classifier(infer["cls_feats"])
snli_labels = batch["labels"]
snli_labels = torch.tensor(snli_labels).to(pl_module.device).long()
snli_loss = F.cross_entropy(snli_logits, snli_labels.view(-1))
ret = {
"snli_loss": snli_loss,
"snli_logits": snli_logits,
"snli_labels": snli_labels,
}
phase = "train" if pl_module.training else "val"
if phase == "train":
loss = getattr(pl_module, f"{phase}_snli_loss")(ret["snli_loss"])
acc = getattr(pl_module, f"{phase}_snli_accuracy")(
ret["snli_logits"], ret["snli_labels"]
)
pl_module.log(f"snli/{phase}/loss", loss)
pl_module.log(f"snli/{phase}/accuracy", acc)
else:
dev_batches = [i for i, n in enumerate(batch["table_name"]) if "dev" in n]
test_batches = [i for i, n in enumerate(batch["table_name"]) if "test" in n]
if dev_batches:
dev_loss = getattr(pl_module, f"dev_snli_loss")(
F.cross_entropy(
ret["snli_logits"][dev_batches], ret["snli_labels"][dev_batches]
)
)
dev_acc = getattr(pl_module, f"dev_snli_accuracy")(
ret["snli_logits"][dev_batches], ret["snli_labels"][dev_batches]
)
pl_module.log(f"snli/dev/loss", dev_loss)
pl_module.log(f"snli/dev/accuracy", dev_acc)
if test_batches:
test_loss = getattr(pl_module, f"test_snli_loss")(
F.cross_entropy(
ret["snli_logits"][test_batches], ret["snli_labels"][test_batches]
)
)
test_acc = getattr(pl_module, f"test_snli_accuracy")(
ret["snli_logits"][test_batches], ret["snli_labels"][test_batches]
)
pl_module.log(f"snli/test/loss", test_loss)
pl_module.log(f"snli/test/accuracy", test_acc)
return ret
def compute_vqa(pl_module, batch):
infer = pl_module.infer(batch, mask_text=False, mask_image=False)
vqa_logits = pl_module.vqa_classifier(infer["cls_feats"])
vqa_targets = torch.zeros(
len(vqa_logits), pl_module.hparams.config["vqav2_label_size"]
).to(pl_module.device)
vqa_labels = batch["vqa_labels"]
vqa_scores = batch["vqa_scores"]
for i, (_label, _score) in enumerate(zip(vqa_labels, vqa_scores)):
for l, s in zip(_label, _score):
vqa_targets[i, l] = s
vqa_loss = (
F.binary_cross_entropy_with_logits(vqa_logits, vqa_targets)
* vqa_targets.shape[1]
) # https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19
ret = {
"vqa_loss": vqa_loss,
"vqa_logits": vqa_logits,
"vqa_targets": vqa_targets,
"vqa_labels": vqa_labels,
"vqa_scores": vqa_scores,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_vqa_loss")(ret["vqa_loss"])
score = getattr(pl_module, f"{phase}_vqa_score")(
ret["vqa_logits"], ret["vqa_targets"]
)
pl_module.log(f"vqa/{phase}/loss", loss)
pl_module.log(f"vqa/{phase}/score", score)
return ret
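# Illustrative sketch (not called anywhere): VQA v2 supervision is soft -- each
# question carries several candidate answers with annotator scores in [0, 1],
# and `compute_vqa` above scatters them into a multi-hot target row before the
# BCE-with-logits loss. Target construction alone, with hypothetical ids:
def _example_vqa_targets(label_size=10):
    vqa_labels = [[3, 7]]                  # answer ids for one example
    vqa_scores = [[1.0, 0.3]]              # matching annotator scores
    vqa_targets = torch.zeros(len(vqa_labels), label_size)
    for i, (_label, _score) in enumerate(zip(vqa_labels, vqa_scores)):
        for l, s in zip(_label, _score):
            vqa_targets[i, l] = s
    return vqa_targets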
def compute_nlvr2(pl_module, batch):
infer1 = pl_module.infer(
batch, mask_text=False, mask_image=False, image_token_type_idx=1
)
infer2 = pl_module.infer(
batch, mask_text=False, mask_image=False, image_token_type_idx=2
)
cls_feats = torch.cat([infer1["cls_feats"], infer2["cls_feats"]], dim=-1)
nlvr2_logits = pl_module.nlvr2_classifier(cls_feats)
nlvr2_labels = batch["answers"]
nlvr2_labels = torch.tensor(nlvr2_labels).to(pl_module.device).long()
nlvr2_loss = F.cross_entropy(nlvr2_logits, nlvr2_labels.view(-1))
ret = {
"nlvr2_loss": nlvr2_loss,
"nlvr2_logits": nlvr2_logits,
"nlvr2_labels": nlvr2_labels,
}
phase = "train" if pl_module.training else "val"
if phase == "train":
loss = getattr(pl_module, f"{phase}_nlvr2_loss")(ret["nlvr2_loss"])
acc = getattr(pl_module, f"{phase}_nlvr2_accuracy")(
ret["nlvr2_logits"], ret["nlvr2_labels"]
)
pl_module.log(f"nlvr2/{phase}/loss", loss)
pl_module.log(f"nlvr2/{phase}/accuracy", acc)
else:
dev_batches = [i for i, n in enumerate(batch["table_name"]) if "dev" in n]
test_batches = [i for i, n in enumerate(batch["table_name"]) if "test" in n]
if dev_batches:
dev_loss = getattr(pl_module, f"dev_nlvr2_loss")(
F.cross_entropy(
ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
)
)
dev_acc = getattr(pl_module, f"dev_nlvr2_accuracy")(
ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
)
pl_module.log(f"nlvr2/dev/loss", dev_loss)
pl_module.log(f"nlvr2/dev/accuracy", dev_acc)
if test_batches:
test_loss = getattr(pl_module, f"test_nlvr2_loss")(
F.cross_entropy(
ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
)
)
test_acc = getattr(pl_module, f"test_nlvr2_accuracy")(
ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
)
pl_module.log(f"nlvr2/test/loss", test_loss)
pl_module.log(f"nlvr2/test/accuracy", test_acc)
return ret
def compute_irtr(pl_module, batch):
is_training_phase = pl_module.training
_bs, _c, _h, _w = batch["image"][0].shape
false_len = pl_module.hparams.config["draw_false_text"]
text_ids = torch.stack(
[batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1
)
text_masks = torch.stack(
[batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1
)
text_labels = torch.stack(
[batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1
)
text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1)
text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1)
text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1)
images = batch["image"][0].unsqueeze(1).expand(_bs, false_len + 1, _c, _h, _w)
infer = pl_module.infer(
{
"image": [rearrange(images, "bs fs c h w -> (bs fs) c h w")],
"text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
"text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
"text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
}
)
score = pl_module.rank_output(infer["cls_feats"])[:, 0]
score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
answer = torch.zeros(_bs).to(score).long()
irtr_loss = F.cross_entropy(score, answer)
ret = {
"irtr_loss": irtr_loss,
}
phase = "train" if pl_module.training else "val"
irtr_loss = getattr(pl_module, f"{phase}_irtr_loss")(ret["irtr_loss"])
pl_module.log(f"irtr/{phase}/irtr_loss", irtr_loss)
return ret
@torch.no_grad()
def compute_irtr_recall(pl_module):
text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset()
text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
text_loader = torch.utils.data.DataLoader(
text_dset,
batch_size=64,
num_workers=pl_module.hparams.config["num_workers"],
pin_memory=True,
collate_fn=functools.partial(
text_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
)
image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
image_only=True
)
image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
dist_sampler = DistributedSampler(image_dset, shuffle=False)
image_loader = torch.utils.data.DataLoader(
image_dset,
batch_size=1,
num_workers=pl_module.hparams.config["num_workers"],
sampler=dist_sampler,
pin_memory=True,
collate_fn=functools.partial(
image_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
)
#TODO: speed up the process by caching text/image features
text_preload = list()
for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"):
text_preload.append(
{
"text_ids": _b["text_ids"].to(pl_module.device),
"text_masks": _b["text_masks"].to(pl_module.device),
"text_labels": _b["text_labels"].to(pl_module.device),
"img_index": _b["img_index"],
}
)
tiids = list()
for pre in text_preload:
tiids += pre["img_index"]
tiids = torch.tensor(tiids)
image_preload = list()
for _b in tqdm.tqdm(image_loader, desc="image prefetch loop"):
image_preload.append((_b['image'][0], _b["img_index"][0]))
rank_scores = list()
rank_iids = list()
for img_batch in tqdm.tqdm(image_preload, desc="rank loop"):
_im, _iid = img_batch
img_batch_score = list()
for txt_batch in text_preload:
fblen = len(txt_batch["text_ids"])
im = _im.repeat(fblen, 1, 1, 1).to(device=txt_batch['text_ids'].device)
with torch.cuda.amp.autocast():
score = pl_module.rank_output(
pl_module.infer(
{
"text_ids": txt_batch["text_ids"],
"text_masks": txt_batch["text_masks"],
"text_labels": txt_batch["text_labels"],
},
img=im,
)["cls_feats"]
)[:, 0]
img_batch_score.append(score)
img_batch_score = torch.cat(img_batch_score)
rank_scores.append(img_batch_score.cpu().tolist())
rank_iids.append(_iid)
torch.distributed.barrier()
gather_rank_scores = all_gather(rank_scores)
gather_rank_iids = all_gather(rank_iids)
iids = torch.tensor(gather_rank_iids)
iids = iids.view(-1)
scores = torch.tensor(gather_rank_scores)
scores = scores.view(len(iids), -1)
topk10 = scores.topk(10, dim=1)
topk5 = scores.topk(5, dim=1)
topk1 = scores.topk(1, dim=1)
topk10_iids = tiids[topk10.indices]
topk5_iids = tiids[topk5.indices]
topk1_iids = tiids[topk1.indices]
tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
topk10 = scores.topk(10, dim=0)
topk5 = scores.topk(5, dim=0)
topk1 = scores.topk(1, dim=0)
topk10_iids = iids[topk10.indices]
topk5_iids = iids[topk5.indices]
topk1_iids = iids[topk1.indices]
ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()
return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
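# Illustrative sketch (not called anywhere): the recall computation above boils
# down to a score matrix with one row per image and one column per text.
# Text-retrieval R@k checks whether the ground-truth text id lands in the
# row-wise top-k; image retrieval does the same column-wise. Toy R@1 example
# with a hypothetical 3x3 matrix whose ground truth is the diagonal:
def _example_text_retrieval_recall_at_1():
    scores = torch.tensor([[0.9, 0.1, 0.0],
                           [0.2, 0.8, 0.1],
                           [0.3, 0.4, 0.7]])
    iids = torch.arange(3)                                    # image id per row
    tiids = torch.arange(3)                                    # text id per column
    topk1_iids = tiids[scores.topk(1, dim=1).indices]          # best text per image
    tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
    return tr_r1                                               # 1.0 for this toy case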
def init_weights(module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def vqa_test_step(pl_module, batch, output):
try:
id2answer = (
pl_module.trainer.datamodule.dm_dicts["vqa_trainval"].id2answer
if "vqa_trainval" in pl_module.trainer.datamodule.dm_dicts
else pl_module.trainer.datamodule.dm_dicts["vqa"].id2answer
)
except:
id2answer = (
pl_module.trainer.datamodule.dm_dicts["gqa_test"].id2answer
if "gqa_test" in pl_module.trainer.datamodule.dm_dicts
else pl_module.trainer.datamodule.dm_dicts["gqa"].id2answer
)
vqa_logits = output["vqa_logits"]
vqa_preds = vqa_logits.argmax(dim=-1)
vqa_preds = [id2answer[pred.item()] for pred in vqa_preds]
questions = batch["text"]
qids = batch["qid"]
return {"qids": qids, "preds": vqa_preds, "gqa": True}
vqa_logits = output["vqa_logits"]
vqa_preds = vqa_logits.argmax(dim=-1)
vqa_preds = [id2answer[pred.item()] for pred in vqa_preds]
questions = batch["text"]
qids = batch["qid"]
return {"qids": qids, "preds": vqa_preds, "gqa": False}
def arc_test_step(pl_module, batch, output):
return output
def vqa_test_wrapup(outs, model_name):
rank = torch.distributed.get_rank()
qids, preds = list(), list()
gqa = False
for out in outs:
qids += out["qids"]
preds += out["preds"]
gqa = out['gqa']
rets = list()
for qid, pred in zip(qids, preds):
if gqa:
rets.append({"questionId": qid, "prediction": pred})
else:
rets.append({"question_id": qid, "answer": pred})
with open(f"vqa_submit_{rank}.json", "w") as fp:
json.dump(rets, fp, indent=4)
torch.distributed.barrier()
if rank == 0:
jsons = list()
paths = list(glob.glob("vqa_submit_*.json"))
for path in paths:
with open(path, "r") as fp:
jsons += json.load(fp)
os.makedirs("result", exist_ok=True)
with open(f"result/vqa_submit_{model_name}.json", "w") as fp:
json.dump(jsons, fp, indent=4)
torch.distributed.barrier()
os.remove(f"vqa_submit_{rank}.json")
def arc_test_wrapup(outs, caplen, model_name):
rank = torch.distributed.get_rank()
iids, captions = list(), list()
for out in outs:
iids += out["iid"]
captions += out["captions"]
rets = list()
for iid, caption in zip(iids, captions):
rets.append({"image_id": iid, "caption": caption})
with open(f"coco_cap_len{caplen}_{rank}.json", "w") as fp:
json.dump(rets, fp, indent=4)
torch.distributed.barrier()
if rank == 0:
jsons = list()
paths = list(glob.glob(f"coco_cap_len{caplen}_*.json"))
for path in paths:
with open(path, "r") as fp:
jsons += json.load(fp)
os.makedirs("result/arc", exist_ok=True)
jsons = sorted(jsons, key=lambda x: x["image_id"])
with open(f"result/arc/coco_cap_{model_name}_len{caplen}.json", "w") as fp:
json.dump(jsons, fp, indent=4)
torch.distributed.barrier()
os.remove(f"coco_cap_len{caplen}_{rank}.json")
| 17,360 | 33.514911 | 88 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/swin_helpers.py | """ Model creation / weight loading / state_dict helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import os
import math
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Callable, Optional, Tuple
import torch
import torch.nn as nn
from timm.models.features import FeatureListNet, FeatureDictNet, FeatureHookNet
from timm.models.hub import has_hf_hub, download_cached_file, load_state_dict_from_hf, load_state_dict_from_url
from timm.models.layers import Conv2dSame, Linear
def swin_adapt_position_encoding(model, before=384, patch_size=32, after=384,
suffix='relative_position_bias_table'):
if after == before:
return model
grid_before = int(before/32)
grid_after = int(after/32) #after // patch_size
before = (2*grid_before-1)
after = (2*grid_after-1)
keys = [k for k in model if k.endswith(suffix)]
assert len(keys) > 0
for key in keys:
pos_embed = model[key]
pos_embed = pos_embed.transpose(0, 1).view(-1, before, before)
pos_embed = torch.nn.functional.interpolate(pos_embed.unsqueeze(0), size=(after, after), mode='bicubic')
pos_embed = pos_embed.squeeze(0).permute((1, 2, 0))
pos_embed = pos_embed.contiguous().view(-1, pos_embed.size(-1))
model[key] = pos_embed
keys = [k for k in model if k.endswith('attn_mask')]
for key in keys:
model.pop(key)
keys = [k for k in model if k.endswith('relative_position_index')]
for key in keys:
model.pop(key)
return model
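# Illustrative sketch (not called anywhere): Swin keeps a relative-position
# bias table of shape ((2*grid - 1)**2, num_heads); a different input
# resolution means a different grid, so the table is reshaped to
# (num_heads, side, side), bicubically resized, and flattened back -- the same
# per-key transform as in the loop above. Standalone version with the grid
# sizes passed in (hypothetical arguments):
def _example_resize_relative_position_bias(pos_embed, grid_before, grid_after):
    side_before = 2 * grid_before - 1
    side_after = 2 * grid_after - 1
    num_heads = pos_embed.size(1)
    table = pos_embed.transpose(0, 1).view(num_heads, side_before, side_before)
    table = torch.nn.functional.interpolate(
        table.unsqueeze(0), size=(side_after, side_after), mode='bicubic')
    table = table.squeeze(0).permute((1, 2, 0))
    return table.contiguous().view(-1, num_heads)   # ((2*grid_after-1)**2, heads)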
_logger = logging.getLogger(__name__)
def load_state_dict(checkpoint_path, use_ema=False):
if checkpoint_path and os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
state_dict_key = 'state_dict'
if isinstance(checkpoint, dict):
if use_ema and 'state_dict_ema' in checkpoint:
state_dict_key = 'state_dict_ema'
if state_dict_key and state_dict_key in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint[state_dict_key].items():
# strip `module.` prefix
name = k[7:] if k.startswith('module') else k
new_state_dict[name] = v
state_dict = new_state_dict
else:
state_dict = checkpoint
_logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
return state_dict
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True):
if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'):
# numpy checkpoint, try to load via model specific load_pretrained fn
if hasattr(model, 'load_pretrained'):
model.load_pretrained(checkpoint_path)
else:
raise NotImplementedError('Model cannot load numpy checkpoint')
return
state_dict = load_state_dict(checkpoint_path, use_ema)
model.load_state_dict(state_dict, strict=strict)
def resume_checkpoint(model, checkpoint_path, optimizer=None, loss_scaler=None, log_info=True):
resume_epoch = None
if os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
if log_info:
_logger.info('Restoring model state from checkpoint...')
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
name = k[7:] if k.startswith('module') else k
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
if optimizer is not None and 'optimizer' in checkpoint:
if log_info:
_logger.info('Restoring optimizer state from checkpoint...')
optimizer.load_state_dict(checkpoint['optimizer'])
if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint:
if log_info:
_logger.info('Restoring AMP loss scaler state from checkpoint...')
loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key])
if 'epoch' in checkpoint:
resume_epoch = checkpoint['epoch']
if 'version' in checkpoint and checkpoint['version'] > 1:
resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save
if log_info:
_logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
else:
model.load_state_dict(checkpoint)
if log_info:
_logger.info("Loaded checkpoint '{}'".format(checkpoint_path))
return resume_epoch
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_custom_pretrained(model, default_cfg=None, load_fn=None, progress=False, check_hash=False):
r"""Loads a custom (read non .pth) weight file
Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls
a passed in custom load fun, or the `load_pretrained` model member fn.
If the object is already present in `model_dir`, it's deserialized and returned.
The default value of `model_dir` is ``<hub_dir>/checkpoints`` where
`hub_dir` is the directory returned by :func:`~torch.hub.get_dir`.
Args:
model: The instantiated model to load weights into
default_cfg (dict): Default pretrained model cfg
        load_fn: An external stand-alone fn that loads weights into the provided model, otherwise a fn named
            'load_pretrained' on the model will be called if it exists
progress (bool, optional): whether or not to display a progress bar to stderr. Default: False
check_hash(bool, optional): If True, the filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file. Default: False
"""
default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {}
pretrained_url = default_cfg.get('url', None)
if not pretrained_url:
_logger.warning("No pretrained weights exist for this model. Using random initialization.")
return
cached_file = download_cached_file(default_cfg['url'], check_hash=check_hash, progress=progress)
if load_fn is not None:
load_fn(model, cached_file)
elif hasattr(model, 'load_pretrained'):
model.load_pretrained(cached_file)
else:
_logger.warning("Valid function to load pretrained weights is not available, using random initialization.")
def adapt_input_conv(in_chans, conv_weight):
conv_type = conv_weight.dtype
conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU
O, I, J, K = conv_weight.shape
if in_chans == 1:
if I > 3:
assert conv_weight.shape[1] % 3 == 0
# For models with space2depth stems
conv_weight = conv_weight.reshape(O, I // 3, 3, J, K)
conv_weight = conv_weight.sum(dim=2, keepdim=False)
else:
conv_weight = conv_weight.sum(dim=1, keepdim=True)
elif in_chans != 3:
if I != 3:
raise NotImplementedError('Weight format not supported by conversion.')
else:
# NOTE this strategy should be better than random init, but there could be other combinations of
# the original RGB input layer weights that'd work better for specific cases.
repeat = int(math.ceil(in_chans / 3))
conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
conv_weight *= (3 / float(in_chans))
conv_weight = conv_weight.to(conv_type)
return conv_weight
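# Illustrative sketch (not called anywhere): `adapt_input_conv` reuses an
# RGB-pretrained stem conv for another channel count -- grayscale input sums
# the three kernel slices, while an N-channel input tiles the RGB kernel and
# rescales by 3/N so activation magnitudes stay roughly comparable. Shape and
# scale walk-through with hypothetical sizes:
def _example_adapt_first_conv(in_chans=5):
    rgb_weight = torch.randn(64, 3, 7, 7)                      # (O, I=3, kH, kW)
    gray_weight = rgb_weight.sum(dim=1, keepdim=True)           # (64, 1, 7, 7)
    repeat = int(math.ceil(in_chans / 3))
    multi_weight = rgb_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
    multi_weight = multi_weight * (3 / float(in_chans))         # (64, 5, 7, 7)
    return gray_weight.shape, multi_weight.shape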
def load_pretrained(model, img_size, default_cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True, progress=False, resolution_before=384):
""" Load pretrained checkpoint
Args:
model (nn.Module) : PyTorch model module
default_cfg (Optional[Dict]): default configuration for pretrained weights / target dataset
num_classes (int): num_classes for model
in_chans (int): in_chans for model
filter_fn (Optional[Callable]): state_dict filter fn for load (takes state_dict, model as args)
strict (bool): strict load of checkpoint
progress (bool): enable progress bar for weight download
"""
default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {}
pretrained_url = default_cfg.get('url', None)
hf_hub_id = default_cfg.get('hf_hub', None)
if not pretrained_url and not hf_hub_id:
_logger.warning("No pretrained weights exist for this model. Using random initialization.")
return
if hf_hub_id and has_hf_hub(necessary=not pretrained_url):
_logger.info(f'Loading pretrained weights from Hugging Face hub ({hf_hub_id})')
state_dict = load_state_dict_from_hf(hf_hub_id)
else:
_logger.info(f'Loading pretrained weights from url ({pretrained_url})')
state_dict = load_state_dict_from_url(pretrained_url, progress=progress, map_location='cpu')
swin_adapt_position_encoding(state_dict['model'], before=resolution_before, after=img_size)
if filter_fn is not None:
        # for backwards compat with filter fns that take one arg vs two, try one first, then two
try:
state_dict = filter_fn(state_dict)
except TypeError:
state_dict = filter_fn(state_dict, model)
input_convs = default_cfg.get('first_conv', None)
if input_convs is not None and in_chans != 3:
if isinstance(input_convs, str):
input_convs = (input_convs,)
for input_conv_name in input_convs:
weight_name = input_conv_name + '.weight'
try:
state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name])
_logger.info(
f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)')
except NotImplementedError as e:
del state_dict[weight_name]
strict = False
_logger.warning(
f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.')
classifiers = default_cfg.get('classifier', None)
label_offset = default_cfg.get('label_offset', 0)
if classifiers is not None:
if isinstance(classifiers, str):
classifiers = (classifiers,)
if num_classes != default_cfg['num_classes']:
for classifier_name in classifiers:
# completely discard fully connected if model num_classes doesn't match pretrained weights
del state_dict[classifier_name + '.weight']
del state_dict[classifier_name + '.bias']
strict = False
elif label_offset > 0:
for classifier_name in classifiers:
# special case for pretrained weights with an extra background class in pretrained weights
classifier_weight = state_dict[classifier_name + '.weight']
state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:]
classifier_bias = state_dict[classifier_name + '.bias']
state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:]
model.load_state_dict(state_dict, strict=strict)
def extract_layer(model, layer):
layer = layer.split('.')
module = model
if hasattr(model, 'module') and layer[0] != 'module':
module = model.module
if not hasattr(model, 'module') and layer[0] == 'module':
layer = layer[1:]
for l in layer:
if hasattr(module, l):
if not l.isdigit():
module = getattr(module, l)
else:
module = module[int(l)]
else:
return module
return module
def set_layer(model, layer, val):
layer = layer.split('.')
module = model
if hasattr(model, 'module') and layer[0] != 'module':
module = model.module
lst_index = 0
module2 = module
for l in layer:
if hasattr(module2, l):
if not l.isdigit():
module2 = getattr(module2, l)
else:
module2 = module2[int(l)]
lst_index += 1
lst_index -= 1
for l in layer[:lst_index]:
if not l.isdigit():
module = getattr(module, l)
else:
module = module[int(l)]
l = layer[lst_index]
setattr(module, l, val)
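# Illustrative sketch (added for exposition; the layer path below follows torchvision's
# ResNet naming and is only an assumption, nothing in this file depends on it).
# extract_layer / set_layer walk a dotted module path, treating digit components as
# indices into sequential containers:
def _example_swap_conv(model):
    old_conv = extract_layer(model, 'layer1.0.conv1')
    new_conv = nn.Conv2d(
        old_conv.in_channels, old_conv.out_channels, kernel_size=old_conv.kernel_size,
        stride=old_conv.stride, padding=old_conv.padding, bias=old_conv.bias is not None)
    set_layer(model, 'layer1.0.conv1', new_conv)
    return model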
def adapt_model_from_string(parent_module, model_string):
separator = '***'
state_dict = {}
lst_shape = model_string.split(separator)
for k in lst_shape:
k = k.split(':')
key = k[0]
shape = k[1][1:-1].split(',')
if shape[0] != '':
state_dict[key] = [int(i) for i in shape]
new_module = deepcopy(parent_module)
for n, m in parent_module.named_modules():
old_module = extract_layer(parent_module, n)
if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame):
if isinstance(old_module, Conv2dSame):
conv = Conv2dSame
else:
conv = nn.Conv2d
s = state_dict[n + '.weight']
in_channels = s[1]
out_channels = s[0]
g = 1
if old_module.groups > 1:
in_channels = out_channels
g = in_channels
new_conv = conv(
in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size,
bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation,
groups=g, stride=old_module.stride)
set_layer(new_module, n, new_conv)
if isinstance(old_module, nn.BatchNorm2d):
new_bn = nn.BatchNorm2d(
num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum,
affine=old_module.affine, track_running_stats=True)
set_layer(new_module, n, new_bn)
if isinstance(old_module, nn.Linear):
# FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer?
num_features = state_dict[n + '.weight'][1]
new_fc = Linear(
in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None)
set_layer(new_module, n, new_fc)
if hasattr(new_module, 'num_features'):
new_module.num_features = num_features
new_module.eval()
parent_module.eval()
return new_module
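# Added note (not in the original file): the model string parsed above is a '***'-separated
# list of "name:[shape]" entries, e.g.
#   "conv1.weight:[32, 3, 3, 3]***bn1.weight:[32]***fc.weight:[1000, 32]"
# Each named Conv2d / BatchNorm2d / Linear in the parent module is rebuilt with the reduced
# channel counts taken from those shapes (see adapt_model_from_file below).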
def adapt_model_from_file(parent_module, model_variant):
adapt_file = os.path.join(os.path.dirname(__file__), 'pruned', model_variant + '.txt')
with open(adapt_file, 'r') as f:
return adapt_model_from_string(parent_module, f.read().strip())
def default_cfg_for_features(default_cfg):
default_cfg = deepcopy(default_cfg)
# remove default pretrained cfg fields that don't have much relevance for feature backbone
to_remove = ('num_classes', 'crop_pct', 'classifier', 'global_pool') # add default final pool size?
for tr in to_remove:
default_cfg.pop(tr, None)
return default_cfg
def overlay_external_default_cfg(default_cfg, kwargs):
""" Overlay 'external_default_cfg' in kwargs on top of default_cfg arg.
"""
external_default_cfg = kwargs.pop('external_default_cfg', None)
if external_default_cfg:
default_cfg.pop('url', None) # url should come from external cfg
default_cfg.pop('hf_hub', None) # hf hub id should come from external cfg
default_cfg.update(external_default_cfg)
def set_default_kwargs(kwargs, names, default_cfg):
for n in names:
        # for legacy reasons, model __init__ args use img_size + in_chans as separate args while
        # default_cfg has one input_size=(C, H, W) entry
if n == 'img_size':
input_size = default_cfg.get('input_size', None)
if input_size is not None:
assert len(input_size) == 3
kwargs.setdefault(n, input_size[-2:])
elif n == 'in_chans':
input_size = default_cfg.get('input_size', None)
if input_size is not None:
assert len(input_size) == 3
kwargs.setdefault(n, input_size[0])
else:
default_val = default_cfg.get(n, None)
if default_val is not None:
kwargs.setdefault(n, default_cfg[n])
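# Added example (not in the original file): with default_cfg = {'input_size': (3, 224, 224),
# 'num_classes': 1000}, calling set_default_kwargs(kwargs, ('num_classes', 'img_size',
# 'in_chans'), default_cfg) leaves kwargs with num_classes=1000, img_size=(224, 224) and
# in_chans=3, unless the caller already supplied those keys.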
def filter_kwargs(kwargs, names):
if not kwargs or not names:
return
for n in names:
kwargs.pop(n, None)
def update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter):
""" Update the default_cfg and kwargs before passing to model
FIXME this sequence of overlay default_cfg, set default kwargs, filter kwargs
could/should be replaced by an improved configuration mechanism
Args:
default_cfg: input default_cfg (updated in-place)
kwargs: keyword args passed to model build fn (updated in-place)
kwargs_filter: keyword arg keys that must be removed before model __init__
"""
# Overlay default cfg values from `external_default_cfg` if it exists in kwargs
overlay_external_default_cfg(default_cfg, kwargs)
# Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs)
default_kwarg_names = ('num_classes', 'global_pool', 'in_chans')
if default_cfg.get('fixed_input_size', False):
# if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size
default_kwarg_names += ('img_size',)
set_default_kwargs(kwargs, names=default_kwarg_names, default_cfg=default_cfg)
# Filter keyword args for task specific model variants (some 'features only' models, etc.)
filter_kwargs(kwargs, names=kwargs_filter)
def swin_build_model_with_cfg(
model_cls: Callable,
variant: str,
pretrained: bool,
default_cfg: dict,
model_cfg: Optional[Any] = None,
feature_cfg: Optional[dict] = None,
pretrained_strict: bool = True,
pretrained_filter_fn: Optional[Callable] = None,
pretrained_custom_load: bool = False,
kwargs_filter: Optional[Tuple[str]] = None,
**kwargs):
""" Build model with specified default_cfg and optional model_cfg
This helper fn aids in the construction of a model including:
    * handling default_cfg and associated pretrained weight loading
* passing through optional model_cfg for models with config based arch spec
* features_only model adaptation
* pruning config / model adaptation
Args:
model_cls (nn.Module): model class
variant (str): model variant name
pretrained (bool): load pretrained weights
default_cfg (dict): model's default pretrained/task config
model_cfg (Optional[Dict]): model's architecture config
        feature_cfg (Optional[Dict]): feature extraction adapter config
pretrained_strict (bool): load pretrained weights strictly
pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights
        pretrained_custom_load (bool): use a custom load fn to load numpy or other non-PyTorch weights
kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model
**kwargs: model args passed through to model __init__
"""
pruned = kwargs.pop('pruned', False)
features = False
feature_cfg = feature_cfg or {}
default_cfg = deepcopy(default_cfg) if default_cfg else {}
update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter)
default_cfg.setdefault('architecture', variant)
# Setup for feature extraction wrapper done at end of this fn
if kwargs.pop('features_only', False):
features = True
feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))
if 'out_indices' in kwargs:
feature_cfg['out_indices'] = kwargs.pop('out_indices')
# Build the model
model = model_cls(**kwargs) if model_cfg is None else model_cls(cfg=model_cfg, **kwargs)
model.default_cfg = default_cfg
if pruned:
model = adapt_model_from_file(model, variant)
# For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats
num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))
if pretrained:
if pretrained_custom_load:
load_custom_pretrained(model)
else:
load_pretrained(
model,
num_classes=num_classes_pretrained,
in_chans=kwargs.get('in_chans', 3),
filter_fn=pretrained_filter_fn,
img_size=kwargs['img_size'],
strict=pretrained_strict,
resolution_before=kwargs['config']['resolution_before'])
# Wrap the model in a feature extraction module if enabled
if features:
feature_cls = FeatureListNet
if 'feature_cls' in feature_cfg:
feature_cls = feature_cfg.pop('feature_cls')
if isinstance(feature_cls, str):
feature_cls = feature_cls.lower()
if 'hook' in feature_cls:
feature_cls = FeatureHookNet
else:
assert False, f'Unknown feature class {feature_cls}'
model = feature_cls(model, **feature_cfg)
model.default_cfg = default_cfg_for_features(default_cfg) # add back default_cfg
return model
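# Hedged usage sketch (added; the names below are assumptions, not part of this file):
# a Swin entrypoint would typically call this helper roughly as
#   model = swin_build_model_with_cfg(
#       SwinTransformer, 'swin_base_patch4_window12_384', pretrained=True,
#       default_cfg=default_cfgs['swin_base_patch4_window12_384'],
#       img_size=384, config=config, **kwargs)
# Unlike the upstream timm helper, pretrained loading here requires 'img_size' and
# config['resolution_before'] in kwargs, since load_pretrained() above adapts the Swin
# position encodings to the target resolution.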
def model_parameters(model, exclude_head=False):
if exclude_head:
        # FIXME this is a bit of a quick and dirty hack to skip classifier head params based on ordering
return [p for p in model.parameters()][:-2]
else:
return model.parameters()
def named_apply(fn: Callable, module: nn.Module, name='', depth_first=True, include_root=False) -> nn.Module:
if not depth_first and include_root:
fn(module=module, name=name)
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
fn(module=module, name=name)
return module
def named_modules(module: nn.Module, name='', depth_first=True, include_root=False):
if not depth_first and include_root:
yield name, module
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
yield from named_modules(
module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
yield name, module
| 23,550 | 43.519849 | 153 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/heads.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.models.bert.modeling_bert import BertPredictionHeadTransform
class Pooler(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class ITMHead(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.fc = nn.Linear(hidden_size, 2)
def forward(self, x):
x = self.fc(x)
return x
class MLMHead(nn.Module):
def __init__(self, config, weight=None):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
if weight is not None:
self.decoder.weight = weight
def forward(self, x):
x = self.transform(x)
x = self.decoder(x) + self.bias
return x
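# Hedged usage sketch (added comment; shapes assume hidden_states of size (B, L, H)):
#   pooled = Pooler(H)(hidden_states)            # (B, H), tanh projection of the first ([CLS]) token
#   itm_logits = ITMHead(H)(pooled)              # (B, 2) image-text matched / mismatched scores
#   mlm_logits = MLMHead(config)(hidden_states)  # (B, L, vocab_size) masked-token logits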
| 1,257 | 27.590909 | 83 | py |
Mr.Right | Mr.Right-main/models/METER/meter/transforms/transform.py | from .utils import (
inception_normalize,
imagenet_normalize,
MinMaxResize,
)
from PIL import Image
from torchvision import transforms
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from .randaug import RandAugment
def pixelbert_transform(size=800):
longer = int((1333 / 800) * size)
return transforms.Compose(
[
MinMaxResize(shorter=size, longer=longer),
transforms.ToTensor(),
inception_normalize,
]
)
def pixelbert_transform_randaug(size=800):
longer = int((1333 / 800) * size)
trs = transforms.Compose(
[
MinMaxResize(shorter=size, longer=longer),
transforms.ToTensor(),
inception_normalize,
]
)
trs.transforms.insert(0, RandAugment(2, 9))
return trs
def imagenet_transform(size=800):
return transforms.Compose(
[
Resize(size, interpolation=Image.BICUBIC),
CenterCrop(size),
transforms.ToTensor(),
imagenet_normalize,
]
)
def imagenet_transform_randaug(size=800):
trs = transforms.Compose(
[
Resize(size, interpolation=Image.BICUBIC),
CenterCrop(size),
transforms.ToTensor(),
imagenet_normalize,
]
)
trs.transforms.insert(0, RandAugment(2, 9))
return trs
def vit_transform(size=800):
return transforms.Compose(
[
Resize(size, interpolation=Image.BICUBIC),
CenterCrop(size),
transforms.ToTensor(),
inception_normalize,
]
)
def vit_transform_randaug(size=800):
trs = transforms.Compose(
[
Resize(size, interpolation=Image.BICUBIC),
CenterCrop(size),
transforms.ToTensor(),
inception_normalize,
]
)
trs.transforms.insert(0, RandAugment(2, 9))
return trs
def clip_transform(size):
return Compose([
Resize(size, interpolation=Image.BICUBIC),
CenterCrop(size),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def clip_transform_randaug(size):
trs = Compose([
Resize(size, interpolation=Image.BICUBIC),
CenterCrop(size),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
trs.transforms.insert(0, lambda image: image.convert('RGBA'))
trs.transforms.insert(0, RandAugment(2, 9))
trs.transforms.insert(0, lambda image: image.convert('RGB'))
return trs
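# Added note (not in the original file): each transforms.insert(0, ...) above prepends, so the
# augmented CLIP pipeline actually runs convert('RGB') -> RandAugment -> convert('RGBA') ->
# Resize -> CenterCrop -> convert("RGB") -> ToTensor -> Normalize.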
| 2,733 | 26.34 | 93 | py |
Mr.Right | Mr.Right-main/models/METER/meter/transforms/utils.py | from torchvision import transforms
from PIL import Image
class MinMaxResize:
def __init__(self, shorter=800, longer=1333):
self.min = shorter
self.max = longer
def __call__(self, x):
w, h = x.size
scale = self.min / min(w, h)
if h < w:
newh, neww = self.min, scale * w
else:
newh, neww = scale * h, self.min
if max(newh, neww) > self.max:
scale = self.max / max(newh, neww)
newh = newh * scale
neww = neww * scale
newh, neww = int(newh + 0.5), int(neww + 0.5)
newh, neww = newh // 32 * 32, neww // 32 * 32
return x.resize((neww, newh), resample=Image.BICUBIC)
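# Worked example for MinMaxResize (added comment): with shorter=800, longer=1333 and a
# 640x480 input, scale = 800/480 gives roughly 1067x800; that still fits under 1333, so only
# the final rounding down to multiples of 32 applies, yielding a 1056x800 output.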
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
# The normalize code -> t.sub_(m).div_(s)
return tensor
# Simple normalization to [-1, 1] (mean=std=0.5), as used in the Inception paper
inception_normalize = transforms.Compose(
[transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
# ViT uses simple non-biased inception normalization
# https://github.com/google-research/vision_transformer/blob/master/vit_jax/input_pipeline.py#L132
inception_unnormalize = transforms.Compose(
[UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
# ImageNet normalize
imagenet_normalize = transforms.Compose(
[transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
)
| 1,792 | 27.919355 | 98 | py |
Mr.Right | Mr.Right-main/models/METER/meter/transforms/randaug.py | # code in this file is adapted from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
from PIL import Image
def ShearX(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
def ShearY(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random.random() > 0.5:
v = -v
v = v * img.size[0]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateXabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert 0 <= v
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateY(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random.random() > 0.5:
v = -v
v = v * img.size[1]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def TranslateYabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert 0 <= v
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def Rotate(img, v): # [-30, 30]
assert -30 <= v <= 30
if random.random() > 0.5:
v = -v
return img.rotate(v)
def AutoContrast(img, _):
return PIL.ImageOps.autocontrast(img)
def Invert(img, _):
return PIL.ImageOps.invert(img)
def Equalize(img, _):
return PIL.ImageOps.equalize(img)
def Flip(img, _): # not from the paper
return PIL.ImageOps.mirror(img)
def Solarize(img, v): # [0, 256]
assert 0 <= v <= 256
return PIL.ImageOps.solarize(img, v)
def SolarizeAdd(img, addition=0, threshold=128):
    img_np = np.array(img).astype(np.int32)  # np.int was removed in NumPy 1.24+
img_np = img_np + addition
img_np = np.clip(img_np, 0, 255)
img_np = img_np.astype(np.uint8)
img = Image.fromarray(img_np)
return PIL.ImageOps.solarize(img, threshold)
def Posterize(img, v): # [4, 8]
v = int(v)
v = max(1, v)
return PIL.ImageOps.posterize(img, v)
def Contrast(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Contrast(img).enhance(v)
def Color(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Color(img).enhance(v)
def Brightness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Brightness(img).enhance(v)
def Sharpness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Sharpness(img).enhance(v)
def Cutout(img, v): # [0, 60] => percentage: [0, 0.2]
assert 0.0 <= v <= 0.2
if v <= 0.0:
return img
v = v * img.size[0]
return CutoutAbs(img, v)
def CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2]
# assert 0 <= v <= 20
if v < 0:
return img
w, h = img.size
x0 = np.random.uniform(w)
y0 = np.random.uniform(h)
x0 = int(max(0, x0 - v / 2.0))
y0 = int(max(0, y0 - v / 2.0))
x1 = min(w, x0 + v)
y1 = min(h, y0 + v)
xy = (x0, y0, x1, y1)
color = (125, 123, 114)
# color = (0, 0, 0)
img = img.copy()
PIL.ImageDraw.Draw(img).rectangle(xy, color)
return img
def SamplePairing(imgs): # [0, 0.4]
def f(img1, v):
i = np.random.choice(len(imgs))
img2 = PIL.Image.fromarray(imgs[i])
return PIL.Image.blend(img1, img2, v)
return f
def Identity(img, v):
return img
def augment_list(): # 16 operations and their ranges
# https://github.com/google-research/uda/blob/master/image/randaugment/policies.py#L57
# l = [
# (Identity, 0., 1.0),
# (ShearX, 0., 0.3), # 0
# (ShearY, 0., 0.3), # 1
# (TranslateX, 0., 0.33), # 2
# (TranslateY, 0., 0.33), # 3
# (Rotate, 0, 30), # 4
# (AutoContrast, 0, 1), # 5
# (Invert, 0, 1), # 6
# (Equalize, 0, 1), # 7
# (Solarize, 0, 110), # 8
# (Posterize, 4, 8), # 9
# # (Contrast, 0.1, 1.9), # 10
# (Color, 0.1, 1.9), # 11
# (Brightness, 0.1, 1.9), # 12
# (Sharpness, 0.1, 1.9), # 13
# # (Cutout, 0, 0.2), # 14
# # (SamplePairing(imgs), 0, 0.4), # 15
# ]
# https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505
l = [
(AutoContrast, 0, 1),
(Equalize, 0, 1),
# (Invert, 0, 1),
(Rotate, 0, 30),
(Posterize, 0, 4),
(Solarize, 0, 256),
(SolarizeAdd, 0, 110),
(Color, 0.1, 1.9),
(Contrast, 0.1, 1.9),
(Brightness, 0.1, 1.9),
(Sharpness, 0.1, 1.9),
(ShearX, 0.0, 0.3),
(ShearY, 0.0, 0.3),
# (CutoutAbs, 0, 40),
(TranslateXabs, 0.0, 100),
(TranslateYabs, 0.0, 100),
]
return l
class Lighting(object):
"""Lighting noise(AlexNet - style PCA - based noise)"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = torch.Tensor(eigval)
self.eigvec = torch.Tensor(eigvec)
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = (
self.eigvec.type_as(img)
.clone()
.mul(alpha.view(1, 3).expand(3, 3))
.mul(self.eigval.view(1, 3).expand(3, 3))
.sum(1)
.squeeze()
)
return img.add(rgb.view(3, 1, 1).expand_as(img))
class CutoutDefault(object):
"""
Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
"""
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1:y2, x1:x2] = 0.0
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
class RandAugment:
def __init__(self, n, m):
self.n = n
self.m = m # [0, 30]
self.augment_list = augment_list()
def __call__(self, img):
ops = random.choices(self.augment_list, k=self.n)
for op, minval, maxval in ops:
val = (float(self.m) / 30) * float(maxval - minval) + minval
img = op(img, val)
return img
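# Worked example (added comment): with n=2, m=9 each call samples two ops; an op with range
# (minval, maxval) is applied at val = (9 / 30) * (maxval - minval) + minval, e.g.
# Rotate (0, 30) -> 9 degrees and Brightness (0.1, 1.9) -> 0.64.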
| 6,990 | 24.892593 | 134 | py |
Mr.Right | Mr.Right-main/models/ALBEF/models/xbert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import Tensor, device, dtype, nn
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
import transformers
transformers.logging.set_verbosity_error()
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
"bert-base-chinese",
"bert-base-german-cased",
"bert-large-uncased-whole-word-masking",
"bert-large-cased-whole-word-masking",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"bert-large-cased-whole-word-masking-finetuned-squad",
"bert-base-cased-finetuned-mrpc",
"bert-base-german-dbmdz-cased",
"bert-base-german-dbmdz-uncased",
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
"TurkuNLP/bert-base-finnish-cased-v1",
"TurkuNLP/bert-base-finnish-uncased-v1",
"wietsedv/bert-base-dutch-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.config = config
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config, is_cross_attention):
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
if is_cross_attention:
self.key = nn.Linear(config.encoder_width, self.all_head_size)
self.value = nn.Linear(config.encoder_width, self.all_head_size)
else:
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.save_attention = False
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
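        # (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size), so that
        # attention scores are computed independently per head (added comment)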
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if self.save_attention:
self.save_attention_map(attention_probs)
attention_probs.register_hook(self.save_attn_gradients)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs_dropped = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs_dropped = attention_probs_dropped * head_mask
context_layer = torch.matmul(attention_probs_dropped, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
outputs = outputs + (past_key_value,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
super().__init__()
self.self = BertSelfAttention(config, is_cross_attention)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config, layer_num):
super().__init__()
self.config = config
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.has_cross_attention = (layer_num >= config.fusion_layer)
if self.has_cross_attention:
self.layer_num = layer_num
self.crossattention = BertAttention(config, is_cross_attention=True)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
if self.has_cross_attention:
assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"
if type(encoder_hidden_states) == list:
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states[(self.layer_num-self.config.fusion_layer)%len(encoder_hidden_states)],
encoder_attention_mask[(self.layer_num-self.config.fusion_layer)%len(encoder_hidden_states)],
output_attentions=output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1]
else:
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions=output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
mode='multi_modal',
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
if mode=='text':
start_layer = 0
output_layer = self.config.fusion_layer
elif mode=='fusion':
start_layer = self.config.fusion_layer
output_layer = self.config.num_hidden_layers
elif mode=='multi_modal':
start_layer = 0
output_layer = self.config.num_hidden_layers
for i in range(start_layer, output_layer):
layer_module = self.layer[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
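    # Added note (not in the original file): `mode` selects which slice of the stack runs:
    # 'text' covers layers [0, fusion_layer) with self-attention only, 'fusion' covers
    # [fusion_layer, num_hidden_layers) where cross-attention to the image features is
    # applied, and 'multi_modal' runs the full stack in a single pass.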
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@dataclass
class BertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.BertForPreTraining`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
seq_relationship_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To be used as a decoder, the model needs to be initialized with the :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="bert-base-uncased",
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
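        # Hedged illustration (added comment): for a padding mask [1, 1, 1, 0] the lines above
        # produce an additive mask [0, 0, 0, -10000] broadcast to (batch, 1, 1, seq_len);
        # added to the raw attention scores before softmax, it drives the padded positions'
        # probabilities to ~0. With is_decoder=True, the causal triangle additionally blocks
        # attention to future positions.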
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
mode='multi_modal',
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = inputs_embeds.device
elif encoder_embeds is not None:
input_shape = encoder_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = encoder_embeds.device
else:
raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
device, is_decoder)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
if type(encoder_hidden_states) == list:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
else:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if type(encoder_attention_mask) == list:
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
if encoder_embeds is None:
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
else:
embedding_output = encoder_embeds
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
mode=mode,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
sentence prediction (classification)` head.
""",
BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Example::
>>> from transformers import BertTokenizer, BertForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return BertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=True,
reduction='mean',
mode='multi_modal',
soft_labels=None,
alpha=0,
return_logits=False,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder,
mode=mode,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
if return_logits:
return prediction_scores[:, :-1, :].contiguous()
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss(reduction=reduction)
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1)
if soft_labels is not None:
loss_distill = -torch.sum(F.log_softmax(shifted_prediction_scores, dim=-1)*soft_labels,dim=-1)
loss_distill = (loss_distill * (labels!=-100)).sum(1)
lm_loss = (1-alpha)*lm_loss + alpha*loss_distill
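        # Note (added for clarity, not in the original): `soft_labels` are expected to
        # come from a momentum/teacher model; the final loss linearly blends the hard
        # cross-entropy term with the distillation term, e.g. with alpha = 0.4 the
        # result is 0.6 * lm_loss + 0.4 * loss_distill per sequence.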
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"past_key_values": past,
"encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
"encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
"is_decoder": True,
}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
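# Usage sketch (illustrative only, not part of the original file): BertLMHeadModel can
# decode autoregressively with key/value caching through the two hooks above. Assuming
# `prompt_ids`, `image_embeds` and `image_atts` already exist:
#
#   model = BertLMHeadModel.from_pretrained('bert-base-uncased', config=config)
#   out_ids = model.generate(input_ids=prompt_ids,
#                            encoder_hidden_states=image_embeds,
#                            encoder_attention_mask=image_atts,
#                            max_length=30, num_beams=3)
#
# generate() repeatedly calls prepare_inputs_for_generation(), which feeds only the
# last token once `past` is populated, while _reorder_cache() keeps the cached states
# aligned with the surviving beams.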
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="bert-base-uncased",
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
mode='multi_modal',
soft_labels=None,
alpha=0,
return_logits=False,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_embeds=encoder_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder,
mode=mode,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
if return_logits:
return prediction_scores
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if soft_labels is not None:
loss_distill = -torch.sum(F.log_softmax(prediction_scores, dim=-1)*soft_labels,dim=-1)
loss_distill = loss_distill[labels!=-100].mean()
masked_lm_loss = (1-alpha)*masked_lm_loss + alpha*loss_distill
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
"""Bert Model with a `next sentence prediction (classification)` head on top. """,
BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see ``input_ids`` docstring). Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Returns:
Example::
>>> from transformers import BertTokenizer, BertForNextSentencePrediction
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
if not return_dict:
output = (seq_relationship_scores,) + outputs[2:]
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="bert-base-uncased",
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss);
            if :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="bert-base-uncased",
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="bert-base-uncased",
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="bert-base-uncased",
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the position labels may gain an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| 82,187 | 41.873239 | 213 | py |
Mr.Right | Mr.Right-main/models/ALBEF/models/vit.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import _cfg, PatchEmbed
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_gradients = None
self.attention_map = None
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def forward(self, x, register_hook=False):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
if register_hook:
self.save_attention_map(attn)
attn.register_hook(self.save_attn_gradients)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
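# Example (added for illustration; not in the original code): with `register_hook=True`
# the attention probabilities and their gradients are cached on the module, which is
# convenient for Grad-CAM style visualisation:
#
#   out = model(images, register_blk=11)          # hook the last block
#   model.blocks[11].attn.get_attention_map()     # [B, num_heads, N, N]
#   model.blocks[11].attn.get_attn_gradients()    # populated after loss.backward()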
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, register_hook=False):
x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class VisionTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
https://arxiv.org/abs/2010.11929
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
norm_layer: (nn.Module): normalization layer
"""
super().__init__()
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def forward(self, x, register_blk=-1):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed[:,:x.size(1),:]
x = self.pos_drop(x)
for i,blk in enumerate(self.blocks):
x = blk(x, register_blk==i)
x = self.norm(x)
return x
def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
# interpolate position embedding
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = visual_encoder.patch_embed.num_patches
num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
if orig_size!=new_size:
# class_token and dist_token are kept unchanged
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2))
return new_pos_embed
else:
return pos_embed_checkpoint
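# Usage sketch (illustrative only; `checkpoint` and `model` are assumed to exist):
# when fine-tuning at a resolution different from the checkpoint's, remap its position
# embedding before loading the weights:
#
#   state_dict = checkpoint['model']
#   state_dict['pos_embed'] = interpolate_pos_embed(state_dict['pos_embed'],
#                                                   model.visual_encoder)
#   model.visual_encoder.load_state_dict(state_dict, strict=False)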
| 8,558 | 41.162562 | 118 | py |
Mr.Right | Mr.Right-main/models/ALBEF/models/model_retrieval.py | from functools import partial
from models.ALBEF.models.vit import VisionTransformer
from models.ALBEF.models.xbert import BertConfig, BertModel
import torch
from torch import nn
import torch.nn.functional as F
class ALBEF(nn.Module):
def __init__(self,
text_encoder = None,
tokenizer = None,
config = None,
):
super().__init__()
self.tokenizer = tokenizer
embed_dim = config['embed_dim']
vision_width = config['vision_width']
self.visual_encoder = VisionTransformer(
img_size=config['image_res'], patch_size=16, embed_dim=768, depth=12, num_heads=12,
mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6))
self.bert_config = BertConfig.from_json_file(config['bert_config'])
self.text_encoder = BertModel.from_pretrained(text_encoder, config=self.bert_config, add_pooling_layer=False)
text_width = self.text_encoder.config.hidden_size
self.vision_proj = nn.Linear(vision_width, embed_dim)
self.text_proj = nn.Linear(text_width, embed_dim)
def forward(self, doc_image, doc_text, query):
output = dict()
query_output = self.text_encoder(query["input_ids"],
attention_mask=query["attention_mask"],
return_dict=True,
mode='text')
query_embeds = query_output.last_hidden_state
query_feat = F.normalize(self.text_proj(query_embeds[:,0,:]),dim=-1)
image_embeds = self.visual_encoder(doc_image)
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(doc_image.device)
image_feat = F.normalize(self.vision_proj(image_embeds[:,0,:]),dim=-1)
text_output = self.text_encoder(doc_text["input_ids"], attention_mask = doc_text["attention_mask"],
return_dict = True, mode = 'text')
text_embeds = text_output.last_hidden_state
text_feat = F.normalize(self.text_proj(text_embeds[:,0,:]),dim=-1)
multi_output = self.text_encoder(encoder_embeds = text_embeds,
attention_mask = doc_text["attention_mask"],
encoder_hidden_states = image_embeds,
encoder_attention_mask = image_atts,
return_dict = True,
mode = 'fusion',
)
multi_embeds = multi_output.last_hidden_state
multi_atts = torch.ones(multi_embeds.size()[:-1],dtype=torch.long).to(doc_image.device)
multi_feat = F.normalize(self.text_proj(multi_embeds[:,0,:]),dim=-1)
output['query_embeds'] = query_embeds
output['query_cls'] = query_feat
output['query_atts'] = query["attention_mask"]
output['doctext_embeds'] = text_embeds
output['doctext_cls'] = text_feat
output['img_embeds'] = image_embeds
output['img_cls'] = image_feat
output['multi_embeds'] = multi_embeds
output['multi_cls'] = multi_feat
output['multi_atts'] = doc_text["attention_mask"]
return output
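# Retrieval sketch (added for illustration; `doc_image`, `doc_text` and `query` are
# assumed to be a batch from a DataLoader). Because every *_cls vector is
# L2-normalised, query-to-document relevance can be scored with a dot product over the
# fused multimodal embeddings:
#
#   out = model(doc_image, doc_text, query)
#   sim_q2m = out['query_cls'] @ out['multi_cls'].t()   # [num_queries, num_docs]
#   ranks = sim_q2m.argsort(dim=-1, descending=True)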
| 3,499 | 45.666667 | 129 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/modules/vilt_utils.py | import torch
import random
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from models.ViLT.vilt.modules.dist_utils import all_gather
from models.ViLT.vilt.modules.objectives import compute_irtr_recall
from models.ViLT.vilt.gadgets.my_metrics import Accuracy, VQAScore, Scalar
def set_metrics(pl_module):
for split in ["train", "val"]:
for k, v in pl_module.hparams.config["loss_names"].items():
if v < 1:
continue
if k == "vqa":
setattr(pl_module, f"{split}_vqa_score", VQAScore())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
elif k == "nlvr2":
if split == "train":
setattr(pl_module, f"train_{k}_accuracy", Accuracy())
setattr(pl_module, f"train_{k}_loss", Scalar())
else:
setattr(pl_module, f"dev_{k}_accuracy", Accuracy())
setattr(pl_module, f"dev_{k}_loss", Scalar())
setattr(pl_module, f"test_{k}_accuracy", Accuracy())
setattr(pl_module, f"test_{k}_loss", Scalar())
elif k == "irtr":
setattr(pl_module, f"{split}_irtr_loss", Scalar())
elif k == "mppd" or k == "mpfr":
setattr(pl_module, f"{split}_{k}_loss", Scalar())
elif k == "itm":
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
setattr(pl_module, f"{split}_{k}_wpa_loss", Scalar())
else:
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
def epoch_wrapup(pl_module):
phase = "train" if pl_module.training else "val"
the_metric = 0
if pl_module.hparams.config["get_recall_metric"] and not pl_module.training:
(ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) = compute_irtr_recall(pl_module)
print((ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10), pl_module.global_step)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r1", ir_r1, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r5", ir_r5, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r10", ir_r10, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r1", tr_r1, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r5", tr_r5, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r10", tr_r10, pl_module.global_step
)
the_metric += ir_r1.item() + tr_r1.item()
for loss_name, v in pl_module.hparams.config["loss_names"].items():
if v < 1:
continue
value = 0
if loss_name == "vqa":
value = getattr(pl_module, f"{phase}_{loss_name}_score").compute()
pl_module.log(f"{loss_name}/{phase}/score_epoch", value)
getattr(pl_module, f"{phase}_{loss_name}_score").reset()
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
elif loss_name == "nlvr2":
if phase == "train":
value = getattr(pl_module, f"train_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/train/accuracy_epoch", value)
getattr(pl_module, f"train_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/train/loss_epoch",
getattr(pl_module, f"train_{loss_name}_loss").compute(),
)
getattr(pl_module, f"train_{loss_name}_loss").reset()
else:
value = getattr(pl_module, f"dev_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/dev/accuracy_epoch", value)
getattr(pl_module, f"dev_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/dev/loss_epoch",
getattr(pl_module, f"dev_{loss_name}_loss").compute(),
)
getattr(pl_module, f"dev_{loss_name}_loss").reset()
value = getattr(pl_module, f"test_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/test/accuracy_epoch", value)
getattr(pl_module, f"test_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/test/loss_epoch",
getattr(pl_module, f"test_{loss_name}_loss").compute(),
)
getattr(pl_module, f"test_{loss_name}_loss").reset()
elif loss_name == "irtr":
pl_module.log(
f"{loss_name}/{phase}/irtr_loss_epoch",
getattr(pl_module, f"{phase}_irtr_loss").compute(),
)
getattr(pl_module, f"{phase}_irtr_loss").reset()
elif loss_name == "mppd" or loss_name == "mpfr":
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
elif loss_name == "itm":
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
pl_module.log(
f"{loss_name}/{phase}/wpa_loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_wpa_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_wpa_loss").reset()
else:
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
the_metric += value
pl_module.log(f"{phase}/the_metric", the_metric)
def check_non_acc_grad(pl_module):
if pl_module.token_type_embeddings.weight.grad is None:
return True
else:
grad = pl_module.token_type_embeddings.weight.grad
return (grad.sum() == 0).item()
def set_task(pl_module):
pl_module.current_tasks = [
k for k, v in pl_module.hparams.config["loss_names"].items() if v >= 1
]
return
def set_schedule(pl_module):
lr = pl_module.hparams.config["learning_rate"]
wd = pl_module.hparams.config["weight_decay"]
no_decay = [
"bias",
"LayerNorm.bias",
"LayerNorm.weight",
"norm.bias",
"norm.weight",
"norm1.bias",
"norm1.weight",
"norm2.bias",
"norm2.weight",
]
head_names = ["vqa_classifier", "nlvr2_classifier"]
lr_mult = pl_module.hparams.config["lr_mult"]
end_lr = pl_module.hparams.config["end_lr"]
decay_power = pl_module.hparams.config["decay_power"]
optim_type = pl_module.hparams.config["optim_type"]
names = [n for n, p in pl_module.named_parameters()]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in pl_module.named_parameters()
if not any(nd in n for nd in no_decay)
and not any(bb in n for bb in head_names)
],
"weight_decay": wd,
"lr": lr,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if any(nd in n for nd in no_decay)
and not any(bb in n for bb in head_names)
],
"weight_decay": 0.0,
"lr": lr,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if not any(nd in n for nd in no_decay)
and any(bb in n for bb in head_names)
],
"weight_decay": wd,
"lr": lr * lr_mult,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if any(nd in n for nd in no_decay) and any(bb in n for bb in head_names)
],
"weight_decay": 0.0,
"lr": lr * lr_mult,
},
]
if optim_type == "adamw":
optimizer = AdamW(
optimizer_grouped_parameters, lr=lr, eps=1e-8, betas=(0.9, 0.98)
)
elif optim_type == "adam":
optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=lr)
elif optim_type == "sgd":
optimizer = torch.optim.SGD(optimizer_grouped_parameters, lr=lr, momentum=0.9)
if pl_module.trainer.max_steps is None:
max_steps = (
len(pl_module.trainer.datamodule.train_dataloader())
* pl_module.trainer.max_epochs
// pl_module.trainer.accumulate_grad_batches
)
else:
max_steps = pl_module.trainer.max_steps
warmup_steps = pl_module.hparams.config["warmup_steps"]
if isinstance(pl_module.hparams.config["warmup_steps"], float):
warmup_steps = int(max_steps * warmup_steps)
if decay_power == "cosine":
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=max_steps,
)
else:
scheduler = get_polynomial_decay_schedule_with_warmup(
optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=max_steps,
lr_end=end_lr,
power=decay_power,
)
sched = {"scheduler": scheduler, "interval": "step"}
return (
[optimizer],
[sched],
)
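# Usage sketch (illustrative; not part of the original file): inside a
# pytorch_lightning.LightningModule this helper is typically returned straight from
# configure_optimizers, e.g.
#
#   def configure_optimizers(self):
#       return set_schedule(self)
#
# which hands Lightning one AdamW/Adam/SGD optimizer plus a per-step warmup/decay
# schedule.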
| 10,650 | 37.451264 | 88 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/modules/dist_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank() -> int:
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def get_local_rank() -> int:
"""
Returns:
The rank of the current process within the local (per-machine) process group.
"""
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
assert _LOCAL_PROCESS_GROUP is not None
return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
"""
Returns:
The size of the per-machine process group,
i.e. the number of processes per machine.
"""
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
"""
Return a process group based on gloo backend, containing all the ranks
The result is cached.
"""
if dist.get_backend() == "nccl":
return dist.new_group(backend="gloo")
else:
return dist.group.WORLD
def _serialize_to_tensor(data, group):
backend = dist.get_backend(group)
assert backend in ["gloo", "nccl"]
device = torch.device("cpu" if backend == "gloo" else "cuda")
buffer = pickle.dumps(data)
if len(buffer) > 1024 ** 3:
logger = logging.getLogger(__name__)
logger.warning(
"Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
get_rank(), len(buffer) / (1024 ** 3), device
)
)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device=device)
return tensor
def _pad_to_largest_tensor(tensor, group):
"""
Returns:
list[int]: size of the tensor, on each rank
Tensor: padded tensor that has the max size
"""
world_size = dist.get_world_size(group=group)
assert (
world_size >= 1
), "comm.gather/all_gather must be called from ranks within the given group!"
local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
size_list = [
torch.zeros([1], dtype=torch.int64, device=tensor.device)
for _ in range(world_size)
]
dist.all_gather(size_list, local_size, group=group)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
if local_size != max_size:
padding = torch.zeros(
(max_size - local_size,), dtype=torch.uint8, device=tensor.device
)
tensor = torch.cat((tensor, padding), dim=0)
return size_list, tensor
def all_gather(data, group=None):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: list of data gathered from each rank
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group) == 1:
return [data]
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
max_size = max(size_list)
# receiving Tensor from all ranks
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
for _ in size_list
]
dist.all_gather(tensor_list, tensor, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
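# Example (added for illustration): gather arbitrary picklable objects from every rank;
# on a single process this degenerates to a one-element list.
#
#   stats = {"rank": get_rank(), "n_samples": 128}
#   gathered = all_gather(stats)                           # one entry per rank
#   total_samples = sum(s["n_samples"] for s in gathered)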
def gather(data, dst=0, group=None):
"""
Run gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
dst (int): destination rank
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: on dst, a list of data gathered from each rank. Otherwise,
an empty list.
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group=group) == 1:
return [data]
rank = dist.get_rank(group=group)
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
# receiving Tensor from all ranks
if rank == dst:
max_size = max(size_list)
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
for _ in size_list
]
dist.gather(tensor, tensor_list, dst=dst, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
else:
dist.gather(tensor, [], dst=dst, group=group)
return []
def shared_random_seed():
"""
Returns:
int: a random number that is the same across all workers.
If workers need a shared RNG, they can use this shared seed to
create one.
All workers must call this function, otherwise it will deadlock.
"""
ints = np.random.randint(2 ** 31)
all_ints = all_gather(ints)
return all_ints[0]
def reduce_dict(input_dict, average=True):
"""
Reduce the values in the dictionary from all processes so that process with rank
0 has the reduced results.
Args:
input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
average (bool): whether to do average or sum
Returns:
a dict with the same keys as input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
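# Example (added for illustration): average a dict of scalar CUDA tensors across
# processes so that only rank 0 logs the reduced values.
#
#   losses = {"loss_cls": loss_cls.detach(), "loss_box": loss_box.detach()}
#   reduced = reduce_dict(losses)   # same keys; values averaged on rank 0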
| 7,814 | 27.837638 | 100 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/modules/objectives.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from models.ViLT.vilt.modules.dist_utils import all_gather
def cost_matrix_cosine(x, y, eps=1e-5):
"""Compute cosine distnace across every pairs of x, y (batched)
[B, L_x, D] [B, L_y, D] -> [B, Lx, Ly]"""
assert x.dim() == y.dim()
assert x.size(0) == y.size(0)
assert x.size(2) == y.size(2)
x_norm = F.normalize(x, p=2, dim=-1, eps=eps)
y_norm = F.normalize(y, p=2, dim=-1, eps=eps)
cosine_sim = x_norm.matmul(y_norm.transpose(1, 2))
cosine_dist = 1 - cosine_sim
return cosine_dist
def trace(x):
""" compute trace of input tensor (batched) """
b, m, n = x.size()
assert m == n
mask = torch.eye(n, dtype=torch.bool, device=x.device).unsqueeze(0).expand_as(x)
trace = x.masked_select(mask).contiguous().view(b, n).sum(dim=-1, keepdim=False)
return trace
@torch.no_grad()
def ipot(C, x_len, x_pad, y_len, y_pad, joint_pad, beta, iteration, k):
""" [B, M, N], [B], [B, M], [B], [B, N], [B, M, N]"""
b, m, n = C.size()
sigma = torch.ones(b, m, dtype=C.dtype, device=C.device) / x_len.unsqueeze(1)
T = torch.ones(b, n, m, dtype=C.dtype, device=C.device)
A = torch.exp(-C.transpose(1, 2) / beta)
# mask padded positions
sigma.masked_fill_(x_pad, 0)
joint_pad = joint_pad.transpose(1, 2)
T.masked_fill_(joint_pad, 0)
A.masked_fill_(joint_pad, 0)
# broadcastable lengths
x_len = x_len.unsqueeze(1).unsqueeze(2)
y_len = y_len.unsqueeze(1).unsqueeze(2)
# mask to zero out padding in delta and sigma
x_mask = (x_pad.to(C.dtype) * 1e4).unsqueeze(1)
y_mask = (y_pad.to(C.dtype) * 1e4).unsqueeze(1)
for _ in range(iteration):
Q = A * T # bs * n * m
sigma = sigma.view(b, m, 1)
for _ in range(k):
delta = 1 / (y_len * Q.matmul(sigma).view(b, 1, n) + y_mask)
sigma = 1 / (x_len * delta.matmul(Q) + x_mask)
T = delta.view(b, n, 1) * Q * sigma
T.masked_fill_(joint_pad, 0)
return T
def optimal_transport_dist(
txt_emb, img_emb, txt_pad, img_pad, beta=0.5, iteration=50, k=1
):
""" [B, M, D], [B, N, D], [B, M], [B, N]"""
cost = cost_matrix_cosine(txt_emb, img_emb)
# mask the padded inputs
joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)
cost.masked_fill_(joint_pad, 0)
txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)
img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)
T = ipot(
cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, beta, iteration, k
)
distance = trace(cost.matmul(T.detach()))
return distance
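# --- Illustrative sketch (not part of the original ViLT code) ---
# Minimal example of calling the optimal-transport distance above with random
# "text" and "image" token embeddings. The padding masks are boolean with
# True marking padded positions; all sizes here are made up for illustration.
def _ot_distance_demo():
    torch.manual_seed(0)
    txt = torch.randn(2, 5, 16)                    # [B, M, D]
    img = torch.randn(2, 7, 16)                    # [B, N, D]
    txt_pad = torch.zeros(2, 5, dtype=torch.bool)
    img_pad = torch.zeros(2, 7, dtype=torch.bool)
    txt_pad[:, 4] = True                           # pretend the last text token is padding
    return optimal_transport_dist(txt, img, txt_pad, img_pad)  # shape [B]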
def compute_mlm(pl_module, batch):
infer = pl_module.infer(batch, mask_text=True, mask_image=False)
mlm_logits = pl_module.mlm_score(infer["text_feats"])
mlm_labels = infer["text_labels"]
mlm_loss = F.cross_entropy(
mlm_logits.view(-1, pl_module.hparams.config["vocab_size"]),
mlm_labels.view(-1),
ignore_index=-100,
)
ret = {
"mlm_loss": mlm_loss,
"mlm_logits": mlm_logits,
"mlm_labels": mlm_labels,
"mlm_ids": infer["text_ids"],
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_mlm_loss")(ret["mlm_loss"])
acc = getattr(pl_module, f"{phase}_mlm_accuracy")(
ret["mlm_logits"], ret["mlm_labels"]
)
pl_module.log(f"mlm/{phase}/loss", loss)
pl_module.log(f"mlm/{phase}/accuracy", acc)
return ret
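# --- Illustrative sketch (not part of the original ViLT code) ---
# The MLM objective above is token-level cross-entropy where every unmasked
# position carries the label -100 and is ignored. The vocabulary size below
# (30522, the bert-base vocab) and the label values are illustrative only.
def _mlm_loss_demo(vocab_size=30522):
    logits = torch.randn(2, 6, vocab_size)               # [B, L, vocab]
    labels = torch.full((2, 6), -100, dtype=torch.long)  # ignore everywhere ...
    labels[0, 2], labels[1, 4] = 37, 1012                # ... except the masked tokens
    return F.cross_entropy(
        logits.view(-1, vocab_size), labels.view(-1), ignore_index=-100
    )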
def compute_mpp(pl_module, batch):
infer = pl_module.infer(batch, mask_text=False, mask_image=True)
mpp_logits = pl_module.mpp_score(infer["image_feats"])
mpp_logits = torch.stack(
[
mpp_logits[:, :, 0:256],
mpp_logits[:, :, 256:512],
mpp_logits[:, :, 512:768],
],
dim=2,
)
mpp_labels = infer["image_labels"]
mpp_loss = F.cross_entropy(
mpp_logits.view(-1, 256),
mpp_labels.view(-1),
ignore_index=-100,
)
ret = {
"mpp_loss": mpp_loss,
"mpp_logits": mpp_logits,
"mpp_labels": mpp_labels,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_mpp_loss")(ret["mpp_loss"])
acc = getattr(pl_module, f"{phase}_mpp_accuracy")(
ret["mpp_logits"], ret["mpp_labels"]
)
pl_module.log(f"mpp/{phase}/loss", loss)
pl_module.log(f"mpp/{phase}/accuracy", acc)
return ret
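# --- Illustrative sketch (not part of the original ViLT code) ---
# compute_mpp treats masked patch prediction as three independent 256-way
# classifications (one per RGB channel): the head output is split into three
# 256-dim chunks and matched against per-channel labels in [0, 255], with
# -100 marking unmasked patches. Shapes below are made up for illustration.
def _mpp_loss_shape_demo():
    B, P = 2, 4                            # batch size, number of patches
    logits = torch.randn(B, P, 3, 256)     # as produced by the torch.stack above
    labels = torch.randint(0, 256, (B, P, 3))
    labels[0, 0] = -100                    # an unmasked patch -> ignored
    return F.cross_entropy(logits.view(-1, 256), labels.view(-1), ignore_index=-100)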
def compute_mppd(pl_module, batch):
infer = pl_module.infer(batch, mask_text=False, mask_image=True)
mppd_logits = pl_module.mppd_score(infer["image_feats"])
mppd_labels = infer["image_labels_mppd"]
filter_to_train = infer["image_labels"].float().mean(dim=-1) != -100
labels = mppd_labels[filter_to_train]
logits = mppd_logits[filter_to_train]
mppd_loss = F.mse_loss(logits, labels)
ret = {
"mppd_loss": mppd_loss,
"mppd_logits": mppd_logits,
"mppd_labels": mppd_labels,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_mppd_loss")(ret["mppd_loss"])
pl_module.log(f"mppd/{phase}/loss", loss)
return ret
def compute_mpfr(pl_module, batch):
infer = pl_module.infer(batch, mask_text=False, mask_image=True)
mpfr_logits = pl_module.mpfr_score(infer["image_feats"])
mpfr_labels = infer["image_labels_mpfr"]
filter_to_train = infer["image_labels"].float().mean(dim=-1) != -100
labels = mpfr_labels[filter_to_train]
logits = mpfr_logits[filter_to_train]
mpfr_loss = F.mse_loss(logits, labels)
ret = {
"mpfr_loss": mpfr_loss,
"mpfr_logits": mpfr_logits,
"mpfr_labels": mpfr_labels,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_mpfr_loss")(ret["mpfr_loss"])
pl_module.log(f"mpfr/{phase}/loss", loss)
return ret
def compute_itm_wpa(pl_module, batch):
pos_len = len(batch["text"]) // 2
neg_len = len(batch["text"]) - pos_len
itm_labels = torch.cat([torch.ones(pos_len), torch.zeros(neg_len)]).to(
pl_module.device
)
itm_labels = itm_labels[torch.randperm(itm_labels.size(0))]
itm_images = [
torch.stack(
[
ti if itm_labels[i] == 1 else fi
for i, (ti, fi) in enumerate(zip(bti, bfi))
]
)
for bti, bfi in zip(batch["image"], batch["false_image_0"])
]
batch = {k: v for k, v in batch.items()}
batch["image"] = itm_images
infer = pl_module.infer(batch, mask_text=False, mask_image=False)
with torch.cuda.amp.autocast(enabled=False):
txt_emb, img_emb = infer["text_feats"], infer["image_feats"]
txt_mask, img_mask = infer["text_masks"].bool(), infer["image_masks"].bool()
for i, _len in enumerate(txt_mask.sum(dim=1)):
txt_mask[i, _len - 1] = False
txt_mask[:, 0] = False
img_mask[:, 0] = False
if "deit" in pl_module.hparams.config["vit"]:
img_mask[:, 1] = False
txt_pad, img_pad = ~txt_mask, ~img_mask
cost = cost_matrix_cosine(txt_emb.float(), img_emb.float())
joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)
cost.masked_fill_(joint_pad, 0)
txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to(
dtype=cost.dtype
)
img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to(
dtype=cost.dtype
)
T = ipot(
cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, 0.5, 50, 1
)
distance = trace(cost.matmul(T.detach()))
dist_pos = distance.masked_select(itm_labels == 1)
dist_neg = distance.masked_select(itm_labels == 0)
ot_loss = (dist_pos.sum() - dist_neg.sum()) / (dist_pos.size(0) + dist_neg.size(0))
itm_logits = pl_module.itm_score(infer["cls_feats"])
itm_loss = F.cross_entropy(itm_logits, itm_labels.long())
ret = {
"itm_loss": itm_loss,
"itm_wpa_loss": 0.1 * ot_loss,
"itm_logits": itm_logits,
"itm_labels": itm_labels,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_itm_loss")(ret["itm_loss"])
wpa_loss = getattr(pl_module, f"{phase}_itm_wpa_loss")(ret["itm_wpa_loss"])
acc = getattr(pl_module, f"{phase}_itm_accuracy")(
ret["itm_logits"], ret["itm_labels"]
)
pl_module.log(f"itm/{phase}/loss", loss)
pl_module.log(f"itm/{phase}/wpa_loss", wpa_loss)
pl_module.log(f"itm/{phase}/accuracy", acc)
return ret
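# --- Illustrative sketch (not part of the original ViLT code) ---
# Core of the ITM setup above: roughly half the batch keeps its paired image
# (label 1) while the rest is swapped with a drawn false image (label 0), and
# the labels are shuffled so positives and negatives are interleaved.
def _itm_label_demo(batch_size=8):
    pos_len = batch_size // 2
    labels = torch.cat([torch.ones(pos_len), torch.zeros(batch_size - pos_len)])
    return labels[torch.randperm(batch_size)]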
def compute_imgcls(pl_module, batch):
infer = pl_module.infer(batch, mask_text=False, mask_image=False)
imgcls_logits = pl_module.img_classifier(infer["cls_feats"])
imgcls_labels = batch["label"]
imgcls_labels = torch.tensor(imgcls_labels).to(pl_module.device).long()
imgcls_loss = F.cross_entropy(imgcls_logits, imgcls_labels)
ret = {
"imgcls_loss": imgcls_loss,
"imgcls_logits": imgcls_logits,
"imgcls_labels": imgcls_labels,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_imgcls_loss")(ret["imgcls_loss"])
acc = getattr(pl_module, f"{phase}_imgcls_accuracy")(
ret["imgcls_logits"], ret["imgcls_labels"]
)
pl_module.log(f"imgcls/{phase}/loss", loss)
pl_module.log(f"imgcls/{phase}/accuracy", acc)
return ret
def compute_vqa(pl_module, batch):
infer = pl_module.infer(batch, mask_text=False, mask_image=False)
vqa_logits = pl_module.vqa_classifier(infer["cls_feats"])
vqa_targets = torch.zeros(
len(vqa_logits), pl_module.hparams.config["vqav2_label_size"]
).to(pl_module.device)
vqa_labels = batch["vqa_labels"]
vqa_scores = batch["vqa_scores"]
for i, (_label, _score) in enumerate(zip(vqa_labels, vqa_scores)):
for l, s in zip(_label, _score):
vqa_targets[i, l] = s
vqa_loss = (
F.binary_cross_entropy_with_logits(vqa_logits, vqa_targets)
* vqa_targets.shape[1]
) # https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19
ret = {
"vqa_loss": vqa_loss,
"vqa_logits": vqa_logits,
"vqa_targets": vqa_targets,
"vqa_labels": vqa_labels,
"vqa_scores": vqa_scores,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_vqa_loss")(ret["vqa_loss"])
score = getattr(pl_module, f"{phase}_vqa_score")(
ret["vqa_logits"], ret["vqa_targets"]
)
pl_module.log(f"vqa/{phase}/loss", loss)
pl_module.log(f"vqa/{phase}/score", score)
return ret
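# --- Illustrative sketch (not part of the original ViLT code) ---
# VQA uses soft targets: each question maps one or more answer ids to scores
# in [0, 1], scattered into a dense target vector before the BCE loss. The
# label size (3129 is a common VQAv2 setting) and the ids/scores below are
# hypothetical.
def _vqa_target_demo(label_size=3129):
    vqa_labels = [[3, 17], [42]]           # answer ids per sample
    vqa_scores = [[1.0, 0.3], [0.9]]       # annotator-agreement scores
    targets = torch.zeros(len(vqa_labels), label_size)
    for i, (ls, ss) in enumerate(zip(vqa_labels, vqa_scores)):
        for l, s in zip(ls, ss):
            targets[i, l] = s
    return targets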
def compute_nlvr2(pl_module, batch):
infer1 = pl_module.infer(
batch, mask_text=False, mask_image=False, image_token_type_idx=1
)
infer2 = pl_module.infer(
batch, mask_text=False, mask_image=False, image_token_type_idx=2
)
cls_feats = torch.cat([infer1["cls_feats"], infer2["cls_feats"]], dim=-1)
nlvr2_logits = pl_module.nlvr2_classifier(cls_feats)
nlvr2_labels = batch["answers"]
nlvr2_labels = torch.tensor(nlvr2_labels).to(pl_module.device).long()
nlvr2_loss = F.cross_entropy(nlvr2_logits, nlvr2_labels)
ret = {
"nlvr2_loss": nlvr2_loss,
"nlvr2_logits": nlvr2_logits,
"nlvr2_labels": nlvr2_labels,
}
phase = "train" if pl_module.training else "val"
if phase == "train":
loss = getattr(pl_module, f"{phase}_nlvr2_loss")(ret["nlvr2_loss"])
acc = getattr(pl_module, f"{phase}_nlvr2_accuracy")(
ret["nlvr2_logits"], ret["nlvr2_labels"]
)
pl_module.log(f"nlvr2/{phase}/loss", loss)
pl_module.log(f"nlvr2/{phase}/accuracy", acc)
else:
dev_batches = [i for i, n in enumerate(batch["table_name"]) if "dev" in n]
test_batches = [i for i, n in enumerate(batch["table_name"]) if "test" in n]
if dev_batches:
dev_loss = getattr(pl_module, f"dev_nlvr2_loss")(
F.cross_entropy(
ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
)
)
dev_acc = getattr(pl_module, f"dev_nlvr2_accuracy")(
ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
)
pl_module.log(f"nlvr2/dev/loss", dev_loss)
pl_module.log(f"nlvr2/dev/accuracy", dev_acc)
if test_batches:
test_loss = getattr(pl_module, f"test_nlvr2_loss")(
F.cross_entropy(
ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
)
)
test_acc = getattr(pl_module, f"test_nlvr2_accuracy")(
ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
)
pl_module.log(f"nlvr2/test/loss", test_loss)
pl_module.log(f"nlvr2/test/accuracy", test_acc)
return ret
def compute_irtr(pl_module, batch):
is_training_phase = pl_module.training
_bs, _c, _h, _w = batch["image"][0].shape
false_len = pl_module.hparams.config["draw_false_text"]
text_ids = torch.stack(
[batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1
)
text_masks = torch.stack(
[batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1
)
text_labels = torch.stack(
[batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1
)
text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1)
text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1)
text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1)
images = batch["image"][0].unsqueeze(1).expand(_bs, false_len + 1, _c, _h, _w)
infer = pl_module.infer(
{
"image": [rearrange(images, "bs fs c h w -> (bs fs) c h w")],
"text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
"text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
"text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
}
)
score = pl_module.rank_output(infer["cls_feats"])[:, 0]
score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
answer = torch.zeros(_bs).to(score).long()
irtr_loss = F.cross_entropy(score, answer)
ret = {
"irtr_loss": irtr_loss,
}
phase = "train" if pl_module.training else "val"
irtr_loss = getattr(pl_module, f"{phase}_irtr_loss")(ret["irtr_loss"])
pl_module.log(f"irtr/{phase}/irtr_loss", irtr_loss)
return ret
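# --- Illustrative sketch (not part of the original ViLT code) ---
# compute_irtr scores each image against its paired text plus `draw_false_text`
# sampled negatives and applies cross-entropy with the true text fixed at
# index 0. The batch size and negative count below are made up.
def _irtr_loss_demo(bs=4, false_len=15):
    scores = torch.randn(bs, false_len + 1)     # [batch, 1 + negatives]
    answer = torch.zeros(bs, dtype=torch.long)  # positive pair sits at column 0
    return F.cross_entropy(scores, answer)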
@torch.no_grad()
def compute_irtr_recall(pl_module):
text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset()
text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
text_loader = torch.utils.data.DataLoader(
text_dset,
batch_size=64,
num_workers=pl_module.hparams.config["num_workers"],
pin_memory=True,
collate_fn=functools.partial(
text_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
)
image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
image_only=True
)
image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
dist_sampler = DistributedSampler(image_dset, shuffle=False)
image_loader = torch.utils.data.DataLoader(
image_dset,
batch_size=1,
num_workers=pl_module.hparams.config["num_workers"],
sampler=dist_sampler,
pin_memory=True,
collate_fn=functools.partial(
image_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
)
text_preload = list()
for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"):
text_preload.append(
{
"text_ids": _b["text_ids"].to(pl_module.device),
"text_masks": _b["text_masks"].to(pl_module.device),
"text_labels": _b["text_labels"].to(pl_module.device),
"img_index": _b["img_index"],
}
)
tiids = list()
for pre in text_preload:
tiids += pre["img_index"]
tiids = torch.tensor(tiids)
image_preload = list()
for _b in tqdm.tqdm(image_loader, desc="image prefetch loop"):
(ie, im, _, _) = pl_module.transformer.visual_embed(
_b["image"][0].to(pl_module.device),
max_image_len=pl_module.hparams.config["max_image_len"],
mask_it=False,
)
image_preload.append((ie, im, _b["img_index"][0]))
rank_scores = list()
rank_iids = list()
for img_batch in tqdm.tqdm(image_preload, desc="rank loop"):
_ie, _im, _iid = img_batch
_, l, c = _ie.shape
img_batch_score = list()
for txt_batch in text_preload:
fblen = len(txt_batch["text_ids"])
ie = _ie.expand(fblen, l, c)
im = _im.expand(fblen, l)
with torch.cuda.amp.autocast():
score = pl_module.rank_output(
pl_module.infer(
{
"text_ids": txt_batch["text_ids"],
"text_masks": txt_batch["text_masks"],
"text_labels": txt_batch["text_labels"],
},
image_embeds=ie,
image_masks=im,
)["cls_feats"]
)[:, 0]
img_batch_score.append(score)
img_batch_score = torch.cat(img_batch_score)
rank_scores.append(img_batch_score.cpu().tolist())
rank_iids.append(_iid)
torch.distributed.barrier()
gather_rank_scores = all_gather(rank_scores)
gather_rank_iids = all_gather(rank_iids)
iids = torch.tensor(gather_rank_iids)
iids = iids.view(-1)
scores = torch.tensor(gather_rank_scores)
scores = scores.view(len(iids), -1)
topk10 = scores.topk(10, dim=1)
topk5 = scores.topk(5, dim=1)
topk1 = scores.topk(1, dim=1)
topk10_iids = tiids[topk10.indices]
topk5_iids = tiids[topk5.indices]
topk1_iids = tiids[topk1.indices]
tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
topk10 = scores.topk(10, dim=0)
topk5 = scores.topk(5, dim=0)
topk1 = scores.topk(1, dim=0)
topk10_iids = iids[topk10.indices]
topk5_iids = iids[topk5.indices]
topk1_iids = iids[topk1.indices]
ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()
return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
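# --- Illustrative sketch (not part of the original ViLT code) ---
# The recall computation above reduces to: for each image (row of `scores`)
# take the top-k text scores and check whether the matching image id appears
# among the corresponding text-side ids (text retrieval); image retrieval is
# the symmetric reduction over columns.
def _recall_at_k_demo(scores, iids, tiids, k=10):
    # scores: [num_images, num_texts], iids: [num_images], tiids: [num_texts]
    topk_iids = tiids[scores.topk(k, dim=1).indices]   # [num_images, k]
    return (iids.unsqueeze(1) == topk_iids).float().max(dim=1)[0].mean()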
def init_weights(module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def vqa_test_step(pl_module, batch, output):
id2answer = (
pl_module.trainer.datamodule.dm_dicts["vqa_trainval"].id2answer
if "vqa_trainval" in pl_module.trainer.datamodule.dm_dicts
else pl_module.trainer.datamodule.dm_dicts["vqa"].id2answer
)
vqa_logits = output["vqa_logits"]
vqa_preds = vqa_logits.argmax(dim=-1)
vqa_preds = [id2answer[pred.item()] for pred in vqa_preds]
questions = batch["text"]
qids = batch["qid"]
return {"qids": qids, "preds": vqa_preds}
def arc_test_step(pl_module, batch, output):
return output
def vqa_test_wrapup(outs, model_name):
rank = torch.distributed.get_rank()
qids, preds = list(), list()
for out in outs:
qids += out["qids"]
preds += out["preds"]
rets = list()
for qid, pred in zip(qids, preds):
rets.append({"question_id": qid, "answer": pred})
with open(f"vqa_submit_{rank}.json", "w") as fp:
json.dump(rets, fp, indent=4)
torch.distributed.barrier()
if rank == 0:
jsons = list()
paths = list(glob.glob("vqa_submit_*.json"))
for path in paths:
with open(path, "r") as fp:
jsons += json.load(fp)
os.makedirs("result", exist_ok=True)
with open(f"result/vqa_submit_{model_name}.json", "w") as fp:
json.dump(jsons, fp, indent=4)
torch.distributed.barrier()
os.remove(f"vqa_submit_{rank}.json")
def arc_test_wrapup(outs, caplen, model_name):
rank = torch.distributed.get_rank()
iids, captions = list(), list()
for out in outs:
iids += out["iid"]
captions += out["captions"]
rets = list()
for iid, caption in zip(iids, captions):
rets.append({"image_id": iid, "caption": caption})
with open(f"coco_cap_len{caplen}_{rank}.json", "w") as fp:
json.dump(rets, fp, indent=4)
torch.distributed.barrier()
if rank == 0:
jsons = list()
paths = list(glob.glob(f"coco_cap_len{caplen}_*.json"))
for path in paths:
with open(path, "r") as fp:
jsons += json.load(fp)
os.makedirs("result/arc", exist_ok=True)
jsons = sorted(jsons, key=lambda x: x["image_id"])
with open(f"result/arc/coco_cap_{model_name}_len{caplen}.json", "w") as fp:
json.dump(jsons, fp, indent=4)
torch.distributed.barrier()
os.remove(f"coco_cap_len{caplen}_{rank}.json")
| 22,098 | 32.842266 | 88 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/modules/vilt_module.py | import torch
torch.autograd.set_detect_anomaly(True)
import torch.nn as nn
import pytorch_lightning as pl
import models.ViLT.vilt.modules.vision_transformer as vit
import pdb
from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings
from models.ViLT.vilt.modules import heads, objectives
# from models.ViLT.vilt.modules import vilt_utils
class ViLTransformerSS(pl.LightningModule):
def __init__(self, config):
super().__init__()
self.save_hyperparameters()
self.bert_config = BertConfig(
vocab_size=config["vocab_size"],
hidden_size=config["hidden_size"],
num_hidden_layers=config["num_layers"],
num_attention_heads=config["num_heads"],
intermediate_size=config["hidden_size"] * config["mlp_ratio"],
max_position_embeddings=config["max_text_len"],
hidden_dropout_prob=config["drop_rate"],
attention_probs_dropout_prob=config["drop_rate"],
)
self.text_embeddings = BertEmbeddings(self.bert_config)
self.text_embeddings.apply(objectives.init_weights)
self.token_type_embeddings = nn.Embedding(2, config["hidden_size"])
self.token_type_embeddings.apply(objectives.init_weights)
if self.hparams.config["checkpoint"] == "":
self.transformer = getattr(vit, self.hparams.config["vit"])(
pretrained=True, config=self.hparams.config
)
else:
self.transformer = getattr(vit, self.hparams.config["vit"])(
pretrained=False, config=self.hparams.config
)
self.pooler = heads.Pooler(config["hidden_size"])
self.pooler.apply(objectives.init_weights)
if config["loss_names"]["mlm"] > 0:
self.mlm_score = heads.MLMHead(self.bert_config)
self.mlm_score.apply(objectives.init_weights)
if config["loss_names"]["itm"] > 0:
self.itm_score = heads.ITMHead(config["hidden_size"])
self.itm_score.apply(objectives.init_weights)
if config["loss_names"]["mpp"] > 0:
self.mpp_score = heads.MPPHead(self.bert_config)
self.mpp_score.apply(objectives.init_weights)
# ===================== Downstream ===================== #
hs = self.hparams.config["hidden_size"]
if self.hparams.config["loss_names"]["vqa"] > 0:
vs = self.hparams.config["vqav2_label_size"]
self.vqa_classifier = nn.Sequential(
nn.Linear(hs, hs * 2),
nn.LayerNorm(hs * 2),
nn.GELU(),
nn.Linear(hs * 2, vs),
)
self.vqa_classifier.apply(objectives.init_weights)
if self.hparams.config["loss_names"]["nlvr2"] > 0:
self.nlvr2_classifier = nn.Sequential(
nn.Linear(hs * 2, hs * 2),
nn.LayerNorm(hs * 2),
nn.GELU(),
nn.Linear(hs * 2, 2),
)
self.nlvr2_classifier.apply(objectives.init_weights)
emb_data = self.token_type_embeddings.weight.data
self.token_type_embeddings = nn.Embedding(3, hs)
self.token_type_embeddings.apply(objectives.init_weights)
self.token_type_embeddings.weight.data[0, :] = emb_data[0, :]
self.token_type_embeddings.weight.data[1, :] = emb_data[1, :]
self.token_type_embeddings.weight.data[2, :] = emb_data[1, :]
if self.hparams.config["loss_names"]["irtr"] > 0:
self.rank_output = nn.Linear(hs, 1)
self.rank_output.weight.data = self.itm_score.fc.weight.data[1:, :]
self.rank_output.bias.data = self.itm_score.fc.bias.data[1:]
self.margin = 0.2
for p in self.itm_score.parameters():
p.requires_grad = False
# vilt_utils.set_metrics(self)
self.current_tasks = list()
# ===================== load downstream (test_only) ======================
if self.hparams.config["checkpoint"] != "":
ckpt = torch.load(self.hparams.config["checkpoint"], map_location="cpu")
state_dict = ckpt['state_dict']
# interpolate position embedding
org_embeds = state_dict['text_embeddings.position_embeddings.weight']
org_tokens_len = org_embeds.shape[0]
new_tokens_len = self.text_embeddings.position_embeddings.weight.shape[0]
if org_tokens_len > new_tokens_len:
state_dict['text_embeddings.position_embeddings.weight'] = org_embeds[:new_tokens_len,:]
else:
extra_tokens = new_tokens_len - org_tokens_len
extra_tokens_embeds = nn.Embedding(extra_tokens, self.hparams.config["hidden_size"])
extra_tokens_embeds_weight = extra_tokens_embeds.weight
state_dict['text_embeddings.position_embeddings.weight'] = torch.cat([org_embeds,extra_tokens_embeds_weight],dim=0)
state_dict['text_embeddings.position_ids'] = self.text_embeddings.position_ids
msg = self.load_state_dict(state_dict, strict=False)
print(msg)
def infer(
self,
batch,
mask_text=False,
mask_image=False,
image_token_type_idx=1,
image_embeds=None,
image_masks=None,
):
imgkey = "image"
do_mlm = "_mlm" if mask_text else ""
text_ids = batch[f"text_ids{do_mlm}"]
# text_labels = batch[f"text_labels{do_mlm}"]
text_masks = batch[f"text_masks"]
text_embeds = self.text_embeddings(text_ids)
query_ids = batch[f"query_ids"]
query_embeds = self.text_embeddings(query_ids)
query_masks = batch[f"query_masks"]
check = torch.isnan(query_embeds)
# print(-1,torch.isnan(check).any())
# print((check == True).nonzero(as_tuple=True))
for i, blk in enumerate(self.transformer.blocks):
query_embeds, _ = blk(query_embeds, mask=query_masks)
query_embeds = self.transformer.norm(query_embeds)
if image_embeds is None and image_masks is None:
img = batch[imgkey]
(
image_embeds,
image_masks,
patch_index,
image_labels,
) = self.transformer.visual_embed(
img,
max_image_len=self.hparams.config["max_image_len"],
mask_it=mask_image,
)
else:
patch_index, image_labels = (
None,
None,
)
text_embeds, image_embeds = (
text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks)),
image_embeds + self.token_type_embeddings(torch.full_like(image_masks, image_token_type_idx)),
)
# co_embeds = text_embeds
# co_masks = text_masks
co_embeds = torch.cat([text_embeds, image_embeds], dim=1)
co_masks = torch.cat([text_masks, image_masks], dim=1)
x = co_embeds
for i, blk in enumerate(self.transformer.blocks):
x, _attn = blk(x, mask=co_masks)
x = self.transformer.norm(x)
text_feats, image_feats = (
x[:, : text_embeds.shape[1]],
x[:, text_embeds.shape[1] :],
)
# text_feats, image_feats = x, None
cls_feats = self.pooler(x)
ret = {
"query_embeds": query_embeds,
"query_atts": query_masks,
"query_cls": query_embeds[:,0,:],
"text_feats": text_feats,
"image_feats": image_feats,
"multi_cls": cls_feats,
"raw_cls_feats": x[:, 0],
"image_labels": image_labels,
"image_atts": image_masks,
"multi_embeds": x,
"multi_atts": co_masks,
# "text_labels": text_labels,
# "text_ids": text_ids,
"text_masks": text_masks,
"patch_index": patch_index,
}
return ret
@torch.no_grad()
def output_multi(self,batch,image_token_type_idx=1):
imgkey = "image"
text_ids = batch["text_ids"]
text_masks = batch["text_masks"]
text_embeds = self.text_embeddings(text_ids)
img = batch[imgkey]
(
image_embeds,
image_masks,
patch_index,
image_labels,
) = self.transformer.visual_embed(
img,
max_image_len=self.hparams.config["max_image_len"],
mask_it=False,
)
text_embeds, image_embeds = (
text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks)),
image_embeds
+ self.token_type_embeddings(
torch.full_like(image_masks, image_token_type_idx)
),
)
# co_embeds = text_embeds
# co_masks = text_masks
co_embeds = torch.cat([text_embeds, image_embeds], dim=1)
co_masks = torch.cat([text_masks, image_masks], dim=1)
x = co_embeds
for i, blk in enumerate(self.transformer.blocks):
x, _attn = blk(x, mask=co_masks)
x = self.transformer.norm(x)
text_feats, image_feats = (
x[:, : text_embeds.shape[1]],
x[:, text_embeds.shape[1] :],
)
# text_feats, image_feats = x, None
cls_feats = self.pooler(x)
ret = {
"text_feats": text_feats,
"image_feats": image_feats,
"multi_cls": cls_feats,
"raw_cls_feats": x[:, 0],
"image_labels": image_labels,
"image_atts": image_masks,
"multi_embeds": x,
"multi_atts": co_masks,
# "text_labels": text_labels,
# "text_ids": text_ids,
"text_masks": text_masks,
"patch_index": patch_index,
}
return ret
def forward(self, batch):
ret = dict()
if len(self.current_tasks) == 0:
ret.update(self.infer(batch))
return ret
# Masked Language Modeling
if "mlm" in self.current_tasks:
ret.update(objectives.compute_mlm(self, batch))
# Masked Patch Prediction
if "mpp" in self.current_tasks:
ret.update(objectives.compute_mpp(self, batch))
# Image Text Matching
if "itm" in self.current_tasks:
ret.update(objectives.compute_itm_wpa(self, batch))
# Visual Question Answering
if "vqa" in self.current_tasks:
ret.update(objectives.compute_vqa(self, batch))
# Natural Language for Visual Reasoning 2
if "nlvr2" in self.current_tasks:
ret.update(objectives.compute_nlvr2(self, batch))
# Image Retrieval and Text Retrieval
if "irtr" in self.current_tasks:
ret.update(objectives.compute_irtr(self, batch))
return ret
def training_step(self, batch, batch_idx):
# vilt_utils.set_task(self)
output = self(batch)
total_loss = sum([v for k, v in output.items() if "loss" in k])
return total_loss
def training_epoch_end(self, outs):
pass
# vilt_utils.epoch_wrapup(self)
def validation_step(self, batch, batch_idx):
# vilt_utils.set_task(self)
output = self(batch)
def validation_epoch_end(self, outs):
pass
# vilt_utils.epoch_wrapup(self)
def test_step(self, batch, batch_idx):
# vilt_utils.set_task(self)
output = self(batch)
ret = dict()
if self.hparams.config["loss_names"]["vqa"] > 0:
ret.update(objectives.vqa_test_step(self, batch, output))
return ret
def test_epoch_end(self, outs):
model_name = self.hparams.config["checkpoint"].split("/")[-1][:-5]
if self.hparams.config["loss_names"]["vqa"] > 0:
objectives.vqa_test_wrapup(outs, model_name)
# vilt_utils.epoch_wrapup(self)
def configure_optimizers(self):
pass
# return vilt_utils.set_schedule(self)
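# --- Illustrative sketch (not part of the original ViLT code) ---
# Standalone version of the text position-embedding length adaptation done in
# __init__ when loading a checkpoint: longer tables are truncated to the new
# max_text_len, shorter ones are extended with freshly initialised rows.
def _adapt_text_pos_embed(org_embeds, new_len, hidden_size):
    if org_embeds.shape[0] >= new_len:
        return org_embeds[:new_len, :]
    extra = nn.Embedding(new_len - org_embeds.shape[0], hidden_size).weight
    return torch.cat([org_embeds, extra], dim=0)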
| 10,172 | 28.148997 | 119 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/modules/vision_transformer.py | """ Vision Transformer (ViT) in PyTorch
A PyTorch implementation of Vision Transformers as described in
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929
The official jax code is released and available at https://github.com/google-research/vision_transformer
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
DeiT model defs and weights from https://github.com/facebookresearch/deit,
paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
_logger = logging.getLogger(__name__)
def download_clip(
url: str = "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
root: str = os.path.expanduser("~/.cache/clip"),
):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if (
hashlib.sha256(open(download_target, "rb").read()).hexdigest()
== expected_sha256
):
return download_target
else:
warnings.warn(
f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file"
)
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if (
hashlib.sha256(open(download_target, "rb").read()).hexdigest()
!= expected_sha256
):
raise RuntimeError(
f"Model has been downloaded but the SHA256 checksum does not not match"
)
return download_target
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
inception_unnormalize = transforms.Compose(
[UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
def _cfg(url="", **kwargs):
return {
"url": url,
"num_classes": 1000,
"input_size": (3, 224, 224),
"pool_size": None,
"crop_pct": 0.9,
"interpolation": "bicubic",
"mean": IMAGENET_DEFAULT_MEAN,
"std": IMAGENET_DEFAULT_STD,
"first_conv": "patch_embed.proj",
"classifier": "head",
**kwargs,
}
default_cfgs = {
# patch models (my experiments)
"vit_small_patch16_224": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/vit_small_p16_224-15ec54c9.pth",
),
# patch models (weights ported from official Google JAX impl)
"vit_base_patch16_224": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth",
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_base_patch32_224": _cfg(
url="", # no official model weights for this combo, only for in21k
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_base_patch16_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
),
"vit_base_patch32_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p32_384-830016f5.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
),
"vit_large_patch16_224": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth",
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_large_patch32_224": _cfg(
url="", # no official model weights for this combo, only for in21k
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_large_patch16_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
),
"vit_large_patch32_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
),
# patch models, imagenet21k (weights ported from official Google JAX impl)
"vit_base_patch16_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_224_in21k-e5005f0a.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_base_patch32_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch32_224_in21k-8db57226.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_large_patch16_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch16_224_in21k-606da67d.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_large_patch32_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_huge_patch14_224_in21k": _cfg(
url="", # FIXME I have weights for this but > 2GB limit for github release binaries
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
# hybrid models (weights ported from official Google JAX impl)
"vit_base_resnet50_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=0.9,
first_conv="patch_embed.backbone.stem.conv",
),
"vit_base_resnet50_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
first_conv="patch_embed.backbone.stem.conv",
),
# hybrid models (my experiments)
"vit_small_resnet26d_224": _cfg(),
"vit_small_resnet50d_s3_224": _cfg(),
"vit_base_resnet26d_224": _cfg(),
"vit_base_resnet50d_224": _cfg(),
# deit models (FB weights)
"vit_deit_tiny_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth"
),
"vit_deit_small_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth"
),
"vit_deit_base_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
),
"vit_deit_base_patch16_384": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth",
input_size=(3, 384, 384),
crop_pct=1.0,
),
"vit_deit_tiny_distilled_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth"
),
"vit_deit_small_distilled_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth"
),
"vit_deit_base_distilled_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
),
"vit_deit_base_distilled_patch16_384": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
input_size=(3, 384, 384),
crop_pct=1.0,
),
}
class Mlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, mask=None):
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
if mask is not None:
mask = mask.bool()
attn = attn.masked_fill(~mask[:, None, None, :], float("-inf"))
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
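# --- Illustrative sketch (not part of the original file) ---
# The Attention block above masks padded keys by writing -inf into their
# attention scores before the softmax, so they receive zero weight. Minimal
# standalone version of that step, with made-up shapes:
def _masked_softmax_demo():
    scores = torch.randn(1, 1, 3, 4)                       # [B, heads, N_q, N_k]
    mask = torch.tensor([[1, 1, 1, 0]], dtype=torch.bool)  # last key is padding
    scores = scores.masked_fill(~mask[:, None, None, :], float("-inf"))
    return scores.softmax(dim=-1)                          # padded column -> weight 0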
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
def forward(self, x, mask=None):
_x, attn = self.attn(self.norm1(x), mask=mask)
x = x + self.drop_path(_x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x, attn
class PatchEmbed(nn.Module):
""" Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
no_patch_embed_bias=False,
):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans,
embed_dim,
kernel_size=patch_size,
stride=patch_size,
bias=False if no_patch_embed_bias else True,
)
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
x = self.proj(x)
return x
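# --- Illustrative sketch (not part of the original file) ---
# PatchEmbed is just a strided convolution: kernel and stride equal to the
# patch size turn a [B, 3, H, W] image into a [B, embed_dim, H/ps, W/ps] grid
# of patch embeddings that is later flattened into a token sequence.
def _patch_embed_shape_demo():
    embed = PatchEmbed(img_size=384, patch_size=32, in_chans=3, embed_dim=768)
    x = torch.randn(2, 3, 384, 384)
    return embed(x).shape                  # torch.Size([2, 768, 12, 12])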
class VisionTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
https://arxiv.org/abs/2010.11929
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
representation_size=None,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
norm_layer=None,
add_norm_before_transformer=False,
no_patch_embed_bias=False,
config=None,
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
hybrid_backbone (nn.Module): CNN backbone to use in-place of PatchEmbed module
norm_layer: (nn.Module): normalization layer
"""
super().__init__()
drop_rate = drop_rate if config is None else config["drop_rate"]
self.num_classes = num_classes
self.num_features = (
self.embed_dim
) = embed_dim # num_features for consistency with other models
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.add_norm_before_transformer = add_norm_before_transformer
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.patch_size = patch_size
self.patch_dim = img_size // patch_size
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
if add_norm_before_transformer:
self.pre_norm = norm_layer(embed_dim)
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
self.blocks = nn.ModuleList(
[
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
)
for i in range(depth)
]
)
self.norm = norm_layer(embed_dim)
trunc_normal_(self.pos_embed, std=0.02)
trunc_normal_(self.cls_token, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def mask_tokens(self, orig_image, feats):
"""
        Prepare masked token inputs/labels for masked patch prediction: ~15% of patches are sampled as targets, and 80% of those are replaced with the mask token (the remaining targets keep their original embedding).
"""
img_unnorm = orig_image * 0.5 + 0.5
_, _, ph, pw = self.patch_embed.proj.weight.shape
with torch.no_grad():
img_unnorm_patch = F.conv2d(
img_unnorm,
weight=torch.ones(3, 1, ph, pw).to(img_unnorm) / (ph * pw),
bias=None,
stride=(ph, pw),
padding=0,
groups=3,
)
labels = (
((img_unnorm_patch * 255).long().flatten(start_dim=2, end_dim=3))
.permute(0, 2, 1)
.contiguous()
)
        # We sample a few patches in each image as prediction targets (with probability 0.15)
probability_matrix = torch.full(labels.shape[:-1], 0.15)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = (
torch.bernoulli(torch.full(labels.shape[:-1], 0.8)).bool() & masked_indices
)
feats[indices_replaced] = self.mask_token.to(feats)
return feats, labels
def visual_embed(self, _x, max_image_len=200, mask_it=False):
_, _, ph, pw = self.patch_embed.proj.weight.shape
x = self.patch_embed(_x)
x_mask = (_x.sum(dim=1) != 0).float()[:, None, :, :]
x_mask = F.interpolate(x_mask, size=(x.shape[2], x.shape[3])).long()
x_h = x_mask[:, 0].sum(dim=1)[:, 0]
x_w = x_mask[:, 0].sum(dim=2)[:, 0]
B, C, H, W = x.shape
spatial_pos = (
self.pos_embed[:, 1:, :]
.transpose(1, 2)
.view(1, C, self.patch_dim, self.patch_dim)
)
pos_embed = torch.cat(
[
F.pad(
F.interpolate(
spatial_pos, size=(h, w), mode="bilinear", align_corners=True,
),
(0, W - w, 0, H - h),
)
for h, w in zip(x_h, x_w)
],
dim=0,
)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
x = x.flatten(2).transpose(1, 2)
patch_index = (
torch.stack(
torch.meshgrid(
torch.arange(x_mask.shape[-2]), torch.arange(x_mask.shape[-1])
),
dim=-1,
)[None, None, :, :, :]
.expand(x_mask.shape[0], x_mask.shape[1], -1, -1, -1)
.flatten(1, 3)
)
x_mask = x_mask.flatten(1)
if mask_it:
x, label = self.mask_tokens(_x, x)
if (
max_image_len < 0
or max_image_len is None
or not isinstance(max_image_len, int)
):
            # suppose aug is 800 x 1333; then the maximum effective res is 800 x 1333 (if one side gets bigger, the other will be constrained and shrunk)
# (800 // self.patch_size) * (1333 // self.patch_size) is the maximum number of patches that single image can get.
# if self.patch_size = 32, 25 * 41 = 1025
# if res is 384 x 640, 12 * 20 = 240
eff = x_h * x_w
max_image_len = eff.max()
else:
eff = x_h * x_w
max_image_len = min(eff.max(), max_image_len)
valid_idx = x_mask.nonzero(as_tuple=False)
non_valid_idx = (1 - x_mask).nonzero(as_tuple=False)
unique_rows = valid_idx[:, 0].unique()
valid_row_idx = [valid_idx[valid_idx[:, 0] == u] for u in unique_rows]
non_valid_row_idx = [
non_valid_idx[non_valid_idx[:, 0] == u] for u in unique_rows
]
valid_nums = [v.size(0) for v in valid_row_idx]
non_valid_nums = [v.size(0) for v in non_valid_row_idx]
pad_nums = [max_image_len - v for v in valid_nums]
select = list()
for i, (v, nv, p) in enumerate(zip(valid_nums, non_valid_nums, pad_nums)):
if p <= 0:
valid_choice = torch.multinomial(torch.ones(v).float(), max_image_len)
select.append(valid_row_idx[i][valid_choice])
else:
pad_choice = torch.multinomial(
torch.ones(nv).float(), p, replacement=True
)
select.append(
torch.cat(
[valid_row_idx[i], non_valid_row_idx[i][pad_choice]], dim=0,
)
)
select = torch.cat(select, dim=0)
x = x[select[:, 0], select[:, 1]].view(B, -1, C)
x_mask = x_mask[select[:, 0], select[:, 1]].view(B, -1)
patch_index = patch_index[select[:, 0], select[:, 1]].view(B, -1, 2)
pos_embed = pos_embed[select[:, 0], select[:, 1]].view(B, -1, C)
if mask_it:
label = label[select[:, 0], select[:, 1]].view(B, -1, 3)
label[x_mask == 0] = -100
label = torch.cat(
[torch.full((label.shape[0], 1, 3), -100).to(label), label,], dim=1,
)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
pos_embed = torch.cat(
(self.pos_embed[:, 0, :][:, None, :].expand(B, -1, -1), pos_embed), dim=1
)
x = x + pos_embed
x = self.pos_drop(x)
if self.add_norm_before_transformer:
x = self.pre_norm(x)
x_mask = torch.cat([torch.ones(x_mask.shape[0], 1).to(x_mask), x_mask], dim=1)
if mask_it:
return x, x_mask, (patch_index, (H, W)), label
else:
return x, x_mask, (patch_index, (H, W)), None
def forward_features(self, _x, max_image_len=144, mask_it=False):
x, x_mask, patch_index, label = self.visual_embed(
_x, max_image_len=max_image_len, mask_it=mask_it
)
for blk in self.blocks:
x, _ = blk(x, mask=x_mask)
x = self.norm(x)
return x, x_mask, label
def forward(self, x, max_image_len=-1):
x, _, _ = self.forward_features(x, max_image_len=max_image_len)
x = x[:, 0]
x = self.head(x)
return x
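# --- Illustrative sketch (not part of the original file) ---
# The key step in VisionTransformer.visual_embed above: every image contributes
# at most `max_image_len` patch tokens. Images with more valid (non-padded)
# patches subsample them, images with fewer pad the remainder by resampling
# invalid positions with replacement. Minimal per-image version of that choice:
def _patch_select_demo(num_valid, num_invalid, max_image_len):
    if max_image_len <= num_valid:
        return torch.multinomial(torch.ones(num_valid), max_image_len)
    pad = torch.multinomial(
        torch.ones(num_invalid), max_image_len - num_valid, replacement=True
    )
    return torch.cat([torch.arange(num_valid), num_valid + pad])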
class DistilledVisionTransformer(VisionTransformer):
""" Vision Transformer with distillation token.
Paper: `Training data-efficient image transformers & distillation through attention` -
https://arxiv.org/abs/2012.12877
This impl of distilled ViT is taken from https://github.com/facebookresearch/deit
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
num_patches = self.patch_embed.num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim))
trunc_normal_(self.dist_token, std=0.02)
trunc_normal_(self.pos_embed, std=0.02)
def visual_embed(self, _x, max_image_len=200, mask_it=False):
_, _, ph, pw = self.patch_embed.proj.weight.shape
x = self.patch_embed(_x)
x_mask = (_x.sum(dim=1) != 0).float()[:, None, :, :]
x_mask = F.interpolate(x_mask, size=(x.shape[2], x.shape[3])).long()
x_h = x_mask[:, 0].sum(dim=1)[:, 0]
x_w = x_mask[:, 0].sum(dim=2)[:, 0]
B, C, H, W = x.shape
spatial_pos = (
self.pos_embed[:, 2:, :]
.transpose(1, 2)
.view(1, C, self.patch_dim, self.patch_dim)
)
pos_embed = torch.cat(
[
F.pad(
F.interpolate(
spatial_pos, size=(h, w), mode="bilinear", align_corners=True,
),
(0, W - w, 0, H - h),
)
for h, w in zip(x_h, x_w)
],
dim=0,
)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
x = x.flatten(2).transpose(1, 2)
patch_index = (
torch.stack(
torch.meshgrid(
torch.arange(x_mask.shape[-2]), torch.arange(x_mask.shape[-1])
),
dim=-1,
)[None, None, :, :, :]
.expand(x_mask.shape[0], x_mask.shape[1], -1, -1, -1)
.flatten(1, 3)
)
x_mask = x_mask.flatten(1)
if mask_it:
x, label = self.mask_tokens(_x, x)
if (
max_image_len < 0
or max_image_len is None
or not isinstance(max_image_len, int)
):
            # suppose aug is 800 x 1333; then the maximum effective res is 800 x 1333 (if one side gets bigger, the other will be constrained and shrunk)
# (800 // self.patch_size) * (1333 // self.patch_size) is the maximum number of patches that single image can get.
# if self.patch_size = 32, 25 * 41 = 1025
# if res is 384 x 640, 12 * 20 = 240
eff = x_h * x_w
max_image_len = eff.max()
else:
eff = x_h * x_w
max_image_len = min(eff.max(), max_image_len)
valid_idx = x_mask.nonzero(as_tuple=False)
non_valid_idx = (1 - x_mask).nonzero(as_tuple=False)
unique_rows = valid_idx[:, 0].unique()
valid_row_idx = [valid_idx[valid_idx[:, 0] == u] for u in unique_rows]
non_valid_row_idx = [
non_valid_idx[non_valid_idx[:, 0] == u] for u in unique_rows
]
valid_nums = [v.size(0) for v in valid_row_idx]
non_valid_nums = [v.size(0) for v in non_valid_row_idx]
pad_nums = [max_image_len - v for v in valid_nums]
select = list()
for i, (v, nv, p) in enumerate(zip(valid_nums, non_valid_nums, pad_nums)):
if p <= 0:
valid_choice = torch.multinomial(torch.ones(v).float(), max_image_len)
select.append(valid_row_idx[i][valid_choice])
else:
pad_choice = torch.multinomial(
torch.ones(nv).float(), p, replacement=True
)
select.append(
torch.cat(
[valid_row_idx[i], non_valid_row_idx[i][pad_choice]], dim=0,
)
)
select = torch.cat(select, dim=0)
x = x[select[:, 0], select[:, 1]].view(B, -1, C)
x_mask = x_mask[select[:, 0], select[:, 1]].view(B, -1)
patch_index = patch_index[select[:, 0], select[:, 1]].view(B, -1, 2)
pos_embed = pos_embed[select[:, 0], select[:, 1]].view(B, -1, C)
if mask_it:
label = label[select[:, 0], select[:, 1]].view(B, -1, 3)
label[x_mask == 0] = -100
label = torch.cat(
[torch.full((label.shape[0], 1, 3), -100).to(label), label,], dim=1,
)
cls_tokens = self.cls_token.expand(B, -1, -1)
dist_token = self.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
pos_embed = torch.cat(
(self.pos_embed[:, :2, :].expand(B, -1, -1), pos_embed), dim=1
)
x = x + pos_embed
x = self.pos_drop(x)
if self.add_norm_before_transformer:
x = self.pre_norm(x)
x_mask = torch.cat([torch.ones(x_mask.shape[0], 2).to(x_mask), x_mask], dim=1)
if mask_it:
return x, x_mask, (patch_index, (H, W)), label
else:
return x, x_mask, (patch_index, (H, W)), None
def forward_features(self, _x, max_image_len=144, mask_it=False):
x, x_mask, patch_index, label = self.visual_embed(
_x, max_image_len=max_image_len, mask_it=mask_it
)
for blk in self.blocks:
x, _ = blk(x, mask=x_mask)
x = self.norm(x)
return x, x_mask, label
def forward(self, x, max_image_len=-1):
x, _, _ = self.forward_features(x, max_image_len=max_image_len)
x = x[:, 0]
x = self.head(x)
return x
def resize_pos_embed(posemb, posemb_new):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
_logger.info("Resized position embedding: %s to %s", posemb.shape, posemb_new.shape)
ntok_new = posemb_new.shape[1]
if True:
posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
ntok_new -= 1
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
gs_new = int(math.sqrt(ntok_new))
_logger.info("Position embedding grid-size from %s to %s", gs_old, gs_new)
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=(gs_new, gs_new), mode="bilinear")
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
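# --- Illustrative sketch (not part of the original file) ---
# Example use of resize_pos_embed: adapting a 224px/patch-16 checkpoint
# (14x14 grid + cls token) to a 384px model (24x24 grid + cls token). The
# second argument is only used for its target shape.
def _resize_pos_embed_demo():
    posemb_224 = torch.randn(1, 14 * 14 + 1, 768)
    posemb_384 = torch.zeros(1, 24 * 24 + 1, 768)
    return resize_pos_embed(posemb_224, posemb_384).shape  # [1, 577, 768]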
def checkpoint_filter_fn(state_dict, model):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
if "model" in state_dict:
# For deit models
state_dict = state_dict["model"]
for k, v in state_dict.items():
if "patch_embed.proj.weight" in k and len(v.shape) < 4:
# For old models that I trained prior to conv based patchification
O, I, H, W = model.patch_embed.proj.weight.shape
v = v.reshape(O, -1, H, W)
elif k == "pos_embed" and v.shape != model.pos_embed.shape:
# To resize pos embedding when using model at different size from pretrained weights
v = resize_pos_embed(v, model.pos_embed)
out_dict[k] = v
return out_dict
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
default_cfg = default_cfgs[variant]
default_num_classes = default_cfg["num_classes"]
default_img_size = default_cfg["input_size"][-1]
num_classes = kwargs.pop("num_classes", default_num_classes)
img_size = kwargs.pop("img_size", default_img_size)
repr_size = kwargs.pop("representation_size", None)
if repr_size is not None and num_classes != default_num_classes:
# Remove representation layer if fine-tuning. This may not always be the desired action,
        # but it seems better than doing nothing by default for fine-tuning. Perhaps a better interface?
_logger.warning("Removing representation layer for fine-tuning.")
repr_size = None
model_cls = DistilledVisionTransformer if distilled else VisionTransformer
model = model_cls(
img_size=img_size,
num_classes=num_classes,
representation_size=repr_size,
**kwargs,
)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(
model,
num_classes=num_classes,
in_chans=kwargs.get("in_chans", 3),
filter_fn=partial(checkpoint_filter_fn, model=model),
strict=False,
)
return model
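# --- Illustrative sketch (not part of the original file) ---
# This factory is what ViLT reaches through from vilt_module, e.g.
# getattr(vit, "vit_base_patch32_384")(pretrained=False, config=cfg); in this
# file the config dict is only read for "drop_rate", so a minimal, hypothetical
# config is enough to build the backbone without pretrained weights.
def _backbone_factory_demo():
    cfg = {"drop_rate": 0.1}   # hypothetical minimal config
    model = vit_base_patch32_384(pretrained=False, config=cfg)
    return sum(p.numel() for p in model.parameters())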
@register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
""" My custom 'small' ViT model. Depth=8, heads=8= mlp_ratio=3."""
model_kwargs = dict(
patch_size=16,
embed_dim=768,
depth=8,
num_heads=8,
mlp_ratio=3.0,
qkv_bias=False,
norm_layer=nn.LayerNorm,
**kwargs,
)
if pretrained:
# NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model
model_kwargs.setdefault("qk_scale", 768 ** -0.5)
model = _create_vision_transformer(
"vit_small_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_base_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch32_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_base_patch32_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_base_patch16_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch32_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_base_patch32_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer(
"vit_large_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch32_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer(
"vit_large_patch32_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer(
"vit_large_patch16_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch32_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer(
"vit_large_patch32_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
representation_size=768,
**kwargs,
)
model = _create_vision_transformer(
"vit_base_patch16_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=32,
embed_dim=768,
depth=12,
num_heads=12,
representation_size=768,
**kwargs,
)
model = _create_vision_transformer(
"vit_base_patch32_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
representation_size=1024,
**kwargs,
)
model = _create_vision_transformer(
"vit_large_patch16_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=32,
embed_dim=1024,
depth=24,
num_heads=16,
representation_size=1024,
**kwargs,
)
model = _create_vision_transformer(
"vit_large_patch32_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_huge_patch14_224_in21k(pretrained=False, **kwargs):
""" ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: converted weights not currently available, too large for github release hosting.
"""
model_kwargs = dict(
patch_size=14,
embed_dim=1280,
depth=32,
num_heads=16,
representation_size=1280,
**kwargs,
)
model = _create_vision_transformer(
"vit_huge_patch14_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_resnet50_224_in21k(pretrained=False, **kwargs):
""" R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
# create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head
backbone = ResNetV2(
layers=(3, 4, 9),
num_classes=0,
global_pool="",
in_chans=kwargs.get("in_chans", 3),
preact=False,
stem_type="same",
conv_layer=StdConv2dSame,
)
model_kwargs = dict(
embed_dim=768,
depth=12,
num_heads=12,
hybrid_backbone=backbone,
representation_size=768,
**kwargs,
)
model = _create_vision_transformer(
"vit_base_resnet50_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_resnet50_384(pretrained=False, **kwargs):
""" R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
# create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head
backbone = ResNetV2(
layers=(3, 4, 9),
num_classes=0,
global_pool="",
in_chans=kwargs.get("in_chans", 3),
preact=False,
stem_type="same",
conv_layer=StdConv2dSame,
)
model_kwargs = dict(
embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs
)
model = _create_vision_transformer(
"vit_base_resnet50_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_small_resnet26d_224(pretrained=False, **kwargs):
""" Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights.
"""
backbone = resnet26d(
pretrained=pretrained,
in_chans=kwargs.get("in_chans", 3),
features_only=True,
out_indices=[4],
)
model_kwargs = dict(
embed_dim=768,
depth=8,
num_heads=8,
mlp_ratio=3,
hybrid_backbone=backbone,
**kwargs,
)
model = _create_vision_transformer(
"vit_small_resnet26d_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_small_resnet50d_s3_224(pretrained=False, **kwargs):
""" Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights.
"""
backbone = resnet50d(
pretrained=pretrained,
in_chans=kwargs.get("in_chans", 3),
features_only=True,
out_indices=[3],
)
model_kwargs = dict(
embed_dim=768,
depth=8,
num_heads=8,
mlp_ratio=3,
hybrid_backbone=backbone,
**kwargs,
)
model = _create_vision_transformer(
"vit_small_resnet50d_s3_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_resnet26d_224(pretrained=False, **kwargs):
""" Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights.
"""
backbone = resnet26d(
pretrained=pretrained,
in_chans=kwargs.get("in_chans", 3),
features_only=True,
out_indices=[4],
)
model_kwargs = dict(
embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs
)
model = _create_vision_transformer(
"vit_base_resnet26d_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_resnet50d_224(pretrained=False, **kwargs):
""" Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights.
"""
backbone = resnet50d(
pretrained=pretrained,
in_chans=kwargs.get("in_chans", 3),
features_only=True,
out_indices=[4],
)
model_kwargs = dict(
embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs
)
model = _create_vision_transformer(
"vit_base_resnet50d_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_tiny_patch16_224(pretrained=False, **kwargs):
""" DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = _create_vision_transformer(
"vit_deit_tiny_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_small_patch16_224(pretrained=False, **kwargs):
""" DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer(
"vit_deit_small_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_base_patch16_224(pretrained=False, **kwargs):
""" DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_base_patch16_384(pretrained=False, **kwargs):
""" DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_patch16_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
""" DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = _create_vision_transformer(
"vit_deit_tiny_distilled_patch16_224",
pretrained=pretrained,
distilled=True,
**model_kwargs,
)
return model
@register_model
def vit_deit_small_distilled_patch16_224(pretrained=False, **kwargs):
""" DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer(
"vit_deit_small_distilled_patch16_224",
pretrained=pretrained,
distilled=True,
**model_kwargs,
)
return model
@register_model
def vit_deit_base_distilled_patch16_224(pretrained=False, **kwargs):
""" DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_distilled_patch16_224",
pretrained=pretrained,
distilled=True,
**model_kwargs,
)
return model
@register_model
def vit_deit_base_distilled_patch16_384(pretrained=False, **kwargs):
""" DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_distilled_patch16_384",
pretrained=pretrained,
distilled=True,
**model_kwargs,
)
return model
| 49,034 | 34.558376 | 155 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/modules/heads.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.models.bert.modeling_bert import BertPredictionHeadTransform
class Pooler(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class ITMHead(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.fc = nn.Linear(hidden_size, 2)
def forward(self, x):
x = self.fc(x)
return x
class MLMHead(nn.Module):
def __init__(self, config, weight=None):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
if weight is not None:
self.decoder.weight = weight
def forward(self, x):
x = self.transform(x)
x = self.decoder(x) + self.bias
return x
class MPPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, 256 * 3)
def forward(self, x):
x = self.transform(x)
x = self.decoder(x)
return x
| 1,569 | 27.035714 | 83 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/transforms/utils.py | from torchvision import transforms
from PIL import Image
class MinMaxResize:
def __init__(self, shorter=800, longer=1333):
self.min = shorter
self.max = longer
def __call__(self, x):
w, h = x.size
scale = self.min / min(w, h)
if h < w:
newh, neww = self.min, scale * w
else:
newh, neww = scale * h, self.min
if max(newh, neww) > self.max:
scale = self.max / max(newh, neww)
newh = newh * scale
neww = neww * scale
newh, neww = int(newh + 0.5), int(neww + 0.5)
newh, neww = newh // 32 * 32, neww // 32 * 32
return x.resize((neww, newh), resample=Image.BICUBIC)
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
# The normalize code -> t.sub_(m).div_(s)
return tensor
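# --- Illustrative sketch (added; not part of the original ViLT code) ---
# UnNormalize is meant to invert a torchvision Normalize that used the same mean/std,
# e.g. to turn a normalized tensor back into a displayable image. `img_t` (a 3-channel
# CHW float tensor in [0, 1]) is a hypothetical input name.
def _example_unnormalize_roundtrip(img_t):
    """Normalize then un-normalize a CHW tensor; the result approximates the input."""
    mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
    normed = transforms.Normalize(mean=mean, std=std)(img_t)
    return UnNormalize(mean, std)(normed)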
# This is the simple maximum-entropy normalization used in the Inception paper
inception_normalize = transforms.Compose(
[transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
# ViT uses simple non-biased inception normalization
# https://github.com/google-research/vision_transformer/blob/master/vit_jax/input_pipeline.py#L132
inception_unnormalize = transforms.Compose(
[UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
| 1,645 | 27.877193 | 98 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/transforms/randaug.py | # code in this file is adpated from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
from PIL import Image
def ShearX(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
def ShearY(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random.random() > 0.5:
v = -v
v = v * img.size[0]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateXabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert 0 <= v
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateY(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random.random() > 0.5:
v = -v
v = v * img.size[1]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def TranslateYabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert 0 <= v
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def Rotate(img, v): # [-30, 30]
assert -30 <= v <= 30
if random.random() > 0.5:
v = -v
return img.rotate(v)
def AutoContrast(img, _):
return PIL.ImageOps.autocontrast(img)
def Invert(img, _):
return PIL.ImageOps.invert(img)
def Equalize(img, _):
return PIL.ImageOps.equalize(img)
def Flip(img, _): # not from the paper
return PIL.ImageOps.mirror(img)
def Solarize(img, v): # [0, 256]
assert 0 <= v <= 256
return PIL.ImageOps.solarize(img, v)
def SolarizeAdd(img, addition=0, threshold=128):
    img_np = np.array(img).astype(np.int64)  # np.int was removed in NumPy >= 1.24
img_np = img_np + addition
img_np = np.clip(img_np, 0, 255)
img_np = img_np.astype(np.uint8)
img = Image.fromarray(img_np)
return PIL.ImageOps.solarize(img, threshold)
def Posterize(img, v): # [4, 8]
v = int(v)
v = max(1, v)
return PIL.ImageOps.posterize(img, v)
def Contrast(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Contrast(img).enhance(v)
def Color(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Color(img).enhance(v)
def Brightness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Brightness(img).enhance(v)
def Sharpness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Sharpness(img).enhance(v)
def Cutout(img, v): # [0, 60] => percentage: [0, 0.2]
assert 0.0 <= v <= 0.2
if v <= 0.0:
return img
v = v * img.size[0]
return CutoutAbs(img, v)
def CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2]
# assert 0 <= v <= 20
if v < 0:
return img
w, h = img.size
x0 = np.random.uniform(w)
y0 = np.random.uniform(h)
x0 = int(max(0, x0 - v / 2.0))
y0 = int(max(0, y0 - v / 2.0))
x1 = min(w, x0 + v)
y1 = min(h, y0 + v)
xy = (x0, y0, x1, y1)
color = (125, 123, 114)
# color = (0, 0, 0)
img = img.copy()
PIL.ImageDraw.Draw(img).rectangle(xy, color)
return img
def SamplePairing(imgs): # [0, 0.4]
def f(img1, v):
i = np.random.choice(len(imgs))
img2 = PIL.Image.fromarray(imgs[i])
return PIL.Image.blend(img1, img2, v)
return f
def Identity(img, v):
return img
def augment_list(): # 16 operations and their ranges
# https://github.com/google-research/uda/blob/master/image/randaugment/policies.py#L57
# l = [
# (Identity, 0., 1.0),
# (ShearX, 0., 0.3), # 0
# (ShearY, 0., 0.3), # 1
# (TranslateX, 0., 0.33), # 2
# (TranslateY, 0., 0.33), # 3
# (Rotate, 0, 30), # 4
# (AutoContrast, 0, 1), # 5
# (Invert, 0, 1), # 6
# (Equalize, 0, 1), # 7
# (Solarize, 0, 110), # 8
# (Posterize, 4, 8), # 9
# # (Contrast, 0.1, 1.9), # 10
# (Color, 0.1, 1.9), # 11
# (Brightness, 0.1, 1.9), # 12
# (Sharpness, 0.1, 1.9), # 13
# # (Cutout, 0, 0.2), # 14
# # (SamplePairing(imgs), 0, 0.4), # 15
# ]
# https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505
l = [
(AutoContrast, 0, 1),
(Equalize, 0, 1),
# (Invert, 0, 1),
(Rotate, 0, 30),
(Posterize, 0, 4),
(Solarize, 0, 256),
(SolarizeAdd, 0, 110),
(Color, 0.1, 1.9),
(Contrast, 0.1, 1.9),
(Brightness, 0.1, 1.9),
(Sharpness, 0.1, 1.9),
(ShearX, 0.0, 0.3),
(ShearY, 0.0, 0.3),
# (CutoutAbs, 0, 40),
(TranslateXabs, 0.0, 100),
(TranslateYabs, 0.0, 100),
]
return l
class Lighting(object):
"""Lighting noise(AlexNet - style PCA - based noise)"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = torch.Tensor(eigval)
self.eigvec = torch.Tensor(eigvec)
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = (
self.eigvec.type_as(img)
.clone()
.mul(alpha.view(1, 3).expand(3, 3))
.mul(self.eigval.view(1, 3).expand(3, 3))
.sum(1)
.squeeze()
)
return img.add(rgb.view(3, 1, 1).expand_as(img))
class CutoutDefault(object):
"""
Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
"""
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1:y2, x1:x2] = 0.0
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
class RandAugment:
def __init__(self, n, m):
self.n = n
self.m = m # [0, 30]
self.augment_list = augment_list()
def __call__(self, img):
ops = random.choices(self.augment_list, k=self.n)
for op, minval, maxval in ops:
val = (float(self.m) / 30) * float(maxval - minval) + minval
img = op(img, val)
return img
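# --- Illustrative sketch (added; not part of the original module) ---
# RandAugment(n, m) samples n ops per call; the magnitude m is interpreted on a 0-30
# scale and mapped into each op's own (minval, maxval) range. `pil_img` is a
# hypothetical PIL.Image input.
def _example_randaug(pil_img, n=2, m=9):
    """Apply RandAugment to a PIL image and return the augmented PIL image."""
    augmenter = RandAugment(n, m)
    return augmenter(pil_img)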
| 6,990 | 24.892593 | 134 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/transforms/pixelbert.py | from .utils import (
inception_normalize,
MinMaxResize,
)
from torchvision import transforms
from .randaug import RandAugment
def pixelbert_transform(size=800):
longer = int((1333 / 800) * size)
return transforms.Compose(
[
MinMaxResize(shorter=size, longer=longer),
transforms.ToTensor(),
inception_normalize,
]
)
def pixelbert_transform_randaug(size=800):
longer = int((1333 / 800) * size)
trs = transforms.Compose(
[
MinMaxResize(shorter=size, longer=longer),
transforms.ToTensor(),
inception_normalize,
]
)
trs.transforms.insert(0, RandAugment(2, 9))
return trs
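# --- Illustrative sketch (added; not part of the original module) ---
# pixelbert_transform keeps the aspect ratio, bounds the shorter/longer sides by
# (size, size * 1333 / 800) and snaps both sides down to multiples of 32 via
# MinMaxResize, so the output suits a patch-based encoder. `pil_img` stands for a
# hypothetical RGB PIL image.
def _example_pixelbert(pil_img, size=384):
    """Return a normalized CHW tensor whose spatial sides are multiples of 32."""
    transform = pixelbert_transform(size=size)
    return transform(pil_img)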
| 714 | 22.064516 | 54 | py |
Mr.Right | Mr.Right-main/data/data_module.py | import random
import torch
import os
import json
import pickle
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import Compose, ToTensor, Normalize, Resize, RandomResizedCrop, RandomHorizontalFlip
from pytorch_lightning import LightningDataModule
from data.utils import pre_caption, RandomAugment
from PIL import Image,ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
class TextToMultiDataset(Dataset):
def __init__(self, args, configs, data, task, tokenizer):
self.data = data
self.image_size = configs.image_res
self.image_root = configs["image_root"]
self.task = task
self.tokenizer = tokenizer
self.configs = configs
self.args = args
self.q_max_len = configs.query_length
self.d_max_len = configs.text_length
normalize = Normalize((0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711))
if task == "train":
self.transform = Compose([
RandomResizedCrop(self.image_size,
scale=(0.5, 1.),
interpolation=Image.BICUBIC),
RandomHorizontalFlip(),
RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',
'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']),
ToTensor(),
normalize,
])
elif task == "val_queries":
self.data = self.data
elif task == 'test_queries':
self.data = self.data
elif task == "docs":
self.data = self.data
self.transform = Compose([
Resize((self.image_size,self.image_size),
interpolation=Image.BICUBIC),
ToTensor(),
normalize,
])
def __len__(self):
return len(self.data)
def __getitem__(self, index):
result = {}
data = self.data[index]
if self.task == "train":
# query str
result['query_text_str'] = data["txt_query_str"]
result['query_image_str'] = data["img_query_str"]
            r = random.random()
            # Randomly pick the text query, the image query, or a space-joined mix of both.
            if r < 0.33:
                query_str = data["txt_query_str"]
            elif r < 0.66:
                query_str = data["img_query_str"]
            elif r < 0.83:
                query_str = data["txt_query_str"] + " " + data["img_query_str"]
            else:
                query_str = data["img_query_str"] + " " + data["txt_query_str"]
image = data["doc_image"]
image = image.replace("./","")
image_path = os.path.join(self.image_root,image)
image = Image.open(image_path).convert('RGB')
image = self.transform(image)
result['query_str'] = query_str
result['doc_id'] = data["doc_id"]
result['doc_str'] = data["doc_text_str"]
result['doc_image'] = image
result['image_path'] = image_path
elif self.task == 'val_queries':
result['img_query_str'] = data['img_query_str']
result['txt_query_str'] = data['txt_query_str']
result['multi_query_str'] = data['multi_query_str']
result['doc_id'] = data["doc_id"]
elif self.task == 'test_queries':
result['img_query_str'] = data['img_query_str']
result['txt_query_str'] = data['txt_query_str']
result['multi_query_str'] = data['multi_query_str']
result['doc_id'] = data["doc_id"]
elif self.task == 'docs':
image = data["doc_image"]
image = image.replace("./","")
image_path = os.path.join(self.image_root,image)
image = Image.open(image_path).convert('RGB')
image = self.transform(image)
result['doc_str'] = data["doc_text_str"]
result['doc_image'] = image
result['image_path'] = image_path
result['doc_id'] = data["doc_id"]
return result
def collate_fn(self, batch):
if self.task == 'test_queries' or self.task == 'val_queries':
batch_dict = {
"doc_id": torch.tensor([b["doc_id"] for b in batch]).long(),
"img_query_str": [b["img_query_str"] for b in batch],
"txt_query_str": [b["txt_query_str"] for b in batch],
"multi_query_str": [b["multi_query_str"] for b in batch],
}
img_query_str_tensor = self.tokenizer(
text=[b["img_query_str"] for b in batch],
max_length=self.q_max_len,
padding="max_length",
truncation=True,
return_tensors="pt")
txt_query_str_tensor = self.tokenizer(
text=[b["txt_query_str"] for b in batch],
max_length=self.q_max_len,
padding="max_length",
truncation=True,
return_tensors="pt")
multi_query_str_tensor = self.tokenizer(
text=[b["multi_query_str"] for b in batch],
max_length=self.q_max_len,
padding="max_length",
truncation=True,
return_tensors="pt")
batch_dict["img_query_str_tensor"] = img_query_str_tensor
batch_dict["txt_query_str_tensor"] = txt_query_str_tensor
batch_dict["multi_query_str_tensor"] = multi_query_str_tensor
return batch_dict
if self.task == 'docs':
batch_dict = {
"doc_id": torch.tensor([b["doc_id"] for b in batch]).long(),
"image_path": [b["image_path"] for b in batch],
"doc_str" : [b["doc_str"] for b in batch],
"doc_image_tensor": torch.stack([b["doc_image"] for b in batch]),
}
doc_str_tensor = self.tokenizer(
text=[b["doc_str"] for b in batch],
max_length=self.d_max_len,
padding="max_length",
truncation=True,
return_tensors="pt")
batch_dict["doc_str_tensor"] = doc_str_tensor
return batch_dict
batch_dict = {
"doc_id": torch.tensor([b["doc_id"] for b in batch]).long(),
"query_str": [b["query_str"] for b in batch],
"doc_str" : [b["doc_str"] for b in batch],
"doc_image_tensor": torch.stack([b["doc_image"] for b in batch]),
}
query_str_tensor = self.tokenizer(
text=[b["query_str"] for b in batch],
max_length=self.q_max_len,
padding="longest" if self.task == "train" else "max_length",
# padding="max_length",
truncation=True,
return_tensors="pt")
batch_dict["query_str_tensor"] = query_str_tensor
doc_str_tensor = self.tokenizer(
text=[b["doc_str"] for b in batch],
max_length=self.d_max_len,
padding="longest" if self.task == "train" else "max_length",
# padding="max_length",
truncation=True,
return_tensors="pt")
batch_dict["doc_str_tensor"] = doc_str_tensor
query_str_tensor_total = query_str_tensor
if self.task == "train":
query_str_tensor_total = self.tokenizer(
text=[b["query_text_str"] + ' ' + b["query_image_str"] for b in batch],
max_length=self.q_max_len,
padding="longest",
# padding="max_length",
truncation=True,
return_tensors="pt")
if self.args.ctx_prediction:
context_labels = torch.zeros(len(batch), self.tokenizer.vocab_size)
pad_id = self.tokenizer.pad_token_id
sep_id = self.tokenizer.sep_token_id
cls_id = self.tokenizer.cls_token_id
context_labels[torch.arange(len(batch)).unsqueeze(1), query_str_tensor_total['input_ids']] = 1
context_labels[torch.arange(len(batch)).unsqueeze(1), doc_str_tensor['input_ids']] = 1
context_labels[:, pad_id] = 0
context_labels[:, sep_id] = 0
context_labels[:, cls_id] = 0
batch_dict["context_labels"] = context_labels
return batch_dict
class TextToMultiDataModule(LightningDataModule):
def __init__(self,args,configs,tokenizer):
super().__init__()
self.args = args
self.shuffle = args.shuffle
self.batch_size = args.batch_size
self.num_workers = args.num_workers
self.configs = configs
self.max_len = configs.text_length
self.tokenizer = tokenizer
def prepare_data(self,train=None,val=None,test=None,document=None):
# called only on 1 GPU
def load_data(config, mode):
datas = self.prepare_text2multi_data(config,mode)
return datas
if train is not None: self.train_datas = load_data(train, 'train')
if val is not None: self.val_datas = load_data(val, 'val')
if test is not None: self.test_datas = load_data(test, 'test')
if document is not None: self.documents = load_data(document,'doc')
def setup(self):
if hasattr(self, 'train_datas'):
self.train_dataset = TextToMultiDataset(self.args, self.configs, self.train_datas,"train", self.tokenizer)
if hasattr(self, 'val_datas'):
self.val_queries_dataset = TextToMultiDataset(self.args, self.configs, self.val_datas, "val_queries", self.tokenizer)
self.val_docs_dataset = TextToMultiDataset(self.args, self.configs, self.documents, "docs", self.tokenizer)
if hasattr(self, 'test_datas'):
self.test_queries_dataset = TextToMultiDataset(self.args, self.configs, self.test_datas, "test_queries", self.tokenizer)
self.test_docs_dataset = TextToMultiDataset(self.args, self.configs, self.documents, "docs", self.tokenizer)
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers,pin_memory=True,drop_last=True, collate_fn=self.train_dataset.collate_fn)
def val_dataloader(self):
return [
DataLoader(self.val_queries_dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=self.val_queries_dataset.collate_fn),
DataLoader(self.val_docs_dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=self.val_docs_dataset.collate_fn),
]
def test_dataloader(self):
return [
DataLoader(self.test_queries_dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=self.test_queries_dataset.collate_fn),
DataLoader(self.test_docs_dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=self.test_docs_dataset.collate_fn),
]
def prepare_text2multi_data(self,files,task="train"):
print("\nReading json files")
data = []
for f in files:
print(f"File: {f}",end="\r")
data += json.load(open(f,'r'))
result = []
for idx,pairs in enumerate(data):
if task == "train":
result.append({
"img_query_str": pre_caption(pairs["query_img"],self.max_len),
"txt_query_str": pre_caption(pairs["query_text"],self.max_len),
"doc_text_str": pre_caption(pairs["doc_text"], self.max_len),
"doc_image": pairs["doc_image"],
"doc_id": idx,
})
if task == "val":
result.append({
"multi_query_str": pre_caption(pairs["query_multi"],self.max_len),
"img_query_str": pre_caption(pairs["query_img"],self.max_len),
"txt_query_str": pre_caption(pairs["query_text"],self.max_len),
"doc_id": pairs["id"],
})
if task == "test":
result.append({
"img_query_str": pre_caption(pairs["query_img"],self.max_len),
"txt_query_str": pre_caption(pairs["query_text"],self.max_len),
"multi_query_str": pre_caption(pairs["query_multi"],self.max_len),
"doc_id": pairs["id"],
})
if task == "doc":
result.append({
"doc_text_str": pre_caption(pairs["doc_text"], self.max_len),
"doc_image": pairs["doc_image"],
"doc_id": pairs["id"],
})
return result
| 13,856 | 44.136808 | 176 | py |
NORPPA | NORPPA-main/config.py | import os
import sys
from pathlib import Path
import cv2
import numpy as np
file_folder = Path(__file__).resolve().parent
sys.path.append(str(file_folder / "reidentification/hesaff_pytorch"))
from HessianAffinePatches import init_affnet, init_orinet, init_hardnet
from segmentation.detectron_segment import create_predictor
from pattern_extraction.extract_pattern import create_unet
from torchvision.datasets.utils import download_url
from sql import create_connection
def init_file(path, url, allow_download=True):
if Path(path).exists():
return path
elif allow_download:
download_url(url, Path(path).parent, Path(path).name)
return path
else:
raise Exception("The file {path} is not found!")
def config(use_cuda=True, allow_download=True):
config = {}
base_dir = Path(__file__).resolve().parent
mount_path = "/ekaterina/work/data/"
path_db = mount_path + "DB.db"
config["conn"] = create_connection(path_db)
config["detectron_predictor"] = create_predictor(init_file(base_dir/"models/R-101-FPN_150ims.pth",
"https://github.com/kwadraterry/NORPPA/raw/models/models/R-101-FPN_150ims.pth",
allow_download=allow_download),
not use_cuda )
config["unet"] = create_unet(init_file(base_dir/"models/unet_seals_512.hdf5",
"https://github.com/kwadraterry/NORPPA/raw/models/models/unet_seals_512.hdf5",
allow_download=allow_download))
config["net"] = init_hardnet(init_file(base_dir/"models/HardNet++.pth",
"https://github.com/kwadraterry/NORPPA/raw/models/models/HardNet++.pth",
allow_download=allow_download),
use_cuda=use_cuda)
affnet = init_affnet(init_file(base_dir/"models/AffNet.pth",
"https://github.com/kwadraterry/NORPPA/raw/models/models/AffNet.pth",
allow_download=allow_download),
use_cuda=use_cuda)
orinet = init_orinet(init_file(base_dir/"models/OriNet.pth",
"https://github.com/kwadraterry/NORPPA/raw/models/models/OriNet.pth",
allow_download=allow_download),
use_cuda=use_cuda)
codebooks_path = init_file(base_dir/'codebooks/codebooks.pickle',
"https://github.com/kwadraterry/NORPPA/raw/models/codebooks/codebooks.pickle",
allow_download=allow_download)
config["codebooks_path"] = Path(base_dir/"codebooks/norppa.pickle")
config["codebooks"] = None
config["hesaff_args"] = {'init_sigma': 1.3213713243956968,
'mrSize': 9.348280997446642,
'nlevels': 10,
'num_features': 480,
'unsharp_amount': 6.80631647207343,
'unsharp_radius': None,
'use_cuda' :use_cuda}
config["hesaff_args"]["AffNet"] = affnet
config["hesaff_args"]["OriNet"] = orinet
config["hesaff_args"]["patch_size"] = 32
config["use_hesaff"] = True
config["pca"] = 64
config["use_kpca"] = False
config["n_clusters"] = 1400
config["features_shape"] = 64
config["topk"] = 10
config["kernel"] = "rbf"
config["use_cuda"] = use_cuda
config["dataset_dir"] = base_dir/'data'
config["sequence_dataset_dir"] = '/ekaterina/work/data/many_dataset/original_small'
config["batch_size"] = 256
config["geometric"] = {
"method": cv2.RANSAC,
"max_iters": 5000,
"max_reproj_err": 0.2,
"estimator": lambda d, mask: d ** np.sum(mask)
}
return config | 4,039 | 41.526316 | 123 | py |
NORPPA | NORPPA-main/datasets.py | import os
from pathlib import Path
from tools import read_image
import csv
import numpy as np
from torch.utils.data import Dataset
# NOTE (added): DBDataset below relies on database helpers (get_db_ids, get_label,
# get_patch_features, get_patch_features_multiple_ids, get_patches, get_patches_multiple),
# which are assumed to be provided by the project's sql module; the Fisher-vector helpers
# are imported locally inside the methods to avoid a circular import with
# reidentification.identify.
from sql import *
class DatasetSlice(Dataset):
def __init__(self, dataset, slice=None):
self.dataset = dataset
self.slice = (0, len(self.dataset)) if slice is None else slice
if type(self.slice) is tuple:
self.slice = range(*self.slice)
if hasattr(dataset,'imgs'):
self.imgs = [dataset.imgs[i] for i in self.slice]
if hasattr(dataset,'classes'):
self.classes = dataset.classes
def __getitem__(self, index):
return self.dataset[self.slice[index]]
def __len__(self):
return len(self.slice)
class COCOImageDataset(Dataset):
def __init__(self,
dataset_dir,
annotation,
split):
self.split = split
self.dataset_dir = dataset_dir
self.annotation = annotation
self.data = self._get_data(split, self.annotation)
self.classes = list(self._get_classes(self.data))
def __getitem__(self, index):
img_path, pid = self.data[index]
img = read_image(img_path)
return img, {'class_id': pid, 'file': img_path}
def __len__(self):
return len(self.data)
def _get_classes(self, data):
classes = set([items[1] for items in data])
return classes
def _get_data(self, split, annotation):
""" Get database from COCO anntations """
result = []
with open(annotation, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
if (row["reid_split"] == split):
image_path = self._get_image_path(row["file"])
image_pid = row["class_id"]
result.append((image_path,image_pid))
return result
def _get_image_path(self, filename):
return os.path.join(self.dataset_dir, filename)
class SimpleDataset(Dataset):
def __init__(self,
dataset_dir):
self.dataset_dir = dataset_dir
self.data = self._get_data(dataset_dir)
self.classes = list(self._get_classes(self.data))
def __getitem__(self, index):
img_path, pid = self.data[index]
img = read_image(img_path)
return img, {'class_id': pid, 'file': img_path, 'dataset_dir':self.dataset_dir}
def __len__(self):
return len(self.data)
def _get_classes(self, data):
classes = set([items[1] for items in data])
return classes
def _get_data(self, dataset_dir):
dataset_dir = Path(dataset_dir)
result = []
for class_dir in [x for x in dataset_dir.iterdir() if x.is_dir()]:
for img in class_dir.iterdir():
result.append((str(img), class_dir.name))
return result
def get_labels(self):
labels = [items[1] for items in self.data]
return labels
class SequenceDataset(Dataset):
def __init__(self,
dataset_dir):
self.dataset_dir = dataset_dir
self.data = self._get_data(dataset_dir)
self.classes = list(self._get_classes(self.data))
def __getitem__(self, index):
img_path, pid, seq = self.data[index]
img = read_image(img_path)
return img, {'class_id': pid, 'sequence_id': seq, 'dataset_dir':self.dataset_dir, 'file':img_path}
def __len__(self):
return len(self.data)
def _get_classes(self, data):
classes = set([items[1] for items in data])
return classes
def _get_sequences(self, data):
seqs = np.unique([items[2] for items in data])
return seqs
def get_sequence_ids(self, k=None):
data = self.data
seqs = self._get_sequences(data)
counts = [0] *len(seqs)
res = []
# res = [np.where(seqs==items[2])[0][0] for items in data]
for items in data:
seq_id = np.where(seqs==items[2])[0][0]
if k is None or counts[seq_id] < k:
counts[seq_id]+=1
else:
seq_id = -1
res.append(seq_id)
return res
def get_sequence_labels(self):
data = self.data
seqs = self._get_sequences(data)
res = []
for seq in seqs:
ind = next(i for (i,x) in enumerate(data) if x[2] == seq)
res.append(data[ind][1])
return res
def get_labels(self):
labels = [items[1] for items in self.data]
return labels
def get_sequence_files(self):
data = self.data
seqs = self._get_sequences(data)
res = []
for seq in seqs:
ind = next(i for (i,x) in enumerate(data) if x[2] == seq)
res.append(data[ind][0])
return res
def get_sequence_lenghts(self):
data = self.data
seqs = self._get_sequences(data)
res = []
for seq in seqs:
ln = len(list(filter(lambda x: x[2] == seq, data)))
res.append(ln)
return res
def _get_data(self, dataset_dir):
dataset_dir = Path(dataset_dir)
result = []
for class_dir in [x for x in dataset_dir.iterdir() if x.is_dir()]:
for seq in class_dir.iterdir():
images = []
for img in seq.iterdir():
result.append((str(img), class_dir.name, seq.name))
return result
class DBDataset:
def __init__(self, seal_type, cfg):
self.ids = get_db_ids(cfg["conn"], "norppa")
self.conn = cfg["conn"]
self.cfg = cfg
self._fisher_vectors = None
self._fisher_vectors_seq = None
def get_ids(self):
return self.ids
def get_label(self, i):
return get_label(self.conn, self.ids[i])[0]
def get_label_seq(self, i):
return self._get_sequences()[i]
def get_labels(self):
result = []
for (i, el) in enumerate(self.ids):
result.append(self.get_label(i))
return result
def _get_sequences(self):
seqs = np.unique(self.get_labels())
return seqs
def get_sequence_ids(self):
data = self.ids
seqs = self._get_sequences()
res = [np.where(seqs==self.get_label(i))[0][0] for (i,item) in enumerate(data)]
return res
    def get_fisher_vector(self, i):
        # Local import avoids a circular dependency (reidentification.identify imports this module).
        from reidentification.identify import fisher_single
        if self._fisher_vectors is not None:
            return self._fisher_vectors[i]
        patch_features = get_patch_features(self.conn, self.ids[i])
        encoded = fisher_single(patch_features, self.cfg)
        return encoded
    def get_fisher_vectors(self):
        from reidentification.identify import fisher_multiple
        if self._fisher_vectors is None:
            db_ids, db_features = get_patch_features_multiple_ids(self.conn, self.ids)
            encoded = fisher_multiple(db_features, db_ids, self.cfg)
            self._fisher_vectors = encoded
        return self._fisher_vectors
    def get_fisher_vectors_seq(self):
        from reidentification.identify import fisher_multiple
        if self._fisher_vectors_seq is None:
            db_ids, db_features = get_patch_features_multiple_ids(self.conn, self.ids)
            sequence_ids = self.get_sequence_ids()
            seqs = np.array(sequence_ids).squeeze()
            db_ids = [seqs[self.ids.index(db_id)] for db_id in db_ids]
            encoded = fisher_multiple(db_features, db_ids, self.cfg)
            self._fisher_vectors_seq = encoded
        return self._fisher_vectors_seq
def get_patches(self, i):
return get_patches(self.conn, i)
def get_seq_img_ids(self, seq_id):
data = self.get_sequence_ids()
ids = [i for (i,x) in enumerate(data) if x == seq_id]
res = [self.ids[ind] for ind in ids]
return res
def get_patches_seq(self, seq_id):
ids = self.get_seq_img_ids(seq_id)
return get_patches_multiple(self.conn, ids) | 8,014 | 29.708812 | 106 | py |
NORPPA | NORPPA-main/vis_new_pattern.py | import os
# import sys
# sys.path.append('/ekaterina/work/src/NORPPA/repository/NORPPA')
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from config_whaleshark import config
import matplotlib.pyplot as plt
from pathlib import Path
import numpy as np
import zipfile
import tensorflow as tf
import wget
import pickle
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from torchvision.datasets.utils import download_url
from datasets import SimpleDataset, DatasetSlice
from tools import *
from tonemapping.tonemapping import tonemap, tonemap_step
from segmentation.segmentation import segment
from pattern_extraction.extract_pattern import extract_pattern
from reidentification.identify import *
from reidentification.visualisation import visualise_match
from reidentification.find_matches import find_matches
cfg = config()
cfg["codebooks"] = load_pickle("whaleshark_norppa_tonemapped_pattern_maxim_oldgmm_codebooks_scale1.pickle")
print("loaded codebooks")
encoded_train_dataset = load_pickle("whaleshark_norppa_tonemapped_pattern_maxim_oldgmm_encoded_scale1.pickle")
print("loaded encodings")
test_pipeline1 = [
curry(identify, encoded_train_dataset, cfg["topk"], leave_one_out=True),
curry(print_topk_accuracy, label="Before geometric verification:"),
]
test_pipeline2 = [
curry_sequential(find_matches, cfg),
curry_sequential(apply_geometric, cfg["geometric"]),
curry(print_topk_accuracy, label="After geometric verification:"),
# curry_sequential(visualise_match, cfg["topk"])
]
matches1 = apply_pipeline_dataset(encoded_train_dataset, test_pipeline1)
print("found matches 1")
save_pickle(matches1, "temp/files/new_pattern.matches1.pickle")
matches2 = apply_pipeline_dataset(matches1, test_pipeline2)
print("found matches 1")
save_pickle(matches2, "temp/files/new_pattern.matches2.pickle")
| 3,022 | 34.564706 | 136 | py |
NORPPA | NORPPA-main/config_whaleshark.py |
import sys
from pathlib import Path
import cv2
import numpy as np
file_folder = Path(__file__).resolve().parent
sys.path.append(str(file_folder / "reidentification/hesaff_pytorch"))
from HessianAffinePatches import init_affnet, init_orinet, init_hardnet
from segmentation.detectron_segment import create_predictor
from pattern_extraction.extract_pattern import create_unet
from torchvision.datasets.utils import download_url
from sql import create_connection
def init_file(path, url, allow_download=True):
if Path(path).exists():
return path
elif allow_download:
download_url(url, Path(path).parent, Path(path).name)
return path
else:
raise Exception("The file {path} is not found!")
def config(use_cuda=True, allow_download=True):
config = {}
base_dir = Path(__file__).resolve().parent
mount_path = "/ekaterina/work/data/many_dataset/"
path_db = mount_path + "DB_test.db"
config["conn"] = create_connection(path_db)
config["net"] = init_hardnet(init_file(base_dir/"models/HardNet++.pth",
"https://github.com/kwadraterry/NORPPA/raw/models/models/HardNet++.pth",
allow_download=allow_download),
use_cuda=use_cuda)
affnet = init_affnet(init_file(base_dir/"models/AffNet.pth",
"https://github.com/kwadraterry/NORPPA/raw/models/models/AffNet.pth",
allow_download=allow_download),
use_cuda=use_cuda)
orinet = init_orinet(init_file(base_dir/"models/OriNet.pth",
"https://github.com/kwadraterry/NORPPA/raw/models/models/OriNet.pth",
allow_download=allow_download),
use_cuda=use_cuda)
codebooks_path = init_file(base_dir/'codebooks/codebooks.pickle',
"https://github.com/kwadraterry/NORPPA/raw/models/codebooks/codebooks.pickle",
allow_download=allow_download)
config["codebooks_path"] = Path("/ekaterina/work/src/NORPPA/repository/NORPPA/codebooks/whaleshark_tonemapped_harrisz.pickle")
config["codebooks"] = None
config["hesaff_args"] = {'init_sigma': 1.3213713243956968,
'mrSize': 9.348280997446642,
'nlevels': 10,
'num_features': 480,
'unsharp_amount': 6.80631647207343,
'unsharp_radius': None,
'use_cuda' :use_cuda,
'patch_scale': 1}
config["detectron_predictor"] = create_predictor(init_file(base_dir/"models/R-101-FPN_150ims.pth",
"https://github.com/kwadraterry/NORPPA/raw/models/models/R-101-FPN_150ims.pth",
allow_download=allow_download),
not use_cuda)
config["unet"] = create_unet(init_file(base_dir/"models/unet_seals_512.hdf5",
"https://github.com/kwadraterry/NORPPA/raw/models/models/unet_seals_512.hdf5",
allow_download=allow_download))
config["hesaff_args"]["AffNet"] = affnet
config["hesaff_args"]["OriNet"] = orinet
config["hesaff_args"]["patch_size"] = 32
config["sift_args"] = {'patch_size': 32}
config["use_hesaff"] = False
config["patch_extraction"] = "sift"
config["pca"] = 64
config["use_kpca"] = False
config["n_clusters"] = 1400
config["features_shape"] = 64
config["topk"] = 5
config["kernel"] = "rbf"
config["use_cuda"] = use_cuda
config["dataset_dir"] = base_dir/'data'
config["batch_size"] = 256
config["geometric"] = {
"method": cv2.RANSAC,
"max_iters": 5000,
"max_reproj_err": .05,
"estimator": lambda d, mask: d ** np.sum(mask)
}
return config | 4,166 | 41.520408 | 131 | py |
NORPPA | NORPPA-main/codebooks_whaleshark.py | import os
# import sys
# sys.path.append('/ekaterina/work/src/NORPPA/repository/NORPPA')
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from config_whaleshark import config
import matplotlib.pyplot as plt
from pathlib import Path
import numpy as np
import zipfile
import tensorflow as tf
import wget
import pickle
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from torchvision.datasets.utils import download_url
from datasets import SimpleDataset, DatasetSlice
from tools import *
from tonemapping.tonemapping import tonemap, tonemap_step
from segmentation.segmentation import segment
from pattern_extraction.extract_pattern import extract_pattern
from reidentification.identify import encode_single, encode_pipeline, encode_dataset, identify, identify_single
from reidentification.visualisation import visualise_match
cfg = config()
train_path = "/ekaterina/work/data/whaleshark_norppa_tonemapped/train"
test_path = "/ekaterina/work/data/whaleshark_norppa/test"
dataset_train = Path(train_path)
dataset_test = Path(test_path)
train_dataset = SimpleDataset(dataset_train)
codebooks_path = '/ekaterina/work/src/NORPPA/repository/NORPPA/codebooks/whaleshark_tonemapped.pickle'
codebooks, encoded_dataset = encode_dataset(train_dataset, cfg, compute_codebooks=True)
try:
with open("tonemapped_whaleshark.pickle", 'wb') as f_file:
pickle.dump(encoded_dataset, f_file, protocol=4)
except Exception as e:
print(e)
try:
with open(codebooks_path, "wb") as codebooks_file:
pickle.dump(codebooks, codebooks_file, protocol=4)
except Exception as e:
print(e)
| 1,668 | 30.490566 | 111 | py |
NORPPA | NORPPA-main/reidentification/geometric.py | from skimage.measure import label
from sklearn.decomposition import KernelPCA
from skimage.morphology import convex_hull_image, skeletonize
from cyvlfeat.fisher import fisher
from PIL import Image
import math
from sql import *
import torch
from torchvision import transforms
import pickle
from reidentification.encoding_utils import *
import numpy as np
import torchvision.datasets as dset
import gc
from datasets import DatasetSlice
import pickle
import cv2
import itertools as it
from HessianAffinePatches import extract_hesaff_patches
# Re-scores the candidate matches with geometric verification and returns a list of
# (estimator_score, inlier_mask, original_index) tuples sorted by score; the original
# matches are returned unchanged if no inliers could be computed.
def re_evaluate(matches, est_cfg):
dists = [match["distance"] for match in matches]
inliers = geometric_verification(matches, est_cfg)
if len(inliers) == 0:
return matches
order = [(est_cfg["estimator"](dist, mask), mask, i) for i, (dist, mask) in enumerate(zip(dists, inliers))]
order.sort(key = lambda x: (x[0], x[2]))
return order
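# --- Illustrative sketch (added; not part of the original module) ---
# Typical driver for re_evaluate: `matches` is one query's list of candidate match dicts
# (each with "distance" and "patches" entries, as produced by find_matches upstream), and
# `cfg` is the project config whose cfg["geometric"] holds the RANSAC/estimator settings.
# Both names are assumptions made for illustration.
def _example_rescore_matches(matches, cfg):
    """Re-score one query's candidates with geometric verification."""
    return re_evaluate(matches, cfg["geometric"])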
# Returns the logical array presenting inlier point correspondences, inliers set to 1 and outliers set to 0.
def geometric_verification(matches, est_cfg):
qr_patches_all = [match["patches"][0] for match in matches]
db_patches_all = [match["patches"][1] for match in matches]
qr_coordinates, db_coordinates = get_coordinates(qr_patches_all,
db_patches_all)
homographies, inliers = estimate_homographies(qr_coordinates,
db_coordinates,
est_cfg)
return inliers
def safe_max(x, *args, **kw_args):
if len(x) == 0:
return x
else:
return max(x, *args, **kw_args)
def safe_mean(x, *args, **kw_args):
if len(x) == 0:
return x
else:
return np.mean(x, *args, **kw_args)
# Extracts the x,y point correspondences and translate and scale point sets inside unit circle.
def get_coordinates(qr_patches_all, db_patches_all):
# get xy-pairs
qr_all = np.array([np.array([[qr[0], qr[1]] for qr in qr_patches])
for qr_patches in qr_patches_all], dtype=object)
db_all = np.array([np.array([[db[0], db[1]] for db in db_patches])
for db_patches in db_patches_all], dtype=object)
# translate to origin
qr_mean = np.array([safe_mean(qr_coords, axis=0) for qr_coords in qr_all], dtype=object)
db_mean = np.array([safe_mean(db_coords, axis=0) for db_coords in db_all], dtype=object)
for i, (qr, db) in enumerate(zip(qr_mean, db_mean)):
qr_all[i] -= qr
db_all[i] -= db
# set |p| <= 1
max_l_qr = [safe_max(qr, key=lambda p: np.linalg.norm(p)) for qr in qr_all]
max_l_db = [safe_max(db, key=lambda p: np.linalg.norm(p)) for db in db_all]
for i, (qr, db) in enumerate(zip(max_l_qr, max_l_db)):
if len(qr) == 0 or len(db) == 0:
continue
a, b = np.linalg.norm(qr), np.linalg.norm(db)
qr_all[i] /= a if a > np.finfo(float).eps else 1
db_all[i] /= b if b > np.finfo(float).eps else 1
return qr_all, db_all
def save_findHomography(qr_coords,db_coords,est_cfg):
if len(qr_coords)< 4 or len(db_coords) < 4:
return np.eye(3),np.full((len(qr_coords), 1), True)
    return cv2.findHomography(qr_coords.astype(np.float64),
                              db_coords.astype(np.float64),
method=est_cfg["method"],
ransacReprojThreshold=est_cfg["max_reproj_err"],
maxIters=est_cfg["max_iters"])
# Finds homographies for each query-database image pairs.
def estimate_homographies(qr_coords_all,
db_coords_all,
est_cfg):
models = [
save_findHomography(qr_coords,db_coords,est_cfg)
for qr_coords, db_coords in zip(qr_coords_all, db_coords_all)
]
homographies = [H for H, _ in models]
inliers = [I for _, I in models]
return homographies, inliers
| 4,128 | 32.298387 | 111 | py |
NORPPA | NORPPA-main/reidentification/identify.py | from skimage.measure import label
from sklearn.decomposition import KernelPCA
from skimage.morphology import convex_hull_image, skeletonize
from cyvlfeat.fisher import fisher
from PIL import Image
import math
from sql import *
import torch
from torchvision import transforms
import pickle
from reidentification.encoding_utils import *
from reidentification.geometric import *
import numpy as np
import torchvision.datasets as dset
import gc
from datasets import DatasetSlice
import pickle
import cv2
import itertools as it
from HessianAffinePatches import extract_hesaff_patches
from extract_patches.core import extract_patches as keypoints_to_patches
import kornia as K
import kornia.feature as KF
from kornia_moons.feature import *
torch.autograd.set_grad_enabled(False)
from tqdm import tqdm
def get_patch_num(width, patch_size, step):
return math.ceil((width - patch_size + 1)/step)
def crop_image(img, target, size):
start_x, start_y = target
end_x, end_y = start_x + size, start_y + size
cropped = img.crop((start_x, start_y, end_x, end_y))
return cropped
def thickness_resize(img, thickness=2):
img0 = np.array(img) > 0
area = np.sum(img0)
img1 = skeletonize(img0)
length = np.sum(img1)
thickness_current = area / length
ratio = thickness / thickness_current
    return (img.resize(tuple(int(i * ratio) for i in img.size), Image.LANCZOS), ratio)  # ANTIALIAS was removed in Pillow 10
def check_filled_area(min_val=0.15):
def func(patch):
return np.mean(patch > 0) > min_val
return func
def check_largest_CC(min_val=100):
def func(patch):
labels = label(patch)
if labels.max() == 0:
return False
largestCC = labels == np.argmax(np.bincount(labels.flat)[1:]) + 1
        return np.sum(largestCC) >= min_val
return func
def check_convex_hull_area(min_val=0.5):
def func(patch):
if np.max(patch) == 0:
return False
chull = convex_hull_image(patch.copy())
return np.mean(chull > 0) > min_val
return func
def check_bounding_box_area(min_val=0.8):
def func(patch):
if np.max(patch) == 0:
return False
where = np.nonzero(patch)
y1, x1 = np.amin(where, axis=1)
y2, x2 = np.amax(where, axis=1)
ratio = ((x2 - x1) * (y2 - y1)) / (patch.shape[-1] * patch.shape[-2])
return ratio > min_val
return func
def always_true(patch):
return True
def test_patch(patch, filters):
return np.all([f(patch) for f in filters])
def get_step_range(size, patch_size, step):
fin = size-patch_size+1
return [0] if fin < 1 else range(0, fin, step)
def extract_dense_patches(img, patch_size=48, step=24, final_size=48,
filter_funcs=[check_filled_area(),
check_convex_hull_area()]):
size = img.size
result = []
pos = []
for x in get_step_range(size[0], patch_size, step):
for y in get_step_range(size[1], patch_size, step):
patch = crop_image(img, (x, y), patch_size)
if patch_size != final_size:
patch = patch.resize((final_size, final_size), Image.NONE)
patch = np.asarray(patch)
if test_patch(patch, filter_funcs):
result.append(patch)
pos.append((x, y))
if len(result) == 0:
return None
result = np.stack(result, axis=0)
return result, pos
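# --- Illustrative sketch (added; not part of the original module) ---
# filter_funcs takes predicate closures such as check_filled_area(); a patch is kept only
# if every predicate returns True (see test_patch above). `pattern_img` stands for a
# hypothetical binary pattern image loaded as a PIL image.
def _example_dense_patches(pattern_img):
    """Extract 48x48 dense patches, keeping only reasonably filled, compact ones."""
    filters = [check_filled_area(0.15), check_convex_hull_area(0.5)]
    return extract_dense_patches(pattern_img, patch_size=48, step=24, filter_funcs=filters)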
def apply_net(patch, net):
with torch.no_grad():
return net.forward(patch).detach().cpu().numpy()
def apply_batch_net(patches, net, batch_size=256):
indices = np.append(np.arange(start=0, stop=patches.shape[0], step=batch_size), patches.shape[0])
ind_pairs = list(zip(indices[:-1], indices[1:]))
return np.concatenate([apply_net(patches[start:end,...], net) for (start, end) in ind_pairs])
def cvkeypoint_to_ell(keypoint):
return [keypoint.pt[0], keypoint.pt[1], keypoint.size/2, keypoint.size/2, keypoint.angle * math.pi/180]
def extract_sift_patches(image, patch_size=32, sigma=1.6, nfeatures=480, nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=5, scale=2):
image = np.array(image)
sift = cv2.SIFT_create(nfeatures=nfeatures, nOctaveLayers=nOctaveLayers, contrastThreshold=contrastThreshold, edgeThreshold=edgeThreshold, sigma=sigma)
keypoints = sift.detect(image)
if scale != 1:
for i in range(len(keypoints)):
keypoints[i].size *= scale
patches = np.array(keypoints_to_patches(keypoints, image, patch_size, sigma, 'cv2'))
ells = np.array([np.array(cvkeypoint_to_ell(kp)) for kp in keypoints])
return patches, ells
def patch_extraction(image, config):
if config["use_hesaff"] or config.get("patch_extraction", "hesaff") == "hesaff":
return extract_hesaff_patches(image, **config["hesaff_args"])
elif config.get("patch_extraction", "hesaff") == "sift":
return extract_sift_patches(image, **(config.get("sift_args", {})))
else:
return extract_dense_patches(image, **config["dense_args"])
def fisher_single(patch_features, cfg):
codebooks = load_codebooks(cfg)
encoding_params = codebooks["gmm"]
encoded = fisher(patch_features, *encoding_params, improved=True)
return encoded
def fisher_multiple(db_features, db_ids, cfg):
codebooks = load_codebooks(cfg)
encoding_params = codebooks["gmm"]
_, indices = np.unique(db_ids, return_inverse=True)
encoded, _ = encode_all_images(db_features, indices, encoding_params)
return encoded
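# --- Illustrative sketch (added; not part of the original module) ---
# fisher_single turns one image's (n_patches, d) descriptor matrix into a single improved
# Fisher vector using the GMM stored in the codebooks pickle referenced by cfg; with K
# Gaussian components the encoding has roughly 2*K*d dimensions. `patch_features` and
# `cfg` (the dict built by config.py) are assumed inputs.
def _example_fisher_encode(patch_features, cfg):
    """Encode one image's patch descriptors as an improved Fisher vector."""
    return fisher_single(patch_features, cfg)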
def stable_unique(a):
indexes = np.unique(a, return_index=True)[1]
return [a[index] for index in sorted(indexes)]
def group_by(dataset, group_label, patches):
groups = stable_unique([labels[group_label] for (img, labels) in dataset])
mapping = {group:i for (i,group) in enumerate(groups)}
group_ids = len(dataset) * [None]
group_labels = [{'labels':[], 'class_id':None} for _ in range(len(groups))]
features, inds, all_ells = patches
# all_ells = np.array(all_ells)
for (i, (img, label)) in enumerate(dataset):
group_id = mapping[label[group_label]]
group_ids[i] = group_id
label['id'] = i
filt, = np.where(inds == i)
label['features'] = features[filt, :]
label['ellipses'] = all_ells[i]
group_labels[group_id]['labels'].append(label)
group_labels[group_id]['class_id'] = label['class_id']
return group_ids, group_labels
def getKeyNetAffNetHardNet(num_features=5000, upright=False, scale_laf=1.0):
def init(device):
return KF.KeyNetAffNetHardNet(num_features=num_features, upright=upright, device=device, scale_laf=scale_laf)
def apply(image, detector, dataset_transforms, device):
image = dataset_transforms(image)
image = np.array(image)[None, :, :, None]
timg = K.image_to_tensor(image, False).float()/255.
timg = timg.to(device)
lafs, _, descs = detector(timg)
kps_back = opencv_kpts_from_laf(lafs, scale_laf)
patch_features = descs.cpu()[0, ]
ells = np.array([[kp.pt[0], kp.pt[1], kp.size, kp.size, kp.angle] for kp in kps_back])
return patch_features, ells
return init, apply
def getDISK(pretrained='depth'):
def init(device):
return KF.DISK.from_pretrained(pretrained, device=device)
def apply(image, detector, dataset_transforms, device):
image = np.array(image.convert("RGB"))[None, :, :]
timg = K.image_to_tensor(image, False).float()/255.
timg = timg.to(device)
disk = detector(timg, pad_if_not_divisible=True)[0]
patch_features = disk.descriptors.cpu().numpy()
pts = disk.keypoints.cpu().numpy()
ells = np.array([[kp[0], kp[1], 5, 5, 0] for kp in pts])
return patch_features, ells
return init, apply
def getHessAffNetHardNet(cfg):
def init(device):
return cfg["net"]
def apply(image, detector, dataset_transforms, device):
image = dataset_transforms(image)
        if sum(image.getextrema()) == 0:
            # Blank image after grayscale conversion: nothing to extract.
            return [], []
patches, ells = patch_extraction(image, cfg)
if patches is None or len(patches) == 0:
return [], []
patch_features = torch.from_numpy(patches/255).float().unsqueeze(1)
if cfg["use_cuda"]:
patch_features = patch_features.cuda()
patch_features = apply_batch_net(patch_features, detector, batch_size=cfg["batch_size"])
return patch_features, ells
return init, apply
def patchify(dataset, config, init_apply=None):
if init_apply is None:
init_apply = getHessAffNetHardNet(config)
result = []
labels = []
inds = []
all_ells = []
ind = 0
dataset_transforms = transforms.Grayscale(num_output_channels=1)
init, apply = init_apply
device = torch.device('cuda') if config['use_cuda'] else torch.device('cpu')
print(f"Using device {device}")
detector = init(device)
for i, (image, img_label) in enumerate(tqdm(dataset)):
if image is None or sum(dataset_transforms(image).getextrema()) == 0:
all_ells.append(None)
labels.append(img_label)
continue
        patch_features, ells = apply(image, detector, dataset_transforms, device)
        if len(patch_features) == 0:
            # The detector found no usable patches for this image.
            all_ells.append(None)
            labels.append(img_label)
            continue
        all_ells.append(ells)
        inds.extend([i] * patch_features.shape[0])
        labels.append(img_label)
        result.append(patch_features)
labels = np.array(labels)
return np.vstack(result), np.array(inds), labels, all_ells
def extract_patches(dataset, config, init_apply=None):
return (dataset, patchify(dataset, config, init_apply))
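# _encode_patches() turns raw patch descriptors into one global descriptor per
# group: PCA whitening, grouping by `group_label` (e.g. all images of the same
# file or individual), Fisher Vector encoding against the GMM codebook, and an
# optional Kernel PCA on the resulting vectors.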
def _encode_patches(dataset_patches, config, codebooks=None, group_label='file'):
(dataset, (features, inds, labels, ellipses)) = dataset_patches
print("Calculating PCA")
if codebooks is None:
features, pca = encode_pca(features, n_components=config["pca"], whiten=True)
else:
features = apply_pca(features, codebooks["pca"])
group_ids, group_labels = group_by(dataset, group_label, (features, inds, ellipses))
group = (group_ids is not None) and (group_labels is not None)
if group and len(group_ids)>1:
groups = np.array(group_ids).squeeze()
updated_inds = groups[inds]
else:
updated_inds = inds
labels = np.array(group_labels)
print("Getting encoding parameters...", flush=True)
if codebooks is None:
encoding_params = get_encoding_parameters(features, n_clusters=config["n_clusters"], verbose=True)
else:
encoding_params = codebooks["gmm"]
print("Encoding...")
features, patch_features = encode_all_images(features, updated_inds, encoding_params)
kpca = None
if config["use_kpca"]:
if codebooks["kpca"] is None:
kpca = KernelPCA(n_components=None, kernel=config["kernel"], remove_zero_eig=True)
features = kpca.fit_transform(features)
elif codebooks["kpca"] is not None:
features = codebooks["kpca"].transform(features)
if codebooks is None:
codebooks = {'pca': pca, 'gmm': encoding_params, 'kpca': kpca}
return features, labels, codebooks
def _encode_dataset(dataset, config, codebooks=None, group_label='file'):
    return _encode_patches(extract_patches(dataset, config), config, codebooks, group_label)
def do_matching(test_feats, db_feats, percentile=10):
    # Guard against empty inputs before computing the distance matrix.
    if test_feats.size == 0 or db_feats.size == 0:
        return (np.array([]), np.array([]), np.array([]))
    dists, sorted_inds = calculate_dists(test_feats, db_feats)
    sorted_dists = np.take_along_axis(dists, sorted_inds, axis=1)
mean_dist = np.percentile(sorted_dists[:, 0], percentile)
filt = sorted_dists[:, 0] <= mean_dist
sorted_inds = sorted_inds[filt, 0]
sorted_dists = sorted_dists[filt, 0]
similarity = (np.max(sorted_dists) - sorted_dists) / np.max(sorted_dists)
filt = np.nonzero(filt)[0]
similarity[np.isnan(similarity)] = 1.0
max_len = 200
if len(similarity) > max_len:
similarity = similarity[:max_len]
sorted_inds = sorted_inds[:max_len]
filt = filt[:max_len]
return (filt, sorted_inds, similarity)
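# do_matching() keeps only the query descriptors whose nearest database
# descriptor lies within the given distance percentile, converts those
# distances to similarities in [0, 1] and caps the result at 200 matches.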
def do_matching_geom(test_feats, test_patches, db_feats, db_patches, percentile=10):
pass
def match_topk(test_features, db_features, topk, leave_one_out=False):
dists, inds = calculate_dists(test_features, db_features, leave_one_out=leave_one_out)
sorted_inds = np.argsort(dists, axis=1)
dists = np.take_along_axis(dists, sorted_inds, axis=1)
return dists[:, :topk], sorted_inds[:, :topk]
def load_codebooks(cfg):
if cfg['codebooks'] is None:
print(cfg["codebooks_path"])
with open(cfg['codebooks_path'],"rb") as codebooks_file:
cfg['codebooks'] = pickle.load(codebooks_file)
return cfg['codebooks']
def encode_single(image, label, cfg):
if image is None:
return image
dataset_transforms = transforms.Grayscale(num_output_channels=1)
image = dataset_transforms(image)
return encode_dataset([(image, label)], cfg)
def encode_pipeline(input, cfg):
if input[0] is None:
return input
return encode_dataset([input], cfg)
def encode_dataset(dataset, cfg, group_label='file', init_apply_encoders=None, compute_codebooks=False):
patches = extract_patches(dataset, cfg, init_apply_encoders)
return encode_patches(patches, cfg, group_label=group_label, compute_codebooks=compute_codebooks)
def encode_patches(dataset, cfg, group_label='file', compute_codebooks=False):
if compute_codebooks:
codebooks = None
else:
codebooks = load_codebooks(cfg)
query_features, query_labels, codebooks = _encode_patches(dataset, cfg, codebooks, group_label)
if compute_codebooks:
return (codebooks, list(zip(query_features, query_labels)))
else:
return list(zip(query_features, query_labels))
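# Typical use of the encoding entry points (sketch; assumes `train_set` and
# `query_set` are lists of (PIL.Image, label-dict) pairs and that cfg holds
# the keys used above, e.g. "use_cuda", "pca", "n_clusters", "use_kpca",
# "codebooks"/"codebooks_path"):
#
#   codebooks, db = encode_dataset(train_set, cfg, compute_codebooks=True)
#   cfg["codebooks"] = codebooks
#   queries = encode_dataset(query_set, cfg)
#   results = identify(queries, db, topk=5)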
def identify_single(query, database, cfg):
    # identify() takes (query, database, topk, leave_one_out); cfg is not needed here.
    return identify([query], database)
def get_fisher_vectors(db):
if hasattr(db, 'get_fisher_vectors'):
return db.get_fisher_vectors()
else:
return np.concatenate([f[np.newaxis,...] for (f, _) in db])
def get_label(db, i):
if hasattr(db, 'get_label'):
return db.get_label(i)
else:
return db[i][1]
def identify(query, database=None, topk=5, leave_one_out=False):
if database is None:
database = query
query_features = np.concatenate([f[np.newaxis,...] for (f, _) in query])
def add_fisher_field(label, fisher):
label["fisher"] = fisher
return label
query_labels = [add_fisher_field(l, f) for (f, l) in query]
db_features = get_fisher_vectors(database)
dists, request_ids = match_topk(query_features, db_features, topk, leave_one_out=leave_one_out)
matches = [None] * request_ids.shape[0]
for i in tqdm(range(request_ids.shape[0])):
matches[i] = [None] * request_ids.shape[1]
for j in range(request_ids.shape[1]):
matches[i][j] = {"db_label": add_fisher_field(get_label(database, request_ids[i, j]), db_features[request_ids[i, j]]), "distance": dists[i, j]}
return list(zip(matches, query_labels))
def apply_geometric(input, params):
matches, query_labels = input
order = re_evaluate(matches, params)
for est, mask, k in order:
matches[k]["Geom_Est"] = est
matches[k]["Mask"] = mask
matches = [matches[k] for _, _, k in order]
return [(matches, query_labels)]
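# create_sql_database() persists one row per image (file name, class id, seal
# type, Fisher vector, timestamp) plus one row per patch descriptor, using the
# open connection in cfg["conn"].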
def create_sql_database(dataset, cfg, db_components, seal_type="norppa", compute_codebooks=False):
create_database_table(cfg["conn"])
create_patches_table(cfg["conn"])
if compute_codebooks:
codebooks = None
else:
codebooks = load_codebooks(cfg)
db_features, db_labels, patch_features, patches = db_components
now = datetime.now()
now = now.strftime("%d-%m-%YT%H:%M:%S")
for i, (image, image_label) in enumerate(dataset):
img_id = insert_database(cfg["conn"],image_label["file"], db_labels[i]["class_id"], seal_type, db_features[i, ...], now)
if patches[i] is not None:
for j, patch in enumerate(patches[i]):
insert_patches(cfg["conn"], img_id, patch, patch_features[i][j, ...])
del db_features
del patch_features
del patches
del db_labels
gc.collect()
cfg["conn"].commit()
| 16,717 | 33.328542 | 155 | py |
NORPPA | NORPPA-main/reidentification/hesaff_pytorch/HandCraftedModules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
import numpy as np
from Utils import GaussianBlur, CircularGaussKernel
from LAF import abc2A,rectifyAffineTransformationUpIsUp, sc_y_x2LAFs
from Utils import generate_2dgrid, generate_2dgrid, generate_3dgrid
from Utils import zero_response_at_border
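# Hand-crafted building blocks of the Hessian-Affine detector:
#   ScalePyramid          - Gaussian scale-space pyramid with per-level sigmas.
#   HessianResp           - determinant-of-Hessian response map, scaled by sigma**4.
#   AffineShapeEstimator  - affine shape from the gradient second-moment matrix
#                           (one Baumberg-style adaptation step).
#   OrientationDetector   - dominant gradient orientation from a 36-bin histogram.
#   NMS2d / NMS3d(AndComposeA) - non-maximum suppression in space / scale space,
#                           composing the surviving maxima into local affine frames.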
class ScalePyramid(nn.Module):
def __init__(self, nLevels = 3, init_sigma = 1.6, border = 5):
super(ScalePyramid,self).__init__()
self.nLevels = nLevels;
self.init_sigma = init_sigma
self.sigmaStep = 2 ** (1. / float(self.nLevels))
#print 'step',self.sigmaStep
self.b = border
self.minSize = 2 * self.b + 2 + 1;
return
def forward(self,x):
pixelDistance = 1.0;
curSigma = 0.5
if self.init_sigma > curSigma:
sigma = np.sqrt(self.init_sigma**2 - curSigma**2)
curSigma = self.init_sigma
curr = GaussianBlur(sigma = sigma)(x)
else:
curr = x
sigmas = [[curSigma]]
pixel_dists = [[1.0]]
pyr = [[curr]]
j = 0
while True:
curr = pyr[-1][0]
for i in range(1, self.nLevels + 2):
sigma = curSigma * np.sqrt(self.sigmaStep*self.sigmaStep - 1.0 )
#print 'blur sigma', sigma
curr = GaussianBlur(sigma = sigma)(curr)
curSigma *= self.sigmaStep
pyr[j].append(curr)
sigmas[j].append(curSigma)
pixel_dists[j].append(pixelDistance)
if i == self.nLevels:
nextOctaveFirstLevel = F.avg_pool2d(curr, kernel_size = 1, stride = 2, padding = 0)
pixelDistance = pixelDistance * 2.0
curSigma = self.init_sigma
if (nextOctaveFirstLevel[0,0,:,:].size(0) <= self.minSize) or (nextOctaveFirstLevel[0,0,:,:].size(1) <= self.minSize):
break
pyr.append([nextOctaveFirstLevel])
sigmas.append([curSigma])
pixel_dists.append([pixelDistance])
j+=1
return pyr, sigmas, pixel_dists
class HessianResp(nn.Module):
def __init__(self):
super(HessianResp, self).__init__()
self.gx = nn.Conv2d(1, 1, kernel_size=(1,3), bias = False)
self.gx.weight.data = torch.from_numpy(np.array([[[[0.5, 0, -0.5]]]], dtype=np.float32))
self.gy = nn.Conv2d(1, 1, kernel_size=(3,1), bias = False)
self.gy.weight.data = torch.from_numpy(np.array([[[[0.5], [0], [-0.5]]]], dtype=np.float32))
self.gxx = nn.Conv2d(1, 1, kernel_size=(1,3),bias = False)
self.gxx.weight.data = torch.from_numpy(np.array([[[[1.0, -2.0, 1.0]]]], dtype=np.float32))
self.gyy = nn.Conv2d(1, 1, kernel_size=(3,1), bias = False)
self.gyy.weight.data = torch.from_numpy(np.array([[[[1.0], [-2.0], [1.0]]]], dtype=np.float32))
return
def forward(self, x, scale):
gxx = self.gxx(F.pad(x, (1,1,0, 0), 'replicate'))
gyy = self.gyy(F.pad(x, (0,0, 1,1), 'replicate'))
gxy = self.gy(F.pad(self.gx(F.pad(x, (1,1,0, 0), 'replicate')), (0,0, 1,1), 'replicate'))
return torch.abs(gxx * gyy - gxy * gxy) * (scale**4)
class AffineShapeEstimator(nn.Module):
def __init__(self, threshold = 0.001, patch_size = 19):
super(AffineShapeEstimator, self).__init__()
self.threshold = threshold;
self.PS = patch_size
self.gx = nn.Conv2d(1, 1, kernel_size=(1,3), bias = False)
self.gx.weight.data = torch.from_numpy(np.array([[[[-1, 0, 1]]]], dtype=np.float32))
self.gy = nn.Conv2d(1, 1, kernel_size=(3,1), bias = False)
self.gy.weight.data = torch.from_numpy(np.array([[[[-1], [0], [1]]]], dtype=np.float32))
self.gk = torch.from_numpy(CircularGaussKernel(kernlen = self.PS, sigma = (self.PS / 2) /3.0).astype(np.float32))
self.gk = Variable(self.gk, requires_grad=False)
return
def invSqrt(self,a,b,c):
eps = 1e-12
mask = (b != 0).float()
r1 = mask * (c - a) / (2. * b + eps)
t1 = torch.sign(r1) / (torch.abs(r1) + torch.sqrt(1. + r1*r1));
r = 1.0 / torch.sqrt( 1. + t1*t1)
t = t1*r;
r = r * mask + 1.0 * (1.0 - mask);
t = t * mask;
x = 1. / torch.sqrt( r*r*a - 2.0*r*t*b + t*t*c)
z = 1. / torch.sqrt( t*t*a + 2.0*r*t*b + r*r*c)
d = torch.sqrt( x * z)
x = x / d
z = z / d
l1 = torch.max(x,z)
l2 = torch.min(x,z)
new_a = r*r*x + t*t*z
new_b = -r*t*x + t*r*z
new_c = t*t*x + r*r *z
return new_a, new_b, new_c, l1, l2
def forward(self,x, return_mask = False):
if x.is_cuda:
self.gk = self.gk.cuda()
else:
self.gk = self.gk.cpu()
gx = self.gx(F.pad(x, (1, 1, 0, 0), 'replicate'))
gy = self.gy(F.pad(x, (0, 0, 1, 1), 'replicate'))
a1 = (gx * gx * self.gk.unsqueeze(0).unsqueeze(0).expand_as(gx)).view(x.size(0),-1).mean(dim=1)
b1 = (gx * gy * self.gk.unsqueeze(0).unsqueeze(0).expand_as(gx)).view(x.size(0),-1).mean(dim=1)
c1 = (gy * gy * self.gk.unsqueeze(0).unsqueeze(0).expand_as(gx)).view(x.size(0),-1).mean(dim=1)
a, b, c, l1, l2 = self.invSqrt(a1,b1,c1)
rat1 = l1/l2
mask = (torch.abs(rat1) <= 6.).float().view(-1);
if return_mask:
return abc2A(a,b,c), mask
else:
return abc2A(a,b,c)
class OrientationDetector(nn.Module):
def __init__(self,
mrSize = 3.0, patch_size = None):
super(OrientationDetector, self).__init__()
if patch_size is None:
patch_size = 32;
self.PS = patch_size;
self.bin_weight_kernel_size, self.bin_weight_stride = self.get_bin_weight_kernel_size_and_stride(self.PS, 1)
self.mrSize = mrSize;
self.num_ang_bins = 36
self.gx = nn.Conv2d(1, 1, kernel_size=(1,3), bias = False)
self.gx.weight.data = torch.from_numpy(np.array([[[[0.5, 0, -0.5]]]], dtype=np.float32))
self.gy = nn.Conv2d(1, 1, kernel_size=(3,1), bias = False)
self.gy.weight.data = torch.from_numpy(np.array([[[[0.5], [0], [-0.5]]]], dtype=np.float32))
self.angular_smooth = nn.Conv1d(1, 1, kernel_size=3, padding = 1, bias = False)
self.angular_smooth.weight.data = torch.from_numpy(np.array([[[0.33, 0.34, 0.33]]], dtype=np.float32))
self.gk = 10. * torch.from_numpy(CircularGaussKernel(kernlen=self.PS).astype(np.float32))
self.gk = Variable(self.gk, requires_grad=False)
return
def get_bin_weight_kernel_size_and_stride(self, patch_size, num_spatial_bins):
bin_weight_stride = int(round(2.0 * np.floor(patch_size / 2) / float(num_spatial_bins + 1)))
bin_weight_kernel_size = int(2 * bin_weight_stride - 1);
return bin_weight_kernel_size, bin_weight_stride
def get_rotation_matrix(self, angle_in_radians):
angle_in_radians = angle_in_radians.view(-1, 1, 1);
sin_a = torch.sin(angle_in_radians)
cos_a = torch.cos(angle_in_radians)
A1_x = torch.cat([cos_a, sin_a], dim = 2)
A2_x = torch.cat([-sin_a, cos_a], dim = 2)
transform = torch.cat([A1_x,A2_x], dim = 1)
return transform
def forward(self, x, return_rot_matrix = False):
gx = self.gx(F.pad(x, (1,1,0, 0), 'replicate'))
gy = self.gy(F.pad(x, (0,0, 1,1), 'replicate'))
mag = torch.sqrt(gx * gx + gy * gy + 1e-10)
if x.is_cuda:
self.gk = self.gk.cuda()
mag = mag * self.gk.unsqueeze(0).unsqueeze(0).expand_as(mag)
ori = torch.atan2(gy,gx)
o_big = float(self.num_ang_bins) *(ori + 1.0 * math.pi )/ (2.0 * math.pi)
bo0_big = torch.floor(o_big)
wo1_big = o_big - bo0_big
bo0_big = bo0_big % self.num_ang_bins
bo1_big = (bo0_big + 1) % self.num_ang_bins
wo0_big = (1.0 - wo1_big) * mag
wo1_big = wo1_big * mag
ang_bins = []
for i in range(0, self.num_ang_bins):
ang_bins.append(F.adaptive_avg_pool2d((bo0_big == i).float() * wo0_big, (1,1)))
ang_bins = torch.cat(ang_bins,1).view(-1,1,self.num_ang_bins)
ang_bins = self.angular_smooth(ang_bins)
values, indices = ang_bins.view(-1,self.num_ang_bins).max(1)
angle = -((2. * float(np.pi) * indices.float() / float(self.num_ang_bins)) - float(math.pi))
if return_rot_matrix:
return self.get_rotation_matrix(angle)
return angle
class NMS2d(nn.Module):
def __init__(self, kernel_size = 3, threshold = 0):
super(NMS2d, self).__init__()
        self.MP = nn.MaxPool2d(kernel_size, stride=1, return_indices=False, padding = int(kernel_size/2))
self.eps = 1e-5
self.th = threshold
return
def forward(self, x):
#local_maxima = self.MP(x)
if self.th > self.eps:
return x * (x > self.th).float() * ((x + self.eps - self.MP(x)) > 0).float()
else:
return ((x - self.MP(x) + self.eps) > 0).float() * x
class NMS3d(nn.Module):
def __init__(self, kernel_size = 3, threshold = 0):
super(NMS3d, self).__init__()
self.MP = nn.MaxPool3d(kernel_size, stride=1, return_indices=False, padding = (0, int(kernel_size/2), int(kernel_size/2)))
self.eps = 1e-5
self.th = threshold
return
def forward(self, x):
#local_maxima = self.MP(x)
if self.th > self.eps:
return x * (x > self.th).float() * ((x + self.eps - self.MP(x)) > 0).float()
else:
return ((x - self.MP(x) + self.eps) > 0).float() * x
class NMS3dAndComposeA(nn.Module):
def __init__(self, w = 0, h = 0, kernel_size = 3, threshold = 0, scales = None, border = 3, mrSize = 1.0):
super(NMS3dAndComposeA, self).__init__()
self.eps = 1e-7
self.ks = 3
self.th = threshold
self.cube_idxs = []
self.border = border
self.mrSize = mrSize
self.beta = 1.0
self.grid_ones = Variable(torch.ones(3,3,3,3), requires_grad=False)
self.NMS3d = NMS3d(kernel_size, threshold)
if (w > 0) and (h > 0):
self.spatial_grid = generate_2dgrid(h, w, False).view(1, h, w,2).permute(3,1, 2, 0)
self.spatial_grid = Variable(self.spatial_grid)
else:
self.spatial_grid = None
return
def forward(self, low, cur, high, num_features = 0, octaveMap = None, scales = None):
assert low.size() == cur.size() == high.size()
#Filter responce map
self.is_cuda = low.is_cuda;
resp3d = torch.cat([low,cur,high], dim = 1)
mrSize_border = int(self.mrSize);
if octaveMap is not None:
nmsed_resp = zero_response_at_border(self.NMS3d(resp3d.unsqueeze(1)).squeeze(1)[:,1:2,:,:], mrSize_border) * (1. - octaveMap.float())
else:
nmsed_resp = zero_response_at_border(self.NMS3d(resp3d.unsqueeze(1)).squeeze(1)[:,1:2,:,:], mrSize_border)
num_of_nonzero_responces = (nmsed_resp > 0).sum().data.item()
if (num_of_nonzero_responces == 0):
return None,None,None
if octaveMap is not None:
octaveMap = (octaveMap.float() + nmsed_resp.float()).byte()
nmsed_resp = nmsed_resp.view(-1)
if (num_features > 0) and (num_features < num_of_nonzero_responces):
nmsed_resp, idxs = torch.topk(nmsed_resp, k = num_features);
else:
# idxs = nmsed_resp.data.nonzero().squeeze()
idxs = torch.nonzero(nmsed_resp.data).squeeze()
nmsed_resp = nmsed_resp[idxs]
#Get point coordinates grid
if type(scales) is not list:
self.grid = generate_3dgrid(3,self.ks,self.ks)
else:
self.grid = generate_3dgrid(scales,self.ks,self.ks)
self.grid = Variable(self.grid.t().contiguous().view(3,3,3,3), requires_grad=False)
if self.spatial_grid is None:
self.spatial_grid = generate_2dgrid(low.size(2), low.size(3), False).view(1, low.size(2), low.size(3),2).permute(3,1, 2, 0)
self.spatial_grid = Variable(self.spatial_grid)
if self.is_cuda:
self.spatial_grid = self.spatial_grid.cuda()
self.grid_ones = self.grid_ones.cuda()
self.grid = self.grid.cuda()
#residual_to_patch_center
sc_y_x = F.conv2d(resp3d, self.grid,
padding = 1) / (F.conv2d(resp3d, self.grid_ones, padding = 1) + 1e-8)
##maxima coords
sc_y_x[0,1:,:,:] = sc_y_x[0,1:,:,:] + self.spatial_grid[:,:,:,0]
sc_y_x = sc_y_x.view(3,-1).t()
sc_y_x = sc_y_x[idxs,:]
if len(sc_y_x.shape) == 1:
sc_y_x = sc_y_x.unsqueeze(0)
min_size = float(min((cur.size(2)), cur.size(3)))
sc_y_x[:,0] = sc_y_x[:,0] / min_size
sc_y_x[:,1] = sc_y_x[:,1] / float(cur.size(2))
sc_y_x[:,2] = sc_y_x[:,2] / float(cur.size(3))
return nmsed_resp, sc_y_x2LAFs(sc_y_x), octaveMap
| 13,280 | 43.27 | 145 | py |
NORPPA | NORPPA-main/reidentification/hesaff_pytorch/HardNet.py | import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import time
import os
import math
import numpy as np
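# Patch descriptors operating on 32x32 grayscale patches:
#   HardNet      - 7-layer convolutional net producing a 128-D L2-normalised descriptor.
#   HardTFeatNet - TFeat-style variant that keeps a reference to an external SIFT module.
# input_norm() standardises every patch by its own mean and std before the
# convolutional stack.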
class L2Norm(nn.Module):
def __init__(self):
super(L2Norm,self).__init__()
self.eps = 1e-8
def forward(self, x):
norm = torch.sqrt(torch.sum(x * x, dim = 1) + self.eps)
x= x / norm.unsqueeze(-1).expand_as(x)
return x
class L1Norm(nn.Module):
def __init__(self):
super(L1Norm,self).__init__()
self.eps = 1e-10
def forward(self, x):
norm = torch.sum(torch.abs(x), dim = 1) + self.eps
x= x / norm.expand_as(x)
return x
class HardTFeatNet(nn.Module):
"""TFeat model definition
"""
def __init__(self, sm):
super(HardTFeatNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=7),
nn.Tanh(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(32, 64, kernel_size=6),
nn.Tanh()
)
self.classifier = nn.Sequential(
nn.Dropout(0.1),
nn.Conv2d(64, 128, kernel_size=8),
nn.Tanh())
self.SIFT = sm
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1)
sp = torch.std(flat, dim=1) + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def forward(self, input):
x_features = self.features(self.input_norm(input))
x = self.classifier(x_features)
return L2Norm()(x.view(x.size(0), -1))
class HardNet(nn.Module):
"""HardNet model definition
"""
def __init__(self):
super(HardNet, self).__init__()
self.features = nn.Sequential( # 32 x 32 x 1
nn.Conv2d(1, 32, kernel_size=3, padding=1, bias = False), # 32 x 32 x 32
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, padding=1, bias = False), # 32 x 32 x 32
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False), # 16 x 16 x 64
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, padding=1, bias = False), # 16 x 16 x 64
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 128, kernel_size=3, stride=2,padding=1, bias = False), # 8 x 8 x 128
nn.BatchNorm2d(128, affine=False),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1, bias = False), # 8 x 8 x 128
nn.BatchNorm2d(128, affine=False),
nn.ReLU(),
nn.Dropout(0.1),
nn.Conv2d(128, 128, kernel_size=8, bias = False), # 1 x 1 x 128
nn.BatchNorm2d(128, affine=False),
)
#self.features.apply(weights_init)
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1)
sp = torch.std(flat, dim=1) + 1e-7
return (x - mp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def forward(self, input):
x_features = self.features(self.input_norm(input))
x = x_features.view(x_features.size(0), -1)
return L2Norm()(x) | 3,589 | 34.544554 | 155 | py |
NORPPA | NORPPA-main/reidentification/hesaff_pytorch/HessianAffinePatches.py | import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
from SparseImgRepresenter import ScaleSpaceAffinePatchExtractor
from LAF import denormalizeLAFs, LAFs2ell
from Utils import line_prepender
from architectures import AffNetFast, OriNetFast
from skimage.filters import unsharp_mask
import math
from HardNet import HardNet
def init_hardnet(model_weights, use_cuda=True):
descriptor = HardNet()
hncheckpoint = torch.load(model_weights, map_location=torch.device('cpu'))
descriptor.load_state_dict(hncheckpoint['state_dict'])
descriptor.eval()
if use_cuda:
descriptor = descriptor.cuda()
return descriptor
def init_affnet(weightd_fname, patch_size=32, use_cuda=True):
AffNetPix = AffNetFast(PS=patch_size)
checkpoint = torch.load(weightd_fname, map_location=torch.device('cpu'))
AffNetPix.load_state_dict(checkpoint['state_dict'])
AffNetPix.eval()
if use_cuda:
AffNetPix.cuda()
return AffNetPix
def init_orinet(o_fname, patch_size=32, use_cuda=True):
ONet = OriNetFast(PS=patch_size)
checkpoint = torch.load(o_fname, map_location=torch.device('cpu'))
ONet.load_state_dict(checkpoint['state_dict'])
ONet.eval()
if use_cuda:
ONet.cuda()
return ONet
def LAF2ell(LAF):
LAF = np.array(LAF)
u, s, _ = np.linalg.svd(LAF[:2, :2])
angle = math.atan2(u[1, 0], u[0, 0])
return np.array([LAF[0, 2], LAF[1, 2], s[0], s[1], angle])
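# extract_hesaff_patches() runs the full Hessian-Affine pipeline on a single
# grayscale image: detect affine-covariant keypoints with
# ScaleSpaceAffinePatchExtractor, sample patch_size x patch_size patches from
# the scale pyramid, optionally sharpen them with unsharp_mask, and convert
# each LAF to ellipse parameters (x, y, s1, s2, angle) via LAF2ell().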
def extract_hesaff_patches(img_detect,
num_features=400,
nlevels=10,
# mrSize=12,
mrSize=12,
# border=5,
border=5,
num_Baum_iters=26,
patch_size=48,
# init_sigma=1.6,
init_sigma = 200,
unsharp_radius=1,
unsharp_amount=25,
patch_scale=1,
RespNet=None,
OriNet=None,
AffNet=None,
use_cuda=True):
HA = ScaleSpaceAffinePatchExtractor(nlevels=int(nlevels),
mrSize=mrSize,
num_features=int(num_features),
border=border,
num_Baum_iters=num_Baum_iters,
patch_size=patch_size,
init_sigma=init_sigma,
AffNet=AffNet,
OriNet=OriNet,
RespNet=RespNet,
patch_scale=patch_scale)
var_image_detect = torch.autograd.Variable(torch.from_numpy(np.array(img_detect).astype(np.float32)))
var_image_detect = var_image_detect.view(1, 1, var_image_detect.size(0),var_image_detect.size(1))
if use_cuda:
HA = HA.cuda()
var_image_detect = var_image_detect.cuda()
LAFs, resp = HA(var_image_detect)
patches = HA.extract_patches_from_pyr(LAFs, patch_size)
if use_cuda:
patches = patches.cpu()
patches = patches[:, 0, ...].detach().numpy()
# print(unsharp_radius)
if (unsharp_radius is not None) and (unsharp_amount is not None):
for i in range(patches.shape[0]):
patches[i,...] = 255*unsharp_mask(patches[i,...]/255, radius=unsharp_radius, amount=unsharp_amount)
LAFs = LAFs.cpu().detach().numpy()
ells = [None] * LAFs.shape[0]
for i in range(len(ells)):
ells[i] = LAF2ell(LAFs[i, ...])
return patches, ells
| 3,848 | 36.735294 | 111 | py |
NORPPA | NORPPA-main/reidentification/hesaff_pytorch/LAF.py | import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from scipy.spatial.distance import cdist
from numpy.linalg import inv
from scipy.linalg import schur, sqrtm
import torch
from torch.autograd import Variable
import torch.nn.functional as F
##########numpy
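# Conventions used throughout this file:
#   LAF (Local Affine Frame): 2x3 matrix [A | t], where A is the 2x2 affine
#     shape and t = (x, y) the keypoint centre; "H frames" append the row [0, 0, 1].
#   ellipse: 5-vector (x, y, a, b, c), the entries of the symmetric matrix
#     E = [[a, b], [b, c]] with (u - t)^T E (u - t) = 1.
#   "normalized" LAFs have the 2x2 part divided by min(h, w) and the centre by
#     (w, h); see normalizeLAFs / denormalizeLAFs.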
def invSqrt(a,b,c):
eps = 1e-12
mask = (b != 0)
r1 = mask * (c - a) / (2. * b + eps)
t1 = np.sign(r1) / (np.abs(r1) + np.sqrt(1. + r1*r1));
r = 1.0 / np.sqrt( 1. + t1*t1)
t = t1*r;
r = r * mask + 1.0 * (1.0 - mask);
t = t * mask;
x = 1. / np.sqrt( r*r*a - 2*r*t*b + t*t*c)
z = 1. / np.sqrt( t*t*a + 2*r*t*b + r*r*c)
d = np.sqrt( x * z)
x = x / d
z = z / d
new_a = r*r*x + t*t*z
new_b = -r*t*x + t*r*z
new_c = t*t*x + r*r *z
return new_a, new_b, new_c
def LAFs2ellT(LAFs):
ellipses = torch.zeros((len(LAFs),5))
if LAFs.is_cuda:
ellipses = ellipses.cuda()
scale = torch.sqrt(LAFs[:,0,0]*LAFs[:,1,1] - LAFs[:,0,1]*LAFs[:,1, 0] + 1e-10)#.view(-1,1,1)
unscaled_As = LAFs[:,0:2,0:2] / scale.view(-1,1,1).repeat(1,2,2)
u, W, v = bsvd2x2(unscaled_As)
#W = 1.0 / ((W *scale.view(-1,1,1).repeat(1,2,2))**2)
W[:,0,0] = 1.0 / (scale*scale*W[:,0,0]**2 )
W[:,1,1] = 1.0 / (scale*scale*W[:,1,1]**2 )
A = torch.bmm(torch.bmm(u,W), u.permute(0,2,1))
ellipses[:,0] = LAFs[:,0,2]
ellipses[:,1] = LAFs[:,1,2]
ellipses[:,2] = A[:,0,0]
ellipses[:,3] = A[:,0,1]
ellipses[:,4] = A[:,1,1]
return ellipses
def invSqrtTorch(a,b,c):
eps = 1e-12
mask = (b != 0).float()
r1 = mask * (c - a) / (2. * b + eps)
t1 = torch.sign(r1) / (torch.abs(r1) + torch.sqrt(1. + r1*r1));
r = 1.0 / torch.sqrt( 1. + t1*t1)
t = t1*r;
r = r * mask + 1.0 * (1.0 - mask);
t = t * mask;
x = 1. / torch.sqrt( r*r*a - 2.0*r*t*b + t*t*c)
z = 1. / torch.sqrt( t*t*a + 2.0*r*t*b + r*r*c)
d = torch.sqrt( x * z)
x = x / d
z = z / d
new_a = r*r*x + t*t*z
new_b = -r*t*x + t*r*z
new_c = t*t*x + r*r *z
return new_a, new_b, new_c,
def ells2LAFsT(ells):
LAFs = torch.zeros((len(ells), 2,3))
LAFs[:,0,2] = ells[:,0]
LAFs[:,1,2] = ells[:,1]
a = ells[:,2]
b = ells[:,3]
c = ells[:,4]
sc = torch.sqrt(torch.sqrt(a*c - b*b + 1e-12))
ia,ib,ic = invSqrtTorch(a,b,c) #because sqrtm returns ::-1, ::-1 matrix, don`t know why
A = torch.cat([torch.cat([(ia/sc).view(-1,1,1), (ib/sc).view(-1,1,1)], dim = 2),
torch.cat([(ib/sc).view(-1,1,1), (ic/sc).view(-1,1,1)], dim = 2)], dim = 1)
sc = torch.sqrt(torch.abs(A[:,0,0] * A[:,1,1] - A[:,1,0] * A[:,0,1]))
LAFs[:,0:2,0:2] = rectifyAffineTransformationUpIsUp(A / sc.view(-1,1,1).repeat(1,2,2)) * sc.view(-1,1,1).repeat(1,2,2)
return LAFs
def LAFs_to_H_frames(aff_pts):
H3_x = torch.Tensor([0, 0, 1 ]).unsqueeze(0).unsqueeze(0).repeat(aff_pts.size(0),1,1);
if aff_pts.is_cuda:
H3_x = H3_x.cuda()
return torch.cat([aff_pts, H3_x], dim = 1)
def checkTouchBoundary(LAFs):
pts = torch.FloatTensor([[-1, -1, 1, 1], [-1, 1, -1, 1], [1, 1, 1, 1]]).unsqueeze(0)
if LAFs.is_cuda:
pts = pts.cuda()
out_pts = torch.bmm(LAFs_to_H_frames(LAFs),pts.expand(LAFs.size(0),3,4))[:,:2,:]
good_points = ~(((out_pts > 1.0) + (out_pts < 0.0)).sum(dim=1).sum(dim=1) > 0)
return good_points
def bsvd2x2(As):
Su = torch.bmm(As,As.permute(0,2,1))
phi = 0.5 * torch.atan2(Su[:,0,1] + Su[:,1,0] + 1e-12, Su[:,0,0] - Su[:,1,1] + 1e-12)
Cphi = torch.cos(phi)
Sphi = torch.sin(phi)
U = torch.zeros(As.size(0),2,2)
if As.is_cuda:
U = U.cuda()
U[:,0,0] = Cphi
U[:,1,1] = Cphi
U[:,0,1] = -Sphi
U[:,1,0] = Sphi
Sw = torch.bmm(As.permute(0,2,1),As)
theta = 0.5 * torch.atan2(Sw[:,0,1] + Sw[:,1,0] + 1e-12, Sw[:,0,0] - Sw[:,1,1] + 1e-12)
Ctheta = torch.cos(theta)
Stheta = torch.sin(theta)
W = torch.zeros(As.size(0),2,2)
if As.is_cuda:
W = W.cuda()
W[:,0,0] = Ctheta
W[:,1,1] = Ctheta
W[:,0,1] = -Stheta
W[:,1,0] = Stheta
SUsum = Su[:,0,0] + Su[:,1,1]
SUdif = torch.sqrt((Su[:,0,0] - Su[:,1,1])**2 + 4 * Su[:,0,1]*Su[:,1,0] + 1e-12)
if As.is_cuda:
SIG = torch.zeros(As.size(0),2,2).cuda()
SIG[:,0,0] = torch.sqrt((SUsum+SUdif)/2.0)
SIG[:,1,1] = torch.sqrt((SUsum-SUdif)/2.0)
else:
SIG = torch.zeros(As.size(0),2,2)
SIG[:,0,0] = torch.sqrt((SUsum+SUdif)/2.0)
SIG[:,1,1] = torch.sqrt((SUsum-SUdif)/2.0)
S = torch.bmm(torch.bmm(U.permute(0,2,1),As),W)
C = torch.sign(S)
C[:,0,1] = 0
C[:,1,0] = 0
V = torch.bmm(W,C)
return (U,SIG,V)
def getLAFelongation(LAFs):
u,s,v = bsvd2x2(LAFs[:,:2,:2])
return torch.max(s[:,0,0],s[:,1,1]) / torch.min(s[:,0,0],s[:,1,1])
def getNumCollapsed(LAFs, th = 10.0):
el = getLAFelongation(LAFs)
return (el > th).float().sum()
def Ell2LAF(ell):
A23 = np.zeros((2,3))
A23[0,2] = ell[0]
A23[1,2] = ell[1]
a = ell[2]
b = ell[3]
c = ell[4]
sc = np.sqrt(np.sqrt(a*c - b*b))
ia,ib,ic = invSqrt(a,b,c) #because sqrtm returns ::-1, ::-1 matrix, don`t know why
A = np.array([[ia, ib], [ib, ic]]) / sc
sc = np.sqrt(A[0,0] * A[1,1] - A[1,0] * A[0,1])
A23[0:2,0:2] = rectifyAffineTransformationUpIsUp(A / sc) * sc
return A23
def rectifyAffineTransformationUpIsUp_np(A):
det = np.sqrt(np.abs(A[0,0]*A[1,1] - A[1,0]*A[0,1] + 1e-10))
b2a2 = np.sqrt(A[0,1] * A[0,1] + A[0,0] * A[0,0])
A_new = np.zeros((2,2))
A_new[0,0] = b2a2 / det
A_new[0,1] = 0
A_new[1,0] = (A[1,1]*A[0,1]+A[1,0]*A[0,0])/(b2a2*det)
A_new[1,1] = det / b2a2
return A_new
def ells2LAFs(ells):
LAFs = np.zeros((len(ells), 2,3))
for i in range(len(ells)):
LAFs[i,:,:] = Ell2LAF(ells[i,:])
return LAFs
def LAF2pts(LAF, n_pts = 50):
a = np.linspace(0, 2*np.pi, n_pts);
x = [0]
x.extend(list(np.sin(a)))
x = np.array(x).reshape(1,-1)
y = [0]
y.extend(list(np.cos(a)))
y = np.array(y).reshape(1,-1)
HLAF = np.concatenate([LAF, np.array([0,0,1]).reshape(1,3)])
H_pts =np.concatenate([x,y,np.ones(x.shape)])
H_pts_out = np.transpose(np.matmul(HLAF, H_pts))
H_pts_out[:,0] = H_pts_out[:,0] / H_pts_out[:, 2]
H_pts_out[:,1] = H_pts_out[:,1] / H_pts_out[:, 2]
return H_pts_out[:,0:2]
def convertLAFs_to_A23format(LAFs):
sh = LAFs.shape
if (len(sh) == 3) and (sh[1] == 2) and (sh[2] == 3): # n x 2 x 3 classical [A, (x;y)] matrix
work_LAFs = LAFs.clone().detach() # deepcopy(LAFs)
elif (len(sh) == 2) and (sh[1] == 7): #flat format, x y scale a11 a12 a21 a22
work_LAFs = np.zeros((sh[0], 2,3))
work_LAFs[:,0,2] = LAFs[:,0]
work_LAFs[:,1,2] = LAFs[:,1]
work_LAFs[:,0,0] = LAFs[:,2] * LAFs[:,3]
work_LAFs[:,0,1] = LAFs[:,2] * LAFs[:,4]
work_LAFs[:,1,0] = LAFs[:,2] * LAFs[:,5]
work_LAFs[:,1,1] = LAFs[:,2] * LAFs[:,6]
elif (len(sh) == 2) and (sh[1] == 6): #flat format, x y s*a11 s*a12 s*a21 s*a22
work_LAFs = np.zeros((sh[0], 2,3))
work_LAFs[:,0,2] = LAFs[:,0]
work_LAFs[:,1,2] = LAFs[:,1]
work_LAFs[:,0,0] = LAFs[:,2]
work_LAFs[:,0,1] = LAFs[:,3]
work_LAFs[:,1,0] = LAFs[:,4]
work_LAFs[:,1,1] = LAFs[:,5]
else:
print ('Unknown LAF format')
return None
return work_LAFs
def LAFs2ell(in_LAFs):
LAFs = convertLAFs_to_A23format(in_LAFs)
ellipses = np.zeros((len(LAFs),5))
for i in range(len(LAFs)):
# LAF = deepcopy(LAFs[i,:,:])
LAF = LAFs[i,:,:].clone()
scale = np.sqrt(LAF[0,0]*LAF[1,1] - LAF[0,1]*LAF[1, 0] + 1e-10)
u, W, v = np.linalg.svd(LAF[0:2,0:2] / scale, full_matrices=True)
W[0] = 1. / (W[0]*W[0]*scale*scale)
W[1] = 1. / (W[1]*W[1]*scale*scale)
A = np.matmul(np.matmul(u, np.diag(W)), u.transpose())
ellipses[i,0] = LAF[0,2]
ellipses[i,1] = LAF[1,2]
ellipses[i,2] = A[0,0]
ellipses[i,3] = A[0,1]
ellipses[i,4] = A[1,1]
return ellipses
def visualize_LAFs(img, LAFs, color = 'r', show = False, save_to = None):
work_LAFs = convertLAFs_to_A23format(LAFs)
try:
plt.close('all')
except:
pass
plt.figure()
plt.imshow(255 - img)
if work_LAFs is None:
work_LAFs = []
for i in range(len(work_LAFs)):
ell = LAF2pts(work_LAFs[i,:,:])
plt.plot( ell[:,0], ell[:,1], color)
if show:
plt.show()
if save_to is not None:
plt.savefig(save_to)
return
####pytorch
def get_normalized_affine_shape(tilt, angle_in_radians):
assert tilt.size(0) == angle_in_radians.size(0)
num = tilt.size(0)
tilt_A = Variable(torch.eye(2).view(1,2,2).repeat(num,1,1))
if tilt.is_cuda:
tilt_A = tilt_A.cuda()
tilt_A[:,0,0] = tilt.view(-1);
rotmat = get_rotation_matrix(angle_in_radians)
out_A = rectifyAffineTransformationUpIsUp(torch.bmm(rotmat, torch.bmm(tilt_A, rotmat)))
#re_scale = (1.0/torch.sqrt((out_A **2).sum(dim=1).max(dim=1)[0])) #It is heuristic to for keeping scale change small
#re_scale = (0.5 + 0.5/torch.sqrt((out_A **2).sum(dim=1).max(dim=1)[0])) #It is heuristic to for keeping scale change small
return out_A# * re_scale.view(-1,1,1).expand(num,2,2)
def get_rotation_matrix(angle_in_radians):
angle_in_radians = angle_in_radians.view(-1, 1, 1);
sin_a = torch.sin(angle_in_radians)
cos_a = torch.cos(angle_in_radians)
A1_x = torch.cat([cos_a, sin_a], dim = 2)
A2_x = torch.cat([-sin_a, cos_a], dim = 2)
transform = torch.cat([A1_x,A2_x], dim = 1)
return transform
def rectifyAffineTransformationUpIsUp(A):
det = torch.sqrt(torch.abs(A[:,0,0]*A[:,1,1] - A[:,1,0]*A[:,0,1] + 1e-10))
b2a2 = torch.sqrt(A[:,0,1] * A[:,0,1] + A[:,0,0] * A[:,0,0])
A1_ell = torch.cat([(b2a2 / det).contiguous().view(-1,1,1), 0 * det.view(-1,1,1)], dim = 2)
A2_ell = torch.cat([((A[:,1,1]*A[:,0,1]+A[:,1,0]*A[:,0,0])/(b2a2*det)).contiguous().view(-1,1,1),
(det / b2a2).contiguous().view(-1,1,1)], dim = 2)
return torch.cat([A1_ell, A2_ell], dim = 1)
def rectifyAffineTransformationUpIsUpFullyConv(A):#A is (n,4,h,w) tensor
det = torch.sqrt(torch.abs(A[:,0:1,:,:]*A[:,3:4,:,:] - A[:,1:2,:,:]*A[:,2:3,:,:] + 1e-10))
b2a2 = torch.sqrt(A[:,1:2,:,:] * A[:,1:2,:,:] + A[:,0:1,:,:] * A[:,0:1,:,:])
return torch.cat([(b2a2 / det).contiguous(),0 * det.contiguous(),
(A[:,3:4,:,:]*A[:,1:2,:,:]+A[:,2:3,:,:]*A[:,0:1,:,:])/(b2a2*det),(det / b2a2).contiguous()], dim = 1)
def abc2A(a,b,c, normalize = False):
A1_ell = torch.cat([a.view(-1,1,1), b.view(-1,1,1)], dim = 2)
A2_ell = torch.cat([b.view(-1,1,1), c.view(-1,1,1)], dim = 2)
return torch.cat([A1_ell, A2_ell], dim = 1)
def angles2A(angles):
cos_a = torch.cos(angles).view(-1, 1, 1)
sin_a = torch.sin(angles).view(-1, 1, 1)
A1_ang = torch.cat([cos_a, sin_a], dim = 2)
A2_ang = torch.cat([-sin_a, cos_a], dim = 2)
return torch.cat([A1_ang, A2_ang], dim = 1)
def generate_patch_grid_from_normalized_LAFs(LAFs, w, h, PS):
num_lafs = LAFs.size(0)
min_size = min(h,w)
coef = torch.ones(1,2,3) * min_size
coef[0,0,2] = w
coef[0,1,2] = h
if LAFs.is_cuda:
coef = coef.cuda()
grid = F.affine_grid(LAFs * Variable(coef.expand(num_lafs,2,3)), torch.Size((num_lafs,1,PS,PS)))
grid[:,:,:,0] = 2.0 * grid[:,:,:,0] / float(w) - 1.0
grid[:,:,:,1] = 2.0 * grid[:,:,:,1] / float(h) - 1.0
return grid
def batched_grid_apply(img, grid, batch_size = 32):
n_patches = len(grid)
if n_patches > batch_size:
bs = batch_size
n_batches = int(n_patches / bs + 1)
for batch_idx in range(n_batches):
st = batch_idx * bs
if batch_idx == n_batches - 1:
if (batch_idx + 1) * bs > n_patches:
end = n_patches
else:
end = (batch_idx + 1) * bs
else:
end = (batch_idx + 1) * bs
if st >= end:
continue
if batch_idx == 0:
if img.size(0) != grid.size(0):
first_batch_out = F.grid_sample(img.expand(end - st, img.size(1), img.size(2), img.size(3)), grid[st:end, :,:,:])# kwargs)
else:
first_batch_out = F.grid_sample(img[st:end], grid[st:end, :,:,:])# kwargs)
out_size = torch.Size([n_patches] + list(first_batch_out.size()[1:]))
out = torch.zeros(out_size);
if img.is_cuda:
out = out.cuda()
out[st:end] = first_batch_out
else:
if img.size(0) != grid.size(0):
out[st:end,:,:] = F.grid_sample(img.expand(end - st, img.size(1), img.size(2), img.size(3)), grid[st:end, :,:,:])
else:
out[st:end,:,:] = F.grid_sample(img[st:end], grid[st:end, :,:,:])
return out
else:
if img.size(0) != grid.size(0):
return F.grid_sample(img.expand(grid.size(0), img.size(1), img.size(2), img.size(3)), grid)
else:
return F.grid_sample(img, grid)
def extract_patches(img, LAFs, PS = 32, bs = 32):
w = img.size(3)
h = img.size(2)
ch = img.size(1)
grid = generate_patch_grid_from_normalized_LAFs(LAFs, float(w),float(h), PS)
if bs is None:
return torch.nn.functional.grid_sample(img.expand(grid.size(0), ch, h, w), grid)
else:
return batched_grid_apply(img, grid, bs)
def get_pyramid_inverted_index_for_LAFs(LAFs, PS, sigmas):
return
def extract_patches_from_pyramid_with_inv_index(scale_pyramid, pyr_inv_idxs, LAFs, PS = 19):
patches = torch.zeros(LAFs.size(0),scale_pyramid[0][0].size(1), PS, PS)
if LAFs.is_cuda:
patches = patches.cuda()
patches = Variable(patches)
if pyr_inv_idxs is not None:
for i in range(len(scale_pyramid)):
for j in range(len(scale_pyramid[i])):
cur_lvl_idxs = pyr_inv_idxs[i][j]
if cur_lvl_idxs is None:
continue
cur_lvl_idxs = cur_lvl_idxs.view(-1)
#print i,j,cur_lvl_idxs.shape
patches[cur_lvl_idxs,:,:,:] = extract_patches(scale_pyramid[i][j], LAFs[cur_lvl_idxs, :,:], PS, 32 )
return patches
def get_inverted_pyr_index(scale_pyr, pyr_idxs, level_idxs):
pyr_inv_idxs = []
### Precompute octave inverted indexes
for i in range(len(scale_pyr)):
pyr_inv_idxs.append([])
cur_idxs = pyr_idxs == i #torch.nonzero((pyr_idxs == i).data)
for j in range(0, len(scale_pyr[i])):
cur_lvl_idxs = torch.nonzero(((level_idxs == j) * cur_idxs).data)
if cur_lvl_idxs.size(0) == 0:
pyr_inv_idxs[i].append(None)
else:
pyr_inv_idxs[i].append(cur_lvl_idxs.squeeze())
return pyr_inv_idxs
def denormalizeLAFs(LAFs, w, h):
w = float(w)
h = float(h)
num_lafs = LAFs.size(0)
min_size = min(h,w)
coef = torch.ones(1,2,3).float() * min_size
coef[0,0,2] = w
coef[0,1,2] = h
if LAFs.is_cuda:
coef = coef.cuda()
return Variable(coef.expand(num_lafs,2,3)) * LAFs
def normalizeLAFs(LAFs, w, h):
w = float(w)
h = float(h)
num_lafs = LAFs.size(0)
min_size = min(h,w)
coef = torch.ones(1,2,3).float() / min_size
coef[0,0,2] = 1.0 / w
coef[0,1,2] = 1.0 / h
if LAFs.is_cuda:
coef = coef.cuda()
return Variable(coef.expand(num_lafs,2,3)) * LAFs
def sc_y_x2LAFs(sc_y_x):
base_LAF = torch.eye(2).float().unsqueeze(0).expand(sc_y_x.size(0),2,2)
if sc_y_x.is_cuda:
base_LAF = base_LAF.cuda()
base_A = Variable(base_LAF, requires_grad=False)
A = sc_y_x[:,:1].unsqueeze(1).expand_as(base_A) * base_A
LAFs = torch.cat([A,
torch.cat([sc_y_x[:,2:].unsqueeze(-1),
sc_y_x[:,1:2].unsqueeze(-1)], dim=1)], dim = 2)
return LAFs
def sc_y_x_and_A2LAFs(sc_y_x, A_flat):
base_A = A_flat.view(-1,2,2)
A = sc_y_x[:,:1].unsqueeze(1).expand_as(base_A) * base_A
LAFs = torch.cat([A,
torch.cat([sc_y_x[:,2:].unsqueeze(-1),
sc_y_x[:,1:2].unsqueeze(-1)], dim=1)], dim = 2)
return LAFs
def get_LAFs_scales(LAFs):
return torch.sqrt(torch.abs(LAFs[:,0,0] *LAFs[:,1,1] - LAFs[:,0,1] * LAFs[:,1,0]) + 1e-12)
def get_pyramid_and_level_index_for_LAFs(dLAFs, sigmas, pix_dists, PS):
scales = get_LAFs_scales(dLAFs);
needed_sigmas = scales / PS;
sigmas_full_list = []
level_idxs_full = []
oct_idxs_full = []
for oct_idx in range(len(sigmas)):
sigmas_full_list = sigmas_full_list + list(np.array(sigmas[oct_idx])*np.array(pix_dists[oct_idx]))
oct_idxs_full = oct_idxs_full + [oct_idx]*len(sigmas[oct_idx])
level_idxs_full = level_idxs_full + list(range(0,len(sigmas[oct_idx])))
oct_idxs_full = torch.LongTensor(oct_idxs_full)
level_idxs_full = torch.LongTensor(level_idxs_full)
closest_imgs = cdist(np.array(sigmas_full_list).reshape(-1,1), needed_sigmas.data.cpu().numpy().reshape(-1,1)).argmin(axis = 0)
closest_imgs = torch.from_numpy(closest_imgs)
if dLAFs.is_cuda:
closest_imgs = closest_imgs.cuda()
oct_idxs_full = oct_idxs_full.cuda()
level_idxs_full = level_idxs_full.cuda()
return Variable(oct_idxs_full[closest_imgs]), Variable(level_idxs_full[closest_imgs])
| 17,704 | 36.352321 | 142 | py |
NORPPA | NORPPA-main/reidentification/hesaff_pytorch/SparseImgRepresenter.py | import torch
import torch.nn as nn
import numpy as np
import math
import torch.nn.functional as F
from torch.autograd import Variable
from copy import deepcopy
from Utils import GaussianBlur, batch_eig2x2, line_prepender, batched_forward
from LAF import LAFs2ell,abc2A, angles2A, generate_patch_grid_from_normalized_LAFs, extract_patches, get_inverted_pyr_index, denormalizeLAFs, extract_patches_from_pyramid_with_inv_index, rectifyAffineTransformationUpIsUp, ells2LAFs
from LAF import get_pyramid_and_level_index_for_LAFs, normalizeLAFs, checkTouchBoundary
from HandCraftedModules import HessianResp, AffineShapeEstimator, OrientationDetector, ScalePyramid, NMS3dAndComposeA
import time
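# ScaleSpaceAffinePatchExtractor ties the pieces together: build a Gaussian
# scale pyramid, detect determinant-of-Hessian extrema with 3D non-maximum
# suppression (multiScaleDetector), refine each frame's affine shape for
# num_Baum_iters iterations with AffNet / AffineShapeEstimator (getAffineShape),
# optionally estimate an orientation (getOrientation), and return denormalised
# LAFs together with their responses from forward().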
class ScaleSpaceAffinePatchExtractor(nn.Module):
def __init__(self,
border = 16,
num_features = 500,
patch_size = 32,
mrSize = 3.0,
nlevels = 3,
num_Baum_iters = 0,
init_sigma = 1.6,
th = None,
patch_scale = 1,
RespNet = None, OriNet = None, AffNet = None):
super(ScaleSpaceAffinePatchExtractor, self).__init__()
self.mrSize = mrSize
self.PS = patch_size
self.b = border;
self.num = num_features
self.nlevels = nlevels
self.num_Baum_iters = num_Baum_iters
self.init_sigma = init_sigma
self.th = th;
self.patch_scale = patch_scale
if th is not None:
self.num = -1
else:
self.th = 0
if RespNet is not None:
self.RespNet = RespNet
else:
self.RespNet = HessianResp()
if OriNet is not None:
self.OriNet = OriNet
else:
self.OriNet= OrientationDetector(patch_size = 19);
if AffNet is not None:
self.AffNet = AffNet
else:
self.AffNet = AffineShapeEstimator(patch_size = 19)
self.ScalePyrGen = ScalePyramid(nLevels = self.nlevels, init_sigma = self.init_sigma, border = self.b)
return
def multiScaleDetector(self,x, num_features = 0):
t = time.time()
self.scale_pyr, self.sigmas, self.pix_dists = self.ScalePyrGen(x)
### Detect keypoints in scale space
aff_matrices = []
top_responces = []
pyr_idxs = []
level_idxs = []
det_t = 0
nmst = 0
for oct_idx in range(len(self.sigmas)):
#print oct_idx
octave = self.scale_pyr[oct_idx]
sigmas_oct = self.sigmas[oct_idx]
pix_dists_oct = self.pix_dists[oct_idx]
low = None
cur = None
high = None
octaveMap = (self.scale_pyr[oct_idx][0] * 0).byte()
nms_f = NMS3dAndComposeA(w = octave[0].size(3),
h = octave[0].size(2),
border = self.b, mrSize = self.mrSize)
for level_idx in range(1, len(octave)-1):
if cur is None:
low = torch.clamp(self.RespNet(octave[level_idx - 1], (sigmas_oct[level_idx - 1 ])) - self.th, min = 0)
else:
low = cur
if high is None:
cur = torch.clamp(self.RespNet(octave[level_idx ], (sigmas_oct[level_idx ])) - self.th, min = 0)
else:
cur = high
high = torch.clamp(self.RespNet(octave[level_idx + 1], (sigmas_oct[level_idx + 1 ])) - self.th, min = 0)
top_resp, aff_matrix, octaveMap_current = nms_f(low, cur, high,
num_features = num_features,
octaveMap = octaveMap,
scales = sigmas_oct[level_idx - 1:level_idx + 2])
if top_resp is None:
continue
if top_resp.dim() == 0:
top_resp = torch.unsqueeze(top_resp, 0)
octaveMap = octaveMap_current
aff_matrices.append(aff_matrix), top_responces.append(top_resp)
pyr_id = Variable(oct_idx * torch.ones(aff_matrix.size(0)))
lev_id = Variable((level_idx - 1) * torch.ones(aff_matrix.size(0))) #prevBlur
if x.is_cuda:
pyr_id = pyr_id.cuda()
lev_id = lev_id.cuda()
pyr_idxs.append(pyr_id)
level_idxs.append(lev_id)
all_responses = torch.cat(top_responces, dim = 0)
aff_m_scales = torch.cat(aff_matrices,dim = 0)
pyr_idxs_scales = torch.cat(pyr_idxs,dim = 0)
level_idxs_scale = torch.cat(level_idxs, dim = 0)
if (num_features > 0) and (num_features < all_responses.size(0)):
all_responses, idxs = torch.topk(all_responses, k = num_features);
LAFs = torch.index_select(aff_m_scales, 0, idxs)
final_pyr_idxs = pyr_idxs_scales[idxs]
final_level_idxs = level_idxs_scale[idxs]
else:
return all_responses, aff_m_scales, pyr_idxs_scales , level_idxs_scale
return all_responses, LAFs, final_pyr_idxs, final_level_idxs,
def getAffineShape(self, final_resp, LAFs, final_pyr_idxs, final_level_idxs, num_features = 0):
pe_time = 0
affnet_time = 0
pyr_inv_idxs = get_inverted_pyr_index(self.scale_pyr, final_pyr_idxs, final_level_idxs)
t = time.time()
patches_small = extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, LAFs, PS = self.AffNet.PS)
pe_time+=time.time() - t
t = time.time()
base_A = torch.eye(2).unsqueeze(0).expand(final_pyr_idxs.size(0),2,2)
if final_resp.is_cuda:
base_A = base_A.cuda()
base_A = Variable(base_A)
is_good = None
n_patches = patches_small.size(0)
for i in range(self.num_Baum_iters):
t = time.time()
A = batched_forward(self.AffNet, patches_small, 256)
is_good_current = 1
affnet_time += time.time() - t
if is_good is None:
is_good = is_good_current
else:
is_good = is_good * is_good_current
base_A = torch.bmm(A, base_A);
new_LAFs = torch.cat([torch.bmm(base_A,LAFs[:,:,0:2]), LAFs[:,:,2:] ], dim =2)
#print torch.sqrt(new_LAFs[0,0,0]*new_LAFs[0,1,1] - new_LAFs[0,1,0] *new_LAFs[0,0,1]) * scale_pyr[0][0].size(2)
if i != self.num_Baum_iters - 1:
pe_time+=time.time() - t
t = time.time()
patches_small = extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, new_LAFs, PS = self.AffNet.PS)
pe_time+= time.time() - t
l1,l2 = batch_eig2x2(A)
ratio1 = torch.abs(l1 / (l2 + 1e-8))
converged_mask = (ratio1 <= 1.2) * (ratio1 >= (0.8))
l1,l2 = batch_eig2x2(base_A)
ratio = torch.abs(l1 / (l2 + 1e-8))
idxs_mask = ((ratio < 6.0) * (ratio > (1./6.))) * checkTouchBoundary(new_LAFs)
num_survived = idxs_mask.float().sum()
if (num_features > 0) and (num_survived.data.item() > num_features):
final_resp = final_resp * idxs_mask.float() #zero bad points
final_resp, idxs = torch.topk(final_resp, k = num_features);
else:
idxs = Variable(torch.nonzero(idxs_mask.data).view(-1).long())
final_resp = final_resp[idxs]
final_pyr_idxs = final_pyr_idxs[idxs]
final_level_idxs = final_level_idxs[idxs]
base_A = torch.index_select(base_A, 0, idxs)
LAFs = torch.index_select(LAFs, 0, idxs)
new_LAFs = torch.cat([torch.bmm(base_A, LAFs[:,:,0:2]),
LAFs[:,:,2:]], dim =2)
new_LAFs[:, :, :2] *= self.patch_scale
#print ('affnet_time',affnet_time)
#print ('pe_time', pe_time)
return final_resp, new_LAFs, final_pyr_idxs, final_level_idxs
def getOrientation(self, LAFs, final_pyr_idxs, final_level_idxs):
pyr_inv_idxs = get_inverted_pyr_index(self.scale_pyr, final_pyr_idxs, final_level_idxs)
patches_small = extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, LAFs, PS = self.OriNet.PS)
max_iters = 1
### Detect orientation
for i in range(max_iters):
angles = self.OriNet(patches_small)
if len(angles.size()) > 2:
LAFs = torch.cat([torch.bmm( LAFs[:,:,:2], angles), LAFs[:,:,2:]], dim = 2)
else:
LAFs = torch.cat([torch.bmm( LAFs[:,:,:2], angles2A(angles).view(-1,2,2)), LAFs[:,:,2:]], dim = 2)
if i != max_iters:
patches_small = extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, LAFs, PS = self.OriNet.PS)
return LAFs
def extract_patches_from_pyr(self, dLAFs, PS = 41):
pyr_idxs, level_idxs = get_pyramid_and_level_index_for_LAFs(dLAFs, self.sigmas, self.pix_dists, PS)
pyr_inv_idxs = get_inverted_pyr_index(self.scale_pyr, pyr_idxs, level_idxs)
patches = extract_patches_from_pyramid_with_inv_index(self.scale_pyr,
pyr_inv_idxs,
normalizeLAFs(dLAFs, self.scale_pyr[0][0].size(3), self.scale_pyr[0][0].size(2)),
PS = PS)
return patches
def forward(self,x, do_ori = False):
### Detection
t = time.time()
num_features_prefilter = self.num
if self.num_Baum_iters > 0:
num_features_prefilter = int(1.5 * self.num);
responses, LAFs, final_pyr_idxs, final_level_idxs = self.multiScaleDetector(x,num_features_prefilter)
#print (time.time() - t, 'detection multiscale')
t = time.time()
LAFs[:,0:2,0:2] = self.mrSize * LAFs[:,:,0:2]
if self.num_Baum_iters > 0:
responses, LAFs, final_pyr_idxs, final_level_idxs = self.getAffineShape(responses, LAFs, final_pyr_idxs, final_level_idxs, self.num)
#print (time.time() - t, 'affine shape iters')
t = time.time()
if do_ori:
LAFs = self.getOrientation(LAFs, final_pyr_idxs, final_level_idxs)
#pyr_inv_idxs = get_inverted_pyr_index(self.scale_pyr, final_pyr_idxs, final_level_idxs)
#patches = extract_patches_from_pyramid_with_inv_index(scale_pyr, pyr_inv_idxs, LAFs, PS = self.PS)
#patches = extract_patches(x, LAFs, PS = self.PS)
#print time.time() - t, len(LAFs), ' patches extraction'
return denormalizeLAFs(LAFs, x.size(3), x.size(2)), responses
| 10,911 | 49.753488 | 231 | py |
NORPPA | NORPPA-main/reidentification/hesaff_pytorch/ReprojectonStuff.py | import torch
from torch.autograd import Variable
import numpy as np
from LAF import rectifyAffineTransformationUpIsUp
from Utils import zeros_like
def distance_matrix_vector(anchor, positive):
"""Given batch of anchor descriptors and positive descriptors calculate distance matrix"""
d1_sq = torch.sum(anchor * anchor, dim=1)
d2_sq = torch.sum(positive * positive, dim=1)
eps = 1e-6
return torch.sqrt(torch.abs((d1_sq.expand(positive.size(0), anchor.size(0)) +
torch.t(d2_sq.expand(anchor.size(0), positive.size(0)))
- 2.0 * torch.bmm(positive.unsqueeze(0), torch.t(anchor).unsqueeze(0)).squeeze(0))+eps))
def LAFs_to_H_frames(aff_pts):
H3_x = torch.Tensor([0, 0, 1 ]).unsqueeze(0).unsqueeze(0).expand_as(aff_pts[:,0:1,:]);
if aff_pts.is_cuda:
H3_x = H3_x.cuda()
H3_x = torch.autograd.Variable(H3_x)
return torch.cat([aff_pts, H3_x], dim = 1)
def linH(H, x, y):
assert x.size(0) == y.size(0)
A = torch.zeros(x.size(0),2,2)
if x.is_cuda:
A = A.cuda()
A = Variable(A)
den = x * H[2,0] + y * H[2,1] + H[2,2]
num1_densq = (x*H[0,0] + y*H[0,1] + H[0,2]) / (den*den)
num2_densq = (x*H[1,0] + y*H[1,1] + H[1,2]) / (den*den)
A[:,0,0] = H[0,0]/den - num1_densq * H[2,0]
A[:,0,1] = H[0,1]/den - num1_densq * H[2,1]
A[:,1,0] = H[1,0]/den - num2_densq * H[2,0]
A[:,1,1] = H[1,1]/den - num2_densq * H[2,1]
return A
def reprojectLAFs(LAFs1, H1to2, return_LHFs = False):
LHF1 = LAFs_to_H_frames(LAFs1)
xy1 = torch.bmm(H1to2.expand(LHF1.size(0),3,3), LHF1[:,:,2:])
xy1 = xy1 / xy1[:,2:,:].expand(xy1.size(0), 3, 1)
As = linH(H1to2, LAFs1[:,0,2], LAFs1[:,1,2])
AF = torch.bmm(As, LHF1[:,0:2,0:2])
if return_LHFs:
return LAFs_to_H_frames(torch.cat([AF, xy1[:,:2,:]], dim = 2))
return torch.cat([AF, xy1[:,:2,:]], dim = 2)
def inverseLHFs(LHFs):
LHF1_inv =torch.zeros(LHFs.size())
if LHFs.is_cuda:
LHF1_inv = LHF1_inv.cuda()
LHF1_inv = torch.autograd.Variable(LHF1_inv);
for i in range(LHF1_inv.size(0)):
LHF1_inv[i,:,:] = LHFs[i,:,:].inverse()
return LHF1_inv
def reproject_to_canonical_Frob_batched(LHF1_inv, LHF2, batch_size = 2, skip_center = False):
out = torch.zeros((LHF1_inv.size(0), LHF2.size(0)))
eye1 = torch.eye(3)
if LHF1_inv.is_cuda:
out = out.cuda()
eye1 = eye1.cuda()
eye1 = torch.autograd.Variable(eye1)
out = torch.autograd.Variable(out)
len1 = LHF1_inv.size(0)
len2 = LHF2.size(0)
n_batches = int(np.floor(len1 / batch_size) + 1);
for b_idx in range(n_batches):
#print b_idx
start = b_idx * batch_size;
fin = min((b_idx+1) * batch_size, len1)
current_bs = fin - start
if current_bs == 0:
break
should_be_eyes = torch.bmm(LHF1_inv[start:fin, :, :].unsqueeze(0).expand(len2,current_bs, 3, 3).contiguous().view(-1,3,3),
LHF2.unsqueeze(1).expand(len2,current_bs, 3,3).contiguous().view(-1,3,3))
if skip_center:
out[start:fin, :] = torch.sum(((should_be_eyes - eye1.unsqueeze(0).expand_as(should_be_eyes))**2)[:,:2,:2] , dim=1).sum(dim = 1).view(current_bs, len2)
else:
out[start:fin, :] = torch.sum((should_be_eyes - eye1.unsqueeze(0).expand_as(should_be_eyes))**2 , dim=1).sum(dim = 1).view(current_bs, len2)
return out
def get_GT_correspondence_indexes(LAFs1, LAFs2, H1to2, dist_threshold = 4):
LHF2_in_1_pre = reprojectLAFs(LAFs2, torch.inverse(H1to2), True)
just_centers1 = LAFs1[:,:,2];
just_centers2_repr_to_1 = LHF2_in_1_pre[:,0:2,2];
dist = distance_matrix_vector(just_centers2_repr_to_1, just_centers1)
min_dist, idxs_in_2 = torch.min(dist,1)
plain_indxs_in1 = torch.autograd.Variable(torch.arange(0, idxs_in_2.size(0)),requires_grad = False)
if LAFs1.is_cuda:
plain_indxs_in1 = plain_indxs_in1.cuda()
mask = min_dist <= dist_threshold
return min_dist[mask], plain_indxs_in1[mask], idxs_in_2[mask]
def get_GT_correspondence_indexes_Fro(LAFs1,LAFs2, H1to2, dist_threshold = 4,
skip_center_in_Fro = False):
LHF2_in_1_pre = reprojectLAFs(LAFs2, torch.inverse(H1to2), True)
LHF1_inv = inverseLHFs(LAFs_to_H_frames(LAFs1))
frob_norm_dist = reproject_to_canonical_Frob_batched(LHF1_inv, LHF2_in_1_pre, batch_size = 2, skip_center = skip_center_in_Fro)
min_dist, idxs_in_2 = torch.min(frob_norm_dist,1)
plain_indxs_in1 = torch.autograd.Variable(torch.arange(0, idxs_in_2.size(0)), requires_grad = False)
if LAFs1.is_cuda:
plain_indxs_in1 = plain_indxs_in1.cuda()
#print min_dist.min(), min_dist.max(), min_dist.mean()
mask = min_dist <= dist_threshold
return min_dist[mask], plain_indxs_in1[mask], idxs_in_2[mask]
def get_GT_correspondence_indexes_Fro_and_center(LAFs1,LAFs2, H1to2, dist_threshold = 4, center_dist_th = 2.0,
skip_center_in_Fro = False, do_up_is_up = False, return_LAF2_in_1 = False):
LHF2_in_1_pre = reprojectLAFs(LAFs2, torch.inverse(H1to2), True)
if do_up_is_up:
sc = torch.sqrt(LHF2_in_1_pre[:,0,0] * LHF2_in_1_pre[:,1,1] - LHF2_in_1_pre[:,1,0] * LHF2_in_1_pre[:,0,1]).unsqueeze(-1).unsqueeze(-1).expand(LHF2_in_1_pre.size(0), 2,2)
LHF2_in_1 = torch.zeros(LHF2_in_1_pre.size())
if LHF2_in_1_pre.is_cuda:
LHF2_in_1 = LHF2_in_1.cuda()
LHF2_in_1 = Variable(LHF2_in_1)
LHF2_in_1[:, :2,:2] = rectifyAffineTransformationUpIsUp(LHF2_in_1_pre[:, :2,:2]/sc) * sc
LHF2_in_1[:,:, 2] = LHF2_in_1_pre[:,:,2]
else:
LHF2_in_1 = LHF2_in_1_pre
LHF1_inv = inverseLHFs(LAFs_to_H_frames(LAFs1))
frob_norm_dist = reproject_to_canonical_Frob_batched(LHF1_inv, LHF2_in_1, batch_size = 2, skip_center = skip_center_in_Fro)
#### Center replated
just_centers1 = LAFs1[:,:,2];
just_centers2_repr_to_1 = LHF2_in_1[:,0:2,2];
center_dist_mask = distance_matrix_vector(just_centers2_repr_to_1, just_centers1) >= center_dist_th
frob_norm_dist_masked = center_dist_mask.float() * 1000. + frob_norm_dist;
min_dist, idxs_in_2 = torch.min(frob_norm_dist_masked,1)
plain_indxs_in1 = torch.arange(0, idxs_in_2.size(0))
if LAFs1.is_cuda:
plain_indxs_in1 = plain_indxs_in1.cuda()
plain_indxs_in1 = torch.autograd.Variable(plain_indxs_in1, requires_grad = False)
#min_dist, idxs_in_2 = torch.min(dist,1)
#print min_dist.min(), min_dist.max(), min_dist.mean()
mask = (min_dist <= dist_threshold )
if return_LAF2_in_1:
return min_dist[mask], plain_indxs_in1[mask], idxs_in_2[mask], LHF2_in_1[:,0:2,:]
else:
return min_dist[mask], plain_indxs_in1[mask], idxs_in_2[mask]
| 6,844 | 45.25 | 177 | py |
NORPPA | NORPPA-main/reidentification/hesaff_pytorch/Utils.py | import torch
import torch.nn.init
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import cv2
import numpy as np
# resize image to size 32x32
cv2_scale = lambda x: cv2.resize(x, dsize=(32, 32),
interpolation=cv2.INTER_LINEAR)
# reshape image
np_reshape32 = lambda x: np.reshape(x, (32, 32, 1))
np_reshape64 = lambda x: np.reshape(x, (64, 64, 1))
def zeros_like(x):
assert x.__class__.__name__.find('Variable') != -1 or x.__class__.__name__.find('Tensor') != -1, "Object is neither a Tensor nor a Variable"
y = torch.zeros(x.size())
if x.is_cuda:
y = y.cuda()
if x.__class__.__name__ == 'Variable':
return torch.autograd.Variable(y, requires_grad=x.requires_grad)
elif x.__class__.__name__.find('Tensor') != -1:
        return y
def ones_like(x):
assert x.__class__.__name__.find('Variable') != -1 or x.__class__.__name__.find('Tensor') != -1, "Object is neither a Tensor nor a Variable"
y = torch.ones(x.size())
if x.is_cuda:
y = y.cuda()
if x.__class__.__name__ == 'Variable':
return torch.autograd.Variable(y, requires_grad=x.requires_grad)
elif x.__class__.__name__.find('Tensor') != -1:
        return y
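# batched_forward() runs `model` over `data` in chunks of `batch_size` to keep
# GPU memory bounded; per-chunk outputs are written into one preallocated
# output tensor. Extra keyword arguments are forwarded to the model call.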
def batched_forward(model, data, batch_size, **kwargs):
n_patches = len(data)
if n_patches > batch_size:
bs = batch_size
n_batches = int(n_patches / bs + 1)
for batch_idx in range(n_batches):
st = batch_idx * bs
if batch_idx == n_batches - 1:
if (batch_idx + 1) * bs > n_patches:
end = n_patches
else:
end = (batch_idx + 1) * bs
else:
end = (batch_idx + 1) * bs
if st >= end:
continue
if batch_idx == 0:
                first_batch_out = model(data[st:end], **kwargs)
out_size = torch.Size([n_patches] + list(first_batch_out.size()[1:]))
#out_size[0] = n_patches
out = torch.zeros(out_size);
if data.is_cuda:
out = out.cuda()
out = Variable(out)
out[st:end] = first_batch_out
else:
                out[st:end,:,:] = model(data[st:end], **kwargs)
return out
else:
        return model(data, **kwargs)
class L2Norm(nn.Module):
def __init__(self):
super(L2Norm,self).__init__()
self.eps = 1e-10
def forward(self, x):
norm = torch.sqrt(torch.sum(x * x, dim = 1) + self.eps)
x= x / norm.unsqueeze(-1).expand_as(x)
return x
class L1Norm(nn.Module):
def __init__(self):
super(L1Norm,self).__init__()
self.eps = 1e-10
def forward(self, x):
norm = torch.sum(torch.abs(x), dim = 1) + self.eps
x= x / norm.expand_as(x)
return x
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
def CircularGaussKernel(kernlen=None, circ_zeros = False, sigma = None, norm = True):
assert ((kernlen is not None) or sigma is not None)
if kernlen is None:
kernlen = int(2.0 * 3.0 * sigma + 1.0)
if (kernlen % 2 == 0):
kernlen = kernlen + 1;
    halfSize = kernlen / 2;
r2 = float(halfSize*halfSize)
if sigma is None:
sigma2 = 0.9 * r2;
sigma = np.sqrt(sigma2)
else:
sigma2 = 2.0 * sigma * sigma
x = np.linspace(-halfSize,halfSize,kernlen)
xv, yv = np.meshgrid(x, x, sparse=False, indexing='xy')
distsq = (xv)**2 + (yv)**2
kernel = np.exp(-( distsq/ (sigma2)))
if circ_zeros:
kernel *= (distsq <= r2).astype(np.float32)
if norm:
kernel /= np.sum(kernel)
return kernel
def generate_2dgrid(h,w, centered = True):
if centered:
x = torch.linspace(-w/2+1, w/2, w)
y = torch.linspace(-h/2+1, h/2, h)
else:
x = torch.linspace(0, w-1, w)
y = torch.linspace(0, h-1, h)
grid2d = torch.stack([y.repeat(w,1).t().contiguous().view(-1), x.repeat(h)],1)
return grid2d
def generate_3dgrid(d, h, w, centered = True):
if type(d) is not list:
if centered:
z = torch.linspace(-d/2+1, d/2, d)
else:
z = torch.linspace(0, d-1, d)
dl = d
else:
z = torch.FloatTensor(d)
dl = len(d)
grid2d = generate_2dgrid(h,w, centered = centered)
grid3d = torch.cat([z.repeat(w*h,1).t().contiguous().view(-1,1), grid2d.repeat(dl,1)],dim = 1)
return grid3d
def zero_response_at_border(x, b):
if (b < x.size(3)) and (b < x.size(2)):
x[:, :, 0:b, :] = 0
x[:, :, x.size(2) - b: , :] = 0
x[:, :, :, 0:b] = 0
x[:, :, :, x.size(3) - b: ] = 0
else:
return x * 0
return x
class GaussianBlur(nn.Module):
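    # Gaussian smoothing implemented as a fixed (non-learned) single-channel conv2d
    # with 'replicate' padding; the kernel size is derived from `sigma`.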
def __init__(self, sigma=1.6):
super(GaussianBlur, self).__init__()
weight = self.calculate_weights(sigma)
self.register_buffer('buf', weight)
return
def calculate_weights(self, sigma):
kernel = CircularGaussKernel(sigma = sigma, circ_zeros = False)
h,w = kernel.shape
halfSize = float(h) / 2.;
self.pad = int(np.floor(halfSize))
return torch.from_numpy(kernel.astype(np.float32)).view(1,1,h,w);
def forward(self, x):
w = Variable(self.buf)
if x.is_cuda:
w = w.cuda()
return F.conv2d(F.pad(x, (self.pad,self.pad,self.pad,self.pad), 'replicate'), w, padding = 0)
def batch_eig2x2(A):
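    # Closed-form eigenvalues for a batch of 2x2 matrices A (shape Bx2x2); entries
    # with a non-positive discriminant get the sentinel values 1000 and 0.0001.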
trace = A[:,0,0] + A[:,1,1]
delta1 = (trace*trace - 4 * ( A[:,0,0]* A[:,1,1] - A[:,1,0]* A[:,0,1]))
mask = delta1 > 0
delta = torch.sqrt(torch.abs(delta1))
l1 = mask.float() * (trace + delta) / 2.0 + 1000. * (1.0 - mask.float())
l2 = mask.float() * (trace - delta) / 2.0 + 0.0001 * (1.0 - mask.float())
return l1,l2
def line_prepender(filename, line):
with open(filename, 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write(line.rstrip('\r\n') + '\n' + content)
return
| 6,244 | 33.125683 | 144 | py |
NORPPA | NORPPA-main/reidentification/hesaff_pytorch/architectures.py | from __future__ import division, print_function
import os
import errno
import numpy as np
import sys
from copy import deepcopy
import math
import torch
import torch.nn.init
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.autograd import Variable
from Utils import L2Norm, generate_2dgrid
from Utils import str2bool
from LAF import denormalizeLAFs, LAFs2ell, abc2A, extract_patches,normalizeLAFs, get_rotation_matrix
from LAF import get_LAFs_scales, get_normalized_affine_shape
from LAF import rectifyAffineTransformationUpIsUp,rectifyAffineTransformationUpIsUpFullyConv
class LocalNorm2d(nn.Module):
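    # Local contrast normalization: subtracts a local mean and divides by a local
    # standard-deviation estimate, both computed with an average-pooling window of
    # `kernel_size`, and clamps the result to [-6, 6].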
def __init__(self, kernel_size = 33):
super(LocalNorm2d, self).__init__()
self.ks = kernel_size
self.pool = nn.AvgPool2d(kernel_size = self.ks, stride = 1, padding = 0)
self.eps = 1e-10
return
def forward(self,x):
pd = int(self.ks/2)
mean = self.pool(F.pad(x, (pd,pd,pd,pd), 'reflect'))
return torch.clamp((x - mean) / (torch.sqrt(torch.abs(self.pool(F.pad(x*x, (pd,pd,pd,pd), 'reflect')) - mean*mean )) + self.eps), min = -6.0, max = 6.0)
class OriNetFast(nn.Module):
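    # Small CNN that regresses the dominant orientation of a grayscale PS x PS patch;
    # forward() returns a 2x2 rotation matrix by default, or the raw angle.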
def __init__(self, PS = 16):
super(OriNetFast, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Dropout(0.25),
nn.Conv2d(64, 2, kernel_size=int(PS/4), stride=1,padding=1, bias = True),
nn.Tanh(),
nn.AdaptiveAvgPool2d(1)
)
self.PS = PS
self.features.apply(self.weights_init)
self.halfPS = int(PS/4)
return
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1)
sp = torch.std(flat, dim=1) + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def weights_init(self,m):
if isinstance(m, nn.Conv2d):
nn.init.orthogonal(m.weight.data, gain=0.9)
try:
nn.init.constant(m.bias.data, 0.01)
except:
pass
return
def forward(self, input, return_rot_matrix = True):
xy = self.features(self.input_norm(input)).view(-1,2)
angle = torch.atan2(xy[:,0] + 1e-8, xy[:,1]+1e-8);
if return_rot_matrix:
return get_rotation_matrix(angle)
return angle
class GHH(nn.Module):
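    # Generalized hinging hyperplanes (GHH) unit, used by YiNet below: a linear map
    # whose outputs are grouped into s groups of m hyperplanes; the max over each
    # group is combined with alternating +1/-1 signs.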
def __init__(self, n_in, n_out, s = 4, m = 4):
super(GHH, self).__init__()
self.n_out = n_out
self.s = s
self.m = m
self.conv = nn.Linear(n_in, n_out * s * m)
d = torch.arange(0, s)
self.deltas = -1.0 * (d % 2 != 0).float() + 1.0 * (d % 2 == 0).float()
self.deltas = Variable(self.deltas)
return
def forward(self,x):
x_feats = self.conv(x.view(x.size(0),-1)).view(x.size(0), self.n_out, self.s, self.m);
max_feats = x_feats.max(dim = 3)[0];
if x.is_cuda:
self.deltas = self.deltas.cuda()
else:
self.deltas = self.deltas.cpu()
out = (max_feats * self.deltas.view(1,1,-1).expand_as(max_feats)).sum(dim = 2)
return out
class YiNet(nn.Module):
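    # Orientation-estimation CNN with GHH output units; import_weights() loads the
    # numpy weight files of the original network this class reimplements.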
def __init__(self, PS = 28):
super(YiNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 10, kernel_size=5, padding=0, bias = True),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding = 1),
nn.Conv2d(10, 20, kernel_size=5, stride=1, padding=0, bias = True),
nn.ReLU(),
nn.MaxPool2d(kernel_size=4, stride=2, padding = 2),
nn.Conv2d(20, 50, kernel_size=3, stride=1, padding=0, bias = True),
nn.ReLU(),
nn.AdaptiveMaxPool2d(1),
GHH(50, 100),
GHH(100, 2)
)
self.input_mean = 0.427117081207483
self.input_std = 0.21888339179665006;
self.PS = PS
return
def import_weights(self, dir_name):
self.features[0].weight.data = torch.from_numpy(np.load(os.path.join(dir_name, 'layer0_W.npy'))).float()
self.features[0].bias.data = torch.from_numpy(np.load(os.path.join(dir_name, 'layer0_b.npy'))).float().view(-1)
self.features[3].weight.data = torch.from_numpy(np.load(os.path.join(dir_name, 'layer1_W.npy'))).float()
self.features[3].bias.data = torch.from_numpy(np.load(os.path.join(dir_name, 'layer1_b.npy'))).float().view(-1)
self.features[6].weight.data = torch.from_numpy(np.load(os.path.join(dir_name, 'layer2_W.npy'))).float()
self.features[6].bias.data = torch.from_numpy(np.load(os.path.join(dir_name, 'layer2_b.npy'))).float().view(-1)
self.features[9].conv.weight.data = torch.from_numpy(np.load(os.path.join(dir_name, 'layer3_W.npy'))).float().view(50, 1600).contiguous().t().contiguous()
self.features[9].conv.bias.data = torch.from_numpy(np.load(os.path.join(dir_name, 'layer3_b.npy'))).float().view(1600)
self.features[10].conv.weight.data = torch.from_numpy(np.load(os.path.join(dir_name, 'layer4_W.npy'))).float().view(100, 32).contiguous().t().contiguous()
self.features[10].conv.bias.data = torch.from_numpy(np.load(os.path.join(dir_name, 'layer4_b.npy'))).float().view(32)
self.input_mean = float(np.load(os.path.join(dir_name, 'input_mean.npy')))
self.input_std = float(np.load(os.path.join(dir_name, 'input_std.npy')))
return
def input_norm1(self,x):
return (x - self.input_mean) / self.input_std
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1)
sp = torch.std(flat, dim=1) + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def forward(self, input, return_rot_matrix = False):
xy = self.features(self.input_norm(input))
angle = torch.atan2(xy[:,0] + 1e-8, xy[:,1]+1e-8);
if return_rot_matrix:
return get_rotation_matrix(-angle)
return angle
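# The AffNetFast* classes below are variants of the same patch-level affine-shape
# estimator; they differ mainly in how the 2x2 transform is parametrized
# (2, 3, 4 or 5 regressed values, with or without an explicit rotation or scale term).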
class AffNetFast4(nn.Module):
def __init__(self, PS = 32):
super(AffNetFast4, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias = False),
nn.ReLU(),
nn.Dropout(0.25),
nn.Conv2d(64, 4, kernel_size=8, stride=1, padding=0, bias = True),
nn.AdaptiveAvgPool2d(1)
)
self.PS = PS
self.features.apply(self.weights_init)
self.halfPS = int(PS/2)
return
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1).detach()
sp = torch.std(flat, dim=1).detach() + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def weights_init(self,m):
if isinstance(m, nn.Conv2d):
nn.init.orthogonal(m.weight.data, gain=0.8)
try:
if m.weight.data.shape[-1] == 8: #last layer:
nn.init.orthogonal(m.weight.data, gain=1.0)
# print ('last layer init bias')
m.bias.data = torch.FloatTensor([1,0,0,1])
else:
nn.init.constant(m.bias.data, 0.01)
except:
pass
return
def forward(self, input, return_A_matrix = False):
xy = self.features(self.input_norm(input)).view(-1,2,2).contiguous()
return rectifyAffineTransformationUpIsUp(xy).contiguous()
class AffNetFast(nn.Module):
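    # Main affine-shape estimator variant: regresses 3 values per patch and returns a
    # 2x2 affine matrix rectified so that the vertical direction is preserved ("up is up").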
def __init__(self, PS = 32):
super(AffNetFast, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Dropout(0.25),
nn.Conv2d(64, 3, kernel_size=8, stride=1, padding=0, bias = True),
nn.Tanh(),
nn.AdaptiveAvgPool2d(1)
)
self.PS = PS
self.features.apply(self.weights_init)
self.halfPS = int(PS/2)
return
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1).detach()
sp = torch.std(flat, dim=1).detach() + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def weights_init(self,m):
if isinstance(m, nn.Conv2d):
nn.init.orthogonal_(m.weight.data, gain=0.8)
try:
nn.init.constant_(m.bias.data, 0.01)
except:
pass
return
def forward(self, input, return_A_matrix = False):
xy = self.features(self.input_norm(input)).view(-1,3)
a1 = torch.cat([1.0 + xy[:,0].contiguous().view(-1,1,1), 0 * xy[:,0].contiguous().view(-1,1,1)], dim = 2).contiguous()
a2 = torch.cat([xy[:,1].contiguous().view(-1,1,1), 1.0 + xy[:,2].contiguous().view(-1,1,1)], dim = 2).contiguous()
return rectifyAffineTransformationUpIsUp(torch.cat([a1,a2], dim = 1).contiguous())
class AffNetFast52RotUp(nn.Module):
def __init__(self, PS = 32):
super(AffNetFast52RotUp, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias = False),
nn.ReLU(),
nn.Dropout(0.25),
nn.Conv2d(64, 5, kernel_size=8, stride=1, padding=0, bias = True),
nn.AdaptiveAvgPool2d(1)
)
self.PS = PS
self.features.apply(self.weights_init)
self.halfPS = int(PS/2)
return
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1).detach()
sp = torch.std(flat, dim=1).detach() + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def weights_init(self,m):
if isinstance(m, nn.Conv2d):
nn.init.orthogonal(m.weight.data, gain=0.8)
try:
if m.weight.data.shape[-1] == 8: #last layer:
nn.init.orthogonal(m.weight.data, gain=1.0)
# print ('last layer init bias')
m.bias.data = torch.FloatTensor([1,0, 1, 0, 1])
else:
nn.init.constant(m.bias.data, 0.01)
except:
pass
return
def forward(self, input, return_A_matrix = False):
x = self.features(self.input_norm(input)).view(-1,5)
angle = torch.atan2(x[:,3], x[:,4]+1e-8);
rot = get_rotation_matrix(angle)
return torch.bmm(rot, rectifyAffineTransformationUpIsUp(torch.cat([torch.cat([x[:,0:1].view(-1,1,1), x[:,1:2].view(x.size(0),1,1).contiguous()], dim = 2), x[:,1:3].view(-1,1,2).contiguous()], dim = 1)).contiguous())
class AffNetFast52Rot(nn.Module):
def __init__(self, PS = 32):
super(AffNetFast52Rot, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias = False),
nn.ReLU(),
nn.Dropout(0.25),
nn.Conv2d(64, 5, kernel_size=8, stride=1, padding=0, bias = True),
nn.AdaptiveAvgPool2d(1),
nn.Tanh()
)
self.PS = PS
self.features.apply(self.weights_init)
self.halfPS = int(PS/2)
return
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1).detach()
sp = torch.std(flat, dim=1).detach() + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def weights_init(self,m):
if isinstance(m, nn.Conv2d):
nn.init.orthogonal(m.weight.data, gain=0.8)
try:
if m.weight.data.shape[-1] == 8: #last layer:
nn.init.orthogonal(m.weight.data, gain=1.0)
# print ('last layer init bias')
m.bias.data = torch.FloatTensor([0.8,0, 0.8, 0, 1])
else:
nn.init.constant(m.bias.data, 0.01)
except:
pass
return
def forward(self, input, return_A_matrix = False):
x = self.features(self.input_norm(input)).view(-1,5)
angle = torch.atan2(x[:,3], x[:,4]+1e-8);
rot = get_rotation_matrix(angle)
return torch.bmm(rot, torch.cat([torch.cat([x[:,0:1].view(-1,1,1), x[:,1:2].view(x.size(0),1,1).contiguous()], dim = 2), x[:,1:3].view(-1,1,2).contiguous()], dim = 1))
class AffNetFast5Rot(nn.Module):
def __init__(self, PS = 32):
super(AffNetFast5Rot, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias = False),
nn.ReLU(),
nn.Dropout(0.25),
nn.Conv2d(64, 5, kernel_size=8, stride=1, padding=0, bias = True),
nn.AdaptiveAvgPool2d(1)
)
self.PS = PS
self.features.apply(self.weights_init)
self.halfPS = int(PS/2)
return
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1).detach()
sp = torch.std(flat, dim=1).detach() + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def weights_init(self,m):
if isinstance(m, nn.Conv2d):
nn.init.orthogonal(m.weight.data, gain=0.8)
try:
if m.weight.data.shape[-1] == 8: #last layer:
nn.init.orthogonal(m.weight.data, gain=1.0)
# print ('last layer init bias')
m.bias.data = torch.FloatTensor([1,0, 1, 0, 1])
else:
nn.init.constant(m.bias.data, 0.01)
except:
pass
return
def forward(self, input, return_A_matrix = False):
x = self.features(self.input_norm(input)).view(-1,5)
rot = get_rotation_matrix(torch.atan2(x[:,3], x[:,4]+1e-8))
if input.is_cuda:
return torch.bmm(rot, torch.cat([torch.cat([x[:,0:1].view(-1,1,1), torch.zeros(x.size(0),1,1).cuda()], dim = 2), x[:,1:3].view(-1,1,2).contiguous()], dim = 1))
else:
return torch.bmm(rot, torch.cat([torch.cat([x[:,0:1].view(-1,1,1), torch.zeros(x.size(0),1,1)], dim = 2), x[:,1:3].view(-1,1,2).contiguous()], dim = 1))
class AffNetFast4Rot(nn.Module):
def __init__(self, PS = 32):
super(AffNetFast4Rot, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias = False),
nn.ReLU(),
nn.Dropout(0.25),
nn.Conv2d(64, 4, kernel_size=8, stride=1, padding=0, bias = True),
nn.AdaptiveAvgPool2d(1),
nn.Tanh()
)
self.PS = PS
self.features.apply(self.weights_init)
self.halfPS = int(PS/2)
return
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1).detach()
sp = torch.std(flat, dim=1).detach() + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def weights_init(self,m):
if isinstance(m, nn.Conv2d):
nn.init.orthogonal(m.weight.data, gain=0.8)
try:
if m.weight.data.shape[-1] == 8: #last layer:
nn.init.orthogonal(m.weight.data, gain=1.0)
print ('last layer init bias')
m.bias.data = torch.FloatTensor([0.8,0,0,0.8])
else:
nn.init.constant(m.bias.data, 0.01)
except:
pass
return
def forward(self, input, return_A_matrix = False):
return self.features(self.input_norm(input)).view(-1,2,2).contiguous()
class AffNetFast4RotNosc(nn.Module):
def __init__(self, PS = 32):
super(AffNetFast4RotNosc, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias = False),
nn.ReLU(),
nn.Dropout(0.25),
nn.Conv2d(64, 4, kernel_size=8, stride=1, padding=0, bias = True),
nn.AdaptiveAvgPool2d(1)
)
self.PS = PS
self.features.apply(self.weights_init)
self.halfPS = int(PS/2)
return
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1).detach()
sp = torch.std(flat, dim=1).detach() + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def weights_init(self,m):
if isinstance(m, nn.Conv2d):
nn.init.orthogonal(m.weight.data, gain=0.8)
try:
if m.weight.data.shape[-1] == 8: #last layer:
nn.init.orthogonal(m.weight.data, gain=1.0)
print ('last layer init bias')
m.bias.data = torch.FloatTensor([1,0,0,1])
else:
nn.init.constant(m.bias.data, 0.01)
except:
pass
return
def forward(self, input, return_A_matrix = False):
A = self.features(self.input_norm(input)).view(-1,2,2).contiguous()
scale = torch.sqrt(torch.abs(A[:,0,0]*A[:,1,1] - A[:,1,0]*A[:,0,1] + 1e-10))
return A / (scale.view(-1,1,1).repeat(1,2,2) + 1e-8)
class AffNetFastScale(nn.Module):
def __init__(self, PS = 32):
super(AffNetFastScale, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Dropout(0.25),
nn.Conv2d(64, 4, kernel_size=8, stride=1, padding=0, bias = True),
nn.Tanh(),
nn.AdaptiveAvgPool2d(1)
)
self.PS = PS
self.features.apply(self.weights_init)
self.halfPS = int(PS/2)
return
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1).detach()
sp = torch.std(flat, dim=1).detach() + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def weights_init(self,m):
if isinstance(m, nn.Conv2d):
nn.init.orthogonal(m.weight.data, gain=0.8)
try:
nn.init.constant(m.bias.data, 0.01)
except:
pass
return
def forward(self, input, return_A_matrix = False):
xy = self.features(self.input_norm(input)).view(-1,4)
a1 = torch.cat([1.0 + xy[:,0].contiguous().view(-1,1,1), 0 * xy[:,0].contiguous().view(-1,1,1)], dim = 2).contiguous()
a2 = torch.cat([xy[:,1].contiguous().view(-1,1,1), 1.0 + xy[:,2].contiguous().view(-1,1,1)], dim = 2).contiguous()
scale = torch.exp(xy[:,3].contiguous().view(-1,1,1).repeat(1,2,2))
return scale * rectifyAffineTransformationUpIsUp(torch.cat([a1,a2], dim = 1).contiguous())
class AffNetFast2Par(nn.Module):
def __init__(self, PS = 32):
super(AffNetFast2Par, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias = False),
nn.ReLU(),
nn.Dropout(0.25),
nn.Conv2d(64, 3, kernel_size=8, stride=1, padding=0, bias = True),
nn.AdaptiveAvgPool2d(1)
)
self.PS = PS
self.features.apply(self.weights_init)
self.halfPS = int(PS/2)
return
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1).detach()
sp = torch.std(flat, dim=1).detach() + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def weights_init(self,m):
if isinstance(m, nn.Conv2d):
nn.init.orthogonal(m.weight.data, gain=0.8)
try:
if m.weight.data.shape[-1] == 8: #last layer:
nn.init.orthogonal(m.weight.data, gain=1.0)
print ('last layer init bias')
m.bias.data = torch.FloatTensor([0, 0, 1 ])
else:
nn.init.constant(m.bias.data, 0.01)
except:
pass
return
def forward(self, input, return_A_matrix = False):
x = self.features(self.input_norm(input)).view(-1,3)
angle = torch.atan2(x[:,1], x[:,2]+1e-8);
rot = get_rotation_matrix(angle)
tilt = torch.exp(1.8 * F.tanh(x[:,0]))
tilt_matrix = torch.eye(2).unsqueeze(0).repeat(input.size(0),1,1)
if x.is_cuda:
tilt_matrix = tilt_matrix.cuda()
tilt_matrix[:,0,0] = torch.sqrt(tilt)
tilt_matrix[:,1,1] = 1.0 / torch.sqrt(tilt)
return rectifyAffineTransformationUpIsUp(torch.bmm(rot, tilt_matrix)).contiguous()
class AffNetFastFullConv(nn.Module):
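    # Fully convolutional variant: predicts an affine-shape map over a whole
    # (locally normalized) image instead of one estimate per cropped patch.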
def __init__(self, PS = 32, stride = 2):
super(AffNetFastFullConv, self).__init__()
self.lrn = LocalNorm2d(33)
self.features = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=3, stride=stride, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=stride, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Dropout(0.25),
nn.Conv2d(64, 3, kernel_size=8, stride=1, padding = 0, bias = True),
)
self.stride = stride
self.PS = PS
self.features.apply(self.weights_init)
self.halfPS = int(PS/2)
return
def weights_init(self,m):
if isinstance(m, nn.Conv2d):
nn.init.orthogonal(m.weight.data, gain=0.8)
try:
nn.init.constant(m.bias.data, 0.01)
except:
pass
return
def forward(self, input, return_A_matrix = False):
norm_inp = self.lrn(input)
ff = self.features(F.pad(norm_inp, (14,14,14,14), 'reflect'))
xy = F.tanh(F.upsample(ff, (input.size(2), input.size(3)),mode='bilinear'))
a0bc = torch.cat([1.0 + xy[:,0:1,:,:].contiguous(), 0*xy[:,1:2,:,:].contiguous(),
xy[:,1:2,:,:].contiguous(), 1.0 + xy[:,2:,:,:].contiguous()], dim = 1).contiguous()
return rectifyAffineTransformationUpIsUpFullyConv(a0bc).contiguous()
class AffNetFast52RotL(nn.Module):
def __init__(self, PS = 32):
super(AffNetFast52RotL, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias = False),
nn.ReLU(),
nn.Dropout(0.25),
nn.Conv2d(64, 5, kernel_size=8, stride=1, padding=0, bias = True),
nn.AdaptiveAvgPool2d(1),
)
self.PS = PS
self.features.apply(self.weights_init)
self.halfPS = int(PS/2)
return
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1).detach()
sp = torch.std(flat, dim=1).detach() + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def weights_init(self,m):
if isinstance(m, nn.Conv2d):
nn.init.orthogonal(m.weight.data, gain=0.8)
try:
if m.weight.data.shape[-1] == 8: #last layer:
nn.init.orthogonal(m.weight.data, gain=1.0)
print ('last layer init bias')
m.bias.data = torch.FloatTensor([0.8,0, 0.8, 0, 1])
else:
nn.init.constant(m.bias.data, 0.01)
except:
pass
return
def forward(self, input, return_A_matrix = False):
x = self.features(self.input_norm(input)).view(-1,5)
angle = torch.atan2(x[:,3], x[:,4]+1e-8);
rot = get_rotation_matrix(angle)
return torch.bmm(rot, torch.cat([torch.cat([x[:,0:1].view(-1,1,1), x[:,1:2].view(x.size(0),1,1).contiguous()], dim = 2), x[:,1:3].view(-1,1,2).contiguous()], dim = 1))
class AffNetFastBias(nn.Module):
def __init__(self, PS = 32):
super(AffNetFastBias, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(16, affine=False),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias = False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Dropout(0.25),
nn.Conv2d(64, 3, kernel_size=8, stride=1, padding=0, bias = True),
nn.Tanh(),
nn.AdaptiveAvgPool2d(1)
)
self.PS = PS
self.features.apply(self.weights_init)
self.halfPS = int(PS/2)
return
def input_norm(self,x):
flat = x.view(x.size(0), -1)
mp = torch.mean(flat, dim=1).detach()
sp = torch.std(flat, dim=1).detach() + 1e-7
return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
def weights_init(self,m):
if isinstance(m, nn.Conv2d):
nn.init.orthogonal(m.weight.data, gain=0.8)
try:
if m.weight.data.shape[-1] == 8: #last layer:
nn.init.orthogonal(m.weight.data, gain=1.0)
print ('last layer init bias')
m.bias.data = torch.FloatTensor([0.8, 0, 0.8 ])
else:
nn.init.constant(m.bias.data, 0.01)
except:
pass
return
def forward(self, input, return_A_matrix = False):
xy = self.features(self.input_norm(input)).view(-1,3)
a1 = torch.cat([xy[:,0].contiguous().view(-1,1,1), 0 * xy[:,0].contiguous().view(-1,1,1)], dim = 2).contiguous()
a2 = torch.cat([xy[:,1].contiguous().view(-1,1,1), xy[:,2].contiguous().view(-1,1,1)], dim = 2).contiguous()
return rectifyAffineTransformationUpIsUp(torch.cat([a1,a2], dim = 1).contiguous())
| 36,272 | 45.32567 | 223 | py |
NORPPA | NORPPA-main/reidentification/hesaff_pytorch/extract_features_oxaff.py | import torch
import torch.nn as nn
import numpy as np
import sys
import time
from PIL import Image
from torch.autograd import Variable
from SparseImgRepresenter import ScaleSpaceAffinePatchExtractor
from LAF import denormalizeLAFs, LAFs2ell
from Utils import line_prepender
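# Command-line script: detects up to `nfeats` affine-covariant regions in the input
# image with ScaleSpaceAffinePatchExtractor and writes them as ellipses in the
# Oxford-affine text format (a '1.0' header line, the region count, then one row per region).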
USE_CUDA = False
try:
input_img_fname = sys.argv[1]
output_fname = sys.argv[2]
nfeats = int(sys.argv[3])
except:
print("Wrong input format. Try python extract_features_oxaff.py graf1.ppm out.txt 2000")
sys.exit(1)
img = Image.open(input_img_fname).convert('RGB')
img = np.mean(np.array(img), axis = 2)
var_image = torch.autograd.Variable(torch.from_numpy(img.astype(np.float32)))
var_image = var_image.view(1, 1, var_image.size(0),var_image.size(1))
HA = ScaleSpaceAffinePatchExtractor(mrSize = 5.192, num_features = nfeats, border = 5, num_Baum_iters = 16)
if USE_CUDA:
HA = HA.cuda()
var_image = var_image.cuda()
LAFs, resp = HA(var_image)
ells = LAFs2ell(LAFs.data.cpu().numpy())
np.savetxt(output_fname, ells, delimiter=' ', fmt='%10.10f')
line_prepender(output_fname, str(len(ells)))
line_prepender(output_fname, '1.0') | 1,140 | 27.525 | 107 | py |
NORPPA | NORPPA-main/reidentification/hesaff_pytorch/pytorch_sift.py | import torch
import math
import torch.nn.init
import torch.nn as nn
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import numpy as np
class L2Norm(nn.Module):
def __init__(self):
super(L2Norm,self).__init__()
self.eps = 1e-10
def forward(self, x):
norm = torch.sqrt(torch.sum(x * x, dim = 1) + self.eps)
x= x / norm.unsqueeze(-1).expand_as(x)
return x
class L1Norm(nn.Module):
def __init__(self):
super(L1Norm,self).__init__()
self.eps = 1e-10
def forward(self, x):
norm = torch.sum(torch.abs(x), dim = 1) + self.eps
x= x / norm.expand_as(x)
return x
class my_atan2(torch.autograd.Function):
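    # Custom autograd op for atan2(y, x) with hand-written gradients
    # (d/dy = x / (x^2 + y^2), d/dx = -y / (x^2 + y^2)). Written in the legacy
    # pre-0.4 Function style (non-static forward/backward), so it needs an old PyTorch.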
def forward(self, input1,input2):
self.save_for_backward(input1, input2)
return torch.atan2(input1,input2)
def backward(self, grad_output):
y,x = self.saved_tensors
xsq_ysq = x*x + y*y
return grad_output * x/xsq_ysq, grad_output *-(y/xsq_ysq)
def getPoolingKernel(kernel_size = 25):
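    # Triangular (tent-shaped) weighting kernel, the outer product of a 1D ramp;
    # used to bilinearly pool gradient histograms into spatial bins.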
step = 1. / float(np.floor( kernel_size / 2.));
x_coef = np.arange(step/2., 1. ,step)
xc2 = np.hstack([x_coef,[1], x_coef[::-1]])
kernel = np.outer(xc2.T,xc2)
kernel = np.maximum(0,kernel)
return kernel
def get_bin_weight_kernel_size_and_stride(patch_size, num_spatial_bins):
bin_weight_stride = int(round(2.0 * math.floor(patch_size / 2) / float(num_spatial_bins + 1)))
bin_weight_kernel_size = int(2 * bin_weight_stride - 1);
return bin_weight_kernel_size, bin_weight_stride
class SIFTNet(nn.Module):
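    # SIFT descriptor built from differentiable ops: fixed convolutions for image
    # gradients, Gaussian weighting, soft assignment of orientations into
    # `num_ang_bins` bins pooled over spatial cells, then L2-normalize, clip at
    # `clipval` and L2-normalize again.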
def CircularGaussKernel(self,kernlen=21):
halfSize = kernlen / 2;
r2 = float(halfSize*halfSize);
sigma2 = 0.9 * r2;
disq = 0;
kernel = np.zeros((kernlen,kernlen))
for y in range(kernlen):
for x in range(kernlen):
disq = (y - halfSize)*(y - halfSize) + (x - halfSize)*(x - halfSize);
if disq < r2:
kernel[y,x] = math.exp(-disq / sigma2)
else:
kernel[y,x] = 0.
return kernel
def __init__(self, patch_size = 65, num_ang_bins = 8, num_spatial_bins = 4, clipval = 0.2, do_cuda = False):
super(SIFTNet, self).__init__()
gk = torch.from_numpy(self.CircularGaussKernel(kernlen=patch_size).astype(np.float32))
self.bin_weight_kernel_size, self.bin_weight_stride = get_bin_weight_kernel_size_and_stride(patch_size, num_spatial_bins)
self.gk = Variable(gk, volatile=True)
if do_cuda:
self.gk = self.gk.cuda()
self.num_ang_bins = num_ang_bins
self.num_spatial_bins = num_spatial_bins
self.clipval = clipval
self.gx = nn.Sequential(nn.Conv2d(1, 1, kernel_size=(1,3), padding = (0,1), bias = False))
for l in self.gx:
if isinstance(l, nn.Conv2d):
l.weight.data = torch.from_numpy(np.array([[[[-1, 0, 1]]]], dtype=np.float32))
self.gy = nn.Sequential(nn.Conv2d(1, 1, kernel_size=(3,1), padding = (1,0), bias = False))
for l in self.gy:
if isinstance(l, nn.Conv2d):
l.weight.data = torch.from_numpy(np.array([[[[-1], [0], [1]]]], dtype=np.float32))
self.pk = nn.Sequential(nn.Conv2d(1, 1, kernel_size=(self.bin_weight_kernel_size, self.bin_weight_kernel_size),
stride = (self.bin_weight_stride, self.bin_weight_stride),
bias = False))
for l in self.pk:
if isinstance(l, nn.Conv2d):
nw = getPoolingKernel(kernel_size = self.bin_weight_kernel_size)
new_weights = np.array(nw.reshape((1, 1, self.bin_weight_kernel_size, self.bin_weight_kernel_size)))
l.weight.data = torch.from_numpy(new_weights.astype(np.float32))
def forward(self, x):
gx = self.gx(x)
gy = self.gy(x)
mag = torch.sqrt(gx * gx + gy * gy + 1e-10)
ori = my_atan2()(gy,gx)
mag = mag * self.gk.expand_as(mag)
o_big = (ori +2.0 * math.pi )/ (2.0 * math.pi) * float(self.num_ang_bins)
bo0_big = torch.floor(o_big)
wo1_big = o_big - bo0_big
bo0_big = bo0_big % self.num_ang_bins
bo1_big = (bo0_big + 1) % self.num_ang_bins
wo0_big = (1.0 - wo1_big) * mag
wo1_big = wo1_big * mag
ang_bins = []
for i in range(0, self.num_ang_bins):
ang_bins.append(self.pk((bo0_big == i).float() * wo0_big + (bo1_big == i).float() * wo1_big))
ang_bins = torch.cat(ang_bins,1)
ang_bins = ang_bins.view(ang_bins.size(0), -1)
ang_bins = L2Norm()(ang_bins)
ang_bins = torch.clamp(ang_bins, 0.,float(self.clipval))
ang_bins = L2Norm()(ang_bins)
return ang_bins
| 4,815 | 42.781818 | 129 | py |
NORPPA | NORPPA-main/pattern_extraction/model.py | import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
import numpy as np
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
from tensorflow.keras import backend as keras
def unet(pretrained_weights = None,input_size = (512,512,1)):
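    # Standard U-Net style encoder-decoder for binary segmentation: four
    # downsampling blocks, a dropout bottleneck, four upsampling blocks with skip
    # connections, and a 1x1 sigmoid output; compiled with Adam and binary
    # cross-entropy.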
inputs = Input(input_size)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4,up6], axis = 3)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3,up7], axis = 3)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2,up8], axis = 3)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9], axis = 3)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)
model = Model(inputs = inputs, outputs = conv10)
    model.compile(optimizer = Adam(learning_rate = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])
#model.summary()
if(pretrained_weights):
model.load_weights(pretrained_weights)
return model
| 3,797 | 56.545455 | 132 | py |
robust-transformers | robust-transformers-main/conftest.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipeline are tested")
config.addinivalue_line(
"markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
)
config.addinivalue_line(
"markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
)
config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
def pytest_addoption(parser):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
from transformers.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
def check_output(self, want, got, optionflags):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
| 2,846 | 35.037975 | 107 | py |
robust-transformers | robust-transformers-main/setup.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Run `make pre-release` (or `make pre-patch` for a patch release) then run `make fix-copies` to fix the index of the
documentation.
2. Run Tests for Amazon Sagemaker. The documentation is located in `./tests/sagemaker/README.md`, otherwise @philschmid.
3. Unpin specific versions from setup.py that use a git install.
4. Commit these changes with the message: "Release: <VERSION>" and push.
5. Wait for the tests on master to be completed and be green (otherwise revert and fix bugs)
6. Add a tag in git to mark the release: "git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi' "
Push the tag to git: git push --tags origin master
7. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
8. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
   (pypi suggests using twine, as other methods upload files via plaintext.)
You may have to specify the repository url, use the following command then:
twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi transformers
Check you can run the following commands:
python -c "from transformers import pipeline; classifier = pipeline('text-classification'); print(classifier('What a nice release'))"
python -c "from transformers import *"
9. Upload the final version to actual pypi:
twine upload dist/* -r pypi
10. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
11. Run `make post-release` (or, for a patch release, `make post-patch`). If you were on a branch for the release,
you need to go back to master before executing this.
"""
import os
import re
import shutil
from distutils.core import Command
from pathlib import Path
from setuptools import find_packages, setup
# Remove stale transformers.egg-info directory to avoid https://github.com/pypa/pip/issues/5466
stale_egg_info = Path(__file__).parent / "transformers.egg-info"
if stale_egg_info.exists():
print(
(
"Warning: {} exists.\n\n"
"If you recently updated transformers to 3.0 or later, this is expected,\n"
"but it may prevent transformers from installing in editable mode.\n\n"
"This directory is automatically generated by Python's packaging tools.\n"
"I will remove it now.\n\n"
"See https://github.com/pypa/pip/issues/5466 for details.\n"
).format(stale_egg_info)
)
shutil.rmtree(stale_egg_info)
# IMPORTANT:
# 1. all dependencies should be listed here with their version requirements if any
# 2. once modified, run: `make deps_table_update` to update src/transformers/dependency_versions_table.py
_deps = [
"Pillow",
"black~=22.0",
"codecarbon==1.2.0",
"cookiecutter==1.7.2",
"dataclasses",
"datasets",
"deepspeed>=0.5.9",
"fairscale>0.3",
"faiss-cpu",
"fastapi",
"filelock",
"flake8>=3.8.3",
"flax>=0.3.5",
"ftfy",
"fugashi>=1.0",
"GitPython<3.1.19",
"huggingface-hub>=0.1.0,<1.0",
"importlib_metadata",
"ipadic>=1.0.0,<2.0",
"isort>=5.5.4",
"jax>=0.2.8",
"jaxlib>=0.1.65",
"jieba",
"nltk",
"numpy>=1.17",
"onnxconverter-common",
"onnxruntime-tools>=1.4.2",
"onnxruntime>=1.4.0",
"optuna",
"optax>=0.0.8",
"packaging>=20.0",
"parameterized",
"phonemizer",
"protobuf",
"psutil",
"pyyaml>=5.1",
"pydantic",
"pytest",
"pytest-timeout",
"pytest-xdist",
"python>=3.6.0",
"ray[tune]",
"regex!=2019.12.17",
"requests",
"rouge-score",
"sacrebleu>=1.4.12,<2.0.0",
"sacremoses",
"sagemaker>=2.31.0",
"scikit-learn",
"sentencepiece>=0.1.91,!=0.1.92",
"sigopt",
"librosa",
"starlette",
"tensorflow-cpu>=2.3",
"tensorflow>=2.3",
"tf2onnx",
"timeout-decorator",
"timm",
"tokenizers>=0.11.1,!=0.11.3",
"torch>=1.0",
"torchaudio",
"pyctcdecode>=0.3.0",
"tqdm>=4.27",
"unidic>=1.0.2",
"unidic_lite>=1.0.7",
"uvicorn",
]
# this is a lookup table with items like:
#
# tokenizers: "tokenizers==0.9.4"
# packaging: "packaging"
#
# some of the values are versioned whereas others aren't.
deps = {b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps)}
# since we save this data in src/transformers/dependency_versions_table.py it can be easily accessed from
# anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with:
#
# python -c 'import sys; from transformers.dependency_versions_table import deps; \
# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets
#
# Just pass the desired package names to that script as it's shown with 2 packages above.
#
# If transformers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above
#
# You can then feed this for example to `pip`:
#
# pip install -U $(python -c 'import sys; from transformers.dependency_versions_table import deps; \
# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets)
#
def deps_list(*pkgs):
return [deps[pkg] for pkg in pkgs]
class DepsTableUpdateCommand(Command):
"""
A custom distutils command that updates the dependency table.
usage: python setup.py deps_table_update
"""
description = "build runtime dependency table"
user_options = [
# format: (long option, short option, description).
("dep-table-update", None, "updates src/transformers/dependency_versions_table.py"),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()])
content = [
"# THIS FILE HAS BEEN AUTOGENERATED. To update:",
"# 1. modify the `_deps` dict in setup.py",
"# 2. run `make deps_table_update``",
"deps = {",
entries,
"}",
"",
]
target = "src/transformers/dependency_versions_table.py"
print(f"updating {target}")
with open(target, "w", encoding="utf-8", newline="\n") as f:
f.write("\n".join(content))
extras = {}
extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic")
extras["sklearn"] = deps_list("scikit-learn")
extras["tf"] = deps_list("tensorflow", "onnxconverter-common", "tf2onnx")
extras["tf-cpu"] = deps_list("tensorflow-cpu", "onnxconverter-common", "tf2onnx")
extras["torch"] = deps_list("torch")
if os.name == "nt": # windows
extras["retrieval"] = deps_list("datasets") # faiss is not supported on windows
extras["flax"] = [] # jax is not supported on windows
else:
extras["retrieval"] = deps_list("faiss-cpu", "datasets")
extras["flax"] = deps_list("jax", "jaxlib", "flax", "optax")
extras["tokenizers"] = deps_list("tokenizers")
extras["ftfy"] = deps_list("ftfy")
extras["onnxruntime"] = deps_list("onnxruntime", "onnxruntime-tools")
extras["onnx"] = deps_list("onnxconverter-common", "tf2onnx") + extras["onnxruntime"]
extras["modelcreation"] = deps_list("cookiecutter")
extras["sagemaker"] = deps_list("sagemaker")
extras["deepspeed"] = deps_list("deepspeed")
extras["fairscale"] = deps_list("fairscale")
extras["optuna"] = deps_list("optuna")
extras["ray"] = deps_list("ray[tune]")
extras["sigopt"] = deps_list("sigopt")
extras["integrations"] = extras["optuna"] + extras["ray"] + extras["sigopt"]
extras["serving"] = deps_list("pydantic", "uvicorn", "fastapi", "starlette")
extras["audio"] = deps_list("librosa", "pyctcdecode", "phonemizer")
# `pip install ".[speech]"` is deprecated and `pip install ".[torch-speech]"` should be used instead
extras["speech"] = deps_list("torchaudio") + extras["audio"]
extras["torch-speech"] = deps_list("torchaudio") + extras["audio"]
extras["tf-speech"] = extras["audio"]
extras["flax-speech"] = extras["audio"]
extras["vision"] = deps_list("Pillow")
extras["timm"] = deps_list("timm")
extras["codecarbon"] = deps_list("codecarbon")
extras["sentencepiece"] = deps_list("sentencepiece", "protobuf")
extras["testing"] = (
deps_list(
"pytest",
"pytest-xdist",
"timeout-decorator",
"parameterized",
"psutil",
"datasets",
"pytest-timeout",
"black",
"sacrebleu",
"rouge-score",
"nltk",
"GitPython",
)
+ extras["retrieval"]
+ extras["modelcreation"]
)
extras["quality"] = deps_list("black", "isort", "flake8", "GitPython")
extras["all"] = (
extras["tf"]
+ extras["torch"]
+ extras["flax"]
+ extras["sentencepiece"]
+ extras["tokenizers"]
+ extras["torch-speech"]
+ extras["vision"]
+ extras["integrations"]
+ extras["timm"]
+ extras["codecarbon"]
)
# Might need to add doc-builder and some specific deps in the future
extras["docs_specific"] = []
# "docs" needs "all" to resolve all the references
extras["docs"] = extras["all"] + extras["docs_specific"]
extras["dev-torch"] = (
extras['testing']
+ extras['torch']
+ extras["sentencepiece"]
+ extras["tokenizers"]
+ extras["torch-speech"]
+ extras["vision"]
+ extras["integrations"]
+ extras["timm"]
+ extras["codecarbon"]
+ extras["quality"]
+ extras["ja"]
+ extras["docs_specific"]
+ extras["sklearn"]
+ extras["modelcreation"]
+ extras["onnxruntime"]
)
extras["dev-tensorflow"] = (
extras['testing']
+ extras['tf']
+ extras["sentencepiece"]
+ extras["tokenizers"]
+ extras["vision"]
+ extras["quality"]
+ extras["docs_specific"]
+ extras["sklearn"]
+ extras["modelcreation"]
+ extras["onnx"]
+ extras["tf-speech"]
)
extras["dev"] = (
extras["all"]
+ extras["testing"]
+ extras["quality"]
+ extras["ja"]
+ extras["docs_specific"]
+ extras["sklearn"]
+ extras["modelcreation"]
)
extras["torchhub"] = deps_list(
"filelock",
"huggingface-hub",
"importlib_metadata",
"numpy",
"packaging",
"protobuf",
"regex",
"requests",
"sacremoses",
"sentencepiece",
"torch",
"tokenizers",
"tqdm",
)
# when modifying the following list, make sure to update src/transformers/dependency_versions_check.py
install_requires = [
deps["dataclasses"] + ";python_version<'3.7'", # dataclasses for Python versions that don't have it
deps["importlib_metadata"] + ";python_version<'3.8'", # importlib_metadata for Python versions that don't have it
deps["filelock"], # filesystem locks, e.g., to prevent parallel downloads
deps["huggingface-hub"],
deps["numpy"],
deps["packaging"], # utilities from PyPA to e.g., compare versions
deps["pyyaml"], # used for the model cards metadata
deps["regex"], # for OpenAI GPT
deps["requests"], # for downloading models over HTTPS
deps["sacremoses"], # for XLM
deps["tokenizers"],
deps["tqdm"], # progress bars in model download and training scripts
]
setup(
name="transformers",
version="4.18.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Sylvain Gugger, Suraj Patil, Stas Bekman, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors",
author_email="thomas@huggingface.co",
description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU",
license="Apache",
url="https://github.com/huggingface/transformers",
package_dir={"": "src"},
packages=find_packages("src"),
package_data={"transformers": ["py.typed"]},
zip_safe=False,
extras_require=extras,
entry_points={"console_scripts": ["transformers-cli=transformers.commands.transformers_cli:main"]},
python_requires=">=3.6.0",
install_requires=install_requires,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
cmdclass={"deps_table_update": DepsTableUpdateCommand},
)
| 14,253 | 33.019093 | 259 | py |
robust-transformers | robust-transformers-main/hubconf.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = ["torch", "numpy", "tokenizers", "filelock", "requests", "tqdm", "regex", "sentencepiece", "sacremoses", "importlib_metadata", "huggingface_hub"]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
r"""
# Using torch.hub !
import torch
config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased') # Download configuration from huggingface.co and cache.
config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/my_configuration.json')
config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attentions=True, foo=False)
assert config.output_attentions == True
config, unused_kwargs = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attentions=True, foo=False, return_unused_kwargs=True)
assert config.output_attentions == True
assert unused_kwargs == {'foo': False}
"""
return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
r"""
# Using torch.hub !
import torch
tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', 'bert-base-uncased') # Download vocabulary from huggingface.co and cache.
tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', './test/bert_saved_model/') # E.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`
"""
return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
r"""
# Using torch.hub !
import torch
model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache.
model = torch.hub.load('huggingface/transformers', 'model', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased', output_attentions=True) # Update configuration during loading
assert model.config.output_attentions == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
model = torch.hub.load('huggingface/transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
r"""
# Using torch.hub !
import torch
model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'gpt2') # Download model and configuration from huggingface.co and cache.
model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'gpt2', output_attentions=True) # Update configuration during loading
assert model.config.output_attentions == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_pretrained('./tf_model/gpt_tf_model_config.json')
model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './tf_model/gpt_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
r"""
# Using torch.hub !
import torch
model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache.
model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', 'bert-base-uncased', output_attentions=True) # Update configuration during loading
assert model.config.output_attentions == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
r"""
# Using torch.hub !
import torch
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache.
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attentions=True) # Update configuration during loading
assert model.config.output_attentions == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
r"""
# Using torch.hub !
import torch
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache.
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attentions=True) # Update configuration during loading
assert model.config.output_attentions == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 8,496 | 51.450617 | 189 | py |
robust-transformers | robust-transformers-main/examples/research_projects/longform-qa/eli5_app.py | import datasets
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
import faiss
import transformers
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_s2s_model,
qa_s2s_generate,
query_es_index,
query_qa_dense_index,
)
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
if LOAD_DENSE_INDEX:
qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
_ = qar_model.eval()
else:
qar_tokenizer, qar_model = (None, None)
if MODEL_TYPE == "bart":
s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
s2s_model.load_state_dict(save_dict["model"])
_ = s2s_model.eval()
else:
s2s_tokenizer, s2s_model = make_qa_s2s_model(
model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
)
return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
if LOAD_DENSE_INDEX:
faiss_res = faiss.StandardGpuResources()
wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
wiki40b_passage_reps = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
dtype="float32",
mode="r",
shape=(wiki40b_passages.num_rows, 128),
)
wiki40b_index_flat = faiss.IndexFlatIP(128)
wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
wiki40b_gpu_index_flat.add(wiki40b_passage_reps) # TODO fix for larger GPU
else:
wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
eli5_train = eli5["train_eli5"]
eli5_train_q_reps = np.memmap(
"eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
)
eli5_train_q_index = faiss.IndexFlatIP(128)
eli5_train_q_index.add(eli5_train_q_reps)
return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
D, I = eli5_train_q_index.search(q_rep, n_results)
nn_examples = [eli5_train[int(i)] for i in I[0]]
return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
if source == "none":
support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
else:
if method == "dense":
support_doc, hit_lst = query_qa_dense_index(
question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
)
else:
support_doc, hit_lst = query_es_index(
question,
es_client,
index_name="english_wiki40b_snippets_100w",
n_results=n_results,
)
support_list = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
question_doc = "question: {} context: {}".format(question, support_doc)
return question_doc, support_list
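# st.cache cannot hash torch tensors or the tokenizer, so those types are mapped to a constant;
# the cache key then depends only on the question document and the generation settings.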
@st.cache(
hash_funcs={
torch.Tensor: (lambda _: None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
}
)
def answer_question(
question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
with torch.no_grad():
answer = qa_s2s_generate(
question_doc,
s2s_model,
s2s_tokenizer,
num_answers=1,
num_beams=n_beams,
min_len=min_len,
max_len=max_len,
do_sample=sampling,
temp=temp,
top_p=top_p,
top_k=None,
max_input_length=1024,
device="cuda:0",
)[0]
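    # NOTE: support_list is not defined in this function; it refers to the module-level variable
    # assigned in the Streamlit flow below before answer_question is called.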
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
action_st = st.sidebar.selectbox(
"",
action_list,
index=3,
)
action = action_list.index(action_st)
show_type = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
show_passages = show_type == "Show full text of passages"
else:
action = 3
show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
retriever_info = """
### Information retriever options
    The **sparse** retriever uses Elasticsearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
    The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
wiki_source = "wiki40b"
index_type = "dense"
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
generate_info = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for deterministic decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
min_len = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
max_len = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
top_p = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
temp = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
n_beams = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
question = st.text_input("Enter your question here:", "")
else:
question = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
_, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
_, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
support_list = support_list[:10]
question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
answer, support_list = answer_question(
question_doc,
s2s_model,
s2s_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
sec_titles = res[1].strip()
if sec_titles == "":
sections = "[{}]({})".format(res[0], wiki_url)
else:
sec_list = sec_titles.split(" & ")
sections = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
nn_train_list = find_nearest_training(question)
train_exple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
answers_st = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 13,474 | 37.28125 | 159 | py |
robust-transformers | robust-transformers-main/examples/research_projects/longform-qa/eli5_utils.py | import functools
import math
import os # noqa: F401
from random import choice, randint
from time import time
import datasets # noqa: F401
import numpy as np
import pandas as pd
import torch
import torch.utils.checkpoint as checkpoint
from elasticsearch import Elasticsearch # noqa: F401
from elasticsearch.helpers import bulk, streaming_bulk # noqa: F401
from torch import nn
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from tqdm import tqdm
import faiss # noqa: F401
from transformers import AdamW, AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer, get_linear_schedule_with_warmup
pd.set_option("display.max_colwidth", None)
###############
# Sparse index
###############
def make_es_index_snippets(es_client, passages_dset, index_name="english_wiki_kilt_snippets_100w"):
index_config = {
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {
"properties": {
"article_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"},
"section_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"},
"passage_text": {"type": "text", "analyzer": "standard", "similarity": "BM25"},
}
},
}
es_client.indices.create(index=index_name, body=index_config)
number_of_docs = passages_dset.num_rows
progress = tqdm(unit="docs", total=number_of_docs)
successes = 0
def passage_generator():
for passage in passages_dset:
yield passage
# create the ES index
for ok, action in streaming_bulk(
client=es_client,
index=index_name,
actions=passage_generator(),
):
progress.update(1)
successes += ok
print("Indexed %d documents" % (successes,))
def query_es_index(question, es_client, index_name="english_wiki_kilt_snippets_100w", n_results=10, min_length=20):
q = question.lower()
banned = ["how", "why", "what", "where", "which", "do", "does", "is", "?", "eli5", "eli5:"]
q = " ".join([w for w in q.split() if w not in banned])
response = es_client.search(
index=index_name,
body={
"query": {
"multi_match": {
"query": q,
"fields": ["article_title", "section_title", "passage_text^2"],
"type": "cross_fields",
}
},
"size": 2 * n_results,
},
)
hits = response["hits"]["hits"]
support_doc = "<P> " + " <P> ".join([hit["_source"]["passage_text"] for hit in hits])
res_list = [dict([(k, hit["_source"][k]) for k in hit["_source"] if k != "passage_text"]) for hit in hits]
for r, hit in zip(res_list, hits):
r["passage_id"] = hit["_id"]
r["score"] = hit["_score"]
r["passage_text"] = hit["_source"]["passage_text"]
res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results]
return support_doc, res_list
###############
# ELI5 retriever training
###############
class ELI5DatasetQARetriver(Dataset):
def __init__(self, examples_array, extra_answer_threshold=3, min_answer_length=64, training=True, n_samples=None):
self.data = examples_array
self.answer_thres = extra_answer_threshold
self.min_length = min_answer_length
self.training = training
self.n_samples = self.data.num_rows if n_samples is None else n_samples
def __len__(self):
return self.n_samples
def make_example(self, idx):
example = self.data[idx]
question = example["title"]
if self.training:
answers = [a for i, (a, sc) in enumerate(zip(example["answers"]["text"], example["answers"]["score"]))]
answer_tab = choice(answers).split(" ")
start_idx = randint(0, max(0, len(answer_tab) - self.min_length))
answer_span = " ".join(answer_tab[start_idx:])
else:
answer_span = example["answers"]["text"][0]
return (question, answer_span)
def __getitem__(self, idx):
return self.make_example(idx % self.data.num_rows)
class RetrievalQAEmbedder(nn.Module):
def __init__(self, sent_encoder, dim):
super(RetrievalQAEmbedder, self).__init__()
self.sent_encoder = sent_encoder
self.output_dim = 128
self.project_q = nn.Linear(dim, self.output_dim, bias=False)
self.project_a = nn.Linear(dim, self.output_dim, bias=False)
self.ce_loss = nn.CrossEntropyLoss(reduction="mean")
def embed_sentences_checkpointed(self, input_ids, attention_mask, checkpoint_batch_size=-1):
# reproduces BERT forward pass with checkpointing
if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size:
return self.sent_encoder(input_ids, attention_mask=attention_mask)[1]
else:
# prepare implicit variables
device = input_ids.device
input_shape = input_ids.size()
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
head_mask = [None] * self.sent_encoder.config.num_hidden_layers
extended_attention_mask: torch.Tensor = self.sent_encoder.get_extended_attention_mask(
attention_mask, input_shape, device
)
# define function for checkpointing
def partial_encode(*inputs):
encoder_outputs = self.sent_encoder.encoder(
inputs[0],
attention_mask=inputs[1],
head_mask=head_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.sent_encoder.pooler(sequence_output)
return pooled_output
# run embedding layer on everything at once
embedding_output = self.sent_encoder.embeddings(
input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None
)
# run encoding and pooling on one mini-batch at a time
pooled_output_list = []
for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)):
b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
pooled_output_list.append(pooled_output)
return torch.cat(pooled_output_list, dim=0)
def embed_questions(self, q_ids, q_mask, checkpoint_batch_size=-1):
q_reps = self.embed_sentences_checkpointed(q_ids, q_mask, checkpoint_batch_size)
return self.project_q(q_reps)
def embed_answers(self, a_ids, a_mask, checkpoint_batch_size=-1):
a_reps = self.embed_sentences_checkpointed(a_ids, a_mask, checkpoint_batch_size)
return self.project_a(a_reps)
def forward(self, q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=-1):
device = q_ids.device
q_reps = self.embed_questions(q_ids, q_mask, checkpoint_batch_size)
a_reps = self.embed_answers(a_ids, a_mask, checkpoint_batch_size)
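        # In-batch negatives: compare_scores is a (batch x batch) question/answer similarity
        # matrix whose diagonal holds the matching pairs, so the cross-entropy terms below push
        # each question toward its own answer and each answer toward its own question.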
compare_scores = torch.mm(q_reps, a_reps.t())
loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
loss = (loss_qa + loss_aq) / 2
return loss
def make_qa_retriever_model(model_name="google/bert_uncased_L-8_H-512_A-8", from_file=None, device="cuda:0"):
tokenizer = AutoTokenizer.from_pretrained(model_name)
bert_model = AutoModel.from_pretrained(model_name).to(device)
# run bert_model on a dummy batch to get output dimension
d_ids = torch.LongTensor(
[[bert_model.config.bos_token_id if bert_model.config.bos_token_id is not None else 1]]
).to(device)
d_mask = torch.LongTensor([[1]]).to(device)
sent_dim = bert_model(d_ids, attention_mask=d_mask)[1].shape[-1]
qa_embedder = RetrievalQAEmbedder(bert_model, sent_dim).to(device)
if from_file is not None:
param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states
qa_embedder.load_state_dict(param_dict["model"])
return tokenizer, qa_embedder
def make_qa_retriever_batch(qa_list, tokenizer, max_len=64, device="cuda:0"):
q_ls = [q for q, a in qa_list]
a_ls = [a for q, a in qa_list]
q_toks = tokenizer(q_ls, max_length=max_len, padding="max_length", truncation=True)
q_ids, q_mask = (
torch.LongTensor(q_toks["input_ids"]).to(device),
torch.LongTensor(q_toks["attention_mask"]).to(device),
)
a_toks = tokenizer(a_ls, max_length=max_len, padding="max_length", truncation=True)
a_ids, a_mask = (
torch.LongTensor(a_toks["input_ids"]).to(device),
torch.LongTensor(a_toks["attention_mask"]).to(device),
)
return (q_ids, q_mask, a_ids, a_mask)
def train_qa_retriever_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0):
model.train()
# make iterator
train_sampler = RandomSampler(dataset)
model_collate_fn = functools.partial(
make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0"
)
data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn)
epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True)
# accumulate loss since last print
loc_steps = 0
loc_loss = 0.0
st_time = time()
for step, batch in enumerate(epoch_iterator):
q_ids, q_mask, a_ids, a_mask = batch
pre_loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size)
loss = pre_loss.sum()
# optimizer
loss.backward()
optimizer.step()
scheduler.step()
model.zero_grad()
# some printing within the epoch
loc_loss += loss.item()
loc_steps += 1
if step % args.print_freq == 0 or step == 1:
print(
"{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format(
e,
step,
len(dataset) // args.batch_size,
loc_loss / loc_steps,
time() - st_time,
)
)
loc_loss = 0
loc_steps = 0
def train_qa_retriever_joint_epoch(model, dataset_list, tokenizer, optimizer, scheduler, args, e=0):
model.train()
model_collate_fn = functools.partial(
make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0"
)
# make iterator
train_samplers = [RandomSampler(dataset) for dataset in dataset_list]
data_loaders = [
DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn)
for dataset, train_sampler in zip(dataset_list, train_samplers)
]
iterators = [iter(dloader) for dloader in data_loaders]
joint_iter = zip(*iterators)
# accumulate loss since last print
loc_steps = 0
loc_loss = 0.0
st_time = time()
    for step, batches in enumerate(joint_iter):
for batch in batches:
q_ids, q_mask, a_ids, a_mask = batch
loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size)
# optimizer
loss.backward()
optimizer.step()
scheduler.step()
model.zero_grad()
# some printing within the epoch
loc_loss += loss.item()
loc_steps += 1
if step % args.print_freq == 0:
print(
"{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format(
e,
step,
len(dataset_list[0]) // args.batch_size,
loc_loss / loc_steps,
time() - st_time,
)
)
loc_loss = 0
loc_steps = 0
def evaluate_qa_retriever(model, dataset, tokenizer, args):
model.eval()
# make iterator
eval_sampler = SequentialSampler(dataset)
model_collate_fn = functools.partial(
make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0"
)
data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=eval_sampler, collate_fn=model_collate_fn)
epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True)
tot_loss = 0.0
with torch.no_grad():
for step, batch in enumerate(epoch_iterator):
q_ids, q_mask, a_ids, a_mask = batch
loss = model(q_ids, q_mask, a_ids, a_mask)
tot_loss += loss.item()
return tot_loss / (step + 1)
def train_qa_retriever(qar_model, qar_tokenizer, qar_train_dset, qar_valid_dset, qar_args):
qar_optimizer = AdamW(qar_model.parameters(), lr=qar_args.learning_rate, eps=1e-8)
qar_scheduler = get_linear_schedule_with_warmup(
qar_optimizer,
num_warmup_steps=100,
num_training_steps=(qar_args.num_epochs + 1) * math.ceil(len(qar_train_dset) / qar_args.batch_size),
)
for e in range(qar_args.num_epochs):
train_qa_retriever_epoch(qar_model, qar_train_dset, qar_tokenizer, qar_optimizer, qar_scheduler, qar_args, e)
m_save_dict = {
"model": qar_model.state_dict(),
"optimizer": qar_optimizer.state_dict(),
"scheduler": qar_scheduler.state_dict(),
}
print("Saving model {}".format(qar_args.model_save_name))
torch.save(m_save_dict, "{}_{}.pth".format(qar_args.model_save_name, e))
eval_loss = evaluate_qa_retriever(qar_model, qar_valid_dset, qar_tokenizer, qar_args)
print("Evaluation loss epoch {:4d}: {:.3f}".format(e, eval_loss))
###############
# ELI5 seq2seq model training
###############
class ELI5DatasetS2S(Dataset):
def __init__(
self, examples_array, make_doc_fun=None, extra_answer_threshold=3, document_cache=None, training=True
):
self.training = training
self.data = examples_array
self.make_doc_function = make_doc_fun
self.document_cache = {} if document_cache is None else document_cache
assert not (make_doc_fun is None and document_cache is None)
# make index of specific question-answer pairs from multi-answers
if self.training:
self.qa_id_list = [
(i, j)
for i, qa in enumerate(self.data)
for j, (a, sc) in enumerate(zip(qa["answers"]["text"], qa["answers"]["score"]))
if j == 0 or sc >= extra_answer_threshold
]
else:
self.qa_id_list = [(i, 0) for i in range(self.data.num_rows)]
def __len__(self):
return len(self.qa_id_list)
def make_example(self, idx):
i, j = self.qa_id_list[idx]
example = self.data[i]
question = example["title"] + " " + example["selftext"]
answer = example["answers"]["text"][j]
q_id = example["q_id"]
if self.make_doc_function is not None:
self.document_cache[q_id] = self.document_cache.get(q_id, self.make_doc_function(example["title"]))
document = self.document_cache[q_id]
in_st = "question: {} context: {}".format(
question.lower().replace(" --t--", "").strip(),
document.lower().strip(),
)
out_st = answer
return (in_st, out_st)
def __getitem__(self, idx):
return self.make_example(idx)
def make_qa_s2s_model(model_name="facebook/bart-large", from_file=None, device="cuda:0"):
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
if from_file is not None:
param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states
model.load_state_dict(param_dict["model"])
return tokenizer, model
def make_qa_s2s_batch(qa_list, tokenizer, max_len=64, max_a_len=360, device="cuda:0"):
q_ls = [q for q, a in qa_list]
a_ls = [a for q, a in qa_list]
q_toks = tokenizer(q_ls, max_length=max_len, padding="max_length", truncation=True)
q_ids, q_mask = (
torch.LongTensor(q_toks["input_ids"]).to(device),
torch.LongTensor(q_toks["attention_mask"]).to(device),
)
a_toks = tokenizer(a_ls, max_length=min(max_len, max_a_len), padding="max_length", truncation=True)
a_ids, a_mask = (
torch.LongTensor(a_toks["input_ids"]).to(device),
torch.LongTensor(a_toks["attention_mask"]).to(device),
)
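    # Teacher forcing: the decoder is fed the answer shifted right (a_ids[:, :-1]) and trained
    # to predict the answer shifted left (a_ids[:, 1:]); padded label positions are set to -100
    # so they are ignored by the cross-entropy loss.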
lm_labels = a_ids[:, 1:].contiguous().clone()
lm_labels[a_mask[:, 1:].contiguous() == 0] = -100
model_inputs = {
"input_ids": q_ids,
"attention_mask": q_mask,
"decoder_input_ids": a_ids[:, :-1].contiguous(),
"lm_labels": lm_labels,
}
return model_inputs
def train_qa_s2s_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0, curriculum=False):
model.train()
# make iterator
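    # curriculum=True keeps the dataset in its original order (used for the first epoch only,
    # see train_qa_s2s below); otherwise examples are shuffled.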
if curriculum:
train_sampler = SequentialSampler(dataset)
else:
train_sampler = RandomSampler(dataset)
model_collate_fn = functools.partial(
make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0"
)
data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn)
epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True)
# accumulate loss since last print
loc_steps = 0
loc_loss = 0.0
st_time = time()
for step, batch_inputs in enumerate(epoch_iterator):
pre_loss = model(**batch_inputs)[0]
loss = pre_loss.sum() / pre_loss.shape[0]
loss.backward()
# optimizer
if step % args.backward_freq == 0:
optimizer.step()
scheduler.step()
model.zero_grad()
# some printing within the epoch
loc_loss += loss.item()
loc_steps += 1
if step % args.print_freq == 0 or step == 1:
print(
"{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format(
e,
step,
len(dataset) // args.batch_size,
loc_loss / loc_steps,
time() - st_time,
)
)
loc_loss = 0
loc_steps = 0
def eval_qa_s2s_epoch(model, dataset, tokenizer, args):
model.eval()
# make iterator
train_sampler = SequentialSampler(dataset)
model_collate_fn = functools.partial(
make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0"
)
data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn)
epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True)
# accumulate loss since last print
loc_steps = 0
loc_loss = 0.0
st_time = time()
with torch.no_grad():
for step, batch_inputs in enumerate(epoch_iterator):
pre_loss = model(**batch_inputs)[0]
loss = pre_loss.sum() / pre_loss.shape[0]
loc_loss += loss.item()
loc_steps += 1
if step % args.print_freq == 0:
print(
"{:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format(
step,
len(dataset) // args.batch_size,
loc_loss / loc_steps,
time() - st_time,
)
)
print(
"Total \t L: {:.3f} \t -- {:.3f}".format(
loc_loss / loc_steps,
time() - st_time,
)
)
def train_qa_s2s(qa_s2s_model, qa_s2s_tokenizer, s2s_train_dset, s2s_valid_dset, s2s_args):
s2s_optimizer = AdamW(qa_s2s_model.parameters(), lr=s2s_args.learning_rate, eps=1e-8)
s2s_scheduler = get_linear_schedule_with_warmup(
s2s_optimizer,
num_warmup_steps=400,
num_training_steps=(s2s_args.num_epochs + 1) * math.ceil(len(s2s_train_dset) / s2s_args.batch_size),
)
for e in range(s2s_args.num_epochs):
train_qa_s2s_epoch(
qa_s2s_model,
s2s_train_dset,
qa_s2s_tokenizer,
s2s_optimizer,
s2s_scheduler,
s2s_args,
e,
curriculum=(e == 0),
)
m_save_dict = {
"model": qa_s2s_model.state_dict(),
"optimizer": s2s_optimizer.state_dict(),
"scheduler": s2s_scheduler.state_dict(),
}
print("Saving model {}".format(s2s_args.model_save_name))
eval_qa_s2s_epoch(qa_s2s_model, s2s_valid_dset, qa_s2s_tokenizer, s2s_args)
torch.save(m_save_dict, "{}_{}.pth".format(s2s_args.model_save_name, e))
# generate answer from input "question: ... context: <p> ..."
def qa_s2s_generate(
question_doc,
qa_s2s_model,
qa_s2s_tokenizer,
num_answers=1,
num_beams=None,
min_len=64,
max_len=256,
do_sample=False,
temp=1.0,
top_p=None,
top_k=None,
max_input_length=512,
device="cuda:0",
):
model_inputs = make_qa_s2s_batch(
[(question_doc, "A")],
qa_s2s_tokenizer,
max_input_length,
device=device,
)
n_beams = num_answers if num_beams is None else max(num_beams, num_answers)
generated_ids = qa_s2s_model.generate(
input_ids=model_inputs["input_ids"],
attention_mask=model_inputs["attention_mask"],
min_length=min_len,
max_length=max_len,
do_sample=do_sample,
early_stopping=True,
num_beams=1 if do_sample else n_beams,
temperature=temp,
top_k=top_k,
top_p=top_p,
eos_token_id=qa_s2s_tokenizer.eos_token_id,
no_repeat_ngram_size=3,
num_return_sequences=num_answers,
decoder_start_token_id=qa_s2s_tokenizer.bos_token_id,
)
return [qa_s2s_tokenizer.decode(ans_ids, skip_special_tokens=True).strip() for ans_ids in generated_ids]
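# A minimal usage sketch (the "yjernite/bart_eli5" checkpoint and the example question are
# assumptions; the support document would normally come from query_qa_dense_index or
# query_es_index):
#   s2s_tokenizer, s2s_model = make_qa_s2s_model("yjernite/bart_eli5", device="cuda:0")
#   question_doc = "question: how do planes fly? context: <P> ..."
#   answer = qa_s2s_generate(question_doc, s2s_model, s2s_tokenizer, num_beams=8, max_len=256)[0]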
###############
# ELI5-trained retrieval model usage
###############
def embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length=128, device="cuda:0"):
a_toks = tokenizer(passages, max_length=max_length, padding="max_length", truncation=True)
a_ids, a_mask = (
torch.LongTensor(a_toks["input_ids"]).to(device),
torch.LongTensor(a_toks["attention_mask"]).to(device),
)
with torch.no_grad():
a_reps = qa_embedder.embed_answers(a_ids, a_mask).cpu().type(torch.float)
return a_reps.numpy()
def embed_questions_for_retrieval(q_ls, tokenizer, qa_embedder, device="cuda:0"):
q_toks = tokenizer(q_ls, max_length=128, padding="max_length", truncation=True)
q_ids, q_mask = (
torch.LongTensor(q_toks["input_ids"]).to(device),
torch.LongTensor(q_toks["attention_mask"]).to(device),
)
with torch.no_grad():
q_reps = qa_embedder.embed_questions(q_ids, q_mask).cpu().type(torch.float)
return q_reps.numpy()
def make_qa_dense_index(
qa_embedder,
tokenizer,
passages_dset,
batch_size=512,
max_length=128,
index_name="kilt_passages_reps.dat",
dtype="float32",
device="cuda:0",
):
st_time = time()
fp = np.memmap(index_name, dtype=dtype, mode="w+", shape=(passages_dset.num_rows, 128))
n_batches = math.ceil(passages_dset.num_rows / batch_size)
for i in range(n_batches):
passages = [p for p in passages_dset[i * batch_size : (i + 1) * batch_size]["passage_text"]]
reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length, device)
fp[i * batch_size : (i + 1) * batch_size] = reps
if i % 50 == 0:
print(i, time() - st_time)
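# A minimal sketch of loading the memmap written above into a FAISS inner-product index
# (the file name, dtype and row count are assumptions that must match the arguments passed
# to make_qa_dense_index):
#   passage_reps = np.memmap("kilt_passages_reps.dat", dtype="float32", mode="r",
#                            shape=(passages_dset.num_rows, 128))
#   wiki_index = faiss.IndexFlatIP(128)
#   wiki_index.add(passage_reps)
#   support_doc, res_list = query_qa_dense_index(question, qa_embedder, tokenizer, passages_dset, wiki_index)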
def evaluate_retriever(qa_list, retriever_func, scoring_func, n_ret=10, verbose=False):
total_retriever_time = 0.0
total_retriever_score = 0.0
st_time = time()
for i, (question, answer) in enumerate(qa_list):
r_time = time()
retrieved_passages = retriever_func(question, n_ret)
total_retriever_time += time() - r_time
total_retriever_score += scoring_func(retrieved_passages, answer)
if verbose and ((i + 1) % 500 == 0 or i <= 1):
print(
"{:03d}: S-{:.4f} T-{:.4f} | {:.2f}".format(
i + 1, total_retriever_score / (i + 1), total_retriever_time / (i + 1), time() - st_time
)
)
return {"idf_recall": total_retriever_score / (i + 1), "retrieval_time": total_retriever_time / (i + 1)}
# build a support document for the question out of Wikipedia snippets
def query_qa_dense_index(
question, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20, device="cuda:0"
):
q_rep = embed_questions_for_retrieval([question], tokenizer, qa_embedder, device=device)
D, I = wiki_index.search(q_rep, 2 * n_results)
res_passages = [wiki_passages[int(i)] for i in I[0]]
support_doc = "<P> " + " <P> ".join([p["passage_text"] for p in res_passages])
res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages]
res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results]
for r, sc in zip(res_list, D[0]):
r["score"] = float(sc)
return support_doc, res_list
def batch_query_qa_dense_index(questions, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10):
q_rep = embed_questions_for_retrieval(questions, tokenizer, qa_embedder)
D, I = wiki_index.search(q_rep, n_results)
res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I]
support_doc_lst = [
"<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst
]
all_res_lists = []
for (res_passages, dl) in zip(res_passages_lst, D):
res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages]
for r, sc in zip(res_list, dl):
r["score"] = float(sc)
all_res_lists += [res_list[:]]
return support_doc_lst, all_res_lists
# find nearest neighbors of an answer or declarative text in Wikipedia snippets
def query_qa_dense_index_nn(passage, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20):
a_rep = embed_passages_for_retrieval([passage], tokenizer, qa_embedder)
D, I = wiki_index.search(a_rep, 2 * n_results)
res_passages = [wiki_passages[int(i)] for i in I[0]]
support_doc = "<P> " + " <P> ".join([p["passage_text"] for p in res_passages])
res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages]
res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results]
for r, sc, i in zip(res_list, D[0], I[0]):
r["passage_id"] = int(i)
r["score"] = float(sc)
return support_doc, res_list
def batch_query_qa_dense_index_nn(passages, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10):
a_reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder)
D, I = wiki_index.search(a_reps, n_results)
res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I]
support_doc_lst = [
"<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst
]
all_res_lists = []
for (res_passages, dl, il) in zip(res_passages_lst, D, I):
res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages]
for r, sc, i in zip(res_list, dl, il):
r["passage_id"] = int(i)
r["score"] = float(sc)
all_res_lists += [res_list[:]]
return support_doc_lst, all_res_lists
| 28,299 | 40.07402 | 119 | py |
robust-transformers | robust-transformers-main/examples/research_projects/codeparrot/scripts/codeparrot_training.py | import logging
from argparse import Namespace
from pathlib import Path
import datasets
import torch
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from torch.utils.tensorboard import SummaryWriter
import transformers
import wandb
from accelerate import Accelerator
from arguments import TrainingArguments
from huggingface_hub import Repository
from transformers import AdamW, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, get_scheduler, set_seed
class ConstantLengthDataset(IterableDataset):
"""
    Iterable dataset that returns constant length chunks of tokens from a stream of text files.
        Args:
            tokenizer (Tokenizer): The processor used for processing the data.
            dataset (dataset.Dataset): Dataset with text files.
            infinite (bool): If True, the iterator restarts from the beginning of the dataset once it is exhausted; otherwise iteration stops.
seq_length (int): Length of token sequences to return.
num_of_sequences: Number of token sequences to keep in buffer.
chars_per_token: Number of characters per token used to estimate number of tokens in text buffer.
"""
def __init__(
self, tokenizer, dataset, infinite=False, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6
):
self.tokenizer = tokenizer
self.concat_token_id = tokenizer.bos_token_id
self.dataset = dataset
self.seq_length = seq_length
self.input_characters = seq_length * chars_per_token * num_of_sequences
self.epoch = 0
self.infinite = infinite
def __iter__(self):
iterator = iter(self.dataset)
more_examples = True
while more_examples:
buffer, buffer_len = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(iterator)["content"])
buffer_len += len(buffer[-1])
except StopIteration:
if self.infinite:
iterator = iter(self.dataset)
self.epoch += 1
logger.info(f"Dataset epoch: {self.epoch}")
else:
more_examples = False
break
tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
all_token_ids = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id])
for i in range(0, len(all_token_ids), self.seq_length):
input_ids = all_token_ids[i : i + self.seq_length]
if len(input_ids) == self.seq_length:
yield torch.tensor(input_ids)
def setup_logging(args):
project_name = args.model_ckpt.split("/")[-1]
logger = logging.getLogger(__name__)
log_dir = Path(args.save_dir) / "log/"
log_dir.mkdir(exist_ok=True)
filename = f"debug_{accelerator.process_index}.log"
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
handlers=[logging.FileHandler(log_dir / filename), logging.StreamHandler()],
)
    if accelerator.is_main_process:  # we only want to set up logging once
wandb.init(project=project_name, config=args)
run_name = wandb.run.name
tb_writer = SummaryWriter()
tb_writer.add_hparams(vars(args), {"0": 0})
logger.setLevel(logging.INFO)
datasets.utils.logging.set_verbosity_info()
transformers.utils.logging.set_verbosity_info()
else:
tb_writer = None
run_name = ""
logger.setLevel(logging.ERROR)
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
return logger, tb_writer, run_name
def create_dataloaders(args):
ds_kwargs = {"streaming": True}
train_data = load_dataset(args.dataset_name_train, split="train", **ds_kwargs)
train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=args.seed)
valid_data = load_dataset(args.dataset_name_valid, split="train", **ds_kwargs)
train_dataset = ConstantLengthDataset(tokenizer, train_data, infinite=True, seq_length=args.seq_length)
valid_dataset = ConstantLengthDataset(tokenizer, valid_data, infinite=False, seq_length=args.seq_length)
train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size)
eval_dataloader = DataLoader(valid_dataset, batch_size=args.valid_batch_size)
return train_dataloader, eval_dataloader
def get_grouped_params(model, args, no_decay=["bias", "LayerNorm.weight"]):
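    # Exclude biases and LayerNorm weights from weight decay, as is conventional when
    # fine-tuning transformer models.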
params_with_wd, params_without_wd = [], []
for n, p in model.named_parameters():
if any(nd in n for nd in no_decay):
params_without_wd.append(p)
else:
params_with_wd.append(p)
return [
{"params": params_with_wd, "weight_decay": args.weight_decay},
{"params": params_without_wd, "weight_decay": 0.0},
]
def log_metrics(step, metrics):
logger.info(f"Step {step}: {metrics}")
if accelerator.is_main_process:
wandb.log(metrics)
[tb_writer.add_scalar(k, v, step) for k, v in metrics.items()]
def evaluate(args):
model.eval()
losses = []
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
outputs = model(batch, labels=batch)
loss = outputs.loss.repeat(args.valid_batch_size)
losses.append(accelerator.gather(loss))
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
loss = torch.mean(torch.cat(losses))
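    # Perplexity is the exponential of the mean cross-entropy; exp can overflow for very
    # large losses, hence the guard below.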
try:
perplexity = torch.exp(loss)
except OverflowError:
perplexity = float("inf")
return loss.item(), perplexity.item()
# Accelerator
accelerator = Accelerator()
acc_state = {str(k): str(v) for k, v in accelerator.state.__dict__.items()}
# Settings
parser = HfArgumentParser(TrainingArguments)
args = parser.parse_args()
args = Namespace(**vars(args), **acc_state)
samples_per_step = accelerator.state.num_processes * args.train_batch_size
set_seed(args.seed)
# Clone model repository
if accelerator.is_main_process:
hf_repo = Repository(args.save_dir, clone_from=args.model_ckpt)
# Logging
logger, tb_writer, run_name = setup_logging(args)
logger.info(accelerator.state)
# Checkout new branch on repo
if accelerator.is_main_process:
hf_repo.git_checkout(run_name, create_branch_ok=True)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.save_dir)
if args.gradient_checkpointing:
model.gradient_checkpointing_enable()
tokenizer = AutoTokenizer.from_pretrained(args.save_dir)
# Load dataset and dataloader
train_dataloader, eval_dataloader = create_dataloaders(args)
# Prepare the optimizer and learning rate scheduler
optimizer = AdamW(get_grouped_params(model, args), lr=args.learning_rate)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
def get_lr():
return optimizer.param_groups[0]["lr"]
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
# Train model
model.train()
completed_steps = 0
for step, batch in enumerate(train_dataloader, start=1):
loss = model(batch, labels=batch, use_cache=False).loss
log_metrics(
step, {"lr": get_lr(), "samples": step * samples_per_step, "steps": completed_steps, "loss/train": loss.item()}
)
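    # Standard gradient accumulation: scale the micro-batch loss so that the gradients
    # accumulated over gradient_accumulation_steps batches average out before the optimizer step.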
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
completed_steps += 1
if step % args.save_checkpoint_steps == 0:
logger.info("Evaluating and saving model checkpoint")
eval_loss, perplexity = evaluate(args)
log_metrics(step, {"loss/eval": eval_loss, "perplexity": perplexity})
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.save_dir, save_function=accelerator.save)
if accelerator.is_main_process:
hf_repo.push_to_hub(commit_message=f"step {step}")
model.train()
if completed_steps >= args.max_train_steps:
break
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
log_metrics(step, {"loss/eval": eval_loss, "perplexity": perplexity})
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.save_dir, save_function=accelerator.save)
if accelerator.is_main_process:
hf_repo.push_to_hub(commit_message="final model")
| 9,194 | 37.153527 | 119 | py |
robust-transformers | robust-transformers-main/examples/research_projects/codeparrot/scripts/validation_loss.py | import logging
import torch
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from accelerate import Accelerator
from arguments import EvaluationArguments
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
self.tokenizer = tokenizer
self.concat_token_id = tokenizer.bos_token_id
self.dataset = dataset
self.seq_length = seq_length
self.input_characters = seq_length * chars_per_token * num_of_sequences
def __iter__(self):
iterator = iter(self.dataset)
more_examples = True
while more_examples:
buffer, buffer_len = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(iterator)["content"])
buffer_len += len(buffer[-1])
except StopIteration:
more_examples = False
break
tokenized_inputs = tokenizer(buffer, truncation=False)["input_ids"]
all_token_ids = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id])
for i in range(0, len(all_token_ids), self.seq_length):
input_ids = all_token_ids[i : i + self.seq_length]
if len(input_ids) == self.seq_length:
yield torch.tensor(input_ids)
def create_dataloader(args):
ds_kwargs = {"streaming": True}
valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
return eval_dataloader
def evaluate(args):
model.eval()
losses = []
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
outputs = model(batch, labels=batch)
loss = outputs.loss.repeat(args.batch_size)
losses.append(accelerator.gather(loss))
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
loss = torch.mean(torch.cat(losses))
try:
perplexity = torch.exp(loss)
except OverflowError:
perplexity = float("inf")
return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 3,496 | 33.97 | 114 | py |
robust-transformers | robust-transformers-main/examples/research_projects/bertology/run_prune_gpt.py | #!/usr/bin/env python3
""" This script is adapted from the Bertology pruning code (https://github.com/huggingface/transformers/blob/783d7d2629e97c5f0c5f9ef01b8c66410275c204/examples/research_projects/bertology/run_bertology.py)
to prune GPT-like models. The author is @altsoph.
"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
# save results
if os.path.exists(dirpath):
if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
os.path.join(dirpath, "config.json")
):
os.remove(os.path.join(dirpath, "config.json"))
if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
os.path.join(dirpath, "pytorch_model.bin")
):
os.remove(os.path.join(dirpath, "pytorch_model.bin"))
else:
os.makedirs(dirpath)
model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
"""Compute the entropy of a probability distribution"""
exponent = 2
if unlogit:
p = torch.pow(p, exponent)
plogp = p * torch.log(p)
plogp[p == 0] = 0
return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
"""Print a 2D tensor"""
logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
for row in range(len(tensor)):
if tensor.dtype != torch.long:
logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
else:
logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
"""This method shows how to compute:
- head attention entropy
- head importance scores according to http://arxiv.org/abs/1905.10650
"""
# Prepare our tensors
n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
head_importance = torch.zeros(n_layers, n_heads).to(args.device)
attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
if head_mask is None:
head_mask = torch.ones(n_layers, n_heads).to(args.device)
head_mask.requires_grad_(requires_grad=True)
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
head_mask = None
tot_tokens = 0.0
total_loss = 0.0
for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
inputs = tuple(t.to(args.device) for t in inputs)
(input_ids,) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
# (loss), lm_logits, presents, (all hidden_states), (attentions)
loss, _, all_attentions = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(all_attentions):
masked_entropy = entropy(attn.detach(), True)
attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
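        # Head importance follows Michel et al.: accumulate the absolute gradient of the loss
        # with respect to the head mask, populated by the backward pass above.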
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
exponent = 2
norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
if not args.dont_normalize_global_importance:
head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies")
print_2d_tensor(attn_entropy)
if compute_importance:
logger.info("Head importance scores")
print_2d_tensor(head_importance)
logger.info("Head ranked by importance scores")
head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
head_importance.numel(), device=args.device
)
head_ranks = head_ranks.view_as(head_importance)
print_2d_tensor(head_ranks)
return attn_entropy, head_importance, total_loss
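# Note on the importance score (following Michel et al., 2019): with a gate xi_h
# multiplying each head and fixed at 1, a first-order estimate of the loss change
# from removing head h is |dL/dxi_h|. That is what head_mask.grad.abs() accumulates
# above, normalized by the number of tokens and, optionally, per layer and globally.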
def mask_heads(args, model, eval_dataloader):
"""This method shows how to mask head (set some heads to zero), to test the effect on the network,
based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
"""
_, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of a downstream score, use the LM loss
logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
new_head_mask = torch.ones_like(head_importance)
num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
current_score = original_score
while current_score >= original_score * args.masking_threshold:
head_mask = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
head_importance[head_mask == 0.0] = float("Inf")
current_heads_to_mask = head_importance.view(-1).sort()[1]
if len(current_heads_to_mask) <= num_to_mask:
print("BREAK BY num_to_mask")
break
# mask heads
current_heads_to_mask = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
new_head_mask = new_head_mask.view(-1)
new_head_mask[current_heads_to_mask] = 0.0
new_head_mask = new_head_mask.view_as(head_mask)
new_head_mask = new_head_mask.clone().detach()
print_2d_tensor(new_head_mask)
# Compute metric and head importance again
_, head_importance, loss = compute_heads_importance(
args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
)
current_score = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)",
current_score,
new_head_mask.sum(),
new_head_mask.sum() / new_head_mask.numel() * 100,
)
logger.info("Final head mask")
print_2d_tensor(head_mask)
np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
"""This method shows how to prune head (remove heads weights) based on
the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)
"""
# Try pruning and test time speedup
# Pruning is like masking but we actually remove the masked weights
before_time = datetime.now()
_, _, loss = compute_heads_importance(
args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
)
score_masking = 1 / loss
original_time = datetime.now() - before_time
original_num_params = sum(p.numel() for p in model.parameters())
heads_to_prune = dict(
(layer, (1 - head_mask[layer].long()).nonzero().squeeze().tolist()) for layer in range(len(head_mask))
)
for k, v in heads_to_prune.items():
if isinstance(v, int):
heads_to_prune[k] = [
v,
]
assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
model.prune_heads(heads_to_prune)
pruned_num_params = sum(p.numel() for p in model.parameters())
before_time = datetime.now()
_, _, loss = compute_heads_importance(
args,
model,
eval_dataloader,
compute_entropy=False,
compute_importance=False,
head_mask=None,
actually_pruned=True,
)
score_pruning = 1 / loss
new_time = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
original_num_params,
pruned_num_params,
pruned_num_params / original_num_params * 100,
)
logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
save_model(model, args.output_dir)
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--config_name",
default="",
type=str,
help="Pretrained config name or path if not the same as model_name_or_path",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name_or_path",
)
parser.add_argument(
"--cache_dir",
default=None,
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
)
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
)
parser.add_argument(
"--dont_normalize_global_importance",
action="store_true",
help="Don't normalize all importance scores between 0 and 1",
)
parser.add_argument(
"--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
)
parser.add_argument(
"--masking_threshold",
default=0.9,
type=float,
help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
)
parser.add_argument(
"--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
)
parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded.",
)
parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
args.device = torch.device("cuda", args.local_rank)
args.n_gpu = 1
torch.distributed.init_process_group(backend="nccl") # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device)
if args.local_rank != -1:
model = nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
elif args.n_gpu > 1:
model = nn.DataParallel(model)
# Print/save training arguments
os.makedirs(args.output_dir, exist_ok=True)
torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
logger.info("Training/evaluation parameters %s", args)
# Prepare dataset
numpy_data = np.concatenate(
[
np.loadtxt(args.data_dir, dtype=np.int64),
]
)
train_tensor_dataset = (torch.from_numpy(numpy_data),)
train_data = TensorDataset(*train_tensor_dataset)
train_sampler = RandomSampler(train_data)
eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
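    # Note (assumption about the expected input): np.loadtxt above requires args.data_dir
    # to be a plain-text file of token ids, one sequence per line, whitespace-separated,
    # with every line holding the same number of ids, e.g.
    #   50256 464 3290 ...   (hypothetical GPT-2 ids)
    # Each row is fed to the model as both input_ids and labels for the LM loss.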
# Compute head entropy and importance score
compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
head_mask = mask_heads(args, model, eval_dataloader)
prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
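# Example invocation (illustrative; the script name and paths are placeholders):
#   python this_script.py --model_name_or_path gpt2 --data_dir ./tokens.txt \
#       --output_dir ./gpt2_bertology_out --try_masking --masking_threshold 0.9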
| 15,469 | 38.666667 | 204 | py |
robust-transformers | robust-transformers-main/examples/research_projects/bertology/run_bertology.py | #!/usr/bin/env python3
# Copyright 2018 CMU and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Bertology: this script shows how you can explore the internals of the models in the library to:
- compute the entropy of the head attentions
- compute the importance of each head
- prune (remove) the low importance head.
Some parts of this script are adapted from the code of Michel et al. (http://arxiv.org/abs/1905.10650)
which is available at https://github.com/pmichel31415/are-16-heads-really-better-than-1
"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, SequentialSampler, Subset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
GlueDataset,
default_data_collator,
glue_compute_metrics,
glue_output_modes,
glue_processors,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def entropy(p):
"""Compute the entropy of a probability distribution"""
plogp = p * torch.log(p)
plogp[p == 0] = 0
return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
"""Print a 2D tensor"""
logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
for row in range(len(tensor)):
if tensor.dtype != torch.long:
logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
else:
logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
"""This method shows how to compute:
- head attention entropy
- head importance scores according to http://arxiv.org/abs/1905.10650
"""
# Prepare our tensors
n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
head_importance = torch.zeros(n_layers, n_heads).to(args.device)
attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
if head_mask is None:
head_mask = torch.ones(n_layers, n_heads).to(args.device)
head_mask.requires_grad_(requires_grad=True)
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
head_mask = None
preds = None
labels = None
tot_tokens = 0.0
for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
for k, v in inputs.items():
inputs[k] = v.to(args.device)
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
outputs = model(**inputs, head_mask=head_mask)
loss, logits, all_attentions = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
if compute_entropy:
for layer, attn in enumerate(all_attentions):
masked_entropy = entropy(attn.detach()) * inputs["attention_mask"].float().unsqueeze(1)
attn_entropy[layer] += masked_entropy.sum(-1).sum(0).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
# Also store our logits/labels if we want to compute metrics afterwards
if preds is None:
preds = logits.detach().cpu().numpy()
labels = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
labels = np.append(labels, inputs["labels"].detach().cpu().numpy(), axis=0)
tot_tokens += inputs["attention_mask"].float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
exponent = 2
norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
if not args.dont_normalize_global_importance:
head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print/save matrices
np.save(os.path.join(args.output_dir, "attn_entropy.npy"), attn_entropy.detach().cpu().numpy())
np.save(os.path.join(args.output_dir, "head_importance.npy"), head_importance.detach().cpu().numpy())
logger.info("Attention entropies")
print_2d_tensor(attn_entropy)
logger.info("Head importance scores")
print_2d_tensor(head_importance)
logger.info("Head ranked by importance scores")
head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
head_importance.numel(), device=args.device
)
head_ranks = head_ranks.view_as(head_importance)
print_2d_tensor(head_ranks)
return attn_entropy, head_importance, preds, labels
def mask_heads(args, model, eval_dataloader):
"""This method shows how to mask head (set some heads to zero), to test the effect on the network,
based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
"""
_, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
original_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]
logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
new_head_mask = torch.ones_like(head_importance)
num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
current_score = original_score
while current_score >= original_score * args.masking_threshold:
head_mask = new_head_mask.clone() # save current head mask
# heads from least important to most - keep only not-masked heads
head_importance[head_mask == 0.0] = float("Inf")
current_heads_to_mask = head_importance.view(-1).sort()[1]
if len(current_heads_to_mask) <= num_to_mask:
break
# mask heads
current_heads_to_mask = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
new_head_mask = new_head_mask.view(-1)
new_head_mask[current_heads_to_mask] = 0.0
new_head_mask = new_head_mask.view_as(head_mask)
new_head_mask = new_head_mask.clone().detach()
print_2d_tensor(new_head_mask)
# Compute metric and head importance again
_, head_importance, preds, labels = compute_heads_importance(
args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
)
preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
current_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)",
current_score,
new_head_mask.sum(),
new_head_mask.sum() / new_head_mask.numel() * 100,
)
logger.info("Final head mask")
print_2d_tensor(head_mask)
np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
"""This method shows how to prune head (remove heads weights) based on
the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)
"""
# Try pruning and test time speedup
# Pruning is like masking but we actually remove the masked weights
before_time = datetime.now()
_, _, preds, labels = compute_heads_importance(
args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
)
preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
score_masking = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]
original_time = datetime.now() - before_time
original_num_params = sum(p.numel() for p in model.parameters())
heads_to_prune = dict(
(layer, (1 - head_mask[layer].long()).nonzero().squeeze().tolist()) for layer in range(len(head_mask))
)
assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
model.prune_heads(heads_to_prune)
pruned_num_params = sum(p.numel() for p in model.parameters())
before_time = datetime.now()
_, _, preds, labels = compute_heads_importance(
args,
model,
eval_dataloader,
compute_entropy=False,
compute_importance=False,
head_mask=None,
actually_pruned=True,
)
preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
score_pruning = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]
new_time = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
original_num_params,
pruned_num_params,
pruned_num_params / original_num_params * 100,
)
logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
logger.info("Pruning: speed ratio (new timing / original timing): %f percents", original_time / new_time * 100)
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(glue_processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--config_name",
default="",
type=str,
help="Pretrained config name or path if not the same as model_name_or_path",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name_or_path",
)
parser.add_argument(
"--cache_dir",
default=None,
type=str,
help="Where do you want to store the pre-trained models downloaded from huggingface.co",
)
parser.add_argument(
"--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
)
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
)
parser.add_argument(
"--dont_normalize_global_importance",
action="store_true",
help="Don't normalize all importance scores between 0 and 1",
)
parser.add_argument(
"--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
)
parser.add_argument(
"--masking_threshold",
default=0.9,
type=float,
help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
)
parser.add_argument(
"--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
)
parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded.",
)
parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
args.device = torch.device("cuda", args.local_rank)
args.n_gpu = 1
torch.distributed.init_process_group(backend="nccl") # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set seeds
set_seed(args.seed)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in glue_processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = glue_processors[args.task_name]()
args.output_mode = glue_output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
output_attentions=True,
cache_dir=args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
cache_dir=args.cache_dir,
)
model = AutoModelForSequenceClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
)
# Distributed and parallel training
model.to(args.device)
if args.local_rank != -1:
model = nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
elif args.n_gpu > 1:
model = nn.DataParallel(model)
# Print/save training arguments
os.makedirs(args.output_dir, exist_ok=True)
torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
logger.info("Training/evaluation parameters %s", args)
# Prepare dataset for the GLUE task
eval_dataset = GlueDataset(args, tokenizer=tokenizer, mode="dev")
if args.data_subset > 0:
eval_dataset = Subset(eval_dataset, list(range(min(args.data_subset, len(eval_dataset)))))
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args.batch_size, collate_fn=default_data_collator
)
# Compute head entropy and importance score
compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
head_mask = mask_heads(args, model, eval_dataloader)
prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
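# Example invocation on a GLUE task (illustrative; paths are placeholders):
#   python run_bertology.py --model_name_or_path bert-base-cased --task_name mrpc \
#       --data_dir ./glue_data/MRPC --output_dir ./bertology_out \
#       --try_masking --masking_threshold 0.9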
| 18,572 | 40.181818 | 118 | py |
robust-transformers | robust-transformers-main/examples/research_projects/rag/use_own_knowledge_dataset.py | import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import torch
from datasets import Features, Sequence, Value, load_dataset
import faiss
from transformers import (
DPRContextEncoder,
DPRContextEncoderTokenizerFast,
HfArgumentParser,
RagRetriever,
RagSequenceForGeneration,
RagTokenizer,
)
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
"""Split the text every ``n``-th occurrence of ``character``"""
text = text.split(character)
return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
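# For instance (illustrative): split_text("a b c d e", n=2) -> ["a b", "c d", "e"]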
def split_documents(documents: dict) -> dict:
"""Split documents into passages"""
titles, texts = [], []
for title, text in zip(documents["title"], documents["text"]):
if text is not None:
for passage in split_text(text):
titles.append(title if title is not None else "")
texts.append(passage)
return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
"""Compute the DPR embeddings of document passages"""
input_ids = ctx_tokenizer(
documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
)["input_ids"]
embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
rag_example_args: "RagExampleArguments",
processing_args: "ProcessingArguments",
index_hnsw_args: "IndexHnswArguments",
):
######################################
logger.info("Step 1 - Create the dataset")
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
dataset = load_dataset(
"csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
)
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
# And compute the embeddings
ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
new_features = Features(
{"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
) # optional, save as float32 instead of float64 to save space
dataset = dataset.map(
partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
batched=True,
batch_size=processing_args.batch_size,
features=new_features,
)
# And finally save your dataset
passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset")
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
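    # d must match the dimensionality of the DPR embeddings (768 for the base context
    # encoders); m is the number of bi-directional links per HNSW node, trading index
    # size and build time for recall.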
dataset.add_faiss_index("embeddings", custom_index=index)
# And save the index
index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
######################################
logger.info("Step 3 - Load RAG")
######################################
# Easy way to load the model
retriever = RagRetriever.from_pretrained(
rag_example_args.rag_model_name, index_name="custom", indexed_dataset=dataset
)
model = RagSequenceForGeneration.from_pretrained(rag_example_args.rag_model_name, retriever=retriever)
tokenizer = RagTokenizer.from_pretrained(rag_example_args.rag_model_name)
# For distributed fine-tuning you'll need to provide the paths instead, as the dataset and the index are loaded separately.
# retriever = RagRetriever.from_pretrained(rag_model_name, index_name="custom", passages_path=passages_path, index_path=index_path)
######################################
logger.info("Step 4 - Have fun")
######################################
question = rag_example_args.question or "What does Moses' rod turn into ?"
input_ids = tokenizer.question_encoder(question, return_tensors="pt")["input_ids"]
generated = model.generate(input_ids)
generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)[0]
logger.info("Q: " + question)
logger.info("A: " + generated_string)
@dataclass
class RagExampleArguments:
csv_path: str = field(
default=str(Path(__file__).parent / "test_data" / "my_knowledge_dataset.csv"),
metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
)
question: Optional[str] = field(
default=None,
metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
)
rag_model_name: str = field(
default="facebook/rag-sequence-nq",
metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
)
dpr_ctx_encoder_model_name: str = field(
default="facebook/dpr-ctx_encoder-multiset-base",
metadata={
"help": "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or 'facebook/dpr-ctx_encoder-multiset-base'"
},
)
output_dir: Optional[str] = field(
default=None,
metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
)
@dataclass
class ProcessingArguments:
num_proc: Optional[int] = field(
default=None,
metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
},
)
batch_size: int = field(
default=16,
metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
},
)
@dataclass
class IndexHnswArguments:
d: int = field(
default=768,
metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
)
m: int = field(
default=128,
metadata={
"help": "The number of bi-directional links created for every new element during the HNSW index construction."
},
)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
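# Example invocation (illustrative; the csv must be tab-separated with "title" and "text" columns):
#   python use_own_knowledge_dataset.py --csv_path ./my_knowledge.csv --output_dir ./rag_index \
#       --question "What does Moses' rod turn into ?"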
| 8,174 | 38.878049 | 152 | py |
robust-transformers | robust-transformers-main/examples/research_projects/rag/utils_rag.py | import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
tokenizer.padding_side = padding_side
return tokenizer(
[line],
max_length=max_length,
padding="max_length" if pad_to_max_length else None,
truncation=True,
return_tensors=return_tensors,
add_special_tokens=True,
**extra_kw,
)
def trim_batch(
input_ids,
pad_token_id,
attention_mask=None,
):
"""Remove columns that are populated exclusively by pad_token_id"""
keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
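# Worked example (illustrative): with pad_token_id=1 and
#   input_ids = [[5, 3, 2, 1],
#                [6, 7, 1, 1]]
# only the last column is all padding, so the result has shape (2, 3).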
class Seq2SeqDataset(Dataset):
def __init__(
self,
tokenizer,
data_dir,
max_source_length,
max_target_length,
type_path="train",
n_obs=None,
src_lang=None,
tgt_lang=None,
prefix="",
):
super().__init__()
self.src_file = Path(data_dir).joinpath(type_path + ".source")
self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
self.src_lens = self.get_char_lens(self.src_file)
self.max_source_length = max_source_length
self.max_target_length = max_target_length
assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
self.tokenizer = tokenizer
self.prefix = prefix
if n_obs is not None:
self.src_lens = self.src_lens[:n_obs]
self.src_lang = src_lang
self.tgt_lang = tgt_lang
def __len__(self):
return len(self.src_lens)
def __getitem__(self, index) -> Dict[str, torch.Tensor]:
index = index + 1 # linecache starts at 1
source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
assert source_line, f"empty source line for index {index}"
assert tgt_line, f"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer, T5Tokenizer):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
source_tokenizer = (
self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
)
target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
source_ids = source_inputs["input_ids"].squeeze()
target_ids = target_inputs["input_ids"].squeeze()
src_mask = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def get_char_lens(data_file):
return [len(x) for x in Path(data_file).open().readlines()]
def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
input_ids = torch.stack([x["input_ids"] for x in batch])
masks = torch.stack([x["attention_mask"] for x in batch])
target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
tgt_pad_token_id = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer, RagTokenizer)
else self.tokenizer.pad_token_id
)
src_pad_token_id = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer, RagTokenizer)
else self.tokenizer.pad_token_id
)
y = trim_batch(target_ids, tgt_pad_token_id)
source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
batch = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
return [x for x in itertools.chain.from_iterable(summary_ids)]
def save_git_info(folder_path: str) -> None:
"""Save git information to output_dir/git_log.json"""
repo_infos = get_git_info()
save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
with open(path, "w") as f:
json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
with open(path) as f:
return json.load(f)
def get_git_info():
repo = git.Repo(search_parent_directories=True)
repo_infos = {
"repo_id": str(repo),
"repo_sha": str(repo.head.object.hexsha),
"repo_branch": str(repo.active_branch),
"hostname": str(socket.gethostname()),
}
return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
"""list(map(f, x))"""
return list(map(f, x))
def pickle_save(obj, path):
"""pickle.dump(obj, path)"""
with open(path, "wb") as f:
return pickle.dump(obj, f)
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
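# e.g. (illustrative): normalize_answer("The Cat, sat!") -> "cat sat"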
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
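# Worked example (illustrative): f1_score("The cat sat.", "cat sat down") compares
# tokens ["cat", "sat"] against ["cat", "sat", "down"]:
#   precision = 2/2, recall = 2/3, F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8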
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
assert len(output_lns) == len(reference_lns)
em = 0
for hypo, pred in zip(output_lns, reference_lns):
em += exact_match_score(hypo, pred)
if len(output_lns) > 0:
em /= len(output_lns)
return {"em": em}
def is_rag_model(model_prefix):
return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
equivalent_param = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
equivalent_param["dropout"] = "dropout_rate"
for p in extra_params:
if getattr(hparams, p, None):
if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
logger.info("config doesn't have a `{}` attribute".format(p))
delattr(hparams, p)
continue
set_p = p if hasattr(config, p) else equivalent_param[p]
setattr(config, set_p, getattr(hparams, p))
delattr(hparams, p)
return hparams, config
| 8,114 | 32.122449 | 118 | py |
robust-transformers | robust-transformers-main/examples/research_projects/rag/finetune_rag.py | """Finetuning script for RAG models. Adapted from examples.seq2seq.finetune.py"""
import argparse
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
import torch.distributed as dist
import torch.distributed as torch_distrib
from pytorch_lightning.plugins.training_type import DDPPlugin
from torch.utils.data import DataLoader
from transformers import (
AutoConfig,
AutoTokenizer,
BartForConditionalGeneration,
BatchEncoding,
RagConfig,
RagSequenceForGeneration,
RagTokenForGeneration,
RagTokenizer,
T5ForConditionalGeneration,
)
from transformers import logging as transformers_logging
from transformers.integrations import is_ray_available
if is_ray_available():
import ray
from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever
from callbacks_rag import ( # noqa: E402 # isort:skipq
get_checkpoint_callback,
get_early_stopping_callback,
Seq2SeqLoggingCallback,
)
from distributed_pytorch_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip
from utils_rag import ( # noqa: E402 # isort:skip
calculate_exact_match,
flatten_list,
get_git_info,
is_rag_model,
lmap,
pickle_save,
save_git_info,
save_json,
set_extra_model_params,
Seq2SeqDataset,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
transformers_logging.set_verbosity_info()
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
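# AttrDict exposes dict keys as attributes, so hparams restored from a Lightning
# checkpoint (a plain dict) can be read like an argparse Namespace, e.g.
#   AttrDict({"model_type": "rag_sequence"}).model_type -> "rag_sequence"   (illustrative)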
class CustomDDP(DDPPlugin):
def init_ddp_connection(self, global_rank=None, world_size=None) -> None:
module = self.model
global_rank = global_rank if global_rank is not None else self.cluster_environment.global_rank()
world_size = world_size if world_size is not None else self.cluster_environment.world_size()
os.environ["MASTER_ADDR"] = self.cluster_environment.master_address()
os.environ["MASTER_PORT"] = str(self.cluster_environment.master_port())
if not torch.distributed.is_initialized():
logger.info(f"initializing ddp: GLOBAL_RANK: {global_rank}, MEMBER: {global_rank + 1}/{world_size}")
torch_distrib.init_process_group(self.torch_distributed_backend, rank=global_rank, world_size=world_size)
if module.is_rag_model:
self.distributed_port = module.hparams.distributed_port
if module.distributed_retriever == "pytorch":
module.model.rag.retriever.init_retrieval(self.distributed_port)
elif module.distributed_retriever == "ray" and global_rank == 0:
# For the Ray retriever, only initialize it once when global
# rank is 0.
module.model.rag.retriever.init_retrieval()
class GenerativeQAModule(BaseTransformer):
mode = "generative_qa"
loss_names = ["loss"]
metric_names = ["em"]
val_metric = "em"
def __init__(self, hparams, **kwargs):
# when loading from a pytorch lightning checkpoint, hparams are passed as dict
if isinstance(hparams, dict):
hparams = AttrDict(hparams)
if hparams.model_type == "rag_sequence":
self.model_class = RagSequenceForGeneration
elif hparams.model_type == "rag_token":
self.model_class = RagTokenForGeneration
elif hparams.model_type == "bart":
self.model_class = BartForConditionalGeneration
else:
self.model_class = T5ForConditionalGeneration
self.is_rag_model = is_rag_model(hparams.model_type)
config_class = RagConfig if self.is_rag_model else AutoConfig
config = config_class.from_pretrained(hparams.model_name_or_path)
# set retriever parameters
config.index_name = hparams.index_name or config.index_name
config.passages_path = hparams.passages_path or config.passages_path
config.index_path = hparams.index_path or config.index_path
config.use_dummy_dataset = hparams.use_dummy_dataset
# set extra_model_params for generator configs and load_model
extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "attention_dropout", "dropout")
if self.is_rag_model:
if hparams.prefix is not None:
config.generator.prefix = hparams.prefix
config.label_smoothing = hparams.label_smoothing
hparams, config.generator = set_extra_model_params(extra_model_params, hparams, config.generator)
if hparams.distributed_retriever == "pytorch":
retriever = RagPyTorchDistributedRetriever.from_pretrained(hparams.model_name_or_path, config=config)
elif hparams.distributed_retriever == "ray":
# The Ray retriever needs the handles to the retriever actors.
retriever = RagRayDistributedRetriever.from_pretrained(
hparams.model_name_or_path, hparams.actor_handles, config=config
)
model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config, retriever=retriever)
prefix = config.question_encoder.prefix
else:
if hparams.prefix is not None:
config.prefix = hparams.prefix
hparams, config = set_extra_model_params(extra_model_params, hparams, config)
model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config)
prefix = config.prefix
tokenizer = (
RagTokenizer.from_pretrained(hparams.model_name_or_path)
if self.is_rag_model
else AutoTokenizer.from_pretrained(hparams.model_name_or_path)
)
super().__init__(hparams, config=config, tokenizer=tokenizer, model=model)
save_git_info(self.hparams.output_dir)
self.output_dir = Path(self.hparams.output_dir)
self.metrics_save_path = Path(self.output_dir) / "metrics.json"
self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
pickle_save(self.hparams, self.hparams_save_path)
self.step_count = 0
self.metrics = defaultdict(list)
self.dataset_kwargs: dict = dict(
data_dir=self.hparams.data_dir,
max_source_length=self.hparams.max_source_length,
prefix=prefix or "",
)
n_observations_per_split = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
self.target_lens = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
self.hparams.git_sha = get_git_info()["repo_sha"]
self.num_workers = hparams.num_workers
self.distributed_port = self.hparams.distributed_port
# For single GPU training, init_ddp_connection is not called.
# So we need to initialize the retrievers here.
if hparams.gpus <= 1:
if hparams.distributed_retriever == "ray":
self.model.retriever.init_retrieval()
elif hparams.distributed_retriever == "pytorch":
self.model.retriever.init_retrieval(self.distributed_port)
self.distributed_retriever = hparams.distributed_retriever
def forward(self, input_ids, **kwargs):
return self.model(input_ids, **kwargs)
def ids_to_clean_text(self, generated_ids: List[int]):
gen_text = self.tokenizer.batch_decode(
generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
return lmap(str.strip, gen_text)
def _step(self, batch: dict) -> Tuple:
source_ids, source_mask, target_ids = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"]
rag_kwargs = {}
if isinstance(self.model, T5ForConditionalGeneration):
decoder_input_ids = self.model._shift_right(target_ids)
lm_labels = target_ids
elif isinstance(self.model, BartForConditionalGeneration):
decoder_input_ids = target_ids[:, :-1].contiguous()
lm_labels = target_ids[:, 1:].clone()
else:
assert self.is_rag_model
generator = self.model.rag.generator
if isinstance(generator, T5ForConditionalGeneration):
decoder_start_token_id = generator.config.decoder_start_token_id
decoder_input_ids = (
torch.cat(
[torch.tensor([[decoder_start_token_id]] * target_ids.shape[0]).to(target_ids), target_ids],
dim=1,
)
if target_ids.shape[0] < self.target_lens["train"]
else generator._shift_right(target_ids)
)
elif isinstance(generator, BartForConditionalGeneration):
decoder_input_ids = target_ids
lm_labels = decoder_input_ids
rag_kwargs["reduce_loss"] = True
assert decoder_input_ids is not None
outputs = self(
source_ids,
attention_mask=source_mask,
decoder_input_ids=decoder_input_ids,
use_cache=False,
labels=lm_labels,
**rag_kwargs,
)
loss = outputs["loss"]
return (loss,)
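    # Teacher-forcing sketch for the BART-style branch above: given targets
    # [bos, y1, ..., yn, eos], the decoder sees target[:, :-1] and the loss is taken
    # against target[:, 1:], so each position predicts the next target token. T5
    # instead prepends decoder_start_token_id via _shift_right and keeps the
    # unshifted targets as labels.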
@property
def pad(self) -> int:
raise NotImplementedError("pad not implemented")
def training_step(self, batch, batch_idx) -> Dict:
loss_tensors = self._step(batch)
logs = {name: loss.detach() for name, loss in zip(self.loss_names, loss_tensors)}
# tokens per batch
tgt_pad_token_id = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer, RagTokenizer)
else self.tokenizer.pad_token_id
)
src_pad_token_id = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer, RagTokenizer)
else self.tokenizer.pad_token_id
)
logs["tpb"] = (
batch["input_ids"].ne(src_pad_token_id).sum() + batch["decoder_input_ids"].ne(tgt_pad_token_id).sum()
)
return {"loss": loss_tensors[0], "log": logs}
def validation_step(self, batch, batch_idx) -> Dict:
return self._generative_step(batch)
def validation_epoch_end(self, outputs, prefix="val") -> Dict:
self.step_count += 1
losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
loss = losses["loss"]
gen_metrics = {
k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
metrics_tensor: torch.FloatTensor = torch.tensor(gen_metrics[self.val_metric]).type_as(loss)
gen_metrics.update({k: v.item() for k, v in losses.items()})
# fix for https://github.com/PyTorchLightning/pytorch-lightning/issues/2424
if dist.is_initialized():
dist.all_reduce(metrics_tensor, op=dist.ReduceOp.SUM)
metrics_tensor = metrics_tensor / dist.get_world_size()
gen_metrics.update({self.val_metric: metrics_tensor.item()})
losses.update(gen_metrics)
metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
metrics["step_count"] = self.step_count
self.save_metrics(metrics, prefix) # writes to self.metrics_save_path
preds = flatten_list([x["preds"] for x in outputs])
return {"log": metrics, "preds": preds, f"{prefix}_loss": loss, f"{prefix}_{self.val_metric}": metrics_tensor}
def save_metrics(self, latest_metrics, type_path) -> None:
self.metrics[type_path].append(latest_metrics)
save_json(self.metrics, self.metrics_save_path)
def calc_generative_metrics(self, preds, target) -> Dict:
return calculate_exact_match(preds, target)
def _generative_step(self, batch: dict) -> dict:
start_time = time.time()
batch = BatchEncoding(batch).to(device=self.model.device)
generated_ids = self.model.generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
do_deduplication=False, # rag specific parameter
use_cache=True,
min_length=1,
max_length=self.target_lens["val"],
)
gen_time = (time.time() - start_time) / batch["input_ids"].shape[0]
preds: List[str] = self.ids_to_clean_text(generated_ids)
target: List[str] = self.ids_to_clean_text(batch["decoder_input_ids"])
loss_tensors = self._step(batch)
base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
gen_metrics: Dict = self.calc_generative_metrics(preds, target)
summ_len = np.mean(lmap(len, generated_ids))
base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **gen_metrics)
return base_metrics
def test_step(self, batch, batch_idx):
return self._generative_step(batch)
def test_epoch_end(self, outputs):
return self.validation_epoch_end(outputs, prefix="test")
def get_dataset(self, type_path) -> Seq2SeqDataset:
n_obs = self.n_obs[type_path]
max_target_length = self.target_lens[type_path]
dataset = Seq2SeqDataset(
self.tokenizer,
type_path=type_path,
n_obs=n_obs,
max_target_length=max_target_length,
**self.dataset_kwargs,
)
return dataset
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
dataset = self.get_dataset(type_path)
dataloader = DataLoader(
dataset,
batch_size=batch_size,
collate_fn=dataset.collate_fn,
shuffle=shuffle,
num_workers=self.num_workers,
)
return dataloader
def train_dataloader(self) -> DataLoader:
dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
return dataloader
def val_dataloader(self) -> DataLoader:
return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)
def test_dataloader(self) -> DataLoader:
return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
save_path = self.output_dir.joinpath("checkpoint{}".format(self.step_count))
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
@staticmethod
def add_model_specific_args(parser, root_dir):
BaseTransformer.add_model_specific_args(parser, root_dir)
add_generic_args(parser, root_dir)
parser.add_argument(
"--max_source_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_target_length",
default=25,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--val_max_target_length",
default=25,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--test_max_target_length",
default=25,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
parser.add_argument("--n_val", type=int, default=-1, required=False, help="# examples. -1 means use all.")
parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
parser.add_argument(
"--prefix",
type=str,
default=None,
help="Prefix added at the beginning of each text, typically used with T5-based models.",
)
parser.add_argument(
"--early_stopping_patience",
type=int,
default=-1,
required=False,
help="-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will effect it.",
)
parser.add_argument(
"--distributed-port", type=int, default=-1, required=False, help="Port number for distributed training."
)
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token", "bart", "t5"],
type=str,
help="RAG model type: sequence or token, if none specified, the type is inferred from the model_name_or_path",
)
return parser
@staticmethod
def add_retriever_specific_args(parser):
parser.add_argument(
"--index_name",
type=str,
default=None,
help="Name of the index to use: 'hf' for a canonical dataset from the datasets library (default), 'custom' for a local index, or 'legacy' for the orignal one)",
)
parser.add_argument(
"--passages_path",
type=str,
default=None,
help="Path to the dataset of passages for custom index. More info about custom indexes in the RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`",
)
parser.add_argument(
"--index_path",
type=str,
default=None,
help="Path to the faiss index for custom index. More info about custom indexes in the RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`",
)
parser.add_argument(
"--distributed_retriever",
choices=["ray", "pytorch"],
type=str,
default="pytorch",
help="What implementation to use for distributed retriever? If "
"pytorch is selected, the index is loaded on training "
"worker 0, and torch.distributed is used to handle "
"communication between training worker 0, and the other "
"training workers. If ray is selected, the Ray library is "
"used to create load the index on separate processes, "
"and Ray handles the communication between the training "
"workers and the retrieval actors.",
)
parser.add_argument(
"--use_dummy_dataset",
type=bool,
default=False,
help="Whether to use the dummy version of the dataset index. More info about custom indexes in the RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`",
)
return parser
@staticmethod
def add_ray_specific_args(parser):
# Ray cluster address.
parser.add_argument(
"--ray-address",
default="auto",
type=str,
help="The address of the Ray cluster to connect to. If not "
"specified, Ray will attempt to automatically detect the "
"cluster. Has no effect if pytorch is used as the distributed "
"retriever.",
)
parser.add_argument(
"--num_retrieval_workers",
type=int,
default=1,
            help="The number of retrieval actors to use when Ray is selected "
"for the distributed retriever. Has no effect when "
"distributed_retriever is set to pytorch.",
)
return parser
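# A hedged usage sketch for the retriever and Ray flags defined above. The script name
# (finetune_rag.py) is inferred from the accompanying tests, and every path, model id
# and address below is a placeholder assumption rather than a value shipped with this
# example:
#
#   python finetune_rag.py \
#       --model_name_or_path facebook/rag-sequence-base \
#       --data_dir path/to/data --output_dir path/to/output \
#       --index_name custom \
#       --passages_path path/to/my_knowledge_dataset \
#       --index_path path/to/my_index.faiss \
#       --distributed_retriever ray \
#       --ray-address auto \
#       --num_retrieval_workers 4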
def main(args=None, model=None) -> GenerativeQAModule:
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd())
parser = GenerativeQAModule.add_retriever_specific_args(parser)
args = args or parser.parse_args()
Path(args.output_dir).mkdir(exist_ok=True)
named_actors = []
if args.distributed_retriever == "ray" and args.gpus > 1:
if not is_ray_available():
raise RuntimeError("Please install Ray to use the Ray " "distributed retriever.")
# Connect to an existing Ray cluster.
try:
ray.init(address=args.ray_address, namespace="rag")
except (ConnectionError, ValueError):
            logger.warning(
                "Connection to Ray cluster failed. Make sure a Ray "
                "cluster is running by either using Ray's cluster "
"launcher (`ray up`) or by manually starting Ray on "
"each node via `ray start --head` for the head node "
"and `ray start --address='<ip address>:6379'` for "
"additional nodes. See "
"https://docs.ray.io/en/master/cluster/index.html "
"for more info."
)
raise
# Create Ray actors only for rank 0.
if ("LOCAL_RANK" not in os.environ or int(os.environ["LOCAL_RANK"]) == 0) and (
"NODE_RANK" not in os.environ or int(os.environ["NODE_RANK"]) == 0
):
remote_cls = ray.remote(RayRetriever)
named_actors = [
remote_cls.options(name="retrieval_worker_{}".format(i)).remote()
for i in range(args.num_retrieval_workers)
]
else:
logger.info(
"Getting named actors for NODE_RANK {}, LOCAL_RANK {}".format(
os.environ["NODE_RANK"], os.environ["LOCAL_RANK"]
)
)
named_actors = [ray.get_actor("retrieval_worker_{}".format(i)) for i in range(args.num_retrieval_workers)]
args.actor_handles = named_actors
assert args.actor_handles == named_actors
if model is None:
model: GenerativeQAModule = GenerativeQAModule(args)
dataset = Path(args.data_dir).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir).startswith("/tmp")
or str(args.output_dir).startswith("/var")
):
training_logger = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
project = os.environ.get("WANDB_PROJECT", dataset)
training_logger = WandbLogger(name=model.output_dir.name, project=project)
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
training_logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
es_callback = (
get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
if args.early_stopping_patience >= 0
else False
)
trainer: pl.Trainer = generic_train(
model,
args,
logging_callback=Seq2SeqLoggingCallback(),
checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric),
early_stopping_callback=es_callback,
logger=training_logger,
custom_ddp_plugin=CustomDDP() if args.gpus > 1 else None,
profiler=pl.profiler.AdvancedProfiler() if args.profile else None,
)
pickle_save(model.hparams, model.output_dir / "hparams.pkl")
if not args.do_predict:
return model
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd())
parser = GenerativeQAModule.add_retriever_specific_args(parser)
parser = GenerativeQAModule.add_ray_specific_args(parser)
# Pytorch Lightning Profiler
parser.add_argument(
"--profile",
action="store_true",
help="If True, use pytorch_lightning.profiler.AdvancedProfiler to profile the Trainer.",
)
args = parser.parse_args()
main(args)
| 25,623 | 40.462783 | 197 | py |
robust-transformers | robust-transformers-main/examples/research_projects/rag/distributed_pytorch_retriever.py | import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
"""
    A distributed retriever built on top of the ``torch.distributed`` communication package. During training, all workers
    initialize their own instance of the retriever; however, only the main worker loads the index into memory. The index is stored
    in CPU memory. The index also works well in a non-distributed setup.
Args:
config (:class:`~transformers.RagConfig`):
The configuration of the RAG model this Retriever is used with. Contains parameters indicating which ``Index`` to build.
question_encoder_tokenizer (:class:`~transformers.PreTrainedTokenizer`):
The tokenizer that was used to tokenize the question.
It is used to decode the question and then use the generator_tokenizer.
generator_tokenizer (:class:`~transformers.PreTrainedTokenizer`):
The tokenizer used for the generator part of the RagModel.
index (:class:`~transformers.models.rag.retrieval_rag.Index`, optional, defaults to the one defined by the configuration):
If specified, use this index instead of the one built using the configuration
"""
def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
super().__init__(
config,
question_encoder_tokenizer=question_encoder_tokenizer,
generator_tokenizer=generator_tokenizer,
index=index,
init_retrieval=False,
)
self.process_group = None
def init_retrieval(self, distributed_port: int):
"""
Retriever initialization function, needs to be called from the training process. The function sets some common parameters
and environment variables. On top of that, (only) the main process in the process group loads the index into memory.
Args:
distributed_port (:obj:`int`):
The port on which the main communication of the training run is carried out. We set the port for retrieval-related
communication as ``distributed_port + 1``.
"""
logger.info("initializing retrieval")
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized")
# needs to be set manually
os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
# avoid clash with the NCCL port
os.environ["MASTER_PORT"] = str(distributed_port + 1)
self.process_group = dist.new_group(ranks=None, backend="gloo")
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main")
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group)
def _is_main(self):
return dist.get_rank(group=self.process_group) == 0
def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
target_tensor = torch.empty(target_shape, dtype=target_type)
dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
return target_tensor
def _infer_socket_ifname(self):
addrs = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
ifname = next((addr for addr in addrs if addr.startswith("e")), None)
return ifname
def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
"""
        Retrieves documents for specified ``question_hidden_states``. The main process, which has access to the index stored in memory, gathers queries
from all the processes in the main training process group, performs the retrieval and scatters back the results.
Args:
question_hidden_states (:obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`):
A batch of query vectors to retrieve with.
n_docs (:obj:`int`):
The number of docs retrieved per query.
Output:
            retrieved_doc_embeds (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs, dim)`):
                The retrieval embeddings of the retrieved docs per query.
            doc_ids (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs)`):
                The ids of the documents in the index.
            doc_dicts (:obj:`List[dict]`):
                The retrieved document dictionaries (id, title, text, embeddings) per query.
"""
# single GPU training
if not dist.is_initialized():
doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
# distributed training
world_size = dist.get_world_size(group=self.process_group)
# gather logic
gather_list = None
if self._is_main():
gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
# scatter logic
n_queries = question_hidden_states.shape[0]
scatter_ids = []
scatter_vectors = []
if self._is_main():
assert len(gather_list) == world_size
ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
ids, vectors = torch.tensor(ids), torch.tensor(vectors)
scatter_ids = self._chunk_tensor(ids, n_queries)
scatter_vectors = self._chunk_tensor(vectors, n_queries)
doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
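# A minimal construction sketch (kept commented out so nothing runs at import time;
# the model and tokenizer identifiers and the port are placeholder assumptions). It
# mirrors how the accompanying tests build the retriever: instantiate it with a
# RagConfig plus the two tokenizers, then have the training process call
# init_retrieval() so that only the main worker loads the index into memory.
#
# from transformers import BartTokenizer, DPRQuestionEncoderTokenizer, RagConfig
#
# config = RagConfig.from_pretrained("facebook/rag-sequence-base")
# retriever = RagPyTorchDistributedRetriever(
#     config,
#     question_encoder_tokenizer=DPRQuestionEncoderTokenizer.from_pretrained(
#         "facebook/dpr-question_encoder-single-nq-base"
#     ),
#     generator_tokenizer=BartTokenizer.from_pretrained("facebook/bart-large"),
# )
# retriever.init_retrieval(distributed_port=12345)  # retrieval traffic uses distributed_port + 1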
| 6,539 | 46.05036 | 155 | py |
robust-transformers | robust-transformers-main/examples/research_projects/rag/test_distributed_retriever.py | import json
import os
import shutil
import sys
import tempfile
import unittest
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
import faiss
from transformers import BartConfig, BartTokenizer, DPRConfig, DPRQuestionEncoderTokenizer, RagConfig
from transformers.file_utils import is_datasets_available, is_faiss_available, is_psutil_available, is_torch_available
from transformers.integrations import is_ray_available
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_ray
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
if is_torch_available():
from distributed_pytorch_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip
else:
RagPyTorchDistributedRetriever = None
if is_ray_available():
import ray # noqa: E402 # isort:skip
from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever # noqa: E402 # isort:skip
else:
ray = None
RagRayDistributedRetriever = None
RayRetriever = None
def require_distributed_retrieval(test_case):
"""
    Decorator marking a test that requires a set of dependencies necessary to perform retrieval with
:class:`~transformers.RagRetriever`.
These tests are skipped when respective libraries are not installed.
"""
if not (is_datasets_available() and is_faiss_available() and is_psutil_available()):
test_case = unittest.skip("test requires Datasets, Faiss, psutil")(test_case)
return test_case
@require_distributed_retrieval
class RagRetrieverTest(TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
self.retrieval_vector_size = 8
# DPR tok
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
os.makedirs(dpr_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
os.makedirs(bart_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
def get_bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def get_dummy_dataset(self):
dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
}
)
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
return dataset
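    # Worked example of what this dummy index returns: with retrieval_vector_size = 8,
    # an all-ones query has inner product 8 with the "0"/"Foo" row (ones) and 16 with
    # the "1"/"Bar" row (2 * ones), so maximum inner product picks doc "1"; an
    # all-minus-ones query scores -8 vs -16 and therefore picks doc "0". This is
    # exactly what distributed_retriever_check() asserts below.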
def get_dummy_pytorch_distributed_retriever(
self, init_retrieval: bool, port=12345
) -> RagPyTorchDistributedRetriever:
dataset = self.get_dummy_dataset()
config = RagConfig(
retrieval_vector_size=self.retrieval_vector_size,
question_encoder=DPRConfig().to_dict(),
generator=BartConfig().to_dict(),
)
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
mock_load_dataset.return_value = dataset
retriever = RagPyTorchDistributedRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
)
if init_retrieval:
retriever.init_retrieval(port)
return retriever
def get_dummy_ray_distributed_retriever(self, init_retrieval: bool) -> RagRayDistributedRetriever:
# Have to run in local mode because sys.path modifications at top of
# file are not propogated to remote workers.
# https://stackoverflow.com/questions/54338013/parallel-import-a-python-file-from-sibling-folder
ray.init(local_mode=True)
config = RagConfig(
retrieval_vector_size=self.retrieval_vector_size,
question_encoder=DPRConfig().to_dict(),
generator=BartConfig().to_dict(),
)
remote_cls = ray.remote(RayRetriever)
workers = [remote_cls.remote() for _ in range(1)]
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
mock_load_dataset.return_value = self.get_dummy_dataset()
retriever = RagRayDistributedRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
retrieval_workers=workers,
)
if init_retrieval:
retriever.init_retrieval()
return retriever
def get_dummy_custom_hf_index_pytorch_retriever(self, init_retrieval: bool, from_disk: bool, port=12345):
dataset = self.get_dummy_dataset()
config = RagConfig(
retrieval_vector_size=self.retrieval_vector_size,
question_encoder=DPRConfig().to_dict(),
generator=BartConfig().to_dict(),
index_name="custom",
)
if from_disk:
config.passages_path = os.path.join(self.tmpdirname, "dataset")
config.index_path = os.path.join(self.tmpdirname, "index.faiss")
dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
dataset.drop_index("embeddings")
dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
del dataset
retriever = RagPyTorchDistributedRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
)
else:
retriever = RagPyTorchDistributedRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
index=CustomHFIndex(config.retrieval_vector_size, dataset),
)
if init_retrieval:
retriever.init_retrieval(port)
return retriever
def get_dummy_custom_hf_index_ray_retriever(self, init_retrieval: bool, from_disk: bool):
# Have to run in local mode because sys.path modifications at top of
# file are not propogated to remote workers.
# https://stackoverflow.com/questions/54338013/parallel-import-a-python-file-from-sibling-folder
ray.init(local_mode=True)
dataset = self.get_dummy_dataset()
config = RagConfig(
retrieval_vector_size=self.retrieval_vector_size,
question_encoder=DPRConfig().to_dict(),
generator=BartConfig().to_dict(),
index_name="custom",
)
remote_cls = ray.remote(RayRetriever)
workers = [remote_cls.remote() for _ in range(1)]
if from_disk:
config.passages_path = os.path.join(self.tmpdirname, "dataset")
config.index_path = os.path.join(self.tmpdirname, "index.faiss")
dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
dataset.drop_index("embeddings")
dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
del dataset
retriever = RagRayDistributedRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
retrieval_workers=workers,
index=CustomHFIndex.load_from_disk(
vector_size=config.retrieval_vector_size,
dataset_path=config.passages_path,
index_path=config.index_path,
),
)
else:
retriever = RagRayDistributedRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
retrieval_workers=workers,
index=CustomHFIndex(config.retrieval_vector_size, dataset),
)
if init_retrieval:
retriever.init_retrieval()
return retriever
def distributed_retriever_check(self, retriever: RagRetriever, hidden_states: np.array, n_docs: int) -> None:
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(doc_dicts), 2)
self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def test_pytorch_distributed_retriever_retrieve(self):
n_docs = 1
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
self.distributed_retriever_check(
self.get_dummy_pytorch_distributed_retriever(init_retrieval=True), hidden_states, n_docs
)
def test_custom_hf_index_pytorch_retriever_retrieve(self):
n_docs = 1
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
self.distributed_retriever_check(
self.get_dummy_custom_hf_index_pytorch_retriever(init_retrieval=True, from_disk=False),
hidden_states,
n_docs,
)
def test_custom_pytorch_distributed_retriever_retrieve_from_disk(self):
n_docs = 1
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
self.distributed_retriever_check(
self.get_dummy_custom_hf_index_pytorch_retriever(init_retrieval=True, from_disk=True),
hidden_states,
n_docs,
)
@require_ray
def test_ray_distributed_retriever_retrieve(self):
n_docs = 1
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
self.distributed_retriever_check(
self.get_dummy_ray_distributed_retriever(init_retrieval=True), hidden_states, n_docs
)
ray.shutdown()
@require_ray
def test_custom_hf_index_ray_retriever_retrieve(self):
n_docs = 1
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
with self.assertRaises(ValueError):
self.distributed_retriever_check(
self.get_dummy_custom_hf_index_ray_retriever(init_retrieval=True, from_disk=False),
hidden_states,
n_docs,
)
ray.shutdown()
@require_ray
def test_custom_ray_distributed_retriever_retrieve_from_disk(self):
n_docs = 1
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
self.distributed_retriever_check(
self.get_dummy_custom_hf_index_ray_retriever(init_retrieval=True, from_disk=True), hidden_states, n_docs
)
ray.shutdown()
| 13,794 | 39.693215 | 118 | py |
robust-transformers | robust-transformers-main/examples/research_projects/rag/eval_rag.py | """ Evaluation script for RAG models."""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
hypos = [line.strip() for line in open(preds_path, "r").readlines()]
answers = []
if args.gold_data_mode == "qa":
data = pd.read_csv(gold_data_path, sep="\t", header=None)
for answer_list in data[1]:
ground_truths = ast.literal_eval(answer_list)
answers.append(ground_truths)
else:
references = [line.strip() for line in open(gold_data_path, "r").readlines()]
answers = [[reference] for reference in references]
f1 = em = total = 0
for prediction, ground_truths in zip(hypos, answers):
total += 1
em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
em = 100.0 * em / total
f1 = 100.0 * f1 / total
logger.info(f"F1: {f1:.2f}")
logger.info(f"EM: {em:.2f}")
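# Worked aggregation example (illustrative numbers): with 4 predictions of which 3
# exactly match one of their gold answers, em = 100.0 * 3 / 4 = 75.00; f1 is
# aggregated the same way, except that each example contributes the maximum F1 over
# its gold answers instead of a 0/1 match.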
def get_precision_at_k(args, preds_path, gold_data_path):
k = args.k
hypos = [line.strip() for line in open(preds_path, "r").readlines()]
references = [line.strip() for line in open(gold_data_path, "r").readlines()]
em = total = 0
for hypo, reference in zip(hypos, references):
hypo_provenance = set(hypo.split("\t")[:k])
ref_provenance = set(reference.split("\t"))
total += 1
em += len(hypo_provenance & ref_provenance) / k
em = 100.0 * em / total
logger.info(f"Precision@{k}: {em: .2f}")
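# Worked example for the precision@k computation above (illustrative strings only):
# with k = 2, a prediction line "Foo\tBar\tBaz" yields hypo_provenance = {"Foo", "Bar"}
# and a gold line "Bar\tQux" yields ref_provenance = {"Bar", "Qux"}; the overlap is
# {"Bar"}, so this example contributes 1 / 2 = 0.5. The final score is the mean over
# all examples, multiplied by 100.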
def evaluate_batch_retrieval(args, rag_model, questions):
def strip_title(title):
if title.startswith('"'):
title = title[1:]
if title.endswith('"'):
title = title[:-1]
return title
retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
questions,
return_tensors="pt",
padding=True,
truncation=True,
)["input_ids"].to(args.device)
question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
question_enc_pool_output = question_enc_outputs[0]
result = rag_model.retriever(
retriever_input_ids,
question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
prefix=rag_model.rag.generator.config.prefix,
n_docs=rag_model.config.n_docs,
return_tensors="pt",
)
all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
provenance_strings = []
for docs in all_docs:
provenance = [strip_title(title) for title in docs["title"]]
provenance_strings.append("\t".join(provenance))
return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
with torch.no_grad():
inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
questions, return_tensors="pt", padding=True, truncation=True
)
input_ids = inputs_dict.input_ids.to(args.device)
attention_mask = inputs_dict.attention_mask.to(args.device)
outputs = rag_model.generate( # rag_model overwrites generate
input_ids,
attention_mask=attention_mask,
num_beams=args.num_beams,
min_length=args.min_length,
max_length=args.max_length,
early_stopping=False,
num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens, don't allow it to generate more than one
)
answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)
if args.print_predictions:
for q, a in zip(questions, answers):
logger.info("Q: {} - A: {}".format(q, a))
return answers
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token", "bart"],
type=str,
help="RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the model_name_or_path",
)
parser.add_argument(
"--index_name",
default=None,
choices=["exact", "compressed", "legacy"],
type=str,
help="RAG model retriever type",
)
parser.add_argument(
"--index_path",
default=None,
type=str,
help="Path to the retrieval index",
)
parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
)
parser.add_argument(
"--eval_mode",
choices=["e2e", "retrieval"],
default="e2e",
type=str,
help="Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates precision@k.",
)
parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
parser.add_argument(
"--evaluation_set",
default=None,
type=str,
required=True,
help="Path to a file containing evaluation samples",
)
parser.add_argument(
"--gold_data_path",
default=None,
type=str,
required=True,
help="Path to a tab-separated file with gold samples",
)
parser.add_argument(
"--gold_data_mode",
default="qa",
type=str,
choices=["qa", "ans"],
        help="Format of the gold data file. "
        "qa - a single line in the following format: question [tab] answer_list. "
        "ans - a single line of the gold file contains the expected answer string",
)
parser.add_argument(
"--predictions_path",
type=str,
default="predictions.txt",
help="Name of the predictions file, to be stored in the checkpoints directory",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number",
)
parser.add_argument(
"--eval_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--recalculate",
help="Recalculate predictions even if the prediction file exists",
action="store_true",
)
parser.add_argument(
"--num_beams",
default=4,
type=int,
help="Number of beams to be used when generating answers",
)
parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
parser.add_argument(
"--print_predictions",
action="store_true",
help="If True, prints predictions while evaluating.",
)
parser.add_argument(
"--print_docs",
action="store_true",
        help="If True, prints docs retrieved while generating.",
)
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return args
def main(args):
model_kwargs = {}
if args.model_type is None:
args.model_type = infer_model_type(args.model_name_or_path)
assert args.model_type is not None
if args.model_type.startswith("rag"):
model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
model_kwargs["n_docs"] = args.n_docs
if args.index_name is not None:
model_kwargs["index_name"] = args.index_name
if args.index_path is not None:
model_kwargs["index_path"] = args.index_path
else:
model_class = BartForConditionalGeneration
checkpoints = (
[f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s", checkpoints)
score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
score_fn(args, args.predictions_path, args.gold_data_path)
continue
logger.info("***** Running evaluation for {} *****".format(checkpoint))
logger.info(" Batch size = %d", args.eval_batch_size)
logger.info(" Predictions will be stored under {}".format(args.predictions_path))
if args.model_type.startswith("rag"):
retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
model.retriever.init_retrieval()
else:
model = model_class.from_pretrained(checkpoint, **model_kwargs)
model.to(args.device)
with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
questions = []
for line in tqdm(eval_file):
questions.append(line.strip())
if len(questions) == args.eval_batch_size:
answers = evaluate_batch_fn(args, model, questions)
preds_file.write("\n".join(answers) + "\n")
preds_file.flush()
questions = []
if len(questions) > 0:
answers = evaluate_batch_fn(args, model, questions)
preds_file.write("\n".join(answers))
preds_file.flush()
score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
args = get_args()
main(args)
| 11,101 | 34.469649 | 132 | py |
robust-transformers | robust-transformers-main/examples/research_projects/rag/lightning_base.py | import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeq2SeqLM,
"translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
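# For example, passing --lr_scheduler polynomial on the command line makes
# get_lr_scheduler() below look up get_polynomial_decay_schedule_with_warmup in this
# mapping and build it with the configured warmup_steps and the total_steps() estimate.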
class BaseTransformer(pl.LightningModule):
def __init__(
self,
hparams: argparse.Namespace,
num_labels=None,
mode="base",
config=None,
tokenizer=None,
model=None,
**config_kwargs
):
"""Initialize a model, tokenizer and config."""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(hparams)
self.step_count = 0
self.output_dir = Path(self.hparams.output_dir)
cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
self.config = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
**({"num_labels": num_labels} if num_labels is not None else {}),
cache_dir=cache_dir,
**config_kwargs,
)
else:
self.config: PretrainedConfig = config
extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams, p, None):
assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
setattr(self.config, p, getattr(self.hparams, p))
if tokenizer is None:
self.tokenizer = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
cache_dir=cache_dir,
)
else:
self.tokenizer: PreTrainedTokenizer = tokenizer
self.model_type = MODEL_MODES[mode]
if model is None:
self.model = self.model_type.from_pretrained(
self.hparams.model_name_or_path,
from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
config=self.config,
cache_dir=cache_dir,
)
else:
self.model = model
def load_hf_checkpoint(self, *args, **kwargs):
self.model = self.model_type.from_pretrained(*args, **kwargs)
def get_lr_scheduler(self):
get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
scheduler = get_schedule_func(
self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
)
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def configure_optimizers(self):
"""Prepare optimizer and schedule (linear warmup and decay)"""
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
optimizer = Adafactor(
optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
)
else:
optimizer = AdamW(
optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
)
self.opt = optimizer
scheduler = self.get_lr_scheduler()
return [optimizer], [scheduler]
def test_step(self, batch, batch_nb):
return self.validation_step(batch, batch_nb)
def test_epoch_end(self, outputs):
return self.validation_end(outputs)
def total_steps(self) -> int:
"""The number of total training steps that will be run. Used for lr scheduler purposes."""
num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores
effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
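    # Worked example (hypothetical numbers): with dataset_size = 10_000,
    # train_batch_size = 8, accumulate_grad_batches = 4 and gpus = 2, the effective
    # batch size is 8 * 4 * 2 = 64, so one epoch is 10_000 / 64 = 156.25 optimizer
    # steps and max_epochs = 3 gives total_steps() = 468.75 (the scheduler only needs
    # an approximate horizon, so the float is not floored here).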
def setup(self, stage):
if stage == "test":
self.dataset_size = len(self.test_dataloader().dataset)
else:
self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
self.dataset_size = len(self.train_dataloader().dataset)
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
raise NotImplementedError("You must implement this for your task")
def train_dataloader(self):
return self.train_loader
def val_dataloader(self):
return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)
def test_dataloader(self):
return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)
def _feature_file(self, mode):
return os.path.join(
self.hparams.data_dir,
"cached_{}_{}_{}".format(
mode,
list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
str(self.hparams.max_seq_length),
),
)
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
save_path = self.output_dir.joinpath("best_tfmr")
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from huggingface.co",
)
parser.add_argument(
"--encoder_layerdrop",
type=float,
help="Encoder layer dropout probability (Optional). Goes into model.config",
)
parser.add_argument(
"--decoder_layerdrop",
type=float,
help="Decoder layer dropout probability (Optional). Goes into model.config",
)
parser.add_argument(
"--dropout",
type=float,
help="Dropout probability (Optional). Goes into model.config",
)
parser.add_argument(
"--attention_dropout",
type=float,
help="Attention dropout probability (Optional). Goes into model.config",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument(
"--lr_scheduler",
default="linear",
choices=arg_to_scheduler_choices,
metavar=arg_to_scheduler_metavar,
type=str,
help="Learning rate scheduler",
)
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
parser.add_argument("--train_batch_size", default=32, type=int)
parser.add_argument("--eval_batch_size", default=32, type=int)
parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    # This method is better than using a custom DDP plugin with the latest pytorch-lightning (@shamanez)
def on_sanity_check_start(self, trainer, pl_module):
if (
trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with Ray. In newer pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class LoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
pl_module.logger.log_metrics(lrs)
def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
rank_zero_info("***** Validation results *****")
metrics = trainer.callback_metrics
# Log results
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
rank_zero_info("***** Test results *****")
metrics = trainer.callback_metrics
# Log and save results to file
output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
with open(output_test_results_file, "w") as writer:
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O2",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
parser.add_argument(
"--gradient_accumulation_steps",
dest="accumulate_grad_batches",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
        help="The input data dir. Should contain the training files for the task.",
)
def generic_train(
model: BaseTransformer,
args: argparse.Namespace,
early_stopping_callback=None,
logger=True, # can pass WandbLogger() here
custom_ddp_plugin=None,
extra_callbacks=[],
checkpoint_callback=None,
logging_callback=None,
**extra_train_kwargs
):
pl.seed_everything(args.seed)
# init model
odir = Path(model.hparams.output_dir)
odir.mkdir(exist_ok=True)
# add custom checkpoints
if checkpoint_callback is None:
checkpoint_callback = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
)
if early_stopping_callback:
extra_callbacks.append(early_stopping_callback)
if logging_callback is None:
logging_callback = LoggingCallback()
train_params = {}
# TODO: remove with PyTorch 1.6 since pl uses native amp
if args.fp16:
train_params["precision"] = 16
# train_params["amp_level"] = args.fp16_opt_level
if args.gpus > 1:
train_params["accelerator"] = "auto" # "ddp"
train_params["strategy"] = "ddp"
train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
train_params["profiler"] = None # extra_train_kwargs.get("profiler", None) #get unwanted logs
train_params["devices"] = "auto"
trainer = pl.Trainer.from_argparse_args(
args,
weights_summary=None,
callbacks=[logging_callback] + extra_callbacks + [checkpoint_callback] + [InitCallback()],
# plugins=[custom_ddp_plugin],
logger=logger,
**train_params,
)
if args.do_train:
trainer.fit(model)
return trainer
| 15,609 | 37.734491 | 124 | py |
robust-transformers | robust-transformers-main/examples/research_projects/rag/callbacks_rag.py | import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model checkpoints by the monitored validation metric (EM for RAG fine-tuning)."""
if metric == "rouge2":
exp = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
exp = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
exp = "{val_avg_em:.4f}-{step_count}"
else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this function."
)
checkpoint_callback = ModelCheckpoint(
dirpath=output_dir,
filename=exp,
monitor=f"val_{metric}",
mode="max",
save_top_k=3,
every_n_epochs=1, # maybe save a checkpoint every time val is run, not just end of epoch.
)
return checkpoint_callback
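# Usage sketch (the output directory is a placeholder, and the sketch is not executed
# here): for the RAG fine-tuning script the metric is "em", so the callback monitors
# "val_em", keeps the three best checkpoints, and names them from val_avg_em and
# step_count using the filename template built above.
#
# checkpoint_callback = get_checkpoint_callback("outputs", "em")
# trainer = pl.Trainer(callbacks=[checkpoint_callback])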
def get_early_stopping_callback(metric, patience):
return EarlyStopping(
monitor=f"val_{metric}", # does this need avg?
mode="min" if "loss" in metric else "max",
patience=patience,
verbose=True,
)
class Seq2SeqLoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(lrs)
@rank_zero_only
def _write_logs(
self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
) -> None:
logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
# Log results
od = Path(pl_module.hparams.output_dir)
if type_path == "test":
results_file = od / "test_results.txt"
generations_file = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=True)
generations_file.parent.mkdir(exist_ok=True)
with open(results_file, "a+") as writer:
for key in sorted(metrics):
if key in ["log", "progress_bar", "preds"]:
continue
val = metrics[key]
if isinstance(val, torch.Tensor):
val = val.item()
msg = f"{key}: {val:.6f}\n"
writer.write(msg)
if not save_generations:
return
if "preds" in metrics:
content = "\n".join(metrics["preds"])
generations_file.open("w+").write(content)
@rank_zero_only
def on_train_start(self, trainer, pl_module):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module)
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
@rank_zero_only
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
save_json(pl_module.metrics, pl_module.metrics_save_path)
return self._write_logs(trainer, pl_module, "test")
@rank_zero_only
def on_validation_end(self, trainer: pl.Trainer, pl_module):
save_json(pl_module.metrics, pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 4,428 | 36.854701 | 126 | py |
robust-transformers | robust-transformers-main/examples/research_projects/rag/_test_finetune_rag.py | import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
class RagFinetuneExampleTests(TestCasePlus):
def _create_dummy_data(self, data_dir):
os.makedirs(data_dir, exist_ok=True)
contents = {"source": "What is love ?", "target": "life"}
n_lines = {"train": 12, "val": 2, "test": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
content = "\n".join([contents[field]] * n_lines[split])
with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
f.write(content)
def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
output_dir = os.path.join(tmp_dir, "output")
data_dir = os.path.join(tmp_dir, "data")
self._create_dummy_data(data_dir=data_dir)
testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"--gpus={gpus}")
if is_apex_available():
testargs.append("--fp16")
else:
testargs.append("--gpus=0")
testargs.append("--distributed_backend=ddp_cpu")
testargs.append("--num_processes=2")
cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
execute_subprocess_async(cmd, env=self.get_env())
metrics_save_path = os.path.join(output_dir, "metrics.json")
with open(metrics_save_path) as f:
result = json.load(f)
return result
@require_torch_gpu
def test_finetune_gpu(self):
result = self._run_finetune(gpus=1)
self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_multi_gpu
def test_finetune_multigpu(self):
result = self._run_finetune(gpus=2)
self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_gpu
@require_ray
def test_finetune_gpu_ray_retrieval(self):
result = self._run_finetune(gpus=1, distributed_retriever="ray")
self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_multi_gpu
@require_ray
def test_finetune_multigpu_ray_retrieval(self):
result = self._run_finetune(gpus=1, distributed_retriever="ray")
self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 3,969 | 34.765766 | 85 | py |
robust-transformers | robust-transformers-main/examples/research_projects/pplm/run_pplm.py | #! /usr/bin/env python3
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example command with bag of words:
python run_pplm.py -B space --cond_text "The president" --length 100 --gamma 1.5 --num_iterations 3 --num_samples 10 --stepsize 0.01 --window_length 5 --kl_scale 0.01 --gm_scale 0.95
Example command with discriminator:
python run_pplm.py -D sentiment --class_label 3 --cond_text "The lake" --length 10 --gamma 1.0 --num_iterations 30 --num_samples 10 --stepsize 0.01 --kl_scale 0.01 --gm_scale 0.95
"""
import argparse
import json
from operator import add
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from torch import nn
from tqdm import trange
from pplm_classification_head import ClassificationHead
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from transformers.file_utils import cached_path
PPLM_BOW = 1
PPLM_DISCRIM = 2
PPLM_BOW_DISCRIM = 3
SMALL_CONST = 1e-15
BIG_CONST = 1e10
BAG_OF_WORDS_ARCHIVE_MAP = {
"legal": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/legal.txt",
"military": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/military.txt",
"politics": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/politics.txt",
"religion": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/religion.txt",
"science": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/science.txt",
"space": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/space.txt",
"technology": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/technology.txt",
}
DISCRIMINATOR_MODELS_PARAMS = {
"clickbait": {
"url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/clickbait_classifier_head.pt",
"class_size": 2,
"embed_size": 1024,
"class_vocab": {"non_clickbait": 0, "clickbait": 1},
"default_class": 1,
"pretrained_model": "gpt2-medium",
},
"sentiment": {
"url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/SST_classifier_head.pt",
"class_size": 5,
"embed_size": 1024,
"class_vocab": {"very_positive": 2, "very_negative": 3},
"default_class": 3,
"pretrained_model": "gpt2-medium",
},
}
def top_k_filter(logits, k, probs=False):
"""
    Masks everything but the top k entries as -infinity (-1e10).
Used to mask logits such that e^-infinity -> 0 won't contribute to the
sum of the denominator.
"""
if k == 0:
return logits
else:
values = torch.topk(logits, k)[0]
batch_mins = values[:, -1].view(-1, 1).expand_as(logits)
if probs:
return torch.where(logits < batch_mins, torch.ones_like(logits) * 0.0, logits)
return torch.where(logits < batch_mins, torch.ones_like(logits) * -BIG_CONST, logits)
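# Tiny worked example (illustrative values): with k = 2 and
# logits = torch.tensor([[1.0, 3.0, 2.0, 0.5]]), the top-2 values are 3.0 and 2.0,
# so batch_mins broadcasts 2.0 and the result is [[-1e10, 3.0, 2.0, -1e10]];
# after a softmax only the two largest entries carry probability mass.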
def perturb_past(
past,
model,
last,
unpert_past=None,
unpert_logits=None,
accumulated_hidden=None,
grad_norms=None,
stepsize=0.01,
one_hot_bows_vectors=None,
classifier=None,
class_label=None,
loss_type=0,
num_iterations=3,
horizon_length=1,
window_length=0,
decay=False,
gamma=1.5,
kl_scale=0.01,
device="cuda",
):
    # Generate initial perturbed past
grad_accumulator = [(np.zeros(p.shape).astype("float32")) for p in past]
if accumulated_hidden is None:
accumulated_hidden = 0
if decay:
decay_mask = torch.arange(0.0, 1.0 + SMALL_CONST, 1.0 / (window_length))[1:]
else:
decay_mask = 1.0
    # Generate a mask so that the gradient perturbation is only applied over a window of the past
_, _, _, curr_length, _ = past[0].shape
if curr_length > window_length and window_length > 0:
ones_key_val_shape = tuple(past[0].shape[:-2]) + tuple([window_length]) + tuple(past[0].shape[-1:])
zeros_key_val_shape = (
tuple(past[0].shape[:-2]) + tuple([curr_length - window_length]) + tuple(past[0].shape[-1:])
)
ones_mask = torch.ones(ones_key_val_shape)
ones_mask = decay_mask * ones_mask.permute(0, 1, 2, 4, 3)
ones_mask = ones_mask.permute(0, 1, 2, 4, 3)
window_mask = torch.cat((ones_mask, torch.zeros(zeros_key_val_shape)), dim=-2).to(device)
else:
window_mask = torch.ones_like(past[0]).to(device)
# accumulate perturbations for num_iterations
loss_per_iter = []
new_accumulated_hidden = None
for i in range(num_iterations):
print("Iteration ", i + 1)
curr_perturbation = [torch.from_numpy(p_).requires_grad_(True).to(device=device) for p_ in grad_accumulator]
# make sure p_.grad is not None
for p_ in curr_perturbation:
p_.retain_grad()
# Compute hidden using perturbed past
perturbed_past = list(map(add, past, curr_perturbation))
_, _, _, curr_length, _ = curr_perturbation[0].shape
lm_output = model(last, past_key_values=perturbed_past)
all_logits, all_hidden = lm_output["logits"], lm_output["hidden_states"]
hidden = all_hidden[-1]
new_accumulated_hidden = accumulated_hidden + torch.sum(hidden, dim=1).detach()
# TODO: Check the layer-norm consistency of this with trained discriminator (Sumanth)
logits = all_logits[:, -1, :]
probs = nn.functional.softmax(logits, dim=-1)
loss = 0.0
loss_list = []
if loss_type == PPLM_BOW or loss_type == PPLM_BOW_DISCRIM:
for one_hot_bow in one_hot_bows_vectors:
bow_logits = torch.mm(probs, torch.t(one_hot_bow))
bow_loss = -torch.log(torch.sum(bow_logits))
loss += bow_loss
loss_list.append(bow_loss)
print(" pplm_bow_loss:", loss.data.cpu().numpy())
if loss_type == 2 or loss_type == 3:
ce_loss = nn.CrossEntropyLoss()
            # TODO: why do we need this assignment instead of just using unpert_past? (Sumanth)
curr_unpert_past = unpert_past
curr_probs = torch.unsqueeze(probs, dim=1)
wte = model.resize_token_embeddings()
for _ in range(horizon_length):
inputs_embeds = torch.matmul(curr_probs, wte.weight.data)
lm_output = model(past_key_values=curr_unpert_past, inputs_embeds=inputs_embeds)
curr_all_logits, curr_unpert_past, curr_all_hidden = (
lm_output["logits"],
lm_output["past_key_values"],
lm_output["hidden_states"],
)
curr_logits = curr_all_logits[:, -1, :]
curr_probs = nn.functional.softmax(curr_logits, dim=-1)
curr_probs = torch.unsqueeze(curr_probs, dim=1)
curr_hidden = curr_all_hidden[-1]
new_accumulated_hidden = new_accumulated_hidden + torch.sum(curr_hidden, dim=1)
prediction = classifier(new_accumulated_hidden / (curr_length + 1 + horizon_length))
label = torch.tensor(prediction.shape[0] * [class_label], device=device, dtype=torch.long)
discrim_loss = ce_loss(prediction, label)
print(" pplm_discrim_loss:", discrim_loss.data.cpu().numpy())
loss += discrim_loss
loss_list.append(discrim_loss)
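        # KL term keeps the perturbed next-token distribution close to the
        # unperturbed one and acts as a fluency regularizer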
kl_loss = 0.0
if kl_scale > 0.0:
unpert_probs = nn.functional.softmax(unpert_logits[:, -1, :], dim=-1)
unpert_probs = unpert_probs + SMALL_CONST * (unpert_probs <= SMALL_CONST).float().to(device).detach()
correction = SMALL_CONST * (probs <= SMALL_CONST).float().to(device).detach()
corrected_probs = probs + correction.detach()
kl_loss = kl_scale * ((corrected_probs * (corrected_probs / unpert_probs).log()).sum())
print(" kl_loss", kl_loss.data.cpu().numpy())
loss += kl_loss
loss_per_iter.append(loss.data.cpu().numpy())
print(" pplm_loss", (loss - kl_loss).data.cpu().numpy())
# compute gradients
loss.backward()
# calculate gradient norms
if grad_norms is not None and loss_type == PPLM_BOW:
grad_norms = [
torch.max(grad_norms[index], torch.norm(p_.grad * window_mask))
for index, p_ in enumerate(curr_perturbation)
]
else:
grad_norms = [
(torch.norm(p_.grad * window_mask) + SMALL_CONST) for index, p_ in enumerate(curr_perturbation)
]
# normalize gradients
grad = [
-stepsize * (p_.grad * window_mask / grad_norms[index] ** gamma).data.cpu().numpy()
for index, p_ in enumerate(curr_perturbation)
]
# accumulate gradient
grad_accumulator = list(map(add, grad, grad_accumulator))
# reset gradients, just to make sure
for p_ in curr_perturbation:
p_.grad.data.zero_()
# removing past from the graph
new_past = []
for p_ in past:
new_past.append(p_.detach())
past = new_past
# apply the accumulated perturbations to the past
grad_accumulator = [torch.from_numpy(p_).requires_grad_(True).to(device=device) for p_ in grad_accumulator]
pert_past = list(map(add, past, grad_accumulator))
return pert_past, new_accumulated_hidden, grad_norms, loss_per_iter
def get_classifier(
name: Optional[str], class_label: Union[str, int], device: str
) -> Tuple[Optional[ClassificationHead], Optional[int]]:
if name is None:
return None, None
params = DISCRIMINATOR_MODELS_PARAMS[name]
classifier = ClassificationHead(class_size=params["class_size"], embed_size=params["embed_size"]).to(device)
if "url" in params:
resolved_archive_file = cached_path(params["url"])
elif "path" in params:
resolved_archive_file = params["path"]
else:
raise ValueError("Either url or path have to be specified in the discriminator model parameters")
classifier.load_state_dict(torch.load(resolved_archive_file, map_location=device))
classifier.eval()
if isinstance(class_label, str):
if class_label in params["class_vocab"]:
label_id = params["class_vocab"][class_label]
else:
label_id = params["default_class"]
print("class_label {} not in class_vocab".format(class_label))
print("available values are: {}".format(params["class_vocab"]))
print("using default class {}".format(label_id))
elif isinstance(class_label, int):
if class_label in set(params["class_vocab"].values()):
label_id = class_label
else:
label_id = params["default_class"]
print("class_label {} not in class_vocab".format(class_label))
print("available values are: {}".format(params["class_vocab"]))
print("using default class {}".format(label_id))
else:
label_id = params["default_class"]
return classifier, label_id
def get_bag_of_words_indices(bag_of_words_ids_or_paths: List[str], tokenizer) -> List[List[List[int]]]:
bow_indices = []
for id_or_path in bag_of_words_ids_or_paths:
if id_or_path in BAG_OF_WORDS_ARCHIVE_MAP:
filepath = cached_path(BAG_OF_WORDS_ARCHIVE_MAP[id_or_path])
else:
filepath = id_or_path
with open(filepath, "r") as f:
words = f.read().strip().split("\n")
bow_indices.append([tokenizer.encode(word.strip(), add_prefix_space=True) for word in words])
return bow_indices
def build_bows_one_hot_vectors(bow_indices, tokenizer, device="cuda"):
if bow_indices is None:
return None
one_hot_bows_vectors = []
for single_bow in bow_indices:
single_bow = list(filter(lambda x: len(x) <= 1, single_bow))
single_bow = torch.tensor(single_bow).to(device)
num_words = single_bow.shape[0]
one_hot_bow = torch.zeros(num_words, tokenizer.vocab_size).to(device)
one_hot_bow.scatter_(1, single_bow, 1)
one_hot_bows_vectors.append(one_hot_bow)
return one_hot_bows_vectors
def full_text_generation(
model,
tokenizer,
context=None,
num_samples=1,
device="cuda",
bag_of_words=None,
discrim=None,
class_label=None,
length=100,
stepsize=0.02,
temperature=1.0,
top_k=10,
sample=False,
num_iterations=3,
grad_length=10000,
horizon_length=1,
window_length=0,
decay=False,
gamma=1.5,
gm_scale=0.9,
kl_scale=0.01,
repetition_penalty=1.0,
**kwargs
):
classifier, class_id = get_classifier(discrim, class_label, device)
bow_indices = []
if bag_of_words:
bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer)
if bag_of_words and classifier:
print("Both PPLM-BoW and PPLM-Discrim are on. This is not optimized.")
loss_type = PPLM_BOW_DISCRIM
elif bag_of_words:
loss_type = PPLM_BOW
print("Using PPLM-BoW")
elif classifier is not None:
loss_type = PPLM_DISCRIM
print("Using PPLM-Discrim")
else:
raise Exception("Specify either a bag of words or a discriminator")
unpert_gen_tok_text, _, _ = generate_text_pplm(
model=model,
tokenizer=tokenizer,
context=context,
device=device,
length=length,
sample=sample,
perturb=False,
repetition_penalty=repetition_penalty,
)
if device == "cuda":
torch.cuda.empty_cache()
pert_gen_tok_texts = []
discrim_losses = []
losses_in_time = []
for i in range(num_samples):
pert_gen_tok_text, discrim_loss, loss_in_time = generate_text_pplm(
model=model,
tokenizer=tokenizer,
context=context,
device=device,
perturb=True,
bow_indices=bow_indices,
classifier=classifier,
class_label=class_id,
loss_type=loss_type,
length=length,
stepsize=stepsize,
temperature=temperature,
top_k=top_k,
sample=sample,
num_iterations=num_iterations,
grad_length=grad_length,
horizon_length=horizon_length,
window_length=window_length,
decay=decay,
gamma=gamma,
gm_scale=gm_scale,
kl_scale=kl_scale,
repetition_penalty=repetition_penalty,
)
pert_gen_tok_texts.append(pert_gen_tok_text)
if classifier is not None:
discrim_losses.append(discrim_loss.data.cpu().numpy())
losses_in_time.append(loss_in_time)
if device == "cuda":
torch.cuda.empty_cache()
return unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time
def generate_text_pplm(
model,
tokenizer,
context=None,
past=None,
device="cuda",
perturb=True,
bow_indices=None,
classifier=None,
class_label=None,
loss_type=0,
length=100,
stepsize=0.02,
temperature=1.0,
top_k=10,
sample=False,
num_iterations=3,
grad_length=10000,
horizon_length=1,
window_length=0,
decay=False,
gamma=1.5,
gm_scale=0.9,
kl_scale=0.01,
repetition_penalty=1.0,
):
output_so_far = None
if context:
context_t = torch.tensor(context, device=device, dtype=torch.long)
while len(context_t.shape) < 2:
context_t = context_t.unsqueeze(0)
output_so_far = context_t
# collect one hot vectors for bags of words
one_hot_bows_vectors = build_bows_one_hot_vectors(bow_indices, tokenizer, device)
grad_norms = None
last = None
unpert_discrim_loss = 0
loss_in_time = []
for i in trange(length, ascii=True):
# Get past/probs for current output, except for last word
# Note that GPT takes 2 inputs: past + current_token
# run model forward to obtain unperturbed
if past is None and output_so_far is not None:
last = output_so_far[:, -1:]
if output_so_far.shape[1] > 1:
past = model(output_so_far[:, :-1])["past_key_values"]
lm_output = model(output_so_far)
unpert_logits, unpert_past, unpert_all_hidden = (
lm_output["logits"],
lm_output["past_key_values"],
lm_output["hidden_states"],
)
unpert_last_hidden = unpert_all_hidden[-1]
        # check if we are above grad max length
if i >= grad_length:
current_stepsize = stepsize * 0
else:
current_stepsize = stepsize
# modify the past if necessary
if not perturb or num_iterations == 0:
pert_past = past
else:
accumulated_hidden = unpert_last_hidden[:, :-1, :]
accumulated_hidden = torch.sum(accumulated_hidden, dim=1)
if past is not None:
pert_past, _, grad_norms, loss_this_iter = perturb_past(
past,
model,
last,
unpert_past=unpert_past,
unpert_logits=unpert_logits,
accumulated_hidden=accumulated_hidden,
grad_norms=grad_norms,
stepsize=current_stepsize,
one_hot_bows_vectors=one_hot_bows_vectors,
classifier=classifier,
class_label=class_label,
loss_type=loss_type,
num_iterations=num_iterations,
horizon_length=horizon_length,
window_length=window_length,
decay=decay,
gamma=gamma,
kl_scale=kl_scale,
device=device,
)
loss_in_time.append(loss_this_iter)
else:
pert_past = past
lm_output = model(last, past_key_values=pert_past)
pert_logits, past = (
lm_output["logits"],
lm_output["past_key_values"],
)
pert_logits = pert_logits[:, -1, :] / temperature # + SMALL_CONST
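        # Repetition penalty: damp logits of tokens that were already generated
        # (negative logits are multiplied, positive ones divided by the penalty)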
for token_idx in set(output_so_far[0].tolist()):
if pert_logits[0, token_idx] < 0:
pert_logits[0, token_idx] *= repetition_penalty
else:
pert_logits[0, token_idx] /= repetition_penalty
pert_probs = nn.functional.softmax(pert_logits, dim=-1)
if classifier is not None:
ce_loss = nn.CrossEntropyLoss()
prediction = classifier(torch.mean(unpert_last_hidden, dim=1))
label = torch.tensor([class_label], device=device, dtype=torch.long)
unpert_discrim_loss = ce_loss(prediction, label)
print("unperturbed discrim loss", unpert_discrim_loss.data.cpu().numpy())
else:
unpert_discrim_loss = 0
# Fuse the modified model and original model
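        # Geometric-mean fusion: p ~ pert_probs**gm_scale * unpert_probs**(1 - gm_scale),
        # then top-k filtered and renormalized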
if perturb:
unpert_probs = nn.functional.softmax(unpert_logits[:, -1, :], dim=-1)
pert_probs = (pert_probs**gm_scale) * (unpert_probs ** (1 - gm_scale)) # + SMALL_CONST
pert_probs = top_k_filter(pert_probs, k=top_k, probs=True) # + SMALL_CONST
# rescale
if torch.sum(pert_probs) <= 1:
pert_probs = pert_probs / torch.sum(pert_probs)
else:
pert_logits = top_k_filter(pert_logits, k=top_k) # + SMALL_CONST
pert_probs = nn.functional.softmax(pert_logits, dim=-1)
# sample or greedy
if sample:
last = torch.multinomial(pert_probs, num_samples=1)
else:
_, last = torch.topk(pert_probs, k=1, dim=-1)
# update context/output_so_far appending the new token
output_so_far = last if output_so_far is None else torch.cat((output_so_far, last), dim=1)
print(tokenizer.decode(output_so_far.tolist()[0]))
return output_so_far, unpert_discrim_loss, loss_in_time
def set_generic_model_params(discrim_weights, discrim_meta):
if discrim_weights is None:
raise ValueError("When using a generic discriminator, discrim_weights need to be specified")
if discrim_meta is None:
raise ValueError("When using a generic discriminator, discrim_meta need to be specified")
with open(discrim_meta, "r") as discrim_meta_file:
meta = json.load(discrim_meta_file)
meta["path"] = discrim_weights
DISCRIMINATOR_MODELS_PARAMS["generic"] = meta
def run_pplm_example(
pretrained_model="gpt2-medium",
cond_text="",
uncond=False,
num_samples=1,
bag_of_words=None,
discrim=None,
discrim_weights=None,
discrim_meta=None,
class_label=-1,
length=100,
stepsize=0.02,
temperature=1.0,
top_k=10,
sample=False,
num_iterations=3,
grad_length=10000,
horizon_length=1,
window_length=0,
decay=False,
gamma=1.5,
gm_scale=0.9,
kl_scale=0.01,
seed=0,
no_cuda=False,
colorama=False,
repetition_penalty=1.0,
):
# set Random seed
torch.manual_seed(seed)
np.random.seed(seed)
# set the device
device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu"
if discrim == "generic":
set_generic_model_params(discrim_weights, discrim_meta)
if discrim is not None:
pretrained_model = DISCRIMINATOR_MODELS_PARAMS[discrim]["pretrained_model"]
print("discrim = {}, pretrained_model set to discriminator's = {}".format(discrim, pretrained_model))
# load pretrained model
model = GPT2LMHeadModel.from_pretrained(pretrained_model, output_hidden_states=True)
model.to(device)
model.eval()
# load tokenizer
tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model)
# Freeze GPT-2 weights
for param in model.parameters():
param.requires_grad = False
# figure out conditioning text
if uncond:
tokenized_cond_text = tokenizer.encode([tokenizer.bos_token])
else:
raw_text = cond_text
while not raw_text:
print("Did you forget to add `--cond_text`? ")
raw_text = input("Model prompt >>> ")
tokenized_cond_text = tokenizer.encode(tokenizer.bos_token + raw_text)
print("= Prefix of sentence =")
print(tokenizer.decode(tokenized_cond_text))
print()
# generate unperturbed and perturbed texts
# full_text_generation returns:
# unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time
unpert_gen_tok_text, pert_gen_tok_texts, _, _ = full_text_generation(
model=model,
tokenizer=tokenizer,
context=tokenized_cond_text,
device=device,
num_samples=num_samples,
bag_of_words=bag_of_words,
discrim=discrim,
class_label=class_label,
length=length,
stepsize=stepsize,
temperature=temperature,
top_k=top_k,
sample=sample,
num_iterations=num_iterations,
grad_length=grad_length,
horizon_length=horizon_length,
window_length=window_length,
decay=decay,
gamma=gamma,
gm_scale=gm_scale,
kl_scale=kl_scale,
repetition_penalty=repetition_penalty,
)
# untokenize unperturbed text
unpert_gen_text = tokenizer.decode(unpert_gen_tok_text.tolist()[0])
print("=" * 80)
print("= Unperturbed generated text =")
print(unpert_gen_text)
print()
generated_texts = []
bow_word_ids = set()
if bag_of_words and colorama:
bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer)
for single_bow_list in bow_indices:
# filtering all words in the list composed of more than 1 token
filtered = list(filter(lambda x: len(x) <= 1, single_bow_list))
            # w[0] because we are sure w has only 1 item because of the previous filter
bow_word_ids.update(w[0] for w in filtered)
# iterate through the perturbed texts
for i, pert_gen_tok_text in enumerate(pert_gen_tok_texts):
try:
# untokenize unperturbed text
if colorama:
import colorama
pert_gen_text = ""
for word_id in pert_gen_tok_text.tolist()[0]:
if word_id in bow_word_ids:
pert_gen_text += "{}{}{}".format(
colorama.Fore.RED,
tokenizer.decode([word_id]),
colorama.Style.RESET_ALL,
)
else:
pert_gen_text += tokenizer.decode([word_id])
else:
pert_gen_text = tokenizer.decode(pert_gen_tok_text.tolist()[0])
print("= Perturbed generated text {} =".format(i + 1))
print(pert_gen_text)
print()
except Exception as exc:
print("Ignoring error while generating perturbed text:", exc)
# keep the prefix, perturbed seq, original seq for each index
generated_texts.append((tokenized_cond_text, pert_gen_tok_text, unpert_gen_tok_text))
return
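# Example invocation (a sketch, assuming this script is saved as run_pplm.py and that
# "military" is one of the bag-of-words ids in BAG_OF_WORDS_ARCHIVE_MAP; all flags used
# here are defined in the argparse arguments below):
#   python run_pplm.py -B military --cond_text "The lake" --length 50 \
#       --stepsize 0.03 --num_samples 3 --sample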
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--pretrained_model",
"-M",
type=str,
default="gpt2-medium",
help="pretrained model name or path to local checkpoint",
)
parser.add_argument("--cond_text", type=str, default="The lake", help="Prefix texts to condition on")
parser.add_argument("--uncond", action="store_true", help="Generate from end-of-text as prefix")
parser.add_argument(
"--num_samples",
type=int,
default=1,
help="Number of samples to generate from the modified latents",
)
parser.add_argument(
"--bag_of_words",
"-B",
type=str,
default=None,
help=(
"Bags of words used for PPLM-BoW. "
"Either a BOW id (see list in code) or a filepath. "
"Multiple BoWs separated by ;"
),
)
parser.add_argument(
"--discrim",
"-D",
type=str,
default=None,
choices=("clickbait", "sentiment", "toxicity", "generic"),
help="Discriminator to use",
)
parser.add_argument(
"--discrim_weights",
type=str,
default=None,
help="Weights for the generic discriminator",
)
parser.add_argument(
"--discrim_meta",
type=str,
default=None,
help="Meta information for the generic discriminator",
)
parser.add_argument(
"--class_label",
type=int,
default=-1,
help="Class label used for the discriminator",
)
parser.add_argument("--length", type=int, default=100)
parser.add_argument("--stepsize", type=float, default=0.02)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=10)
parser.add_argument("--sample", action="store_true", help="Generate from end-of-text as prefix")
parser.add_argument("--num_iterations", type=int, default=3)
parser.add_argument("--grad_length", type=int, default=10000)
parser.add_argument(
"--window_length",
type=int,
default=0,
help="Length of past which is being optimized; 0 corresponds to infinite window length",
)
parser.add_argument(
"--horizon_length",
type=int,
default=1,
help="Length of future to optimize over",
)
parser.add_argument("--decay", action="store_true", help="whether to decay or not")
parser.add_argument("--gamma", type=float, default=1.5)
parser.add_argument("--gm_scale", type=float, default=0.9)
parser.add_argument("--kl_scale", type=float, default=0.01)
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--no_cuda", action="store_true", help="no cuda")
parser.add_argument("--colorama", action="store_true", help="colors keywords")
parser.add_argument(
"--repetition_penalty",
type=float,
default=1.0,
help="Penalize repetition. More than 1.0 -> less repetition",
)
args = parser.parse_args()
run_pplm_example(**vars(args))
| 29,044 | 34.078502 | 182 | py |
robust-transformers | robust-transformers-main/examples/research_projects/pplm/run_pplm_discrim_train.py | #! /usr/bin/env python3
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import json
import math
import time
import numpy as np
import torch
import torch.optim as optim
import torch.utils.data as data
from nltk.tokenize.treebank import TreebankWordDetokenizer
from torch import nn
from torchtext import data as torchtext_data
from torchtext import datasets
from tqdm import tqdm, trange
from pplm_classification_head import ClassificationHead
from transformers import GPT2LMHeadModel, GPT2Tokenizer
torch.manual_seed(0)
np.random.seed(0)
EPSILON = 1e-10
example_sentence = "This is incredible! I love it, this is the best chicken I have ever had."
max_length_seq = 100
class Discriminator(nn.Module):
"""Transformer encoder followed by a Classification Head"""
def __init__(self, class_size, pretrained_model="gpt2-medium", cached_mode=False, device="cpu"):
super().__init__()
self.tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model)
self.encoder = GPT2LMHeadModel.from_pretrained(pretrained_model)
self.embed_size = self.encoder.transformer.config.hidden_size
self.classifier_head = ClassificationHead(class_size=class_size, embed_size=self.embed_size)
self.cached_mode = cached_mode
self.device = device
def get_classifier(self):
return self.classifier_head
def train_custom(self):
for param in self.encoder.parameters():
param.requires_grad = False
self.classifier_head.train()
def avg_representation(self, x):
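        # Mean-pool the GPT-2 hidden states over non-padding positions (padding id is 0
        # here), yielding one fixed-size vector per input sequence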
mask = x.ne(0).unsqueeze(2).repeat(1, 1, self.embed_size).float().to(self.device).detach()
hidden = self.encoder.transformer(x)["last_hidden_state"]
masked_hidden = hidden * mask
avg_hidden = torch.sum(masked_hidden, dim=1) / (torch.sum(mask, dim=1).detach() + EPSILON)
return avg_hidden
def forward(self, x):
if self.cached_mode:
avg_hidden = x.to(self.device)
else:
avg_hidden = self.avg_representation(x.to(self.device))
logits = self.classifier_head(avg_hidden)
probs = nn.functional.log_softmax(logits, dim=-1)
return probs
class Dataset(data.Dataset):
def __init__(self, X, y):
"""Reads source and target sequences from txt files."""
self.X = X
self.y = y
def __len__(self):
return len(self.X)
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
data = {}
data["X"] = self.X[index]
data["y"] = self.y[index]
return data
def collate_fn(data):
def pad_sequences(sequences):
lengths = [len(seq) for seq in sequences]
padded_sequences = torch.zeros(len(sequences), max(lengths)).long() # padding value = 0
for i, seq in enumerate(sequences):
end = lengths[i]
padded_sequences[i, :end] = seq[:end]
return padded_sequences, lengths
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
x_batch, _ = pad_sequences(item_info["X"])
y_batch = torch.tensor(item_info["y"], dtype=torch.long)
return x_batch, y_batch
def cached_collate_fn(data):
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
x_batch = torch.cat(item_info["X"], 0)
y_batch = torch.tensor(item_info["y"], dtype=torch.long)
return x_batch, y_batch
def train_epoch(data_loader, discriminator, optimizer, epoch=0, log_interval=10, device="cpu"):
samples_so_far = 0
discriminator.train_custom()
for batch_idx, (input_t, target_t) in enumerate(data_loader):
input_t, target_t = input_t.to(device), target_t.to(device)
optimizer.zero_grad()
output_t = discriminator(input_t)
loss = nn.functional.nll_loss(output_t, target_t)
loss.backward(retain_graph=True)
optimizer.step()
samples_so_far += len(input_t)
if batch_idx % log_interval == 0:
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch + 1,
samples_so_far,
len(data_loader.dataset),
100 * samples_so_far / len(data_loader.dataset),
loss.item(),
)
)
def evaluate_performance(data_loader, discriminator, device="cpu"):
discriminator.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for input_t, target_t in data_loader:
input_t, target_t = input_t.to(device), target_t.to(device)
output_t = discriminator(input_t)
# sum up batch loss
test_loss += nn.functional.nll_loss(output_t, target_t, reduction="sum").item()
# get the index of the max log-probability
pred_t = output_t.argmax(dim=1, keepdim=True)
correct += pred_t.eq(target_t.view_as(pred_t)).sum().item()
test_loss /= len(data_loader.dataset)
print(
"Performance on test set: "
"Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)".format(
test_loss, correct, len(data_loader.dataset), 100.0 * correct / len(data_loader.dataset)
)
)
def predict(input_sentence, model, classes, cached=False, device="cpu"):
input_t = model.tokenizer.encode(input_sentence)
input_t = torch.tensor([input_t], dtype=torch.long, device=device)
if cached:
input_t = model.avg_representation(input_t)
log_probs = model(input_t).data.cpu().numpy().flatten().tolist()
print("Input sentence:", input_sentence)
print(
"Predictions:",
", ".join("{}: {:.4f}".format(c, math.exp(log_prob)) for c, log_prob in zip(classes, log_probs)),
)
def get_cached_data_loader(dataset, batch_size, discriminator, shuffle=False, device="cpu"):
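    # Precompute the average GPT-2 representation of every example once, so that
    # training afterwards only needs forward passes through the small classification head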
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, collate_fn=collate_fn)
xs = []
ys = []
for batch_idx, (x, y) in enumerate(tqdm(data_loader, ascii=True)):
with torch.no_grad():
x = x.to(device)
avg_rep = discriminator.avg_representation(x).cpu().detach()
avg_rep_list = torch.unbind(avg_rep.unsqueeze(1))
xs += avg_rep_list
ys += y.cpu().numpy().tolist()
data_loader = torch.utils.data.DataLoader(
dataset=Dataset(xs, ys), batch_size=batch_size, shuffle=shuffle, collate_fn=cached_collate_fn
)
return data_loader
def train_discriminator(
dataset,
dataset_fp=None,
pretrained_model="gpt2-medium",
epochs=10,
batch_size=64,
log_interval=10,
save_model=False,
cached=False,
no_cuda=False,
):
device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu"
print("Preprocessing {} dataset...".format(dataset))
start = time.time()
if dataset == "SST":
idx2class = ["positive", "negative", "very positive", "very negative", "neutral"]
class2idx = {c: i for i, c in enumerate(idx2class)}
discriminator = Discriminator(
class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device
).to(device)
text = torchtext_data.Field()
label = torchtext_data.Field(sequential=False)
train_data, val_data, test_data = datasets.SST.splits(
text,
label,
fine_grained=True,
train_subtrees=True,
)
x = []
y = []
for i in trange(len(train_data), ascii=True):
seq = TreebankWordDetokenizer().detokenize(vars(train_data[i])["text"])
seq = discriminator.tokenizer.encode(seq)
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
x.append(seq)
y.append(class2idx[vars(train_data[i])["label"]])
train_dataset = Dataset(x, y)
test_x = []
test_y = []
for i in trange(len(test_data), ascii=True):
seq = TreebankWordDetokenizer().detokenize(vars(test_data[i])["text"])
seq = discriminator.tokenizer.encode(seq)
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
test_x.append(seq)
test_y.append(class2idx[vars(test_data[i])["label"]])
test_dataset = Dataset(test_x, test_y)
discriminator_meta = {
"class_size": len(idx2class),
"embed_size": discriminator.embed_size,
"pretrained_model": pretrained_model,
"class_vocab": class2idx,
"default_class": 2,
}
elif dataset == "clickbait":
idx2class = ["non_clickbait", "clickbait"]
class2idx = {c: i for i, c in enumerate(idx2class)}
discriminator = Discriminator(
class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device
).to(device)
with open("datasets/clickbait/clickbait_train_prefix.txt") as f:
data = []
for i, line in enumerate(f):
try:
data.append(eval(line))
except Exception:
print("Error evaluating line {}: {}".format(i, line))
continue
x = []
y = []
with open("datasets/clickbait/clickbait_train_prefix.txt") as f:
for i, line in enumerate(tqdm(f, ascii=True)):
try:
d = eval(line)
seq = discriminator.tokenizer.encode(d["text"])
if len(seq) < max_length_seq:
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
else:
print("Line {} is longer than maximum length {}".format(i, max_length_seq))
continue
x.append(seq)
y.append(d["label"])
except Exception:
print("Error evaluating / tokenizing" " line {}, skipping it".format(i))
pass
full_dataset = Dataset(x, y)
train_size = int(0.9 * len(full_dataset))
test_size = len(full_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])
discriminator_meta = {
"class_size": len(idx2class),
"embed_size": discriminator.embed_size,
"pretrained_model": pretrained_model,
"class_vocab": class2idx,
"default_class": 1,
}
elif dataset == "toxic":
idx2class = ["non_toxic", "toxic"]
class2idx = {c: i for i, c in enumerate(idx2class)}
discriminator = Discriminator(
class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device
).to(device)
x = []
y = []
with open("datasets/toxic/toxic_train.txt") as f:
for i, line in enumerate(tqdm(f, ascii=True)):
try:
d = eval(line)
seq = discriminator.tokenizer.encode(d["text"])
if len(seq) < max_length_seq:
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
else:
print("Line {} is longer than maximum length {}".format(i, max_length_seq))
continue
x.append(seq)
y.append(int(np.sum(d["label"]) > 0))
except Exception:
print("Error evaluating / tokenizing" " line {}, skipping it".format(i))
pass
full_dataset = Dataset(x, y)
train_size = int(0.9 * len(full_dataset))
test_size = len(full_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])
discriminator_meta = {
"class_size": len(idx2class),
"embed_size": discriminator.embed_size,
"pretrained_model": pretrained_model,
"class_vocab": class2idx,
"default_class": 0,
}
else: # if dataset == "generic":
# This assumes the input dataset is a TSV with the following structure:
# class \t text
if dataset_fp is None:
raise ValueError("When generic dataset is selected, " "dataset_fp needs to be specified aswell.")
classes = set()
with open(dataset_fp) as f:
csv_reader = csv.reader(f, delimiter="\t")
for row in tqdm(csv_reader, ascii=True):
if row:
classes.add(row[0])
idx2class = sorted(classes)
class2idx = {c: i for i, c in enumerate(idx2class)}
discriminator = Discriminator(
class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device
).to(device)
x = []
y = []
with open(dataset_fp) as f:
csv_reader = csv.reader(f, delimiter="\t")
for i, row in enumerate(tqdm(csv_reader, ascii=True)):
if row:
label = row[0]
text = row[1]
try:
seq = discriminator.tokenizer.encode(text)
if len(seq) < max_length_seq:
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
else:
print("Line {} is longer than maximum length {}".format(i, max_length_seq))
continue
x.append(seq)
y.append(class2idx[label])
except Exception:
print("Error tokenizing line {}, skipping it".format(i))
pass
full_dataset = Dataset(x, y)
train_size = int(0.9 * len(full_dataset))
test_size = len(full_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])
discriminator_meta = {
"class_size": len(idx2class),
"embed_size": discriminator.embed_size,
"pretrained_model": pretrained_model,
"class_vocab": class2idx,
"default_class": 0,
}
end = time.time()
print("Preprocessed {} data points".format(len(train_dataset) + len(test_dataset)))
print("Data preprocessing took: {:.3f}s".format(end - start))
if cached:
print("Building representation cache...")
start = time.time()
train_loader = get_cached_data_loader(train_dataset, batch_size, discriminator, shuffle=True, device=device)
test_loader = get_cached_data_loader(test_dataset, batch_size, discriminator, device=device)
end = time.time()
print("Building representation cache took: {:.3f}s".format(end - start))
else:
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn
)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, collate_fn=collate_fn)
if save_model:
with open("{}_classifier_head_meta.json".format(dataset), "w") as meta_file:
json.dump(discriminator_meta, meta_file)
optimizer = optim.Adam(discriminator.parameters(), lr=0.0001)
for epoch in range(epochs):
start = time.time()
print("\nEpoch", epoch + 1)
train_epoch(
discriminator=discriminator,
data_loader=train_loader,
optimizer=optimizer,
epoch=epoch,
log_interval=log_interval,
device=device,
)
evaluate_performance(data_loader=test_loader, discriminator=discriminator, device=device)
end = time.time()
print("Epoch took: {:.3f}s".format(end - start))
print("\nExample prediction")
predict(example_sentence, discriminator, idx2class, cached=cached, device=device)
if save_model:
# torch.save(discriminator.state_dict(),
# "{}_discriminator_{}.pt".format(
# args.dataset, epoch + 1
# ))
torch.save(
discriminator.get_classifier().state_dict(),
"{}_classifier_head_epoch_{}.pt".format(dataset, epoch + 1),
)
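# Example invocation (a sketch; the flags used here are defined in the argparse
# arguments below):
#   python run_pplm_discrim_train.py --dataset SST --epochs 10 --cached --save_model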
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train a discriminator on top of GPT-2 representations")
parser.add_argument(
"--dataset",
type=str,
default="SST",
choices=("SST", "clickbait", "toxic", "generic"),
help="dataset to train the discriminator on."
"In case of generic, the dataset is expected"
"to be a TSBV file with structure: class \\t text",
)
parser.add_argument(
"--dataset_fp",
type=str,
default="",
help="File path of the dataset to use. " "Needed only in case of generic datadset",
)
parser.add_argument(
"--pretrained_model", type=str, default="gpt2-medium", help="Pretrained model to use as encoder"
)
parser.add_argument("--epochs", type=int, default=10, metavar="N", help="Number of training epochs")
parser.add_argument(
"--batch_size", type=int, default=64, metavar="N", help="input batch size for training (default: 64)"
)
parser.add_argument(
"--log_interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument("--save_model", action="store_true", help="whether to save the model")
parser.add_argument("--cached", action="store_true", help="whether to cache the input representations")
parser.add_argument("--no_cuda", action="store_true", help="use to turn off cuda")
args = parser.parse_args()
train_discriminator(**(vars(args)))
| 18,788 | 34.92543 | 117 | py |
robust-transformers | robust-transformers-main/examples/research_projects/pplm/pplm_classification_head.py | from torch import nn
class ClassificationHead(nn.Module):
"""Classification Head for transformer encoders"""
def __init__(self, class_size, embed_size):
super().__init__()
self.class_size = class_size
self.embed_size = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
self.mlp = nn.Linear(embed_size, class_size)
def forward(self, hidden_state):
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
logits = self.mlp(hidden_state)
return logits
| 651 | 31.6 | 68 | py |
robust-transformers | robust-transformers-main/examples/research_projects/deebert/test_glue_deebert.py | import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
parser = argparse.ArgumentParser()
parser.add_argument("-f")
args = parser.parse_args()
return args.f
class DeeBertTests(TestCasePlus):
def setup(self) -> None:
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
def run_and_check(self, args):
n_gpu = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0, "run_glue_deebert.py")
with patch.object(sys, "argv", args):
result = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
def test_glue_deebert_train(self):
train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(train_args)
eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(eval_args)
entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(entropy_eval_args)
| 3,690 | 34.152381 | 109 | py |
robust-transformers | robust-transformers-main/examples/research_projects/deebert/run_glue_deebert.py | from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import time
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import transformers
from src.modeling_highway_bert import DeeBertForSequenceClassification
from src.modeling_highway_roberta import DeeRobertaForSequenceClassification
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertTokenizer,
RobertaConfig,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
from transformers.trainer_utils import is_main_process
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
"bert": (BertConfig, DeeBertForSequenceClassification, BertTokenizer),
"roberta": (RobertaConfig, DeeRobertaForSequenceClassification, RobertaTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def get_wanted_result(result):
if "spearmanr" in result:
print_result = result["spearmanr"]
elif "f1" in result:
print_result = result["f1"]
elif "mcc" in result:
print_result = result["mcc"]
elif "acc" in result:
print_result = result["acc"]
else:
raise ValueError("Primary metric unclear in the results")
return print_result
def train(args, train_dataset, model, tokenizer, train_highway=False):
"""Train the model"""
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
if train_highway:
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if ("highway" in n) and (not any(nd in n for nd in no_decay))
],
"weight_decay": args.weight_decay,
},
{
"params": [
p for n, p in model.named_parameters() if ("highway" in n) and (any(nd in n for nd in no_decay))
],
"weight_decay": 0.0,
},
]
else:
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if ("highway" not in n) and (not any(nd in n for nd in no_decay))
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if ("highway" not in n) and (any(nd in n for nd in no_decay))
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet"] else None
) # XLM, DistilBERT and RoBERTa don't use segment_ids
inputs["train_highway"] = train_highway
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix="", output_layer=-1, eval_highway=False):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1:
model = nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
exit_layer_counter = {(i + 1): 0 for i in range(model.num_layers)}
st = time.time()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet"] else None
) # XLM, DistilBERT and RoBERTa don't use segment_ids
if output_layer >= 0:
inputs["output_layer"] = output_layer
outputs = model(**inputs)
if eval_highway:
exit_layer_counter[outputs[-1]] += 1
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_time = time.time() - st
logger.info("Eval time: {}".format(eval_time))
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
if eval_highway:
logger.info("Exit layer counter: {}".format(exit_layer_counter))
actual_cost = sum([l * c for l, c in exit_layer_counter.items()])
full_cost = len(eval_dataloader) * model.num_layers
logger.info("Expected saving: {}".format(actual_cost / full_cost))
if args.early_exit_entropy >= 0:
save_fname = (
args.plot_data_dir
+ "/"
+ args.model_name_or_path[2:]
+ "/entropy_{}.npy".format(args.early_exit_entropy)
)
if not os.path.exists(os.path.dirname(save_fname)):
os.makedirs(os.path.dirname(save_fname))
print_result = get_wanted_result(result)
np.save(save_fname, np.array([exit_layer_counter, eval_time, actual_cost / full_cost, print_result]))
logger.info("Entropy={}\tResult={:.2f}".format(args.early_exit_entropy, 100 * print_result))
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset, and the others will use the cache
processor = processors[task]()
output_mode = output_modes[task]
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train",
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
str(task),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta"]:
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
examples = (
processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
)
features = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
if features[0].token_type_ids is None:
# For RoBERTa (a potential bug!)
all_token_type_ids = torch.tensor([[0] * args.max_seq_length for f in features], dtype=torch.long)
else:
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name.",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--plot_data_dir",
default="./plotting/",
type=str,
required=False,
help="The directory to store data for plotting figures.",
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from huggingface.co",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step."
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument("--eval_each_highway", action="store_true", help="Set this flag to evaluate each highway.")
parser.add_argument(
"--eval_after_first_stage",
action="store_true",
help="Set this flag to evaluate after training only bert (not highway).",
)
parser.add_argument("--eval_highway", action="store_true", help="Set this flag if it's evaluating highway models")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--early_exit_entropy", default=-1, type=float, help="Entropy threshold for early exit.")
parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.model_type == "bert":
model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy)
model.bert.init_highway_pooler()
elif args.model_type == "roberta":
model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy)
model.roberta.init_highway_pooler()
else:
raise NotImplementedError()
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
if args.eval_after_first_stage:
result = evaluate(args, model, tokenizer, prefix="")
print_result = get_wanted_result(result)
train(args, train_dataset, model, tokenizer, train_highway=True)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
if args.model_type == "bert":
model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy)
elif args.model_type == "roberta":
model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy)
else:
raise NotImplementedError()
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix, eval_highway=args.eval_highway)
print_result = get_wanted_result(result)
logger.info("Result: {}".format(print_result))
if args.eval_each_highway:
last_layer_results = print_result
each_layer_results = []
for i in range(model.num_layers):
logger.info("\n")
_result = evaluate(
args, model, tokenizer, prefix=prefix, output_layer=i, eval_highway=args.eval_highway
)
if i + 1 < model.num_layers:
each_layer_results.append(get_wanted_result(_result))
each_layer_results.append(last_layer_results)
save_fname = args.plot_data_dir + "/" + args.model_name_or_path[2:] + "/each_layer.npy"
if not os.path.exists(os.path.dirname(save_fname)):
os.makedirs(os.path.dirname(save_fname))
np.save(save_fname, np.array(each_layer_results))
info_str = "Score of each layer:"
for i in range(model.num_layers):
info_str += " {:.2f}".format(100 * each_layer_results[i])
logger.info(info_str)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
main()
| 31,693 | 42.297814 | 150 | py |
robust-transformers | robust-transformers-main/examples/research_projects/deebert/src/modeling_highway_bert.py | import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
"""Calculate entropy of a pre-softmax logit Tensor"""
exp_x = torch.exp(x)
A = torch.sum(exp_x, dim=1) # sum of exp(x_i)
B = torch.sum(x * exp_x, dim=1) # sum of x_i * exp(x_i)
return torch.log(A) - B / A
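# Illustrative sanity check (hypothetical helper, added for exposition): the formula above
# equals the Shannon entropy of softmax(x), since H = log(sum_j exp(x_j)) - sum_i p_i * x_i
# with p_i = exp(x_i) / sum_j exp(x_j).
def _entropy_sanity_check():
    logits = torch.tensor([[1.0, 1.0, 1.0], [10.0, 0.0, 0.0]])
    probs = torch.softmax(logits, dim=1)
    reference = -(probs * torch.log(probs)).sum(dim=1)
    assert torch.allclose(entropy(logits), reference, atol=1e-5)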
class DeeBertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
def set_early_exit_entropy(self, x):
if (type(x) is float) or (type(x) is int):
for i in range(len(self.early_exit_entropy)):
self.early_exit_entropy[i] = x
else:
self.early_exit_entropy = x
def init_highway_pooler(self, pooler):
loaded_model = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
all_hidden_states = ()
all_attentions = ()
all_highway_exits = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
)
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
current_outputs = (hidden_states,)
if self.output_hidden_states:
current_outputs = current_outputs + (all_hidden_states,)
if self.output_attentions:
current_outputs = current_outputs + (all_attentions,)
highway_exit = self.highway[i](current_outputs)
# logits, pooled_output
if not self.training:
highway_logits = highway_exit[0]
highway_entropy = entropy(highway_logits)
highway_exit = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
all_highway_exits = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(new_output, i + 1)
else:
all_highway_exits = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
outputs = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). ",
BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = DeeBertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def init_highway_pooler(self):
self.encoder.init_highway_pooler(self.pooler)
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during pre-training.
This output is usually *not* a good summary
                of the semantic content of the input; you're often better off averaging or pooling
the sequence of hidden-states for the whole input sequence.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
            highway_exits (:obj:`tuple(tuple(torch.Tensor))`):
                Tuple of each early exit's results (total length: number of layers)
                Each tuple is again a tuple of length 2 - the first entry is logits and the second entry is hidden states.
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
encoder_extended_attention_mask = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
def __init__(self, message, exit_layer):
self.message = message
self.exit_layer = exit_layer # start from 1!
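# Minimal sketch of the early-exit control flow (hypothetical helper, for illustration only):
# the encoder raises HighwayException carrying the partial outputs, and the caller catches it
# to recover the highway logits together with the 1-based layer index where inference stopped.
def _early_exit_flow_sketch(highway_logits, exit_layer):
    try:
        raise HighwayException((highway_logits,), exit_layer)
    except HighwayException as e:
        return e.message[0], e.exit_layer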
class BertHighway(nn.Module):
"""A module to provide a shortcut
from (the output of one non-final BertLayer in BertEncoder) to (cross-entropy computation in BertForSequenceClassification)
"""
def __init__(self, config):
super().__init__()
self.pooler = BertPooler(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, encoder_outputs):
# Pooler
pooler_input = encoder_outputs[0]
pooler_output = self.pooler(pooler_input)
# "return" pooler_output
# BertModel
bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
pooled_output = bmodel_output[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
return logits, pooled_output
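# Minimal usage sketch (hypothetical helper, for illustration only), assuming a small BertConfig:
# a highway head pools the [CLS] hidden state of an intermediate layer, applies dropout and a
# linear classifier, mirroring what the final classification head does on the last layer.
def _bert_highway_sketch():
    from transformers import BertConfig

    config = BertConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64, num_labels=3)
    highway = BertHighway(config)
    hidden_states = torch.randn(4, 7, 32)  # (batch, seq_len, hidden)
    logits, pooled_output = highway((hidden_states,))
    assert logits.shape == (4, 3) and pooled_output.shape == (4, 32)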
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """,
BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.num_layers = config.num_hidden_layers
self.bert = DeeBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_layer=-1,
train_highway=False,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
            highway_exits (:obj:`tuple(tuple(torch.Tensor))`):
                Tuple of each early exit's results (total length: number of layers)
                Each tuple is again a tuple of length 2 - the first entry is logits and the second entry is hidden states.
"""
exit_layer = self.num_layers
try:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
outputs = e.message
exit_layer = e.exit_layer
logits = outputs[0]
if not self.training:
original_entropy = entropy(logits)
highway_entropy = []
highway_logits_all = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
# work with highway exits
highway_losses = []
for highway_exit in outputs[-1]:
highway_logits = highway_exit[0]
if not self.training:
highway_logits_all.append(highway_logits)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
highway_losses.append(highway_loss)
if train_highway:
outputs = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
outputs = (loss,) + outputs
if not self.training:
outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
outputs = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 17,668 | 43.506297 | 172 | py |
robust-transformers | robust-transformers-main/examples/research_projects/deebert/src/modeling_highway_roberta.py | from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
config_class = RobertaConfig
base_model_prefix = "roberta"
def __init__(self, config):
super().__init__(config)
self.embeddings = RobertaEmbeddings(config)
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """,
ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
config_class = RobertaConfig
base_model_prefix = "roberta"
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.num_layers = config.num_hidden_layers
self.roberta = DeeRobertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_layer=-1,
train_highway=False,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
            highway_exits (:obj:`tuple(tuple(torch.Tensor))`):
                Tuple of each early exit's results (total length: number of layers)
                Each tuple is again a tuple of length 2 - the first entry is logits and the second entry is hidden states.
"""
exit_layer = self.num_layers
try:
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
outputs = e.message
exit_layer = e.exit_layer
logits = outputs[0]
if not self.training:
original_entropy = entropy(logits)
highway_entropy = []
highway_logits_all = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
# work with highway exits
highway_losses = []
for highway_exit in outputs[-1]:
highway_logits = highway_exit[0]
if not self.training:
highway_logits_all.append(highway_logits)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
highway_losses.append(highway_loss)
if train_highway:
outputs = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
outputs = (loss,) + outputs
if not self.training:
outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
outputs = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 6,791 | 42.261146 | 172 | py |
robust-transformers | robust-transformers-main/examples/research_projects/lxmert/modeling_frcnn.py | """
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
Adapted From Facebook Inc, Detectron2 && Huggingface Co.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import itertools
import math
import os
from abc import ABCMeta, abstractmethod
from collections import OrderedDict, namedtuple
from typing import Dict, List, Tuple
import numpy as np
import torch
from torch import nn
from torch.nn.modules.batchnorm import BatchNorm2d
from torchvision.ops import RoIPool
from torchvision.ops.boxes import batched_nms, nms
from utils import WEIGHTS_NAME, Config, cached_path, hf_bucket_url, is_remote_url, load_checkpoint
# other:
def norm_box(boxes, raw_sizes):
if not isinstance(boxes, torch.Tensor):
normalized_boxes = boxes.copy()
else:
normalized_boxes = boxes.clone()
normalized_boxes[:, :, (0, 2)] /= raw_sizes[:, 1]
normalized_boxes[:, :, (1, 3)] /= raw_sizes[:, 0]
return normalized_boxes
def pad_list_tensors(
list_tensors,
preds_per_image,
max_detections=None,
return_tensors=None,
padding=None,
pad_value=0,
location=None,
):
"""
location will always be cpu for np tensors
"""
if location is None:
location = "cpu"
assert return_tensors in {"pt", "np", None}
assert padding in {"max_detections", "max_batch", None}
new = []
if padding is None:
if return_tensors is None:
return list_tensors
elif return_tensors == "pt":
if not isinstance(list_tensors, torch.Tensor):
return torch.stack(list_tensors).to(location)
else:
return list_tensors.to(location)
else:
if not isinstance(list_tensors, list):
return np.array(list_tensors.to(location))
else:
return list_tensors.to(location)
if padding == "max_detections":
assert max_detections is not None, "specify max number of detections per batch"
elif padding == "max_batch":
max_detections = max(preds_per_image)
for i in range(len(list_tensors)):
too_small = False
tensor_i = list_tensors.pop(0)
if tensor_i.ndim < 2:
too_small = True
tensor_i = tensor_i.unsqueeze(-1)
assert isinstance(tensor_i, torch.Tensor)
tensor_i = nn.functional.pad(
input=tensor_i,
pad=(0, 0, 0, max_detections - preds_per_image[i]),
mode="constant",
value=pad_value,
)
if too_small:
tensor_i = tensor_i.squeeze(-1)
if return_tensors is None:
if location == "cpu":
tensor_i = tensor_i.cpu()
tensor_i = tensor_i.tolist()
if return_tensors == "np":
if location == "cpu":
tensor_i = tensor_i.cpu()
tensor_i = tensor_i.numpy()
else:
if location == "cpu":
tensor_i = tensor_i.cpu()
new.append(tensor_i)
if return_tensors == "np":
return np.stack(new, axis=0)
elif return_tensors == "pt" and not isinstance(new, torch.Tensor):
return torch.stack(new, dim=0)
else:
return list_tensors
def do_nms(boxes, scores, image_shape, score_thresh, nms_thresh, mind, maxd):
scores = scores[:, :-1]
num_bbox_reg_classes = boxes.shape[1] // 4
# Convert to Boxes to use the `clip` function ...
boxes = boxes.reshape(-1, 4)
_clip_box(boxes, image_shape)
boxes = boxes.view(-1, num_bbox_reg_classes, 4) # R x C x 4
# Select max scores
max_scores, max_classes = scores.max(1) # R x C --> R
num_objs = boxes.size(0)
boxes = boxes.view(-1, 4)
idxs = torch.arange(num_objs).to(boxes.device) * num_bbox_reg_classes + max_classes
max_boxes = boxes[idxs] # Select max boxes according to the max scores.
# Apply NMS
keep = nms(max_boxes, max_scores, nms_thresh)
keep = keep[:maxd]
if keep.shape[-1] >= mind and keep.shape[-1] <= maxd:
max_boxes, max_scores = max_boxes[keep], max_scores[keep]
classes = max_classes[keep]
return max_boxes, max_scores, classes, keep
else:
return None
# Helper Functions
def _clip_box(tensor, box_size: Tuple[int, int]):
assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
h, w = box_size
tensor[:, 0].clamp_(min=0, max=w)
tensor[:, 1].clamp_(min=0, max=h)
tensor[:, 2].clamp_(min=0, max=w)
tensor[:, 3].clamp_(min=0, max=h)
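# Illustrative check of the in-place clipping above (hypothetical helper, for exposition):
# box_size is (height, width), and box coordinates are clamped into the image in place.
def _clip_box_example():
    boxes = torch.tensor([[-5.0, 10.0, 120.0, 90.0]])
    _clip_box(boxes, (100, 100))
    assert boxes.tolist() == [[0.0, 10.0, 100.0, 90.0]]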
def _nonempty_boxes(box, threshold: float = 0.0) -> torch.Tensor:
widths = box[:, 2] - box[:, 0]
heights = box[:, 3] - box[:, 1]
keep = (widths > threshold) & (heights > threshold)
return keep
def get_norm(norm, out_channels):
if isinstance(norm, str):
if len(norm) == 0:
return None
norm = {
"BN": BatchNorm2d,
"GN": lambda channels: nn.GroupNorm(32, channels),
"nnSyncBN": nn.SyncBatchNorm, # keep for debugging
"": lambda x: x,
}[norm]
return norm(out_channels)
def _create_grid_offsets(size: List[int], stride: int, offset: float, device):
grid_height, grid_width = size
shifts_x = torch.arange(
offset * stride,
grid_width * stride,
step=stride,
dtype=torch.float32,
device=device,
)
shifts_y = torch.arange(
offset * stride,
grid_height * stride,
step=stride,
dtype=torch.float32,
device=device,
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
return shift_x, shift_y
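# Worked example (hypothetical helper, for exposition): for a 2x3 feature map with stride 16
# and offset 0.5, the helper above returns the x/y centers of every cell in image coordinates,
# enumerated row-major.
def _grid_offsets_example():
    shift_x, shift_y = _create_grid_offsets([2, 3], stride=16, offset=0.5, device="cpu")
    assert shift_x.tolist() == [8.0, 24.0, 40.0, 8.0, 24.0, 40.0]
    assert shift_y.tolist() == [8.0, 8.0, 8.0, 24.0, 24.0, 24.0]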
def build_backbone(cfg):
input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
norm = cfg.RESNETS.NORM
stem = BasicStem(
in_channels=input_shape.channels,
out_channels=cfg.RESNETS.STEM_OUT_CHANNELS,
norm=norm,
caffe_maxpool=cfg.MODEL.MAX_POOL,
)
freeze_at = cfg.BACKBONE.FREEZE_AT
if freeze_at >= 1:
for p in stem.parameters():
p.requires_grad = False
out_features = cfg.RESNETS.OUT_FEATURES
depth = cfg.RESNETS.DEPTH
num_groups = cfg.RESNETS.NUM_GROUPS
width_per_group = cfg.RESNETS.WIDTH_PER_GROUP
bottleneck_channels = num_groups * width_per_group
in_channels = cfg.RESNETS.STEM_OUT_CHANNELS
out_channels = cfg.RESNETS.RES2_OUT_CHANNELS
stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1
res5_dilation = cfg.RESNETS.RES5_DILATION
assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)
num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
stages = []
out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features]
max_stage_idx = max(out_stage_idx)
for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
dilation = res5_dilation if stage_idx == 5 else 1
first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
stage_kargs = {
"num_blocks": num_blocks_per_stage[idx],
"first_stride": first_stride,
"in_channels": in_channels,
"bottleneck_channels": bottleneck_channels,
"out_channels": out_channels,
"num_groups": num_groups,
"norm": norm,
"stride_in_1x1": stride_in_1x1,
"dilation": dilation,
}
stage_kargs["block_class"] = BottleneckBlock
blocks = ResNet.make_stage(**stage_kargs)
in_channels = out_channels
out_channels *= 2
bottleneck_channels *= 2
if freeze_at >= stage_idx:
for block in blocks:
block.freeze()
stages.append(blocks)
return ResNet(stem, stages, out_features=out_features)
def find_top_rpn_proposals(
proposals,
pred_objectness_logits,
images,
image_sizes,
nms_thresh,
pre_nms_topk,
post_nms_topk,
min_box_side_len,
training,
):
"""Args:
proposals (list[Tensor]): (L, N, Hi*Wi*A, 4).
pred_objectness_logits: tensors of length L.
nms_thresh (float): IoU threshold to use for NMS
pre_nms_topk (int): before nms
post_nms_topk (int): after nms
min_box_side_len (float): minimum proposal box side
training (bool): True if proposals are to be used in training,
Returns:
results (List[Dict]): stores post_nms_topk object proposals for image i.
"""
num_images = len(images)
device = proposals[0].device
# 1. Select top-k anchor for every level and every image
topk_scores = [] # #lvl Tensor, each of shape N x topk
topk_proposals = []
level_ids = [] # #lvl Tensor, each of shape (topk,)
batch_idx = torch.arange(num_images, device=device)
for level_id, proposals_i, logits_i in zip(itertools.count(), proposals, pred_objectness_logits):
Hi_Wi_A = logits_i.shape[1]
num_proposals_i = min(pre_nms_topk, Hi_Wi_A)
# sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812)
# topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
logits_i, idx = logits_i.sort(descending=True, dim=1)
topk_scores_i = logits_i[batch_idx, :num_proposals_i]
topk_idx = idx[batch_idx, :num_proposals_i]
# each is N x topk
topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4
topk_proposals.append(topk_proposals_i)
topk_scores.append(topk_scores_i)
level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))
# 2. Concat all levels together
topk_scores = torch.cat(topk_scores, dim=1)
topk_proposals = torch.cat(topk_proposals, dim=1)
level_ids = torch.cat(level_ids, dim=0)
# if I change to batched_nms, I wonder if this will make a difference
# 3. For each image, run a per-level NMS, and choose topk results.
results = []
for n, image_size in enumerate(image_sizes):
boxes = topk_proposals[n]
scores_per_img = topk_scores[n]
# I will have to take a look at the boxes clip method
_clip_box(boxes, image_size)
# filter empty boxes
keep = _nonempty_boxes(boxes, threshold=min_box_side_len)
lvl = level_ids
if keep.sum().item() != len(boxes):
boxes, scores_per_img, lvl = (
boxes[keep],
scores_per_img[keep],
level_ids[keep],
)
keep = batched_nms(boxes, scores_per_img, lvl, nms_thresh)
keep = keep[:post_nms_topk]
res = (boxes[keep], scores_per_img[keep])
results.append(res)
# I wonder if it would be possible for me to pad all these things.
return results
def subsample_labels(labels, num_samples, positive_fraction, bg_label):
"""
Returns:
pos_idx, neg_idx (Tensor):
1D vector of indices. The total length of both is `num_samples` or fewer.
"""
positive = torch.nonzero((labels != -1) & (labels != bg_label)).squeeze(1)
negative = torch.nonzero(labels == bg_label).squeeze(1)
num_pos = int(num_samples * positive_fraction)
# protect against not enough positive examples
num_pos = min(positive.numel(), num_pos)
num_neg = num_samples - num_pos
# protect against not enough negative examples
num_neg = min(negative.numel(), num_neg)
# randomly select positive and negative examples
perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]
pos_idx = positive[perm1]
neg_idx = negative[perm2]
return pos_idx, neg_idx
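# Minimal usage sketch (hypothetical helper, for exposition): sample at most 4 proposals with a
# 50% positive fraction from a label vector where 1 = foreground, 0 = background, -1 = ignore.
def _subsample_labels_example():
    labels = torch.tensor([1, 1, 1, 0, 0, 0, 0, -1])
    pos_idx, neg_idx = subsample_labels(labels, num_samples=4, positive_fraction=0.5, bg_label=0)
    assert len(pos_idx) == 2 and len(neg_idx) == 2
    assert (labels[pos_idx] == 1).all() and (labels[neg_idx] == 0).all()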
def add_ground_truth_to_proposals(gt_boxes, proposals):
raise NotImplementedError()
def add_ground_truth_to_proposals_single_image(gt_boxes, proposals):
raise NotImplementedError()
def _fmt_box_list(box_tensor, batch_index: int):
repeated_index = torch.full(
(len(box_tensor), 1),
batch_index,
dtype=box_tensor.dtype,
device=box_tensor.device,
)
return torch.cat((repeated_index, box_tensor), dim=1)
def convert_boxes_to_pooler_format(box_lists: List[torch.Tensor]):
pooler_fmt_boxes = torch.cat(
[_fmt_box_list(box_list, i) for i, box_list in enumerate(box_lists)],
dim=0,
)
return pooler_fmt_boxes
def assign_boxes_to_levels(
box_lists: List[torch.Tensor],
min_level: int,
max_level: int,
canonical_box_size: int,
canonical_level: int,
):
box_sizes = torch.sqrt(torch.cat([boxes.area() for boxes in box_lists]))
# Eqn.(1) in FPN paper
level_assignments = torch.floor(canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8))
# clamp level to (min, max), in case the box size is too large or too small
# for the available feature maps
level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level)
return level_assignments.to(torch.int64) - min_level
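# Worked example of the Eqn.(1) heuristic above (standalone, for exposition): a box whose
# sqrt(area) equals the canonical size (224 px) lands on the canonical level (4); doubling the
# side moves it one level up, halving it moves it one level down.
def _fpn_level_example():
    box_sizes = torch.tensor([224.0, 448.0, 112.0])  # sqrt(area) per box
    levels = torch.floor(4 + torch.log2(box_sizes / 224 + 1e-8))
    assert levels.tolist() == [4.0, 5.0, 3.0]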
# Helper Classes
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])):
def __new__(cls, *, channels=None, height=None, width=None, stride=None):
return super().__new__(cls, channels, height, width, stride)
class Box2BoxTransform(object):
"""
This R-CNN transformation scales the box's width and height
by exp(dw), exp(dh) and shifts a box's center by the offset
(dx * width, dy * height).
"""
def __init__(self, weights: Tuple[float, float, float, float], scale_clamp: float = None):
"""
Args:
weights (4-element tuple): Scaling factors that are applied to the
(dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set
such that the deltas have unit variance; now they are treated as
hyperparameters of the system.
scale_clamp (float): When predicting deltas, the predicted box scaling
factors (dw and dh) are clamped such that they are <= scale_clamp.
"""
self.weights = weights
if scale_clamp is not None:
self.scale_clamp = scale_clamp
else:
"""
Value for clamping large dw and dh predictions.
The heuristic is that we clamp such that dw and dh are no larger
than what would transform a 16px box into a 1000px box
(based on a small anchor, 16px, and a typical image size, 1000px).
"""
self.scale_clamp = math.log(1000.0 / 16)
def get_deltas(self, src_boxes, target_boxes):
"""
Get box regression transformation deltas (dx, dy, dw, dh) that can be used
to transform the `src_boxes` into the `target_boxes`. That is, the relation
``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
any delta is too large and is clamped).
Args:
src_boxes (Tensor): source boxes, e.g., object proposals
target_boxes (Tensor): target of the transformation, e.g., ground-truth
boxes.
"""
assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
src_widths = src_boxes[:, 2] - src_boxes[:, 0]
src_heights = src_boxes[:, 3] - src_boxes[:, 1]
src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths
src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights
target_widths = target_boxes[:, 2] - target_boxes[:, 0]
target_heights = target_boxes[:, 3] - target_boxes[:, 1]
target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths
target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights
wx, wy, ww, wh = self.weights
dx = wx * (target_ctr_x - src_ctr_x) / src_widths
dy = wy * (target_ctr_y - src_ctr_y) / src_heights
dw = ww * torch.log(target_widths / src_widths)
dh = wh * torch.log(target_heights / src_heights)
deltas = torch.stack((dx, dy, dw, dh), dim=1)
assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!"
return deltas
def apply_deltas(self, deltas, boxes):
"""
Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.
Args:
deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
deltas[i] represents k potentially different class-specific
box transformations for the single box boxes[i].
boxes (Tensor): boxes to transform, of shape (N, 4)
"""
boxes = boxes.to(deltas.dtype)
widths = boxes[:, 2] - boxes[:, 0]
heights = boxes[:, 3] - boxes[:, 1]
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = self.weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=self.scale_clamp)
dh = torch.clamp(dh, max=self.scale_clamp)
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
pred_boxes = torch.zeros_like(deltas)
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2
return pred_boxes
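# Round-trip sanity sketch (hypothetical helper, for exposition): with unit weights, applying
# the deltas computed by `get_deltas` back onto the source boxes recovers the target boxes.
def _box2box_roundtrip_example():
    transform = Box2BoxTransform(weights=(1.0, 1.0, 1.0, 1.0))
    src = torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 25.0, 15.0]])
    tgt = torch.tensor([[2.0, 2.0, 12.0, 14.0], [0.0, 0.0, 30.0, 20.0]])
    deltas = transform.get_deltas(src, tgt)
    assert torch.allclose(transform.apply_deltas(deltas, src), tgt, atol=1e-4)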
class Matcher(object):
"""
This class assigns to each predicted "element" (e.g., a box) a ground-truth
element. Each predicted element will have exactly zero or one matches; each
ground-truth element may be matched to zero or more predicted elements.
The matching is determined by the MxN match_quality_matrix, that characterizes
how well each (ground-truth, prediction)-pair match each other. For example,
if the elements are boxes, this matrix may contain box intersection-over-union
overlap values.
The matcher returns (a) a vector of length N containing the index of the
ground-truth element m in [0, M) that matches to prediction n in [0, N).
(b) a vector of length N containing the labels for each prediction.
"""
def __init__(
self,
thresholds: List[float],
labels: List[int],
allow_low_quality_matches: bool = False,
):
"""
Args:
thresholds (list): a list of thresholds used to stratify predictions
into levels.
labels (list): a list of values to label predictions belonging at
each level. A label can be one of {-1, 0, 1} signifying
{ignore, negative class, positive class}, respectively.
            allow_low_quality_matches (bool): if True, produce additional matches for predictions with maximum match quality lower than high_threshold.
                For example, with thresholds = [0.3, 0.5] and labels = [0, -1, 1]: all predictions with iou < 0.3 will be marked with 0 and
                thus will be considered as false positives while training. All predictions with 0.3 <= iou < 0.5 will be marked with -1 and
                thus will be ignored. All predictions with 0.5 <= iou will be marked with 1 and thus will be considered as true positives.
"""
thresholds = thresholds[:]
assert thresholds[0] > 0
thresholds.insert(0, -float("inf"))
thresholds.append(float("inf"))
assert all([low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])])
assert all([label_i in [-1, 0, 1] for label_i in labels])
assert len(labels) == len(thresholds) - 1
self.thresholds = thresholds
self.labels = labels
self.allow_low_quality_matches = allow_low_quality_matches
def __call__(self, match_quality_matrix):
"""
Args:
match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted
                elements. All elements must be >= 0 (due to the use of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`).
Returns:
matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M)
match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates true or false positive or ignored
"""
assert match_quality_matrix.dim() == 2
if match_quality_matrix.numel() == 0:
default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64)
# When no gt boxes exist, we define IOU = 0 and therefore set labels
# to `self.labels[0]`, which usually defaults to background class 0
# To choose to ignore instead,
# can make labels=[-1,0,-1,1] + set appropriate thresholds
default_match_labels = match_quality_matrix.new_full(
(match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8
)
return default_matches, default_match_labels
assert torch.all(match_quality_matrix >= 0)
# match_quality_matrix is M (gt) x N (predicted)
# Max over gt elements (dim 0) to find best gt candidate for each prediction
matched_vals, matches = match_quality_matrix.max(dim=0)
match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)
for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):
low_high = (matched_vals >= low) & (matched_vals < high)
match_labels[low_high] = l
if self.allow_low_quality_matches:
self.set_low_quality_matches_(match_labels, match_quality_matrix)
return matches, match_labels
def set_low_quality_matches_(self, match_labels, match_quality_matrix):
"""
Produce additional matches for predictions that have only low-quality matches.
Specifically, for each ground-truth G find the set of predictions that have
maximum overlap with it (including ties); for each prediction in that set, if
it is unmatched, then match it to the ground-truth G.
This function implements the RPN assignment case (i)
in Sec. 3.1.2 of Faster R-CNN.
"""
# For each gt, find the prediction with which it has highest quality
highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
# Find the highest quality match available, even if it is low, including ties.
# Note that the matches qualities must be positive due to the use of
# `torch.nonzero`.
of_quality_inds = match_quality_matrix == highest_quality_foreach_gt[:, None]
if of_quality_inds.dim() == 0:
(_, pred_inds_with_highest_quality) = of_quality_inds.unsqueeze(0).nonzero().unbind(1)
else:
(_, pred_inds_with_highest_quality) = of_quality_inds.nonzero().unbind(1)
match_labels[pred_inds_with_highest_quality] = 1
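# Minimal usage sketch (hypothetical helper, for exposition): two ground-truth boxes, three
# predictions, IoU-style qualities, with thresholds [0.3, 0.5] and labels [0, -1, 1] as in the
# constructor docstring.
def _matcher_example():
    matcher = Matcher(thresholds=[0.3, 0.5], labels=[0, -1, 1], allow_low_quality_matches=False)
    quality = torch.tensor([[0.9, 0.4, 0.1], [0.2, 0.35, 0.05]])  # rows: gt, columns: predictions
    matches, match_labels = matcher(quality)
    assert matches.tolist() == [0, 0, 0]
    assert match_labels.tolist() == [1, -1, 0]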
class RPNOutputs(object):
def __init__(
self,
box2box_transform,
anchor_matcher,
batch_size_per_image,
positive_fraction,
images,
pred_objectness_logits,
pred_anchor_deltas,
anchors,
boundary_threshold=0,
gt_boxes=None,
smooth_l1_beta=0.0,
):
"""
Args:
box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for anchor-proposal transformations.
anchor_matcher (Matcher): :class:`Matcher` instance for matching anchors to ground-truth boxes; used to determine training labels.
batch_size_per_image (int): number of proposals to sample when training
positive_fraction (float): target fraction of sampled proposals that should be positive
images (ImageList): :class:`ImageList` instance representing N input images
            pred_objectness_logits (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, A, Hi, Wi)
pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, A*4, Hi, Wi)
anchors (list[torch.Tensor]): nested list of boxes. anchors[i][j] at (n, l) stores anchor array for feature map l
boundary_threshold (int): if >= 0, then anchors that extend beyond the image boundary by more than boundary_thresh are not used in training.
gt_boxes (list[Boxes], optional): A list of N elements.
            smooth_l1_beta (float): The transition point between L1 and L2 loss. When set to 0, the loss becomes L1. When +inf, it is ignored
"""
self.box2box_transform = box2box_transform
self.anchor_matcher = anchor_matcher
self.batch_size_per_image = batch_size_per_image
self.positive_fraction = positive_fraction
self.pred_objectness_logits = pred_objectness_logits
self.pred_anchor_deltas = pred_anchor_deltas
self.anchors = anchors
self.gt_boxes = gt_boxes
self.num_feature_maps = len(pred_objectness_logits)
self.num_images = len(images)
self.boundary_threshold = boundary_threshold
self.smooth_l1_beta = smooth_l1_beta
def _get_ground_truth(self):
raise NotImplementedError()
def predict_proposals(self):
# pred_anchor_deltas: (L, N, ? Hi, Wi)
# anchors:(N, L, -1, B)
# here we loop over specific feature map, NOT images
proposals = []
anchors = self.anchors.transpose(0, 1)
for anchors_i, pred_anchor_deltas_i in zip(anchors, self.pred_anchor_deltas):
B = anchors_i.size(-1)
N, _, Hi, Wi = pred_anchor_deltas_i.shape
anchors_i = anchors_i.flatten(start_dim=0, end_dim=1)
pred_anchor_deltas_i = pred_anchor_deltas_i.view(N, -1, B, Hi, Wi).permute(0, 3, 4, 1, 2).reshape(-1, B)
proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i)
# Append feature map proposals with shape (N, Hi*Wi*A, B)
proposals.append(proposals_i.view(N, -1, B))
proposals = torch.stack(proposals)
return proposals
def predict_objectness_logits(self):
"""
Returns:
pred_objectness_logits (list[Tensor]) -> (N, Hi*Wi*A).
"""
pred_objectness_logits = [
# Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)
score.permute(0, 2, 3, 1).reshape(self.num_images, -1)
for score in self.pred_objectness_logits
]
return pred_objectness_logits
# Main Classes
class Conv2d(nn.Conv2d):
def __init__(self, *args, **kwargs):
norm = kwargs.pop("norm", None)
activation = kwargs.pop("activation", None)
super().__init__(*args, **kwargs)
self.norm = norm
self.activation = activation
def forward(self, x):
if x.numel() == 0 and self.training:
assert not isinstance(self.norm, nn.SyncBatchNorm)
if x.numel() == 0:
assert not isinstance(self.norm, nn.GroupNorm)
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // s + 1
for i, p, di, k, s in zip(
x.shape[-2:],
self.padding,
self.dilation,
self.kernel_size,
self.stride,
)
]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
empty = _NewEmptyTensorOp.apply(x, output_shape)
if self.training:
_dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
return empty + _dummy
else:
return empty
x = super().forward(x)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
class LastLevelMaxPool(nn.Module):
"""
This module is used in the original FPN to generate a downsampled P6 feature from P5.
"""
def __init__(self):
super().__init__()
self.num_levels = 1
self.in_feature = "p5"
def forward(self, x):
return [nn.functional.max_pool2d(x, kernel_size=1, stride=2, padding=0)]
class LastLevelP6P7(nn.Module):
"""
This module is used in RetinaNet to generate extra layers, P6 and P7 from C5 feature.
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.num_levels = 2
self.in_feature = "res5"
self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
def forward(self, c5):
p6 = self.p6(c5)
p7 = self.p7(nn.functional.relu(p6))
return [p6, p7]
class BasicStem(nn.Module):
def __init__(self, in_channels=3, out_channels=64, norm="BN", caffe_maxpool=False):
super().__init__()
self.conv1 = Conv2d(
in_channels,
out_channels,
kernel_size=7,
stride=2,
padding=3,
bias=False,
norm=get_norm(norm, out_channels),
)
self.caffe_maxpool = caffe_maxpool
# use pad 1 instead of pad zero
def forward(self, x):
x = self.conv1(x)
x = nn.functional.relu_(x)
if self.caffe_maxpool:
x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=0, ceil_mode=True)
else:
x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=1)
return x
@property
def out_channels(self):
return self.conv1.out_channels
@property
def stride(self):
return 4 # = stride 2 conv -> stride 2 max pool
class ResNetBlockBase(nn.Module):
def __init__(self, in_channels, out_channels, stride):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
def freeze(self):
for p in self.parameters():
p.requires_grad = False
return self
class BottleneckBlock(ResNetBlockBase):
def __init__(
self,
in_channels,
out_channels,
bottleneck_channels,
stride=1,
num_groups=1,
norm="BN",
stride_in_1x1=False,
dilation=1,
):
super().__init__(in_channels, out_channels, stride)
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
# The original MSRA ResNet models have stride in the first 1x1 conv
# The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
# stride in the 3x3 conv
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.conv1 = Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=stride_1x1,
bias=False,
norm=get_norm(norm, bottleneck_channels),
)
self.conv2 = Conv2d(
bottleneck_channels,
bottleneck_channels,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
bias=False,
groups=num_groups,
dilation=dilation,
norm=get_norm(norm, bottleneck_channels),
)
self.conv3 = Conv2d(
bottleneck_channels,
out_channels,
kernel_size=1,
bias=False,
norm=get_norm(norm, out_channels),
)
def forward(self, x):
out = self.conv1(x)
out = nn.functional.relu_(out)
out = self.conv2(out)
out = nn.functional.relu_(out)
out = self.conv3(out)
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
out += shortcut
out = nn.functional.relu_(out)
return out
class Backbone(nn.Module, metaclass=ABCMeta):
def __init__(self):
super().__init__()
@abstractmethod
def forward(self):
pass
@property
def size_divisibility(self):
"""
Some backbones require the input height and width to be divisible by a specific integer. This is
typically true for encoder / decoder type networks with lateral connection (e.g., FPN) for which feature maps need to match
dimension in the "bottom up" and "top down" paths. Set to 0 if no specific input size divisibility is required.
"""
return 0
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name],
stride=self._out_feature_strides[name],
)
for name in self._out_features
}
@property
def out_features(self):
"""deprecated"""
return self._out_features
@property
def out_feature_strides(self):
"""deprecated"""
return {f: self._out_feature_strides[f] for f in self._out_features}
@property
def out_feature_channels(self):
"""deprecated"""
return {f: self._out_feature_channels[f] for f in self._out_features}
class ResNet(Backbone):
def __init__(self, stem, stages, num_classes=None, out_features=None):
"""
Args:
stem (nn.Module): a stem module
stages (list[list[ResNetBlock]]): several (typically 4) stages, each contains multiple :class:`ResNetBlockBase`.
num_classes (None or int): if None, will not perform classification.
out_features (list[str]): name of the layers whose outputs should be returned in forward. Can be anything in:
"stem", "linear", or "res2" ... If None, will return the output of the last layer.
"""
super(ResNet, self).__init__()
self.stem = stem
self.num_classes = num_classes
current_stride = self.stem.stride
self._out_feature_strides = {"stem": current_stride}
self._out_feature_channels = {"stem": self.stem.out_channels}
self.stages_and_names = []
for i, blocks in enumerate(stages):
for block in blocks:
assert isinstance(block, ResNetBlockBase), block
curr_channels = block.out_channels
stage = nn.Sequential(*blocks)
name = "res" + str(i + 2)
self.add_module(name, stage)
self.stages_and_names.append((stage, name))
self._out_feature_strides[name] = current_stride = int(
current_stride * np.prod([k.stride for k in blocks])
)
self._out_feature_channels[name] = blocks[-1].out_channels
if num_classes is not None:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear(curr_channels, num_classes)
# Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
# "The 1000-way fully-connected layer is initialized by
# drawing weights from a zero-mean Gaussian with std of 0.01."
            nn.init.normal_(self.linear.weight, std=0.01)
name = "linear"
if out_features is None:
out_features = [name]
self._out_features = out_features
assert len(self._out_features)
children = [x[0] for x in self.named_children()]
for out_feature in self._out_features:
assert out_feature in children, "Available children: {}".format(", ".join(children))
def forward(self, x):
outputs = {}
x = self.stem(x)
if "stem" in self._out_features:
outputs["stem"] = x
for stage, name in self.stages_and_names:
x = stage(x)
if name in self._out_features:
outputs[name] = x
if self.num_classes is not None:
x = self.avgpool(x)
x = self.linear(x)
if "linear" in self._out_features:
outputs["linear"] = x
return outputs
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name],
stride=self._out_feature_strides[name],
)
for name in self._out_features
}
@staticmethod
def make_stage(
block_class,
num_blocks,
first_stride=None,
*,
in_channels,
out_channels,
**kwargs,
):
"""
Usually, layers that produce the same feature map spatial size
are defined as one "stage".
Under such definition, stride_per_block[1:] should all be 1.
"""
if first_stride is not None:
assert "stride" not in kwargs and "stride_per_block" not in kwargs
kwargs["stride_per_block"] = [first_stride] + [1] * (num_blocks - 1)
blocks = []
for i in range(num_blocks):
curr_kwargs = {}
for k, v in kwargs.items():
if k.endswith("_per_block"):
assert len(v) == num_blocks, (
f"Argument '{k}' of make_stage should have the " f"same length as num_blocks={num_blocks}."
)
newk = k[: -len("_per_block")]
assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!"
curr_kwargs[newk] = v[i]
else:
curr_kwargs[k] = v
blocks.append(block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs))
in_channels = out_channels
return blocks
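# --- Illustrative sketch (not part of the original file) ---
# How make_stage expands "*_per_block" arguments into per-block kwargs. The channel sizes
# below mirror the res5 stage built later in this file (3 BottleneckBlocks, 1024 -> 2048),
# but they are illustrative assumptions, not values read from any shipped config.
def _example_make_res5_stage():
    blocks = ResNet.make_stage(
        BottleneckBlock,
        3,
        first_stride=2,  # expands to stride_per_block=[2, 1, 1]
        in_channels=1024,
        bottleneck_channels=512,
        out_channels=2048,
        num_groups=1,
        norm="BN",
        stride_in_1x1=True,
    )
    return nn.Sequential(*blocks)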
class ROIPooler(nn.Module):
"""
Region of interest feature map pooler that supports pooling from one or more
feature maps.
"""
def __init__(
self,
output_size,
scales,
sampling_ratio,
canonical_box_size=224,
canonical_level=4,
):
super().__init__()
# assumption that stride is a power of 2.
min_level = -math.log2(scales[0])
max_level = -math.log2(scales[-1])
        # sanity checks: the scales must form a contiguous pyramid of power-of-2 strides
assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level))
assert len(scales) == max_level - min_level + 1, "not pyramid"
assert 0 < min_level and min_level <= max_level
if isinstance(output_size, int):
output_size = (output_size, output_size)
assert len(output_size) == 2 and isinstance(output_size[0], int) and isinstance(output_size[1], int)
if len(scales) > 1:
assert min_level <= canonical_level and canonical_level <= max_level
assert canonical_box_size > 0
self.output_size = output_size
self.min_level = int(min_level)
self.max_level = int(max_level)
self.level_poolers = nn.ModuleList(RoIPool(output_size, spatial_scale=scale) for scale in scales)
self.canonical_level = canonical_level
self.canonical_box_size = canonical_box_size
def forward(self, feature_maps, boxes):
"""
Args:
            feature_maps (dict[str, torch.Tensor]): feature maps of shape (N, C, H, W), keyed by level
            boxes (list[torch.Tensor]): one (num_boxes, 4) tensor per image, in (x0, y0, x1, y1) format
        Returns:
            A tensor of shape (M, C, output_size, output_size), where M is the total number of boxes
"""
x = [v for v in feature_maps.values()]
num_level_assignments = len(self.level_poolers)
assert len(x) == num_level_assignments and len(boxes) == x[0].size(0)
pooler_fmt_boxes = convert_boxes_to_pooler_format(boxes)
if num_level_assignments == 1:
return self.level_poolers[0](x[0], pooler_fmt_boxes)
level_assignments = assign_boxes_to_levels(
boxes,
self.min_level,
self.max_level,
self.canonical_box_size,
self.canonical_level,
)
num_boxes = len(pooler_fmt_boxes)
num_channels = x[0].shape[1]
output_size = self.output_size[0]
dtype, device = x[0].dtype, x[0].device
output = torch.zeros(
(num_boxes, num_channels, output_size, output_size),
dtype=dtype,
device=device,
)
for level, (x_level, pooler) in enumerate(zip(x, self.level_poolers)):
inds = torch.nonzero(level_assignments == level).squeeze(1)
pooler_fmt_boxes_level = pooler_fmt_boxes[inds]
output[inds] = pooler(x_level, pooler_fmt_boxes_level)
return output
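# --- Illustrative sketch (not part of the original file) ---
# Single-level pooling as used with a C4 backbone: one feature map at stride 16 and one
# (num_boxes, 4) box tensor per image. All shapes and values below are made up.
def _example_roi_pooling():
    pooler = ROIPooler(output_size=7, scales=(1.0 / 16,), sampling_ratio=0)
    feature_maps = {"res4": torch.randn(1, 256, 38, 50)}  # (N, C, H, W)
    boxes = [torch.tensor([[0.0, 0.0, 64.0, 64.0], [32.0, 32.0, 128.0, 96.0]])]
    return pooler(feature_maps, boxes)  # -> (2, 256, 7, 7)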
class ROIOutputs(object):
def __init__(self, cfg, training=False):
self.smooth_l1_beta = cfg.ROI_BOX_HEAD.SMOOTH_L1_BETA
self.box2box_transform = Box2BoxTransform(weights=cfg.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
self.training = training
self.score_thresh = cfg.ROI_HEADS.SCORE_THRESH_TEST
self.min_detections = cfg.MIN_DETECTIONS
self.max_detections = cfg.MAX_DETECTIONS
nms_thresh = cfg.ROI_HEADS.NMS_THRESH_TEST
if not isinstance(nms_thresh, list):
nms_thresh = [nms_thresh]
self.nms_thresh = nms_thresh
def _predict_boxes(self, proposals, box_deltas, preds_per_image):
num_pred = box_deltas.size(0)
B = proposals[0].size(-1)
K = box_deltas.size(-1) // B
box_deltas = box_deltas.view(num_pred * K, B)
proposals = torch.cat(proposals, dim=0).unsqueeze(-2).expand(num_pred, K, B)
proposals = proposals.reshape(-1, B)
boxes = self.box2box_transform.apply_deltas(box_deltas, proposals)
return boxes.view(num_pred, K * B).split(preds_per_image, dim=0)
def _predict_objs(self, obj_logits, preds_per_image):
probs = nn.functional.softmax(obj_logits, dim=-1)
probs = probs.split(preds_per_image, dim=0)
return probs
def _predict_attrs(self, attr_logits, preds_per_image):
attr_logits = attr_logits[..., :-1].softmax(-1)
attr_probs, attrs = attr_logits.max(-1)
return attr_probs.split(preds_per_image, dim=0), attrs.split(preds_per_image, dim=0)
@torch.no_grad()
def inference(
self,
obj_logits,
attr_logits,
box_deltas,
pred_boxes,
features,
sizes,
scales=None,
):
        # the number of predictions per image is determined by the pred_boxes
preds_per_image = [p.size(0) for p in pred_boxes]
boxes_all = self._predict_boxes(pred_boxes, box_deltas, preds_per_image)
obj_scores_all = self._predict_objs(obj_logits, preds_per_image) # list of length N
attr_probs_all, attrs_all = self._predict_attrs(attr_logits, preds_per_image)
features = features.split(preds_per_image, dim=0)
        # run thresholding and NMS independently for each image in the batch
final_results = []
zipped = zip(boxes_all, obj_scores_all, attr_probs_all, attrs_all, sizes)
for i, (boxes, obj_scores, attr_probs, attrs, size) in enumerate(zipped):
for nms_t in self.nms_thresh:
outputs = do_nms(
boxes,
obj_scores,
size,
self.score_thresh,
nms_t,
self.min_detections,
self.max_detections,
)
if outputs is not None:
max_boxes, max_scores, classes, ids = outputs
break
if scales is not None:
scale_yx = scales[i]
max_boxes[:, 0::2] *= scale_yx[1]
max_boxes[:, 1::2] *= scale_yx[0]
final_results.append(
(
max_boxes,
classes,
max_scores,
attrs[ids],
attr_probs[ids],
features[i][ids],
)
)
boxes, classes, class_probs, attrs, attr_probs, roi_features = map(list, zip(*final_results))
return boxes, classes, class_probs, attrs, attr_probs, roi_features
def training(self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes):
pass
def __call__(
self,
obj_logits,
attr_logits,
box_deltas,
pred_boxes,
features,
sizes,
scales=None,
):
if self.training:
raise NotImplementedError()
return self.inference(
obj_logits,
attr_logits,
box_deltas,
pred_boxes,
features,
sizes,
scales=scales,
)
class Res5ROIHeads(nn.Module):
"""
ROIHeads perform all per-region computation in an R-CNN.
    It contains the logic for cropping the regions, extracting per-region features
    (by the res5 block in this case), and making per-region predictions.
"""
def __init__(self, cfg, input_shape):
super().__init__()
self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE
self.positive_sample_fraction = cfg.ROI_HEADS.POSITIVE_FRACTION
self.in_features = cfg.ROI_HEADS.IN_FEATURES
self.num_classes = cfg.ROI_HEADS.NUM_CLASSES
self.proposal_append_gt = cfg.ROI_HEADS.PROPOSAL_APPEND_GT
self.feature_strides = {k: v.stride for k, v in input_shape.items()}
self.feature_channels = {k: v.channels for k, v in input_shape.items()}
self.cls_agnostic_bbox_reg = cfg.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
self.stage_channel_factor = 2**3 # res5 is 8x res2
self.out_channels = cfg.RESNETS.RES2_OUT_CHANNELS * self.stage_channel_factor
# self.proposal_matcher = Matcher(
# cfg.ROI_HEADS.IOU_THRESHOLDS,
# cfg.ROI_HEADS.IOU_LABELS,
# allow_low_quality_matches=False,
# )
pooler_resolution = cfg.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = (1.0 / self.feature_strides[self.in_features[0]],)
sampling_ratio = cfg.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
res5_halve = cfg.ROI_BOX_HEAD.RES5HALVE
use_attr = cfg.ROI_BOX_HEAD.ATTR
num_attrs = cfg.ROI_BOX_HEAD.NUM_ATTRS
self.pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
)
self.res5 = self._build_res5_block(cfg)
if not res5_halve:
"""
Modifications for VG in RoI heads:
1. Change the stride of conv1 and shortcut in Res5.Block1 from 2 to 1
2. Modifying all conv2 with (padding: 1 --> 2) and (dilation: 1 --> 2)
"""
self.res5[0].conv1.stride = (1, 1)
self.res5[0].shortcut.stride = (1, 1)
for i in range(3):
self.res5[i].conv2.padding = (2, 2)
self.res5[i].conv2.dilation = (2, 2)
self.box_predictor = FastRCNNOutputLayers(
self.out_channels,
self.num_classes,
self.cls_agnostic_bbox_reg,
use_attr=use_attr,
num_attrs=num_attrs,
)
def _build_res5_block(self, cfg):
stage_channel_factor = self.stage_channel_factor # res5 is 8x res2
num_groups = cfg.RESNETS.NUM_GROUPS
width_per_group = cfg.RESNETS.WIDTH_PER_GROUP
bottleneck_channels = num_groups * width_per_group * stage_channel_factor
out_channels = self.out_channels
stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1
norm = cfg.RESNETS.NORM
blocks = ResNet.make_stage(
BottleneckBlock,
3,
first_stride=2,
in_channels=out_channels // 2,
bottleneck_channels=bottleneck_channels,
out_channels=out_channels,
num_groups=num_groups,
norm=norm,
stride_in_1x1=stride_in_1x1,
)
return nn.Sequential(*blocks)
def _shared_roi_transform(self, features, boxes):
x = self.pooler(features, boxes)
return self.res5(x)
def forward(self, features, proposal_boxes, gt_boxes=None):
if self.training:
"""
see https://github.com/airsplay/py-bottom-up-attention/\
blob/master/detectron2/modeling/roi_heads/roi_heads.py
"""
raise NotImplementedError()
assert not proposal_boxes[0].requires_grad
box_features = self._shared_roi_transform(features, proposal_boxes)
feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1
obj_logits, attr_logits, pred_proposal_deltas = self.box_predictor(feature_pooled)
return obj_logits, attr_logits, pred_proposal_deltas, feature_pooled
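# --- Illustrative check (not part of the original file) ---
# The VG modification above keeps res5 at the same spatial resolution as its input:
# with RES5HALVE disabled, a 14x14 pooled region stays 14x14 after res5 (it would be 7x7
# otherwise). `heads` is assumed to be an already constructed Res5ROIHeads instance.
def _example_res5_resolution(heads):
    x = torch.randn(2, 1024, 14, 14)  # pooled RoI features from res4
    return heads.res5(x).shape  # (2, 2048, 14, 14) when res5_halve is False, else (2, 2048, 7, 7)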
class AnchorGenerator(nn.Module):
"""
For a set of image sizes and feature maps, computes a set of anchors.
"""
def __init__(self, cfg, input_shape: List[ShapeSpec]):
super().__init__()
sizes = cfg.ANCHOR_GENERATOR.SIZES
aspect_ratios = cfg.ANCHOR_GENERATOR.ASPECT_RATIOS
self.strides = [x.stride for x in input_shape]
self.offset = cfg.ANCHOR_GENERATOR.OFFSET
assert 0.0 <= self.offset < 1.0, self.offset
"""
sizes (list[list[int]]): sizes[i] is the list of anchor sizes for feat map i
1. given in absolute lengths in units of the input image;
2. they do not dynamically scale if the input image size changes.
aspect_ratios (list[list[float]])
strides (list[int]): stride of each input feature.
"""
self.num_features = len(self.strides)
self.cell_anchors = nn.ParameterList(self._calculate_anchors(sizes, aspect_ratios))
self._spacial_feat_dim = 4
def _calculate_anchors(self, sizes, aspect_ratios):
# If one size (or aspect ratio) is specified and there are multiple feature
# maps, then we "broadcast" anchors of that single size (or aspect ratio)
if len(sizes) == 1:
sizes *= self.num_features
if len(aspect_ratios) == 1:
aspect_ratios *= self.num_features
assert self.num_features == len(sizes)
assert self.num_features == len(aspect_ratios)
cell_anchors = [self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios)]
return cell_anchors
@property
def box_dim(self):
return self._spacial_feat_dim
@property
def num_cell_anchors(self):
"""
Returns:
list[int]: Each int is the number of anchors at every pixel location, on that feature map.
"""
return [len(cell_anchors) for cell_anchors in self.cell_anchors]
def grid_anchors(self, grid_sizes):
anchors = []
for (size, stride, base_anchors) in zip(grid_sizes, self.strides, self.cell_anchors):
shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4))
return anchors
def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)):
"""
anchors are continuous geometric rectangles
centered on one feature map point sample.
We can later build the set of anchors
for the entire feature map by tiling these tensors
"""
anchors = []
for size in sizes:
area = size**2.0
for aspect_ratio in aspect_ratios:
w = math.sqrt(area / aspect_ratio)
h = aspect_ratio * w
x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0
anchors.append([x0, y0, x1, y1])
return nn.Parameter(torch.tensor(anchors))
def forward(self, features):
"""
Args:
features List[torch.Tensor]: list of feature maps on which to generate anchors.
Returns:
            torch.Tensor: anchors stacked over all feature maps, repeated once per input image.
"""
num_images = features[0].size(0)
grid_sizes = [feature_map.shape[-2:] for feature_map in features]
anchors_over_all_feature_maps = self.grid_anchors(grid_sizes)
anchors_over_all_feature_maps = torch.stack(anchors_over_all_feature_maps)
return anchors_over_all_feature_maps.unsqueeze(0).repeat_interleave(num_images, dim=0)
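# --- Worked example (not part of the original file) ---
# generate_cell_anchors keeps the anchor area fixed at size**2 and varies the aspect ratio:
# for size=64 and aspect_ratio=0.5 the anchor is wider than tall. The values are illustrative.
def _example_cell_anchor(size=64.0, aspect_ratio=0.5):
    area = size**2.0
    w = math.sqrt(area / aspect_ratio)  # ~90.51
    h = aspect_ratio * w  # ~45.25
    return (-w / 2.0, -h / 2.0, w / 2.0, h / 2.0)  # (x0, y0, x1, y1), centered at the origin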
class RPNHead(nn.Module):
"""
RPN classification and regression heads. Uses a 3x3 conv to produce a shared
hidden state from which one 1x1 conv predicts objectness logits for each anchor
and a second 1x1 conv predicts bounding-box deltas specifying how to deform
each anchor into an object proposal.
"""
def __init__(self, cfg, input_shape: List[ShapeSpec]):
super().__init__()
# Standard RPN is shared across levels:
in_channels = [s.channels for s in input_shape]
assert len(set(in_channels)) == 1, "Each level must have the same channel!"
in_channels = in_channels[0]
anchor_generator = AnchorGenerator(cfg, input_shape)
num_cell_anchors = anchor_generator.num_cell_anchors
box_dim = anchor_generator.box_dim
assert len(set(num_cell_anchors)) == 1, "Each level must have the same number of cell anchors"
num_cell_anchors = num_cell_anchors[0]
if cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS == -1:
hid_channels = in_channels
else:
hid_channels = cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS
# Modifications for VG in RPN (modeling/proposal_generator/rpn.py)
        # Use hidden dim instead of the same dim as Res4 (in_channels)
# 3x3 conv for the hidden representation
self.conv = nn.Conv2d(in_channels, hid_channels, kernel_size=3, stride=1, padding=1)
# 1x1 conv for predicting objectness logits
self.objectness_logits = nn.Conv2d(hid_channels, num_cell_anchors, kernel_size=1, stride=1)
# 1x1 conv for predicting box2box transform deltas
self.anchor_deltas = nn.Conv2d(hid_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1)
for layer in [self.conv, self.objectness_logits, self.anchor_deltas]:
nn.init.normal_(layer.weight, std=0.01)
nn.init.constant_(layer.bias, 0)
def forward(self, features):
"""
Args:
features (list[Tensor]): list of feature maps
"""
pred_objectness_logits = []
pred_anchor_deltas = []
for x in features:
t = nn.functional.relu(self.conv(x))
pred_objectness_logits.append(self.objectness_logits(t))
pred_anchor_deltas.append(self.anchor_deltas(t))
return pred_objectness_logits, pred_anchor_deltas
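# --- Shape sketch (not part of the original file) ---
# For a feature map of spatial size H x W and A cell anchors, the head above produces
# objectness logits of shape (N, A, H, W) and anchor deltas of shape (N, A * 4, H, W).
# RPNHead itself needs a full config, so this sketch rebuilds the three convs by hand;
# the channel and anchor counts are illustrative assumptions.
def _example_rpn_head_shapes(in_channels=1024, num_cell_anchors=15):
    feat = torch.randn(2, in_channels, 38, 50)
    hidden = nn.functional.relu(nn.Conv2d(in_channels, in_channels, 3, padding=1)(feat))
    logits = nn.Conv2d(in_channels, num_cell_anchors, 1)(hidden)
    deltas = nn.Conv2d(in_channels, num_cell_anchors * 4, 1)(hidden)
    return logits.shape, deltas.shape  # (2, 15, 38, 50), (2, 60, 38, 50)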
class RPN(nn.Module):
"""
Region Proposal Network, introduced by the Faster R-CNN paper.
"""
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__()
self.min_box_side_len = cfg.PROPOSAL_GENERATOR.MIN_SIZE
self.in_features = cfg.RPN.IN_FEATURES
self.nms_thresh = cfg.RPN.NMS_THRESH
self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE
self.positive_fraction = cfg.RPN.POSITIVE_FRACTION
self.smooth_l1_beta = cfg.RPN.SMOOTH_L1_BETA
self.loss_weight = cfg.RPN.LOSS_WEIGHT
self.pre_nms_topk = {
True: cfg.RPN.PRE_NMS_TOPK_TRAIN,
False: cfg.RPN.PRE_NMS_TOPK_TEST,
}
self.post_nms_topk = {
True: cfg.RPN.POST_NMS_TOPK_TRAIN,
False: cfg.RPN.POST_NMS_TOPK_TEST,
}
self.boundary_threshold = cfg.RPN.BOUNDARY_THRESH
self.anchor_generator = AnchorGenerator(cfg, [input_shape[f] for f in self.in_features])
self.box2box_transform = Box2BoxTransform(weights=cfg.RPN.BBOX_REG_WEIGHTS)
self.anchor_matcher = Matcher(
cfg.RPN.IOU_THRESHOLDS,
cfg.RPN.IOU_LABELS,
allow_low_quality_matches=True,
)
self.rpn_head = RPNHead(cfg, [input_shape[f] for f in self.in_features])
def training(self, images, image_shapes, features, gt_boxes):
pass
def inference(self, outputs, images, image_shapes, features, gt_boxes=None):
outputs = find_top_rpn_proposals(
outputs.predict_proposals(),
outputs.predict_objectness_logits(),
images,
image_shapes,
self.nms_thresh,
self.pre_nms_topk[self.training],
self.post_nms_topk[self.training],
self.min_box_side_len,
self.training,
)
results = []
for img in outputs:
im_boxes, img_box_logits = img
img_box_logits, inds = img_box_logits.sort(descending=True)
im_boxes = im_boxes[inds]
results.append((im_boxes, img_box_logits))
(proposal_boxes, logits) = tuple(map(list, zip(*results)))
return proposal_boxes, logits
def forward(self, images, image_shapes, features, gt_boxes=None):
"""
Args:
            images (torch.Tensor): batch of N input images
            features (dict[str, torch.Tensor]): feature maps keyed by level name
            gt_boxes: optional ground-truth boxes (only used during training)
"""
# features is dict, key = block level, v = feature_map
features = [features[f] for f in self.in_features]
pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
anchors = self.anchor_generator(features)
outputs = RPNOutputs(
self.box2box_transform,
self.anchor_matcher,
self.batch_size_per_image,
self.positive_fraction,
images,
pred_objectness_logits,
pred_anchor_deltas,
anchors,
self.boundary_threshold,
gt_boxes,
self.smooth_l1_beta,
)
# For RPN-only models, the proposals are the final output
if self.training:
raise NotImplementedError()
return self.training(outputs, images, image_shapes, features, gt_boxes)
else:
return self.inference(outputs, images, image_shapes, features, gt_boxes)
class FastRCNNOutputLayers(nn.Module):
"""
Two linear layers for predicting Fast R-CNN outputs:
(1) proposal-to-detection box regression deltas
(2) classification scores
"""
def __init__(
self,
input_size,
num_classes,
cls_agnostic_bbox_reg,
box_dim=4,
use_attr=False,
num_attrs=-1,
):
"""
Args:
input_size (int): channels, or (channels, height, width)
num_classes (int)
cls_agnostic_bbox_reg (bool)
box_dim (int)
"""
super().__init__()
if not isinstance(input_size, int):
input_size = np.prod(input_size)
# (do + 1 for background class)
self.cls_score = nn.Linear(input_size, num_classes + 1)
num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
self.use_attr = use_attr
if use_attr:
"""
Modifications for VG in RoI heads
Embedding: {num_classes + 1} --> {input_size // 8}
Linear: {input_size + input_size // 8} --> {input_size // 4}
Linear: {input_size // 4} --> {num_attrs + 1}
"""
self.cls_embedding = nn.Embedding(num_classes + 1, input_size // 8)
self.fc_attr = nn.Linear(input_size + input_size // 8, input_size // 4)
self.attr_score = nn.Linear(input_size // 4, num_attrs + 1)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for item in [self.cls_score, self.bbox_pred]:
nn.init.constant_(item.bias, 0)
def forward(self, roi_features):
if roi_features.dim() > 2:
roi_features = torch.flatten(roi_features, start_dim=1)
scores = self.cls_score(roi_features)
proposal_deltas = self.bbox_pred(roi_features)
if self.use_attr:
_, max_class = scores.max(-1) # [b, c] --> [b]
cls_emb = self.cls_embedding(max_class) # [b] --> [b, 256]
roi_features = torch.cat([roi_features, cls_emb], -1) # [b, 2048] + [b, 256] --> [b, 2304]
roi_features = self.fc_attr(roi_features)
roi_features = nn.functional.relu(roi_features)
attr_scores = self.attr_score(roi_features)
return scores, attr_scores, proposal_deltas
else:
return scores, proposal_deltas
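# --- Illustrative sketch (not part of the original file) ---
# Output shapes of the predictor above. The label-space sizes (1600 object classes,
# 400 attributes) are the Visual Genome values commonly used with this model; here they
# are assumptions for the example, not values read from a config.
def _example_box_predictor_shapes():
    predictor = FastRCNNOutputLayers(
        2048, num_classes=1600, cls_agnostic_bbox_reg=False, use_attr=True, num_attrs=400
    )
    pooled = torch.randn(36, 2048)  # 36 pooled RoI feature vectors
    scores, attr_scores, deltas = predictor(pooled)
    return scores.shape, attr_scores.shape, deltas.shape  # (36, 1601), (36, 401), (36, 6400)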
class GeneralizedRCNN(nn.Module):
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
self.backbone = build_backbone(cfg)
self.proposal_generator = RPN(cfg, self.backbone.output_shape())
self.roi_heads = Res5ROIHeads(cfg, self.backbone.output_shape())
self.roi_outputs = ROIOutputs(cfg)
self.to(self.device)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
use_cdn = kwargs.pop("use_cdn", True)
# Load config if we don't provide a configuration
if not isinstance(config, Config):
config_path = config if config is not None else pretrained_model_name_or_path
# try:
config = Config.from_pretrained(
config_path,
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
)
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} ".format(
WEIGHTS_NAME,
pretrained_model_name_or_path,
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=WEIGHTS_NAME,
use_cdn=use_cdn,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
if resolved_archive_file is None:
raise EnvironmentError
except EnvironmentError:
msg = f"Can't load weights for '{pretrained_model_name_or_path}'."
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
print("loading weights file {}".format(archive_file))
else:
print("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
else:
resolved_archive_file = None
# Instantiate model.
model = cls(config)
if state_dict is None:
try:
try:
state_dict = torch.load(resolved_archive_file, map_location="cpu")
except Exception:
state_dict = load_checkpoint(resolved_archive_file)
except Exception:
raise OSError(
"Unable to load weights from pytorch checkpoint file. "
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
)
missing_keys = []
unexpected_keys = []
error_msgs = []
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
model_to_load = model
model_to_load.load_state_dict(state_dict)
if model.__class__.__name__ != model_to_load.__class__.__name__:
base_model_state_dict = model_to_load.state_dict().keys()
head_model_state_dict_without_base_prefix = [
key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
]
missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)
if len(unexpected_keys) > 0:
print(
f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
print(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
print(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
print(
f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for predictions without further training."
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(error_msgs)
)
)
# Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
return model
def forward(
self,
images,
image_shapes,
gt_boxes=None,
proposals=None,
scales_yx=None,
**kwargs,
):
"""
kwargs:
max_detections (int), return_tensors {"np", "pt", None}, padding {None,
"max_detections"}, pad_value (int), location = {"cuda", "cpu"}
"""
if self.training:
raise NotImplementedError()
return self.inference(
images=images,
image_shapes=image_shapes,
gt_boxes=gt_boxes,
proposals=proposals,
scales_yx=scales_yx,
**kwargs,
)
@torch.no_grad()
def inference(
self,
images,
image_shapes,
gt_boxes=None,
proposals=None,
scales_yx=None,
**kwargs,
):
# run images through backbone
original_sizes = image_shapes * scales_yx
features = self.backbone(images)
# generate proposals if none are available
if proposals is None:
proposal_boxes, _ = self.proposal_generator(images, image_shapes, features, gt_boxes)
else:
assert proposals is not None
# pool object features from either gt_boxes, or from proposals
obj_logits, attr_logits, box_deltas, feature_pooled = self.roi_heads(features, proposal_boxes, gt_boxes)
# prepare FRCNN Outputs and select top proposals
boxes, classes, class_probs, attrs, attr_probs, roi_features = self.roi_outputs(
obj_logits=obj_logits,
attr_logits=attr_logits,
box_deltas=box_deltas,
pred_boxes=proposal_boxes,
features=feature_pooled,
sizes=image_shapes,
scales=scales_yx,
)
        # optionally pad / convert the per-image outputs to fixed-size tensors
subset_kwargs = {
"max_detections": kwargs.get("max_detections", None),
"return_tensors": kwargs.get("return_tensors", None),
"pad_value": kwargs.get("pad_value", 0),
"padding": kwargs.get("padding", None),
}
preds_per_image = torch.tensor([p.size(0) for p in boxes])
boxes = pad_list_tensors(boxes, preds_per_image, **subset_kwargs)
classes = pad_list_tensors(classes, preds_per_image, **subset_kwargs)
class_probs = pad_list_tensors(class_probs, preds_per_image, **subset_kwargs)
attrs = pad_list_tensors(attrs, preds_per_image, **subset_kwargs)
attr_probs = pad_list_tensors(attr_probs, preds_per_image, **subset_kwargs)
roi_features = pad_list_tensors(roi_features, preds_per_image, **subset_kwargs)
subset_kwargs["padding"] = None
preds_per_image = pad_list_tensors(preds_per_image, None, **subset_kwargs)
sizes = pad_list_tensors(image_shapes, None, **subset_kwargs)
normalized_boxes = norm_box(boxes, original_sizes)
return OrderedDict(
{
"obj_ids": classes,
"obj_probs": class_probs,
"attr_ids": attrs,
"attr_probs": attr_probs,
"boxes": boxes,
"sizes": sizes,
"preds_per_image": preds_per_image,
"roi_features": roi_features,
"normalized_boxes": normalized_boxes,
}
)
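# --- Usage sketch (not part of the original file) ---
# Mirrors how extracting_data.py in this directory drives the model. The image path is a
# hypothetical placeholder, and Preprocess comes from the sibling processing_image.py module.
def _example_generalized_rcnn_inference(image_path="input.jpg"):
    from processing_image import Preprocess  # local helper in this research project
    cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
    frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=cfg)
    images, sizes, scales_yx = Preprocess(cfg)([image_path])
    return frcnn(
        images,
        sizes,
        scales_yx=scales_yx,
        padding="max_detections",
        max_detections=cfg.MAX_DETECTIONS,
        return_tensors="pt",
    )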
| 73,726 | 37.359521 | 152 | py |
robust-transformers | robust-transformers-main/examples/research_projects/lxmert/extracting_data.py | import getopt
import json
import os
# import numpy as np
import sys
from collections import OrderedDict
import datasets
import numpy as np
import torch
from modeling_frcnn import GeneralizedRCNN
from processing_image import Preprocess
from utils import Config
"""
USAGE:
``python extracting_data.py -i <img_dir> -o <dataset_file>.datasets <batch_size>``
"""
TEST = False
CONFIG = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
DEFAULT_SCHEMA = datasets.Features(
OrderedDict(
{
"attr_ids": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")),
"attr_probs": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")),
"boxes": datasets.Array2D((CONFIG.MAX_DETECTIONS, 4), dtype="float32"),
"img_id": datasets.Value("int32"),
"obj_ids": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")),
"obj_probs": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")),
"roi_features": datasets.Array2D((CONFIG.MAX_DETECTIONS, 2048), dtype="float32"),
"sizes": datasets.Sequence(length=2, feature=datasets.Value("float32")),
"preds_per_image": datasets.Value(dtype="int32"),
}
)
)
class Extract:
def __init__(self, argv=sys.argv[1:]):
inputdir = None
outputfile = None
subset_list = None
batch_size = 1
opts, args = getopt.getopt(argv, "i:o:b:s", ["inputdir=", "outfile=", "batch_size=", "subset_list="])
for opt, arg in opts:
if opt in ("-i", "--inputdir"):
inputdir = arg
elif opt in ("-o", "--outfile"):
outputfile = arg
elif opt in ("-b", "--batch_size"):
batch_size = int(arg)
elif opt in ("-s", "--subset_list"):
subset_list = arg
assert inputdir is not None # and os.path.isdir(inputdir), f"{inputdir}"
assert outputfile is not None and not os.path.isfile(outputfile), f"{outputfile}"
if subset_list is not None:
with open(os.path.realpath(subset_list)) as f:
                self.subset_list = set(map(lambda x: self._vqa_file_split(x)[0], tryload(f)))
else:
self.subset_list = None
self.config = CONFIG
if torch.cuda.is_available():
self.config.model.device = "cuda"
self.inputdir = os.path.realpath(inputdir)
self.outputfile = os.path.realpath(outputfile)
self.preprocess = Preprocess(self.config)
self.model = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=self.config)
self.batch = batch_size if batch_size != 0 else 1
self.schema = DEFAULT_SCHEMA
def _vqa_file_split(self, file):
img_id = int(file.split(".")[0].split("_")[-1])
filepath = os.path.join(self.inputdir, file)
return (img_id, filepath)
@property
def file_generator(self):
batch = []
        for file in os.listdir(self.inputdir):
            img_id, filepath = self._vqa_file_split(file)
            # the subset list holds image ids, so filter on the id rather than the loop index
            if self.subset_list is not None and img_id not in self.subset_list:
                continue
            batch.append((img_id, filepath))
if len(batch) == self.batch:
temp = batch
batch = []
yield list(map(list, zip(*temp)))
        if batch:
            yield list(map(list, zip(*batch)))
def __call__(self):
# make writer
if not TEST:
writer = datasets.ArrowWriter(features=self.schema, path=self.outputfile)
# do file generator
for i, (img_ids, filepaths) in enumerate(self.file_generator):
images, sizes, scales_yx = self.preprocess(filepaths)
output_dict = self.model(
images,
sizes,
scales_yx=scales_yx,
padding="max_detections",
max_detections=self.config.MAX_DETECTIONS,
pad_value=0,
return_tensors="np",
location="cpu",
)
output_dict["boxes"] = output_dict.pop("normalized_boxes")
if not TEST:
output_dict["img_id"] = np.array(img_ids)
batch = self.schema.encode_batch(output_dict)
writer.write_batch(batch)
if TEST:
break
# finalizer the writer
if not TEST:
num_examples, num_bytes = writer.finalize()
print(f"Success! You wrote {num_examples} entry(s) and {num_bytes >> 20} mb")
def tryload(stream):
try:
data = json.load(stream)
try:
data = list(data.keys())
except Exception:
data = [d["img_id"] for d in data]
except Exception:
try:
data = eval(stream.read())
except Exception:
data = stream.read().split("\n")
return data
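# --- Usage sketch (not part of the original file) ---
# Programmatic equivalent of the CLI usage shown at the top of this file. The input and
# output paths below are hypothetical placeholders.
def _example_extract_features():
    extract = Extract(["-i", "./vqa_images", "-o", "./features.datasets", "-b", "4"])
    extract()
    return datasets.Dataset.from_file(extract.outputfile)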
if __name__ == "__main__":
extract = Extract(sys.argv[1:])
extract()
if not TEST:
dataset = datasets.Dataset.from_file(extract.outputfile)
# wala!
# print(np.array(dataset[0:2]["roi_features"]).shape)
| 5,254 | 34.033333 | 109 | py |
robust-transformers | robust-transformers-main/examples/research_projects/lxmert/utils.py | """
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal, Huggingface team :)
Adapted From Facebook Inc, Detectron2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import numpy as np
from PIL import Image
from tqdm.auto import tqdm
import cv2
import requests
import wget
from filelock import FileLock
from yaml import Loader, dump, load
try:
import torch
_torch_available = True
except ImportError:
_torch_available = False
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
vg_classes = []
with open(objs) as f:
for object in f.readlines():
vg_classes.append(object.split(",")[0].lower().strip())
vg_attrs = []
with open(attrs) as f:
for object in f.readlines():
vg_attrs.append(object.split(",")[0].lower().strip())
return vg_classes, vg_attrs
def load_checkpoint(ckp):
r = OrderedDict()
with open(ckp, "rb") as f:
ckp = pkl.load(f)["model"]
for k in copy.deepcopy(list(ckp.keys())):
v = ckp.pop(k)
if isinstance(v, np.ndarray):
v = torch.tensor(v)
else:
            assert isinstance(v, torch.Tensor), type(v)
r[k] = v
return r
class Config:
_pointer = {}
def __init__(self, dictionary: dict, name: str = "root", level=0):
self._name = name
self._level = level
d = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
k = copy.deepcopy(k)
v = copy.deepcopy(v)
if isinstance(v, dict):
v = Config(v, name=k, level=level + 1)
d[k] = v
setattr(self, k, v)
self._pointer = d
def __repr__(self):
return str(list((self._pointer.keys())))
def __setattr__(self, key, val):
self.__dict__[key] = val
self.__dict__[key.upper()] = val
levels = key.split(".")
last_level = len(levels) - 1
pointer = self._pointer
if len(levels) > 1:
for i, l in enumerate(levels):
if hasattr(self, l) and isinstance(getattr(self, l), Config):
setattr(getattr(self, l), ".".join(levels[i:]), val)
if l == last_level:
pointer[l] = val
else:
pointer = pointer[l]
def to_dict(self):
return self._pointer
def dump_yaml(self, data, file_name):
with open(f"{file_name}", "w") as stream:
dump(data, stream)
def dump_json(self, data, file_name):
with open(f"{file_name}", "w") as stream:
json.dump(data, stream)
@staticmethod
def load_yaml(config):
with open(config) as stream:
data = load(stream, Loader=Loader)
return data
def __str__(self):
t = " "
if self._name != "root":
r = f"{t * (self._level-1)}{self._name}:\n"
else:
r = ""
level = self._level
for i, (k, v) in enumerate(self._pointer.items()):
if isinstance(v, Config):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
self._level = level
return r[:-1]
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
return cls(config_dict)
@classmethod
def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
config_file = Config.load_yaml(resolved_config_file)
except EnvironmentError:
msg = "Can't load config for"
raise EnvironmentError(msg)
if resolved_config_file == config_file:
print("loading configuration file from path")
else:
print("loading configuration file cache")
return Config.load_yaml(resolved_config_file), kwargs
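# --- Illustrative sketch (not part of the original file) ---
# Config recursively wraps nested dicts and exposes every key both as-is and upper-cased.
# The values below are made up; they are not taken from any shipped config.yaml.
def _example_config_access():
    cfg = Config({"model": {"device": "cpu"}, "max_detections": 36})
    assert cfg.model.device == "cpu"
    assert cfg.MAX_DETECTIONS == 36
    return cfg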
# quick compare tensors
def compare(in_tensor):
out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
n1 = in_tensor.numpy()
n2 = out_tensor.numpy()[0]
print(n1.shape, n1[0, 0, :5])
print(n2.shape, n2[0, 0, :5])
assert np.allclose(
n1, n2, rtol=0.01, atol=0.1
), f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x == False])/len(n1.flatten())*100:.4f} % element-wise mismatch"
raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
legacy_format = "/" not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
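# --- Illustrative sketch (not part of the original file) ---
# The two URL layouts produced above: legacy ids (no "/") get "<model>-<file>", namespaced
# ids get "<namespace>/<model>/<file>". The model ids below are illustrative.
def _example_bucket_urls():
    legacy = hf_bucket_url("bert-base-uncased", WEIGHTS_NAME, use_cdn=False)
    namespaced = hf_bucket_url("unc-nlp/frcnn-vg-finetuned", WEIGHTS_NAME, use_cdn=False)
    return legacy, namespaced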
def http_get(
url,
temp_file,
proxies=None,
resume_size=0,
user_agent=None,
):
ua = "python/{}".format(sys.version.split()[0])
if _torch_available:
ua += "; torch/{}".format(torch.__version__)
if isinstance(user_agent, dict):
ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
elif isinstance(user_agent, str):
ua += "; " + user_agent
headers = {"user-agent": ua}
if resume_size > 0:
headers["Range"] = "bytes=%d-" % (resume_size,)
response = requests.get(url, stream=True, proxies=proxies, headers=headers)
if response.status_code == 416: # Range not satisfiable
return
content_length = response.headers.get("Content-Length")
total = resume_size + int(content_length) if content_length is not None else None
progress = tqdm(
unit="B",
unit_scale=True,
total=total,
initial=resume_size,
desc="Downloading",
)
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(
url,
cache_dir=None,
force_download=False,
proxies=None,
etag_timeout=10,
resume_download=False,
user_agent=None,
local_files_only=False,
):
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
etag = None
if not local_files_only:
try:
response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
if response.status_code == 200:
etag = response.headers.get("ETag")
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(cache_path):
return cache_path
else:
matching_files = [
file
for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
if not file.endswith(".json") and not file.endswith(".lock")
]
if len(matching_files) > 0:
return os.path.join(cache_dir, matching_files[-1])
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False."
)
return None
# From now on, etag is not None.
if os.path.exists(cache_path) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lock_path = cache_path + ".lock"
with FileLock(lock_path):
# If the download just completed while the lock was activated.
if os.path.exists(cache_path) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
incomplete_path = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(incomplete_path, "a+b") as f:
yield f
temp_file_manager = _resumable_file_manager
if os.path.exists(incomplete_path):
resume_size = os.stat(incomplete_path).st_size
else:
resume_size = 0
else:
temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
resume_size = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s"
                % (url, temp_file.name)
            )
http_get(
url,
temp_file,
proxies=proxies,
resume_size=resume_size,
user_agent=user_agent,
)
os.replace(temp_file.name, cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
json.dump(meta, meta_file)
return cache_path
def url_to_filename(url, etag=None):
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5"):
filename += ".h5"
return filename
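# --- Worked example (not part of the original file) ---
# The cache file name is sha256(url) plus, when an ETag is known, "." + sha256(etag).
# The URL and ETag below are made up.
def _example_cache_filename():
    url = "https://cdn.huggingface.co/unc-nlp/frcnn-vg-finetuned/pytorch_model.bin"
    etag = '"abc123"'
    expected = sha256(url.encode("utf-8")).hexdigest() + "." + sha256(etag.encode("utf-8")).hexdigest()
    assert url_to_filename(url, etag) == expected
    return expected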
def cached_path(
url_or_filename,
cache_dir=None,
force_download=False,
proxies=None,
resume_download=False,
user_agent=None,
extract_compressed_file=False,
force_extract=False,
local_files_only=False,
):
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
user_agent=user_agent,
local_files_only=local_files_only,
)
elif os.path.exists(url_or_filename):
# File, and it exists.
output_path = url_or_filename
elif urlparse(url_or_filename).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
if extract_compressed_file:
if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
output_dir, output_file = os.path.split(output_path)
output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lock_path = output_path + ".lock"
with FileLock(lock_path):
shutil.rmtree(output_path_extracted, ignore_errors=True)
os.makedirs(output_path_extracted)
if is_zipfile(output_path):
with ZipFile(output_path, "r") as zip_file:
zip_file.extractall(output_path_extracted)
zip_file.close()
elif tarfile.is_tarfile(output_path):
tar_file = tarfile.open(output_path)
tar_file.extractall(output_path_extracted)
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
return output_path_extracted
return output_path
def get_data(query, delim=","):
assert isinstance(query, str)
if os.path.isfile(query):
with open(query) as f:
data = eval(f.read())
else:
req = requests.get(query)
try:
            data = req.json()
except Exception:
data = req.content.decode()
assert data is not None, "could not connect"
try:
data = eval(data)
except Exception:
data = data.split("\n")
req.close()
return data
def get_image_from_url(url):
response = requests.get(url)
img = np.array(Image.open(BytesIO(response.content)))
return img
# to load legacy frcnn checkpoint from detectron
def load_frcnn_pkl_from_url(url):
fn = url.split("/")[-1]
if fn not in os.listdir(os.getcwd()):
wget.download(url)
with open(fn, "rb") as stream:
weights = pkl.load(stream)
model = weights.pop("model")
new = {}
for k, v in model.items():
new[k] = torch.from_numpy(v)
if "running_var" in k:
zero = torch.tensor([0])
k2 = k.replace("running_var", "num_batches_tracked")
new[k2] = zero
return new
def get_demo_path():
print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
assert isinstance(im, str)
if os.path.isfile(im):
img = cv2.imread(im)
else:
img = get_image_from_url(im)
assert img is not None, f"could not connect to: {im}"
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if input_format == "RGB":
img = img[:, :, ::-1]
return img
def chunk(images, batch=1):
return (images[i : i + batch] for i in range(0, len(images), batch))
| 18,199 | 31.5 | 143 | py |
robust-transformers | robust-transformers-main/examples/research_projects/lxmert/visualizing_image.py | """
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
Adapted From Facebook Inc, Detectron2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import colorsys
import io
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import numpy as np
import torch
from matplotlib.backends.backend_agg import FigureCanvasAgg
import cv2
from utils import img_tensorize
_SMALL_OBJ = 1000
class SingleImageViz:
def __init__(
self,
img,
scale=1.2,
edgecolor="g",
alpha=0.5,
linestyle="-",
saveas="test_out.jpg",
rgb=True,
pynb=False,
id2obj=None,
id2attr=None,
pad=0.7,
):
"""
img: an RGB image of shape (H, W, 3).
"""
if isinstance(img, torch.Tensor):
            img = img.numpy().astype(np.uint8)
if isinstance(img, str):
img = img_tensorize(img)
assert isinstance(img, np.ndarray)
width, height = img.shape[1], img.shape[0]
fig = mplfigure.Figure(frameon=False)
dpi = fig.get_dpi()
width_in = (width * scale + 1e-2) / dpi
height_in = (height * scale + 1e-2) / dpi
fig.set_size_inches(width_in, height_in)
ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
ax.axis("off")
ax.set_xlim(0.0, width)
ax.set_ylim(height)
self.saveas = saveas
self.rgb = rgb
self.pynb = pynb
self.img = img
self.edgecolor = edgecolor
        self.alpha = alpha
self.linestyle = linestyle
self.font_size = int(np.sqrt(min(height, width)) * scale // 3)
self.width = width
self.height = height
self.scale = scale
self.fig = fig
self.ax = ax
self.pad = pad
self.id2obj = id2obj
self.id2attr = id2attr
self.canvas = FigureCanvasAgg(fig)
def add_box(self, box, color=None):
if color is None:
color = self.edgecolor
(x0, y0, x1, y1) = box
width = x1 - x0
height = y1 - y0
self.ax.add_patch(
mpl.patches.Rectangle(
(x0, y0),
width,
height,
fill=False,
edgecolor=color,
linewidth=self.font_size // 3,
alpha=self.alpha,
linestyle=self.linestyle,
)
)
def draw_boxes(self, boxes, obj_ids=None, obj_scores=None, attr_ids=None, attr_scores=None):
if len(boxes.shape) > 2:
boxes = boxes[0]
if len(obj_ids.shape) > 1:
obj_ids = obj_ids[0]
if len(obj_scores.shape) > 1:
obj_scores = obj_scores[0]
if len(attr_ids.shape) > 1:
attr_ids = attr_ids[0]
if len(attr_scores.shape) > 1:
attr_scores = attr_scores[0]
if isinstance(boxes, torch.Tensor):
boxes = boxes.numpy()
if isinstance(boxes, list):
boxes = np.array(boxes)
assert isinstance(boxes, np.ndarray)
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
sorted_idxs = np.argsort(-areas).tolist()
boxes = boxes[sorted_idxs] if boxes is not None else None
obj_ids = obj_ids[sorted_idxs] if obj_ids is not None else None
obj_scores = obj_scores[sorted_idxs] if obj_scores is not None else None
attr_ids = attr_ids[sorted_idxs] if attr_ids is not None else None
attr_scores = attr_scores[sorted_idxs] if attr_scores is not None else None
assigned_colors = [self._random_color(maximum=1) for _ in range(len(boxes))]
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
if obj_ids is not None:
labels = self._create_text_labels_attr(obj_ids, obj_scores, attr_ids, attr_scores)
for i in range(len(boxes)):
color = assigned_colors[i]
self.add_box(boxes[i], color)
self.draw_labels(labels[i], boxes[i], color)
def draw_labels(self, label, box, color):
x0, y0, x1, y1 = box
text_pos = (x0, y0)
instance_area = (y1 - y0) * (x1 - x0)
small = _SMALL_OBJ * self.scale
if instance_area < small or y1 - y0 < 40 * self.scale:
if y1 >= self.height - 5:
text_pos = (x1, y0)
else:
text_pos = (x0, y1)
height_ratio = (y1 - y0) / np.sqrt(self.height * self.width)
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
font_size = np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
font_size *= 0.75 * self.font_size
self.draw_text(
text=label,
position=text_pos,
color=lighter_color,
)
def draw_text(
self,
text,
position,
color="g",
ha="left",
):
rotation = 0
font_size = self.font_size
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
bbox = {
"facecolor": "black",
"alpha": self.alpha,
"pad": self.pad,
"edgecolor": "none",
}
x, y = position
self.ax.text(
x,
y,
text,
size=font_size * self.scale,
family="sans-serif",
bbox=bbox,
verticalalignment="top",
horizontalalignment=ha,
color=color,
zorder=10,
rotation=rotation,
)
def save(self, saveas=None):
if saveas is None:
saveas = self.saveas
if saveas.lower().endswith(".jpg") or saveas.lower().endswith(".png"):
cv2.imwrite(
saveas,
self._get_buffer()[:, :, ::-1],
)
else:
self.fig.savefig(saveas)
def _create_text_labels_attr(self, classes, scores, attr_classes, attr_scores):
labels = [self.id2obj[i] for i in classes]
attr_labels = [self.id2attr[i] for i in attr_classes]
labels = [
f"{label} {score:.2f} {attr} {attr_score:.2f}"
for label, score, attr, attr_score in zip(labels, scores, attr_labels, attr_scores)
]
return labels
def _create_text_labels(self, classes, scores):
labels = [self.id2obj[i] for i in classes]
if scores is not None:
if labels is None:
labels = ["{:.0f}%".format(s * 100) for s in scores]
else:
labels = ["{} {:.0f}%".format(li, s * 100) for li, s in zip(labels, scores)]
return labels
def _random_color(self, maximum=255):
idx = np.random.randint(0, len(_COLORS))
ret = _COLORS[idx] * maximum
if not self.rgb:
ret = ret[::-1]
return ret
def _get_buffer(self):
if not self.pynb:
s, (width, height) = self.canvas.print_to_buffer()
if (width, height) != (self.width, self.height):
img = cv2.resize(self.img, (width, height))
else:
img = self.img
else:
buf = io.BytesIO() # works for cairo backend
self.canvas.print_rgba(buf)
width, height = self.width, self.height
s = buf.getvalue()
img = self.img
buffer = np.frombuffer(s, dtype="uint8")
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
try:
import numexpr as ne # fuse them with numexpr
visualized_image = ne.evaluate("img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)")
except ImportError:
alpha = alpha.astype("float32") / 255.0
visualized_image = img * (1 - alpha) + rgb * alpha
return visualized_image.astype("uint8")
def _change_color_brightness(self, color, brightness_factor):
assert brightness_factor >= -1.0 and brightness_factor <= 1.0
color = mplc.to_rgb(color)
polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
return modified_color
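# --- Usage sketch (not part of the original file) ---
# Typical flow with the frcnn outputs produced elsewhere in this project. The image path is
# a hypothetical placeholder; id2obj / id2attr are the label lists from utils.load_labels.
def _example_visualize(frcnn_output, id2obj, id2attr, image_path="input.jpg"):
    viz = SingleImageViz(image_path, id2obj=id2obj, id2attr=id2attr)
    viz.draw_boxes(
        frcnn_output.get("boxes"),
        frcnn_output.get("obj_ids"),
        frcnn_output.get("obj_probs"),
        frcnn_output.get("attr_ids"),
        frcnn_output.get("attr_probs"),
    )
    viz.save("visualized.jpg")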
# Color map
_COLORS = (
np.array(
[
0.000,
0.447,
0.741,
0.850,
0.325,
0.098,
0.929,
0.694,
0.125,
0.494,
0.184,
0.556,
0.466,
0.674,
0.188,
0.301,
0.745,
0.933,
0.635,
0.078,
0.184,
0.300,
0.300,
0.300,
0.600,
0.600,
0.600,
1.000,
0.000,
0.000,
1.000,
0.500,
0.000,
0.749,
0.749,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
1.000,
0.667,
0.000,
1.000,
0.333,
0.333,
0.000,
0.333,
0.667,
0.000,
0.333,
1.000,
0.000,
0.667,
0.333,
0.000,
0.667,
0.667,
0.000,
0.667,
1.000,
0.000,
1.000,
0.333,
0.000,
1.000,
0.667,
0.000,
1.000,
1.000,
0.000,
0.000,
0.333,
0.500,
0.000,
0.667,
0.500,
0.000,
1.000,
0.500,
0.333,
0.000,
0.500,
0.333,
0.333,
0.500,
0.333,
0.667,
0.500,
0.333,
1.000,
0.500,
0.667,
0.000,
0.500,
0.667,
0.333,
0.500,
0.667,
0.667,
0.500,
0.667,
1.000,
0.500,
1.000,
0.000,
0.500,
1.000,
0.333,
0.500,
1.000,
0.667,
0.500,
1.000,
1.000,
0.500,
0.000,
0.333,
1.000,
0.000,
0.667,
1.000,
0.000,
1.000,
1.000,
0.333,
0.000,
1.000,
0.333,
0.333,
1.000,
0.333,
0.667,
1.000,
0.333,
1.000,
1.000,
0.667,
0.000,
1.000,
0.667,
0.333,
1.000,
0.667,
0.667,
1.000,
0.667,
1.000,
1.000,
1.000,
0.000,
1.000,
1.000,
0.333,
1.000,
1.000,
0.667,
1.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.167,
0.000,
0.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.167,
0.000,
0.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.143,
0.143,
0.143,
0.857,
0.857,
0.857,
1.000,
1.000,
1.000,
]
)
.astype(np.float32)
.reshape(-1, 3)
)
| 13,420 | 25.842 | 100 | py |