# CP2-main/main.py
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import logging
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from mmcv.utils import Config
import loader
import builder
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
handler = logging.FileHandler('./log_cp2.txt')
handler.setLevel(level=logging.INFO)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
parser = argparse.ArgumentParser(description='Copy-Paste Contrastive Pretraining on ImageNet')
parser.add_argument('--config', help='path to configuration file')
parser.add_argument('--data', metavar='DIR', help='path to dataset')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--num-images', default=1281167, type=int, metavar='N',
help='total number of training images (default: ImageNet-1k train set size)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='total batch size over all GPUs')
parser.add_argument('--lr', '--learning-rate', default=0.03, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum of SGD solver')
parser.add_argument('--optim', default='sgd', help='optimizer')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--scalar-freq', default=100, type=int,
help='metrics writing frequency')
parser.add_argument('--ckpt-freq', default=1, type=int,
help='checkpoint saving frequency')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:10001', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='use multiprocessing distributed training to launch one process per GPU (enabled by default)')
parser.set_defaults(multiprocessing_distributed=True)
parser.add_argument('--output-stride', default=16, type=int,
help='output stride of encoder')
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
args.world_size = ngpus_per_node * args.world_size
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
cfg = Config.fromfile(args.config)
data_dir = args.data
args.gpu = gpu
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
model = builder.CP2_MOCO(cfg)
print(model)
if args.distributed:
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
else:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
raise NotImplementedError("Only DistributedDataParallel is supported.")
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
if args.optim == 'adamw':
optimizer = torch.optim.AdamW(model.parameters(), args.lr,
weight_decay=0.01)
elif args.optim == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
else:
raise NotImplementedError("Only sgd and adamw optimizers are supported.")
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
traindir = os.path.join(data_dir, 'train')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
augmentation = [
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([loader.GaussianBlur([.1, 2.])], p=0.5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]
# simply use RandomErasing for the Copy-Paste implementation:
# erase a random block of the background image and fill the erased positions with the foreground
augmentation_bg = [
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([loader.GaussianBlur([.1, 2.])], p=0.5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
transforms.RandomErasing(p=1., scale=(0.5, 0.8), ratio=(0.8, 1.25), value=0.)
]
train_dataset = datasets.ImageFolder(
traindir,
loader.TwoCropsTransform(transforms.Compose(augmentation)))
train_dataset_bg = datasets.ImageFolder(
traindir,
transforms.Compose(augmentation_bg))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, seed=0)
train_sampler_bg0 = torch.utils.data.distributed.DistributedSampler(train_dataset_bg, seed=1024)
train_sampler_bg1 = torch.utils.data.distributed.DistributedSampler(train_dataset_bg, seed=2048)
else:
train_sampler = None
train_sampler_bg0 = None
train_sampler_bg1 = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
train_loader_bg0 = torch.utils.data.DataLoader(
train_dataset_bg, batch_size=args.batch_size, shuffle=(train_sampler_bg0 is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler_bg0, drop_last=True)
train_loader_bg1 = torch.utils.data.DataLoader(
train_dataset_bg, batch_size=args.batch_size, shuffle=(train_sampler_bg1 is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler_bg1, drop_last=True)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
train_sampler_bg0.set_epoch(epoch)
train_sampler_bg1.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train([train_loader, train_loader_bg0, train_loader_bg1], model, criterion, optimizer, epoch, args)
if epoch % args.ckpt_freq == args.ckpt_freq - 1:
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
}, is_best=False, filename='checkpoint_{:04d}.pth.tar'.format(epoch))
def train(train_loader_list, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
# data_time = AverageMeter('Data', ':6.3f')
loss_i = AverageMeter('Loss_ins', ':.4f')
loss_d = AverageMeter('Loss_den', ':.4f')
acc_ins = AverageMeter('Acc_ins', ':6.2f')
acc_seg = AverageMeter('Acc_seg', ':6.2f')
train_loader, train_loader_bg0, train_loader_bg1 = train_loader_list
progress = ProgressMeter(
len(train_loader),
[batch_time, loss_i, loss_d, acc_ins, acc_seg],
prefix="Epoch: [{}]".format(epoch))
# cre_dense = nn.LogSoftmax(dim=1)
model.train()
end = time.time()
for i, ((images, _), (bg0, _), (bg1, _)) in enumerate(zip(train_loader, train_loader_bg0, train_loader_bg1)):
# data_time.update(time.time() - end)
if args.gpu is not None:
images[0] = images[0].cuda(args.gpu, non_blocking=True)
images[1] = images[1].cuda(args.gpu, non_blocking=True)
bg0 = bg0.cuda(args.gpu, non_blocking=True)
bg1 = bg1.cuda(args.gpu, non_blocking=True)
# mask_q = mask_q.cuda(args.gpu, non_blocking=True)
# mask_k = mask_k.cuda(args.gpu, non_blocking=True)
mask_q, mask_k = (bg0[:, 0] == 0).float(), (bg1[:, 0] == 0).float()
image_q = images[0] * mask_q.unsqueeze(1) + bg0
image_k = images[1] * mask_k.unsqueeze(1) + bg1
# compute output
stride = args.output_stride
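# The binary masks are at input resolution (224x224) while the encoder
# emits a 14x14 grid for output stride 16 (224 / 16 = 14, i.e. 196 cells).
# Sampling every `stride`-th pixel starting at stride//2 picks the center
# of each grid cell, aligning the masks with the dense feature map.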
output_instance, output_dense, target_instance, target_dense, mask_dense = model(
image_q, image_k,
mask_q[:, stride//2::stride, stride//2::stride],
mask_k[:, stride//2::stride, stride//2::stride])
loss_instance = criterion(output_instance, target_instance)
# dense loss of softmax
output_dense_log = (-1.) * nn.LogSoftmax(dim=1)(output_dense)
output_dense_log = output_dense_log.reshape(output_dense_log.shape[0], -1)
loss_dense = torch.mean(
torch.mul(output_dense_log, target_dense).sum(dim=1) / target_dense.sum(dim=1))
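# The dense loss above is the mean cross-entropy over positive grid-cell
# pairs: target_dense marks foreground-foreground (query, key) cell pairs,
# so each row of -log softmax(logits) is averaged over its positive entries.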
loss = loss_instance + loss_dense * .2
acc1, acc5 = accuracy(output_instance, target_instance, topk=(1, 5))
acc_dense_pos = output_dense.reshape(output_dense.shape[0], -1).argmax(dim=1)
acc_dense = target_dense[torch.arange(0, target_dense.shape[0]), acc_dense_pos].float().mean() * 100.
loss_i.update(loss_instance.item(), images[0].size(0))
loss_d.update(loss_dense.item(), images[0].size(0))
acc_ins.update(acc1[0], images[0].size(0))
acc_seg.update(acc_dense.item(), images[0].size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print(' '.join(entries))
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
logger.info('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
lr = args.lr
lr *= 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
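# Worked example of the cosine schedule above (lr=0.03, epochs=200):
#   epoch 0   -> 0.03 * 0.5 * (1 + cos(0))    = 0.030
#   epoch 100 -> 0.03 * 0.5 * (1 + cos(pi/2)) = 0.015
#   epoch 199 -> ~1.9e-6, decaying smoothly toward 0 at epoch 200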
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
# CP2-main/builder.py
# The CP2_MoCo model is built upon the MoCo v2 code base:
# https://github.com/facebookresearch/moco
# Copyright (c) Facebook, Inc. and its affilates. All Rights Reserved
import torch
import torch.nn as nn
from mmseg.models import build_segmentor
class CP2_MOCO(nn.Module):
def __init__(self, cfg, dim=128, K=65536, m=0.999, T=0.2):
super(CP2_MOCO, self).__init__()
self.K = K
self.m = m
self.T = T
self.encoder_q = build_segmentor(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
self.encoder_k = build_segmentor(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
# create the queue
self.register_buffer("queue", torch.randn(dim, K))
self.queue = nn.functional.normalize(self.queue, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)
@torch.no_grad()
def _dequeue_and_enqueue(self, keys):
# gather keys before updating queue
keys = concat_all_gather(keys)
batch_size = keys.shape[0]
ptr = int(self.queue_ptr)
if ptr + batch_size > self.K:
self.queue[:, ptr:self.K] = keys[0:self.K - ptr].T
self.queue[:, 0:ptr + batch_size - self.K] = keys[self.K - ptr:batch_size].T
else:
self.queue[:, ptr:ptr + batch_size] = keys.T
ptr = (ptr + batch_size) % self.K # move pointer
self.queue_ptr[0] = ptr
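# Wraparound example (illustrative numbers): with K=8, ptr=6 and a gathered
# batch of 4 keys, columns 6..7 receive keys 0..1, columns 0..1 receive
# keys 2..3, and the pointer becomes (6 + 4) % 8 = 2.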
@torch.no_grad()
def _batch_shuffle_ddp(self, x):
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this], idx_unshuffle
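# Shuffle example with 2 GPUs and 2 samples per GPU: if idx_shuffle is
# [3, 0, 2, 1], GPU 0 encodes samples [3, 0] and GPU 1 encodes [2, 1];
# idx_unshuffle = argsort([3, 0, 2, 1]) = [1, 3, 2, 0] restores the
# original order after gathering.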
@torch.no_grad()
def _batch_unshuffle_ddp(self, x, idx_unshuffle):
"""
Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
def forward(self, im_q, im_k, mask_q, mask_k):
"""
Input:
im_q: a batch of query images
im_k: a batch of key images
Output:
logits, targets
"""
current_bs = im_q.size(0)
mask_q = mask_q.reshape(current_bs, -1)
mask_k = mask_k.reshape(current_bs, -1)
# compute query features
q = self.encoder_q(im_q) # queries: NxCx14x14
q = q.reshape(q.shape[0], q.shape[1], -1) # queries: NxCx196
q_dense = nn.functional.normalize(q, dim=1)
q_pos = nn.functional.normalize(torch.einsum('ncx,nx->nc', [q_dense, mask_q]), dim=1)
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
# shuffle for making use of BN
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
k = self.encoder_k(im_k) # keys: NxCx14x14
# undo shuffle
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
k = k.reshape(k.shape[0], k.shape[1], -1) # keys: NxCx196
k_dense = nn.functional.normalize(k, dim=1) # keys: NxCx196
k_pos = nn.functional.normalize(torch.einsum('ncx,nx->nc', [k_dense, mask_k]), dim=1)
# dense logits
logits_dense = torch.einsum('ncx,ncy->nxy', [q_dense, k_dense]) #Nx196x196
labels_dense = torch.einsum('nx,ny->nxy', [mask_q, mask_k])
labels_dense = labels_dense.reshape(labels_dense.shape[0], -1)
mask_dense = torch.einsum('x,ny->nxy', [torch.ones(196).cuda(), mask_k])
mask_dense = mask_dense.reshape(mask_dense.shape[0], -1)
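# Shapes (for 224x224 inputs and a 14x14 feature grid): logits_dense is
# Nx196x196 similarities between every query cell and every key cell;
# labels_dense flags foreground-foreground pairs; mask_dense flags columns
# whose key cell lies on the foreground. Note the hardcoded 196 assumes a
# 14x14 grid, i.e. 224x224 inputs with output stride 16.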
# moco logits
l_pos = torch.einsum('nc,nc->n', [q_pos, k_pos]).unsqueeze(-1)
l_neg = torch.einsum('nc,ck->nk', [q_pos, self.queue.clone().detach()])
logits_moco = torch.cat([l_pos, l_neg], dim=1)
labels_moco = torch.zeros(logits_moco.shape[0], dtype=torch.long).cuda()
# apply temperature
logits_moco /= self.T
# dequeue and enqueue
self._dequeue_and_enqueue(k_pos)
return logits_moco, logits_dense, labels_moco, labels_dense, mask_dense
# utils
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
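# Minimal usage sketch (assumes a valid mmseg segmentor config `cfg` whose
# decode head emits a 128-channel embedding map, as in FCNHead with
# contrast=True; masks are given at feature-grid resolution):
#   model = CP2_MOCO(cfg)
#   logits_moco, logits_dense, labels_moco, labels_dense, mask_dense = \
#       model(im_q, im_k, mask_q, mask_k)
#   # logits_moco: Nx(1+K) instance logits, logits_dense: Nx196x196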
# CP2-main/tools/train.py
import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv.runner import init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from mmseg import __version__
from mmseg.apis import set_random_seed, train_segmentor
from mmseg.datasets import build_dataset
from mmseg.models import build_segmentor
from mmseg.utils import collect_env, get_root_logger
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# import torch.distributed as dist
#
# dist.init_process_group('gloo', init_method='file:///temp/somefile', rank=0, world_size=1)
def parse_args():
parser = argparse.ArgumentParser(description='Train a segmentor')
parser.add_argument(
'config',
help='train config file path')
parser.add_argument(
'--work-dir',
help='the dir to save logs and models')
parser.add_argument(
'--load-from',
help='the checkpoint file to load weights from')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
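# Example invocations (hypothetical config paths):
#   python tools/train.py configs/voc_finetune.py --work-dir work_dirs/voc
#   python -m torch.distributed.launch --nproc_per_node=4 tools/train.py \
#       configs/voc_finetune.py --launcher pytorch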
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > `work_dir` in config file > config filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, deterministic: '
f'{args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_segmentor(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
logger.info(model)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmseg version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmseg_version=f'{__version__}+{get_git_hash()[:7]}',
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES,
PALETTE=datasets[0].PALETTE)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
# passing checkpoint meta for saving best checkpoint
meta.update(cfg.checkpoint_config.meta)
train_segmentor(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
print('start training')
main()
# CP2-main/mmseg/apis/inference.py
import matplotlib.pyplot as plt
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmseg.datasets.pipelines import Compose
from mmseg.models import build_segmentor
def init_segmentor(config, checkpoint=None, device='cuda:0'):
"""Initialize a segmentor from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
device (str, optional): CPU/CUDA device option. Default 'cuda:0'.
Use 'cpu' for loading model on CPU.
Returns:
nn.Module: The constructed segmentor.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
'but got {}'.format(type(config)))
config.model.pretrained = None
config.model.train_cfg = None
model = build_segmentor(config.model, test_cfg=config.get('test_cfg'))
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
model.CLASSES = checkpoint['meta']['CLASSES']
model.PALETTE = checkpoint['meta']['PALETTE']
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage:
"""A simple pipeline to load image."""
def __call__(self, results):
"""Call function to load images into results.
Args:
results (dict): A result dict contains the file name
of the image to be read.
Returns:
dict: ``results`` will be returned containing loaded image.
"""
if isinstance(results['img'], str):
results['filename'] = results['img']
results['ori_filename'] = results['img']
else:
results['filename'] = None
results['ori_filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def inference_segmentor(model, img):
"""Inference image(s) with the segmentor.
Args:
model (nn.Module): The loaded segmentor.
img (str/ndarray or list[str/ndarray]): Either image files or loaded
images.
Returns:
(list[Tensor]): The segmentation result.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(img=img)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
data['img_metas'] = [i.data[0] for i in data['img_metas']]
# forward the model
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result
def show_result_pyplot(model,
img,
result,
palette=None,
fig_size=(15, 10),
opacity=0.5,
title='',
block=True):
"""Visualize the segmentation results on the image.
Args:
model (nn.Module): The loaded segmentor.
img (str or np.ndarray): Image filename or loaded image.
result (list): The segmentation result.
palette (list[list[int]]] | None): The palette of segmentation
map. If None is given, random palette will be generated.
Default: None
fig_size (tuple): Figure size of the pyplot figure.
opacity(float): Opacity of painted segmentation map.
Default 0.5.
Must be in (0, 1] range.
title (str): The title of pyplot figure.
Default is ''.
block (bool): Whether to block the pyplot figure.
Default is True.
"""
if hasattr(model, 'module'):
model = model.module
img = model.show_result(
img, result, palette=palette, show=False, opacity=opacity)
plt.figure(figsize=fig_size)
plt.imshow(mmcv.bgr2rgb(img))
plt.title(title)
plt.tight_layout()
plt.show(block=block)
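# Minimal usage sketch (hypothetical paths):
#   model = init_segmentor('configs/cfg.py', 'checkpoint.pth', device='cuda:0')
#   result = inference_segmentor(model, 'demo.png')
#   show_result_pyplot(model, 'demo.png', result, title='prediction')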
# CP2-main/mmseg/apis/test.py
import os.path as osp
import pickle
import shutil
import tempfile
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
def np2tmp(array, temp_file_name=None):
"""Save ndarray to local numpy file.
Args:
array (ndarray): Ndarray to save.
temp_file_name (str): Numpy file name. If 'temp_file_name=None', this
function will generate a file name with tempfile.NamedTemporaryFile
to save ndarray. Default: None.
Returns:
str: The numpy file name.
"""
if temp_file_name is None:
temp_file_name = tempfile.NamedTemporaryFile(
suffix='.npy', delete=False).name
np.save(temp_file_name, array)
return temp_file_name
def single_gpu_test(model,
data_loader,
show=False,
out_dir=None,
efficient_test=False,
opacity=0.5):
"""Test with single GPU.
Args:
model (nn.Module): Model to be tested.
data_loader (utils.data.Dataloader): Pytorch data loader.
show (bool): Whether show results during inference. Default: False.
out_dir (str, optional): If specified, the results will be dumped into
the directory to save output results.
efficient_test (bool): Whether save the results as local numpy files to
save CPU memory during evaluation. Default: False.
opacity(float): Opacity of painted segmentation map.
Default 0.5.
Must be in (0, 1] range.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, **data)
if show or out_dir:
img_tensor = data['img'][0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
model.module.show_result(
img_show,
result,
palette=dataset.PALETTE,
show=show,
out_file=out_file,
opacity=opacity)
if isinstance(result, list):
if efficient_test:
result = [np2tmp(_) for _ in result]
results.extend(result)
else:
if efficient_test:
result = np2tmp(result)
results.append(result)
batch_size = len(result)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model,
data_loader,
tmpdir=None,
gpu_collect=False,
efficient_test=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and use gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (utils.data.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
efficient_test (bool): Whether save the results as local numpy files to
save CPU memory during evaluation. Default: False.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
if isinstance(result, list):
if efficient_test:
result = [np2tmp(_) for _ in result]
results.extend(result)
else:
if efficient_test:
result = np2tmp(result)
results.append(result)
if rank == 0:
batch_size = len(result)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
"""Collect results with CPU."""
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is the ASCII code of the space character (used as padding)
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
"""Collect results with GPU."""
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
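# Example: with world_size=2 and pickled parts of 10 and 7 bytes, both send
# buffers are padded to 10 bytes before all_gather; rank 0 then unpickles
# recv[:shape[0]] per rank, so the padding bytes are never deserialized.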
# CP2-main/mmseg/apis/train.py
import random
import warnings
import time
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import build_optimizer, build_runner
from mmseg.core import DistEvalHook, EvalHook
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.utils import get_root_logger
def set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def train_segmentor(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
"""Launch segmentor training."""
logger = get_root_logger(cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
data_loaders = [
build_dataloader(
ds,
cfg.data.samples_per_gpu,
cfg.data.workers_per_gpu,
# cfg.gpus will be ignored if distributed
len(cfg.gpu_ids),
dist=distributed,
seed=cfg.seed,
drop_last=True) for ds in dataset
]
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
# build runner
# optimizer = build_optimizer(model.module.decode_head.conv_seg, cfg.optimizer)
optimizer = build_optimizer(model, cfg.optimizer)
if cfg.get('runner') is None:
cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
runner = build_runner(
cfg.runner,
default_args=dict(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta))
# register hooks
runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
cfg.checkpoint_config, cfg.log_config,
cfg.get('momentum_config', None))
# an ugly workaround to make the .log and .log.json filenames the same
runner.timestamp = timestamp
# register eval hooks
if validate:
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(
val_dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
eval_hook = DistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
# print("runner has run")
# time.sleep(10)
runner.run(data_loaders, cfg.workflow)
# CP2-main/mmseg/core/evaluation/eval_hooks.py
import os.path as osp
import torch.distributed as dist
from mmcv.runner import DistEvalHook as _DistEvalHook
from mmcv.runner import EvalHook as _EvalHook
from torch.nn.modules.batchnorm import _BatchNorm
class EvalHook(_EvalHook):
"""Single GPU EvalHook, with efficient test support.
Args:
by_epoch (bool): Determine perform evaluation by epoch or by iteration.
If set to True, it will perform by epoch. Otherwise, by iteration.
Default: False.
efficient_test (bool): Whether save the results as local numpy files to
save CPU memory during evaluation. Default: False.
"""
greater_keys = ['mIoU', 'mAcc', 'aAcc']
def __init__(self, *args, by_epoch=False, efficient_test=False, **kwargs):
super().__init__(*args, by_epoch=by_epoch, **kwargs)
self.efficient_test = efficient_test
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
if not self._should_evaluate(runner):
return
from mmseg.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader, show=False)
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
class DistEvalHook(_DistEvalHook):
"""Distributed EvalHook, with efficient test support.
Args:
by_epoch (bool): Determine perform evaluation by epoch or by iteration.
If set to True, it will perform by epoch. Otherwise, by iteration.
Default: False.
efficient_test (bool): Whether save the results as local numpy files to
save CPU memory during evaluation. Default: False.
"""
greater_keys = ['mIoU', 'mAcc', 'aAcc']
def __init__(self, *args, by_epoch=False, efficient_test=False, **kwargs):
super().__init__(*args, by_epoch=by_epoch, **kwargs)
self.efficient_test = efficient_test
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
# Synchronization of BatchNorm's buffer (running_mean
# and running_var) is not supported in the DDP of pytorch,
# which may cause the inconsistent performance of models in
# different ranks, so we broadcast BatchNorm's buffers
# of rank 0 to other ranks to avoid this.
if self.broadcast_bn_buffer:
model = runner.model
for name, module in model.named_modules():
if isinstance(module,
_BatchNorm) and module.track_running_stats:
dist.broadcast(module.running_var, 0)
dist.broadcast(module.running_mean, 0)
if not self._should_evaluate(runner):
return
tmpdir = self.tmpdir
if tmpdir is None:
tmpdir = osp.join(runner.work_dir, '.eval_hook')
from mmseg.apis import multi_gpu_test
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=tmpdir,
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
# CP2-main/mmseg/core/evaluation/metrics.py
from collections import OrderedDict
import mmcv
import numpy as np
import torch
def f_score(precision, recall, beta=1):
"""calcuate the f-score value.
Args:
precision (float | torch.Tensor): The precision value.
recall (float | torch.Tensor): The recall value.
beta (int): Determines the weight of recall in the combined score.
Default: 1.
Returns:
[torch.tensor]: The f-score value.
"""
score = (1 + beta**2) * (precision * recall) / (
(beta**2 * precision) + recall)
return score
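# Worked example: precision=0.5, recall=1.0, beta=1 gives
#   (1 + 1) * (0.5 * 1.0) / (1 * 0.5 + 1.0) = 1.0 / 1.5 ~ 0.667,
# i.e. the harmonic mean of precision and recall (the F1 score).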
def intersect_and_union(pred_label,
label,
num_classes,
ignore_index,
label_map=dict(),
reduce_zero_label=False):
"""Calculate intersection and Union.
Args:
pred_label (ndarray | str): Prediction segmentation map
or predict result filename.
label (ndarray | str): Ground truth segmentation map
or label filename.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
label_map (dict): Mapping old labels to new labels. The parameter will
work only when label is str. Default: dict().
reduce_zero_label (bool): Whether to ignore the zero label. The parameter
will work only when label is str. Default: False.
Returns:
torch.Tensor: The intersection of prediction and ground truth
histogram on all classes.
torch.Tensor: The union of prediction and ground truth histogram on
all classes.
torch.Tensor: The prediction histogram on all classes.
torch.Tensor: The ground truth histogram on all classes.
"""
if isinstance(pred_label, str):
pred_label = torch.from_numpy(np.load(pred_label))
else:
pred_label = torch.from_numpy((pred_label))
if isinstance(label, str):
label = torch.from_numpy(
mmcv.imread(label, flag='unchanged', backend='pillow'))
else:
label = torch.from_numpy(label)
if label_map is not None:
for old_id, new_id in label_map.items():
label[label == old_id] = new_id
if reduce_zero_label:
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
mask = (label != ignore_index)
pred_label = pred_label[mask]
label = label[mask]
intersect = pred_label[pred_label == label]
area_intersect = torch.histc(
intersect.float(), bins=(num_classes), min=0, max=num_classes - 1)
area_pred_label = torch.histc(
pred_label.float(), bins=(num_classes), min=0, max=num_classes - 1)
area_label = torch.histc(
label.float(), bins=(num_classes), min=0, max=num_classes - 1)
area_union = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
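# Worked example (num_classes=2, ignore_index=255):
#   pred_label = [0, 1, 1, 0], label = [0, 1, 0, 255]
#   -> the last pixel is masked out; intersection = [1, 1], union = [2, 2],
#      so the per-class IoU would be [0.5, 0.5].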
def total_intersect_and_union(results,
gt_seg_maps,
num_classes,
ignore_index,
label_map=dict(),
reduce_zero_label=False):
"""Calculate Total Intersection and Union.
Args:
results (list[ndarray] | list[str]): List of prediction segmentation
maps or list of prediction result filenames.
gt_seg_maps (list[ndarray] | list[str]): list of ground truth
segmentation maps or list of label filenames.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
label_map (dict): Mapping old labels to new labels. Default: dict().
reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
ndarray: The intersection of prediction and ground truth histogram
on all classes.
ndarray: The union of prediction and ground truth histogram on all
classes.
ndarray: The prediction histogram on all classes.
ndarray: The ground truth histogram on all classes.
"""
num_imgs = len(results)
assert len(gt_seg_maps) == num_imgs
total_area_intersect = torch.zeros((num_classes, ), dtype=torch.float64)
total_area_union = torch.zeros((num_classes, ), dtype=torch.float64)
total_area_pred_label = torch.zeros((num_classes, ), dtype=torch.float64)
total_area_label = torch.zeros((num_classes, ), dtype=torch.float64)
for i in range(num_imgs):
area_intersect, area_union, area_pred_label, area_label = \
intersect_and_union(
results[i], gt_seg_maps[i], num_classes, ignore_index,
label_map, reduce_zero_label)
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, \
total_area_label
def mean_iou(results,
gt_seg_maps,
num_classes,
ignore_index,
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False):
"""Calculate Mean Intersection and Union (mIoU)
Args:
results (list[ndarray] | list[str]): List of prediction segmentation
maps or list of prediction result filenames.
gt_seg_maps (list[ndarray] | list[str]): list of ground truth
segmentation maps or list of label filenames.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
nan_to_num (int, optional): If specified, NaN values will be replaced
by the numbers defined by the user. Default: None.
label_map (dict): Mapping old labels to new labels. Default: dict().
reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
dict[str, float | ndarray]:
<aAcc> float: Overall accuracy on all images.
<Acc> ndarray: Per category accuracy, shape (num_classes, ).
<IoU> ndarray: Per category IoU, shape (num_classes, ).
"""
iou_result = eval_metrics(
results=results,
gt_seg_maps=gt_seg_maps,
num_classes=num_classes,
ignore_index=ignore_index,
metrics=['mIoU'],
nan_to_num=nan_to_num,
label_map=label_map,
reduce_zero_label=reduce_zero_label)
return iou_result
def mean_dice(results,
gt_seg_maps,
num_classes,
ignore_index,
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False):
"""Calculate Mean Dice (mDice)
Args:
results (list[ndarray] | list[str]): List of prediction segmentation
maps or list of prediction result filenames.
gt_seg_maps (list[ndarray] | list[str]): list of ground truth
segmentation maps or list of label filenames.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
nan_to_num (int, optional): If specified, NaN values will be replaced
by the numbers defined by the user. Default: None.
label_map (dict): Mapping old labels to new labels. Default: dict().
reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
dict[str, float | ndarray]: Default metrics.
<aAcc> float: Overall accuracy on all images.
<Acc> ndarray: Per category accuracy, shape (num_classes, ).
<Dice> ndarray: Per category dice, shape (num_classes, ).
"""
dice_result = eval_metrics(
results=results,
gt_seg_maps=gt_seg_maps,
num_classes=num_classes,
ignore_index=ignore_index,
metrics=['mDice'],
nan_to_num=nan_to_num,
label_map=label_map,
reduce_zero_label=reduce_zero_label)
return dice_result
def mean_fscore(results,
gt_seg_maps,
num_classes,
ignore_index,
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False,
beta=1):
"""Calculate Mean Intersection and Union (mIoU)
Args:
results (list[ndarray] | list[str]): List of prediction segmentation
maps or list of prediction result filenames.
gt_seg_maps (list[ndarray] | list[str]): list of ground truth
segmentation maps or list of label filenames.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
nan_to_num (int, optional): If specified, NaN values will be replaced
by the numbers defined by the user. Default: None.
label_map (dict): Mapping old labels to new labels. Default: dict().
reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
beta (int): Determines the weight of recall in the combined score.
Default: 1.
Returns:
dict[str, float | ndarray]: Default metrics.
<aAcc> float: Overall accuracy on all images.
<Fscore> ndarray: Per category f-score, shape (num_classes, ).
<Precision> ndarray: Per category precision, shape (num_classes, ).
<Recall> ndarray: Per category recall, shape (num_classes, ).
"""
fscore_result = eval_metrics(
results=results,
gt_seg_maps=gt_seg_maps,
num_classes=num_classes,
ignore_index=ignore_index,
metrics=['mFscore'],
nan_to_num=nan_to_num,
label_map=label_map,
reduce_zero_label=reduce_zero_label,
beta=beta)
return fscore_result
def eval_metrics(results,
gt_seg_maps,
num_classes,
ignore_index,
metrics=['mIoU'],
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False,
beta=1):
"""Calculate evaluation metrics
Args:
results (list[ndarray] | list[str]): List of prediction segmentation
maps or list of prediction result filenames.
gt_seg_maps (list[ndarray] | list[str]): list of ground truth
segmentation maps or list of label filenames.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
metrics (list[str] | str): Metrics to be evaluated. Options are 'mIoU', 'mDice' and 'mFscore'.
nan_to_num (int, optional): If specified, NaN values will be replaced
by the numbers defined by the user. Default: None.
label_map (dict): Mapping old labels to new labels. Default: dict().
reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
float: Overall accuracy on all images.
ndarray: Per category accuracy, shape (num_classes, ).
ndarray: Per category evaluation metrics, shape (num_classes, ).
"""
if isinstance(metrics, str):
metrics = [metrics]
allowed_metrics = ['mIoU', 'mDice', 'mFscore']
if not set(metrics).issubset(set(allowed_metrics)):
raise KeyError('metrics {} is not supported'.format(metrics))
total_area_intersect, total_area_union, total_area_pred_label, \
total_area_label = total_intersect_and_union(
results, gt_seg_maps, num_classes, ignore_index, label_map,
reduce_zero_label)
all_acc = total_area_intersect.sum() / total_area_label.sum()
ret_metrics = OrderedDict({'aAcc': all_acc})
for metric in metrics:
if metric == 'mIoU':
iou = total_area_intersect / total_area_union
acc = total_area_intersect / total_area_label
ret_metrics['IoU'] = iou
ret_metrics['Acc'] = acc
elif metric == 'mDice':
dice = 2 * total_area_intersect / (
total_area_pred_label + total_area_label)
acc = total_area_intersect / total_area_label
ret_metrics['Dice'] = dice
ret_metrics['Acc'] = acc
elif metric == 'mFscore':
precision = total_area_intersect / total_area_pred_label
recall = total_area_intersect / total_area_label
f_value = torch.tensor(
[f_score(x[0], x[1], beta) for x in zip(precision, recall)])
ret_metrics['Fscore'] = f_value
ret_metrics['Precision'] = precision
ret_metrics['Recall'] = recall
ret_metrics = {
metric: value.numpy()
for metric, value in ret_metrics.items()
}
if nan_to_num is not None:
ret_metrics = OrderedDict({
metric: np.nan_to_num(metric_value, nan=nan_to_num)
for metric, metric_value in ret_metrics.items()
})
return ret_metrics
# CP2-main/mmseg/core/seg/sampler/ohem_pixel_sampler.py
import torch
import torch.nn.functional as F
from ..builder import PIXEL_SAMPLERS
from .base_pixel_sampler import BasePixelSampler
@PIXEL_SAMPLERS.register_module()
class OHEMPixelSampler(BasePixelSampler):
"""Online Hard Example Mining Sampler for segmentation.
Args:
context (nn.Module): The context of sampler, subclass of
:obj:`BaseDecodeHead`.
thresh (float, optional): The threshold for hard example selection:
predictions whose ground-truth-class confidence falls below it are
treated as hard examples. If not specified, the hard examples are
the pixels with the top ``min_kept`` losses. Default: None.
min_kept (int, optional): The minimum number of predictions to keep.
Default: 100000.
"""
def __init__(self, context, thresh=None, min_kept=100000):
super(OHEMPixelSampler, self).__init__()
self.context = context
assert min_kept > 1
self.thresh = thresh
self.min_kept = min_kept
def sample(self, seg_logit, seg_label):
"""Sample pixels that have high loss or with low prediction confidence.
Args:
seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W)
seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W)
Returns:
torch.Tensor: segmentation weight, shape (N, H, W)
"""
with torch.no_grad():
assert seg_logit.shape[2:] == seg_label.shape[2:]
assert seg_label.shape[1] == 1
seg_label = seg_label.squeeze(1).long()
batch_kept = self.min_kept * seg_label.size(0)
valid_mask = seg_label != self.context.ignore_index
seg_weight = seg_logit.new_zeros(size=seg_label.size())
valid_seg_weight = seg_weight[valid_mask]
if self.thresh is not None:
seg_prob = F.softmax(seg_logit, dim=1)
tmp_seg_label = seg_label.clone().unsqueeze(1)
tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0
seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1)
sort_prob, sort_indices = seg_prob[valid_mask].sort()
if sort_prob.numel() > 0:
min_threshold = sort_prob[min(batch_kept,
sort_prob.numel() - 1)]
else:
min_threshold = 0.0
threshold = max(min_threshold, self.thresh)
valid_seg_weight[seg_prob[valid_mask] < threshold] = 1.
else:
losses = self.context.loss_decode(
seg_logit,
seg_label,
weight=None,
ignore_index=self.context.ignore_index,
reduction_override='none')
# faster than topk according to https://github.com/pytorch/pytorch/issues/22812 # noqa
_, sort_indices = losses[valid_mask].sort(descending=True)
valid_seg_weight[sort_indices[:batch_kept]] = 1.
seg_weight[valid_mask] = valid_seg_weight
return seg_weight
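# Usage sketch (a typical decode-head config entry; `context` is the head
# that owns the sampler):
#   sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=100000)
# With `thresh` set, pixels whose ground-truth-class probability is below
# max(adaptive_min, thresh) get weight 1; otherwise the batch_kept pixels
# with the highest loss are kept.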
# CP2-main/mmseg/models/decode_heads/fcn_head.py
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from ..builder import HEADS
from .decode_head import BaseDecodeHead
@HEADS.register_module()
class FCNHead(BaseDecodeHead):
"""Fully Convolution Networks for Semantic Segmentation.
This head is an implementation of `FCNNet <https://arxiv.org/abs/1411.4038>`_.
Args:
num_convs (int): Number of convs in the head. Default: 2.
kernel_size (int): The kernel size for convs in the head. Default: 3.
concat_input (bool): Whether to concatenate the input and output of
convs before the classification layer. Default: True.
contrast (bool): Whether to replace the classification layer with a
128-channel projection head (used for contrastive pretraining).
Default: False.
dilation (int): The dilation rate for convs in the head. Default: 1.
"""
def __init__(self,
num_convs=2,
kernel_size=3,
concat_input=True,
contrast=False,
dilation=1,
**kwargs):
assert num_convs >= 0 and dilation > 0 and isinstance(dilation, int)
self.num_convs = num_convs
self.concat_input = concat_input
self.contrast = contrast
self.kernel_size = kernel_size
super(FCNHead, self).__init__(**kwargs)
if num_convs == 0:
assert self.in_channels == self.channels
conv_padding = (kernel_size // 2) * dilation
convs = []
convs.append(
ConvModule(
self.in_channels,
self.channels,
kernel_size=kernel_size,
padding=conv_padding,
dilation=dilation,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
for i in range(num_convs - 1):
convs.append(
ConvModule(
self.channels,
self.channels,
kernel_size=kernel_size,
padding=conv_padding,
dilation=dilation,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
if num_convs == 0:
self.convs = nn.Identity()
else:
self.convs = nn.Sequential(*convs)
if self.concat_input:
self.conv_cat = ConvModule(
self.in_channels + self.channels,
self.channels,
kernel_size=kernel_size,
padding=kernel_size // 2,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
if self.contrast:
self.contrast_conv = nn.Sequential(
nn.Conv2d(self.channels, self.channels, 1),
nn.ReLU(),
nn.Conv2d(self.channels, 128, 1))
def forward(self, inputs):
"""Forward function."""
x = self._transform_inputs(inputs)
output = self.convs(x)
if self.concat_input:
output = self.conv_cat(torch.cat([x, output], dim=1))
if self.contrast:
output = self.contrast_conv(output)
else:
output = self.cls_seg(output)
return output
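# Note: with contrast=True the classification layer (cls_seg) is bypassed
# and the head emits a 128-channel embedding map, matching the dense
# projection dimension used by CP2_MOCO in builder.py (dim=128).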
# CP2-main/mmseg/models/decode_heads/decode_head.py
from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmcv.cnn import constant_init
from mmcv.runner import auto_fp16, force_fp32
from mmcv.runner import load_checkpoint
from mmseg.utils import get_root_logger
from mmseg.core import build_pixel_sampler
from mmseg.ops import resize
from ..builder import build_loss
from ..losses import accuracy
class BaseDecodeHead(nn.Module, metaclass=ABCMeta):
"""Base class for BaseDecodeHead.
Args:
in_channels (int|Sequence[int]): Input channels.
channels (int): Channels after modules, before conv_seg.
num_classes (int): Number of classes.
dropout_ratio (float): Ratio of dropout layer. Default: 0.1.
conv_cfg (dict|None): Config of conv layers. Default: None.
norm_cfg (dict|None): Config of norm layers. Default: None.
act_cfg (dict): Config of activation layers.
Default: dict(type='ReLU')
in_index (int|Sequence[int]): Input feature index. Default: -1
input_transform (str|None): Transformation type of input features.
Options: 'resize_concat', 'multiple_select', None.
'resize_concat': Multiple feature maps will be resized to the
same size as the first one and then concatenated together.
Usually used in FCN head of HRNet.
'multiple_select': Multiple feature maps will be bundle into
a list and passed into decode head.
None: Only one select feature map is allowed.
Default: None.
loss_decode (dict): Config of decode loss.
Default: dict(type='CrossEntropyLoss').
ignore_index (int | None): The label index to be ignored. When using
masked BCE loss, ignore_index should be set to None. Default: 255
sampler (dict|None): The config of segmentation map sampler.
Default: None.
align_corners (bool): align_corners argument of F.interpolate.
Default: False.
"""
def __init__(self,
in_channels,
channels,
*,
num_classes,
dropout_ratio=0.1,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
in_index=-1,
input_transform=None,
loss_decode=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
ignore_index=255,
sampler=None,
align_corners=False):
super(BaseDecodeHead, self).__init__()
self._init_inputs(in_channels, in_index, input_transform)
self.channels = channels
self.num_classes = num_classes
self.dropout_ratio = dropout_ratio
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.in_index = in_index
self.loss_decode = build_loss(loss_decode)
self.ignore_index = ignore_index
self.align_corners = align_corners
if sampler is not None:
self.sampler = build_pixel_sampler(sampler, context=self)
else:
self.sampler = None
self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1)
if dropout_ratio > 0:
self.dropout = nn.Dropout2d(dropout_ratio)
else:
self.dropout = None
self.fp16_enabled = False
def extra_repr(self):
"""Extra repr."""
s = f'input_transform={self.input_transform}, ' \
f'ignore_index={self.ignore_index}, ' \
f'align_corners={self.align_corners}'
return s
def _init_inputs(self, in_channels, in_index, input_transform):
"""Check and initialize input transforms.
The in_channels, in_index and input_transform must match.
        Specifically, when input_transform is None, only a single feature map
        will be selected, so in_channels and in_index must be of type int.
        When input_transform is not None, in_channels and in_index must be
        list or tuple, with the same length.
Args:
in_channels (int|Sequence[int]): Input channels.
in_index (int|Sequence[int]): Input feature index.
input_transform (str|None): Transformation type of input features.
Options: 'resize_concat', 'multiple_select', None.
                'resize_concat': Multiple feature maps will be resized to the
                    same size as the first one and then concatenated together.
                    Usually used in FCN head of HRNet.
                'multiple_select': Multiple feature maps will be bundled into
                    a list and passed into decode head.
None: Only one select feature map is allowed.
"""
if input_transform is not None:
assert input_transform in ['resize_concat', 'multiple_select']
self.input_transform = input_transform
self.in_index = in_index
if input_transform is not None:
assert isinstance(in_channels, (list, tuple))
assert isinstance(in_index, (list, tuple))
assert len(in_channels) == len(in_index)
if input_transform == 'resize_concat':
self.in_channels = sum(in_channels)
else:
self.in_channels = in_channels
else:
assert isinstance(in_channels, int)
assert isinstance(in_index, int)
self.in_channels = in_channels
def init_weights(self, pretrained=None):
"""Initialize weights of the whole decoder head."""
# normal_init(self.conv_seg, mean=0, std=0.01)
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
normal_init(self.conv_seg, mean=0, std=0.01)
# constant_init(self.conv_seg, 0)
def _transform_inputs(self, inputs):
"""Transform inputs for decoder.
Args:
inputs (list[Tensor]): List of multi-level img features.
Returns:
Tensor: The transformed inputs
"""
if self.input_transform == 'resize_concat':
inputs = [inputs[i] for i in self.in_index]
upsampled_inputs = [
resize(
input=x,
size=inputs[0].shape[2:],
mode='bilinear',
align_corners=self.align_corners) for x in inputs
]
inputs = torch.cat(upsampled_inputs, dim=1)
elif self.input_transform == 'multiple_select':
inputs = [inputs[i] for i in self.in_index]
else:
inputs = inputs[self.in_index]
return inputs
@auto_fp16()
@abstractmethod
def forward(self, inputs):
"""Placeholder of forward function."""
pass
def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):
"""Forward function for training.
Args:
inputs (list[Tensor]): List of multi-level img features.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
train_cfg (dict): The training config.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
seg_logits = self.forward(inputs)
losses = self.losses(seg_logits, gt_semantic_seg)
return losses
def forward_test(self, inputs, img_metas, test_cfg):
"""Forward function for testing.
Args:
inputs (list[Tensor]): List of multi-level img features.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
test_cfg (dict): The testing config.
Returns:
Tensor: Output segmentation map.
"""
return self.forward(inputs)
def cls_seg(self, feat):
"""Classify each pixel."""
if self.dropout is not None:
feat = self.dropout(feat)
output = self.conv_seg(feat)
return output
@force_fp32(apply_to=('seg_logit', ))
def losses(self, seg_logit, seg_label):
"""Compute segmentation loss."""
loss = dict()
seg_logit = resize(
input=seg_logit,
size=seg_label.shape[2:],
mode='bilinear',
align_corners=self.align_corners)
if self.sampler is not None:
seg_weight = self.sampler.sample(seg_logit, seg_label)
else:
seg_weight = None
seg_label = seg_label.squeeze(1)
loss['loss_seg'] = self.loss_decode(
seg_logit,
seg_label,
weight=seg_weight,
ignore_index=self.ignore_index)
loss['acc_seg'] = accuracy(seg_logit, seg_label)
return loss
| 9,545 | 38.283951 | 78 | py |
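# --- Editor's behavior sketch (not part of the original file) ---
# How 'resize_concat' merges multi-level features. BaseDecodeHead is abstract,
# so FCNHead (defined above) serves as a concrete subclass; all values are
# illustrative.
import torch
head = FCNHead(in_channels=[256, 512], in_index=[0, 1],
               input_transform='resize_concat',
               channels=128, num_classes=21, num_convs=1, concat_input=False)
assert head.in_channels == 768           # 256 + 512 after concatenation
feats = [torch.randn(1, 256, 64, 64), torch.randn(1, 512, 32, 32)]
logits = head(feats)                     # the 32x32 map is resized to 64x64 first
assert logits.shape == (1, 21, 64, 64)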
CP2 | CP2-main/mmseg/models/decode_heads/aspp_head.py | import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmseg.ops import resize
from mmseg.models.builder import HEADS
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
class ASPPModule(nn.ModuleList):
"""Atrous Spatial Pyramid Pooling (ASPP) Module.
Args:
dilations (tuple[int]): Dilation rate of each layer.
in_channels (int): Input channels.
channels (int): Channels after modules, before conv_seg.
conv_cfg (dict|None): Config of conv layers.
norm_cfg (dict|None): Config of norm layers.
act_cfg (dict): Config of activation layers.
"""
def __init__(self, dilations, in_channels, channels, conv_cfg, norm_cfg,
act_cfg):
super(ASPPModule, self).__init__()
self.dilations = dilations
self.in_channels = in_channels
self.channels = channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
for dilation in dilations:
self.append(
ConvModule(
self.in_channels,
self.channels,
1 if dilation == 1 else 3,
dilation=dilation,
padding=0 if dilation == 1 else dilation,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
def forward(self, x):
"""Forward function."""
aspp_outs = []
for aspp_module in self:
aspp_outs.append(aspp_module(x))
return aspp_outs
@HEADS.register_module()
class ASPPHead(BaseDecodeHead):
"""Rethinking Atrous Convolution for Semantic Image Segmentation.
This head is the implementation of `DeepLabV3
<https://arxiv.org/abs/1706.05587>`_.
Args:
        dilations (tuple[int]): Dilation rates for ASPP module.
            Default: (1, 6, 12, 18).
        contrast (bool): Whether to replace ``cls_seg`` with a 1x1-conv
            projection head that outputs 128-d features (used for
            contrastive pretraining). Default: False.
"""
def __init__(self, dilations=(1, 6, 12, 18), contrast=False, **kwargs):
super(ASPPHead, self).__init__(**kwargs)
assert isinstance(dilations, (list, tuple))
self.dilations = dilations
self.contrast = contrast
self.image_pool = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
ConvModule(
self.in_channels,
self.channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
self.aspp_modules = ASPPModule(
dilations,
self.in_channels,
self.channels,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.bottleneck = ConvModule(
(len(dilations) + 1) * self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
if self.contrast:
self.contrast_conv = nn.Sequential(
nn.Conv2d(self.channels, self.channels, 1),
nn.ReLU(),
nn.Conv2d(self.channels, 128, 1))
def forward(self, inputs):
"""Forward function."""
x = self._transform_inputs(inputs)
aspp_outs = [
resize(
self.image_pool(x),
size=x.size()[2:],
mode='bilinear',
align_corners=self.align_corners)
]
aspp_outs.extend(self.aspp_modules(x))
aspp_outs = torch.cat(aspp_outs, dim=1)
output = self.bottleneck(aspp_outs)
if self.contrast:
output = self.contrast_conv(output)
else:
output = self.cls_seg(output)
return output | 3,807 | 31.547009 | 76 | py |
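# --- Editor's usage sketch (not part of the original file) ---
# The ASPP head concatenates one global image-pool branch with len(dilations)
# atrous branches before the bottleneck; shapes are illustrative.
import torch
head = ASPPHead(dilations=(1, 6, 12, 18), in_channels=2048, channels=512,
                num_classes=19)
feats = [torch.randn(2, 2048, 33, 33)]
logits = head(feats)                     # (1 + 4) * 512 channels -> bottleneck
assert logits.shape == (2, 19, 33, 33)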
CP2 | CP2-main/mmseg/models/utils/se_layer.py | import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from .make_divisible import make_divisible
class SELayer(nn.Module):
"""Squeeze-and-Excitation Module.
Args:
channels (int): The input (and output) channels of the SE layer.
ratio (int): Squeeze ratio in SELayer, the intermediate channel will be
``int(channels/ratio)``. Default: 16.
conv_cfg (None or dict): Config dict for convolution layer.
Default: None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
If act_cfg is a dict, two activation layers will be configured
by this dict. If act_cfg is a sequence of dicts, the first
activation layer will be configured by the first dict and the
second activation layer will be configured by the second dict.
Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
divisor=6.0)).
"""
def __init__(self,
channels,
ratio=16,
conv_cfg=None,
act_cfg=(dict(type='ReLU'),
dict(type='HSigmoid', bias=3.0, divisor=6.0))):
super(SELayer, self).__init__()
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert mmcv.is_tuple_of(act_cfg, dict)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = ConvModule(
in_channels=channels,
out_channels=make_divisible(channels // ratio, 8),
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=make_divisible(channels // ratio, 8),
out_channels=channels,
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x):
out = self.global_avgpool(x)
out = self.conv1(out)
out = self.conv2(out)
return x * out
| 2,103 | 35.275862 | 79 | py |
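# --- Editor's usage sketch (not part of the original file) ---
# SELayer computes one gate per channel and rescales the input, leaving the
# spatial shape unchanged.
import torch
se = SELayer(channels=64, ratio=16)
x = torch.randn(2, 64, 56, 56)
y = se(x)
assert y.shape == x.shape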
CP2 | CP2-main/mmseg/models/utils/weight_init.py | """Modified from https://github.com/rwightman/pytorch-image-
models/blob/master/timm/models/layers/drop.py."""
import math
import warnings
import torch
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Reference: https://people.sc.fsu.edu/~jburkardt/presentations
/truncated_normal.pdf"""
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
'The distribution of values may be incorrect.',
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
lower_bound = norm_cdf((a - mean) / std)
upper_bound = norm_cdf((b - mean) / std)
        # Uniformly fill the tensor with values from
        # [lower_bound, upper_bound], then translate to
        # [2 * lower_bound - 1, 2 * upper_bound - 1].
tensor.uniform_(2 * lower_bound - 1, 2 * upper_bound - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`
mean (float): the mean of the normal distribution
std (float): the standard deviation of the normal distribution
a (float): the minimum cutoff value
b (float): the maximum cutoff value
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
| 2,327 | 35.952381 | 76 | py |
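# --- Editor's usage sketch (not part of the original file) ---
# Note that a and b are absolute cutoffs in this implementation, not multiples
# of std: the filled tensor is clamped to [a, b].
import torch
w = torch.empty(3, 5)
trunc_normal_(w, mean=0., std=0.02, a=-0.04, b=0.04)
assert w.min() >= -0.04 and w.max() <= 0.04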
CP2 | CP2-main/mmseg/models/utils/res_layer.py | from mmcv.cnn import build_conv_layer, build_norm_layer
from torch import nn as nn
class ResLayer(nn.Sequential):
"""ResLayer to build ResNet style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        dilation (int): dilation rate used by the blocks. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
        multi_grid (tuple[int] | None): Multi grid dilation rates of the last
            stage. Default: None
        contract_dilation (bool): Whether to contract the first dilation of
            each layer. Default: False
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
dilation=1,
avg_down=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
multi_grid=None,
contract_dilation=False,
**kwargs):
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
if avg_down:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
if multi_grid is None:
if dilation > 1 and contract_dilation:
first_dilation = dilation // 2
else:
first_dilation = dilation
else:
first_dilation = multi_grid[0]
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
dilation=first_dilation,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
dilation=dilation if multi_grid is None else multi_grid[i],
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
super(ResLayer, self).__init__(*layers)
| 3,315 | 33.905263 | 79 | py |
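# --- Editor's usage sketch (not part of the original file) ---
# Building one ResNet stage. Assumes mmseg's Bottleneck block (expansion = 4);
# the import path may differ between mmseg versions.
import torch
from mmseg.models.backbones.resnet import Bottleneck
layer = ResLayer(Bottleneck, inplanes=256, planes=128, num_blocks=4, stride=2)
x = torch.randn(2, 256, 56, 56)
y = layer(x)                    # only the first block strides; 128 * 4 = 512
assert y.shape == (2, 512, 28, 28)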
CP2 | CP2-main/mmseg/models/utils/self_attention_block.py | import torch
from mmcv.cnn import ConvModule, constant_init
from torch import nn as nn
from torch.nn import functional as F
class SelfAttentionBlock(nn.Module):
"""General self-attention block/non-local block.
Please refer to https://arxiv.org/abs/1706.03762 for details about key,
query and value.
Args:
key_in_channels (int): Input channels of key feature.
query_in_channels (int): Input channels of query feature.
channels (int): Output channels of key/query transform.
out_channels (int): Output channels.
share_key_query (bool): Whether share projection weight between key
and query projection.
query_downsample (nn.Module): Query downsample module.
key_downsample (nn.Module): Key downsample module.
        key_query_num_convs (int): Number of convs for key/query projection.
        value_out_num_convs (int): Number of convs for value/out projection.
        key_query_norm (bool): Whether to use ConvModule (with norm and
            activation) for the key/query projection.
        value_out_norm (bool): Whether to use ConvModule (with norm and
            activation) for the value/out projection.
        matmul_norm (bool): Whether to normalize the attention map by the
            square root of channels.
        with_out (bool): Whether to use out projection.
conv_cfg (dict|None): Config of conv layers.
norm_cfg (dict|None): Config of norm layers.
act_cfg (dict|None): Config of activation layers.
"""
def __init__(self, key_in_channels, query_in_channels, channels,
out_channels, share_key_query, query_downsample,
key_downsample, key_query_num_convs, value_out_num_convs,
key_query_norm, value_out_norm, matmul_norm, with_out,
conv_cfg, norm_cfg, act_cfg):
super(SelfAttentionBlock, self).__init__()
if share_key_query:
assert key_in_channels == query_in_channels
self.key_in_channels = key_in_channels
self.query_in_channels = query_in_channels
self.out_channels = out_channels
self.channels = channels
self.share_key_query = share_key_query
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.key_project = self.build_project(
key_in_channels,
channels,
num_convs=key_query_num_convs,
use_conv_module=key_query_norm,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if share_key_query:
self.query_project = self.key_project
else:
self.query_project = self.build_project(
query_in_channels,
channels,
num_convs=key_query_num_convs,
use_conv_module=key_query_norm,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.value_project = self.build_project(
key_in_channels,
channels if with_out else out_channels,
num_convs=value_out_num_convs,
use_conv_module=value_out_norm,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if with_out:
self.out_project = self.build_project(
channels,
out_channels,
num_convs=value_out_num_convs,
use_conv_module=value_out_norm,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
else:
self.out_project = None
self.query_downsample = query_downsample
self.key_downsample = key_downsample
self.matmul_norm = matmul_norm
self.init_weights()
def init_weights(self):
"""Initialize weight of later layer."""
if self.out_project is not None:
if not isinstance(self.out_project, ConvModule):
constant_init(self.out_project, 0)
def build_project(self, in_channels, channels, num_convs, use_conv_module,
conv_cfg, norm_cfg, act_cfg):
"""Build projection layer for key/query/value/out."""
if use_conv_module:
convs = [
ConvModule(
in_channels,
channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
]
for _ in range(num_convs - 1):
convs.append(
ConvModule(
channels,
channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
else:
convs = [nn.Conv2d(in_channels, channels, 1)]
for _ in range(num_convs - 1):
convs.append(nn.Conv2d(channels, channels, 1))
if len(convs) > 1:
convs = nn.Sequential(*convs)
else:
convs = convs[0]
return convs
def forward(self, query_feats, key_feats):
"""Forward function."""
batch_size = query_feats.size(0)
query = self.query_project(query_feats)
if self.query_downsample is not None:
query = self.query_downsample(query)
query = query.reshape(*query.shape[:2], -1)
query = query.permute(0, 2, 1).contiguous()
key = self.key_project(key_feats)
value = self.value_project(key_feats)
if self.key_downsample is not None:
key = self.key_downsample(key)
value = self.key_downsample(value)
key = key.reshape(*key.shape[:2], -1)
value = value.reshape(*value.shape[:2], -1)
value = value.permute(0, 2, 1).contiguous()
sim_map = torch.matmul(query, key)
if self.matmul_norm:
sim_map = (self.channels**-.5) * sim_map
sim_map = F.softmax(sim_map, dim=-1)
context = torch.matmul(sim_map, value)
context = context.permute(0, 2, 1).contiguous()
context = context.reshape(batch_size, -1, *query_feats.shape[2:])
if self.out_project is not None:
context = self.out_project(context)
return context
| 6,125 | 37.2875 | 78 | py |
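# --- Editor's usage sketch (not part of the original file) ---
# Plain non-local attention between a query map and a key/value map; all
# config values below are illustrative.
import torch
block = SelfAttentionBlock(
    key_in_channels=512, query_in_channels=512, channels=256,
    out_channels=512, share_key_query=False,
    query_downsample=None, key_downsample=None,
    key_query_num_convs=1, value_out_num_convs=1,
    key_query_norm=False, value_out_norm=False,
    matmul_norm=True, with_out=True,
    conv_cfg=None, norm_cfg=None, act_cfg=None)
q = torch.randn(2, 512, 16, 16)
kv = torch.randn(2, 512, 16, 16)
ctx = block(q, kv)              # each query attends over all 16 * 16 keys
assert ctx.shape == (2, 512, 16, 16)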
CP2 | CP2-main/mmseg/models/utils/up_conv_block.py | import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, build_upsample_layer
class UpConvBlock(nn.Module):
"""Upsample convolution block in decoder for UNet.
This upsample convolution block consists of one upsample module
followed by one convolution block. The upsample module expands the
high-level low-resolution feature map and the convolution block fuses
the upsampled high-level low-resolution feature map and the low-level
high-resolution feature map from encoder.
Args:
conv_block (nn.Sequential): Sequential of convolutional layers.
        in_channels (int): Number of input channels of the high-level
            feature map from decoder.
skip_channels (int): Number of input channels of the low-level
high-resolution feature map from encoder.
out_channels (int): Number of output channels.
num_convs (int): Number of convolutional layers in the conv_block.
Default: 2.
stride (int): Stride of convolutional layer in conv_block. Default: 1.
dilation (int): Dilation rate of convolutional layer in conv_block.
Default: 1.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
conv_cfg (dict | None): Config dict for convolution layer.
Default: None.
norm_cfg (dict | None): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict | None): Config dict for activation layer in ConvModule.
Default: dict(type='ReLU').
upsample_cfg (dict): The upsample config of the upsample module in
            decoder. Default: dict(type='InterpConv'). If the size of the
            high-level feature map is the same as that of the skip feature map
            (low-level feature map from encoder), the high-level feature map
            does not need upsampling and upsample_cfg should be None.
dcn (bool): Use deformable convolution in convolutional layer or not.
Default: None.
plugins (dict): plugins for convolutional layers. Default: None.
"""
def __init__(self,
conv_block,
in_channels,
skip_channels,
out_channels,
num_convs=2,
stride=1,
dilation=1,
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
upsample_cfg=dict(type='InterpConv'),
dcn=None,
plugins=None):
super(UpConvBlock, self).__init__()
assert dcn is None, 'Not implemented yet.'
assert plugins is None, 'Not implemented yet.'
self.conv_block = conv_block(
in_channels=2 * skip_channels,
out_channels=out_channels,
num_convs=num_convs,
stride=stride,
dilation=dilation,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
dcn=None,
plugins=None)
if upsample_cfg is not None:
self.upsample = build_upsample_layer(
cfg=upsample_cfg,
in_channels=in_channels,
out_channels=skip_channels,
with_cp=with_cp,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
else:
self.upsample = ConvModule(
in_channels,
skip_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
def forward(self, skip, x):
"""Forward function."""
x = self.upsample(x)
out = torch.cat([skip, x], dim=1)
out = self.conv_block(out)
return out
| 3,968 | 37.911765 | 79 | py |
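# --- Editor's usage sketch (not part of the original file) ---
# One UNet decoder step. Assumes mmseg's BasicConvBlock and the registered
# 'InterpConv' upsample layer; the import path may differ between versions.
import torch
from mmseg.models.backbones.unet import BasicConvBlock
up = UpConvBlock(BasicConvBlock, in_channels=256, skip_channels=128,
                 out_channels=128)
skip = torch.randn(2, 128, 64, 64)   # high-resolution encoder feature
x = torch.randn(2, 256, 32, 32)      # low-resolution decoder feature
y = up(skip, x)                      # upsample x, concat with skip, run convs
assert y.shape == (2, 128, 64, 64)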
CP2 | CP2-main/mmseg/models/utils/inverted_residual.py | from mmcv.cnn import ConvModule
from torch import nn
from torch.utils import checkpoint as cp
from .se_layer import SELayer
class InvertedResidual(nn.Module):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
expand_ratio (int): Adjusts number of channels of the hidden layer
in InvertedResidual by this amount.
dilation (int): Dilation rate of depthwise conv. Default: 1
conv_cfg (dict): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
dilation=1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
with_cp=False):
super(InvertedResidual, self).__init__()
self.stride = stride
        assert stride in [1, 2], f'stride must be in [1, 2]. ' \
f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
class InvertedResidualV3(nn.Module):
"""Inverted Residual Block for MobileNetV3.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
mid_channels (int): The input channels of the depthwise convolution.
kernel_size (int): The kernel size of the depthwise convolution.
Default: 3.
stride (int): The stride of the depthwise convolution. Default: 1.
se_cfg (dict): Config dict for se layer. Default: None, which means no
se layer.
with_expand_conv (bool): Use expand conv or not. If set False,
mid_channels must be the same with in_channels. Default: True.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
kernel_size=3,
stride=1,
se_cfg=None,
with_expand_conv=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False):
super(InvertedResidualV3, self).__init__()
self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
assert stride in [1, 2]
self.with_cp = with_cp
self.with_se = se_cfg is not None
self.with_expand_conv = with_expand_conv
if self.with_se:
assert isinstance(se_cfg, dict)
if not self.with_expand_conv:
assert mid_channels == in_channels
if self.with_expand_conv:
self.expand_conv = ConvModule(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.depthwise_conv = ConvModule(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
groups=mid_channels,
conv_cfg=dict(
type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if self.with_se:
self.se = SELayer(**se_cfg)
self.linear_conv = ConvModule(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
def forward(self, x):
def _inner_forward(x):
out = x
if self.with_expand_conv:
out = self.expand_conv(out)
out = self.depthwise_conv(out)
if self.with_se:
out = self.se(out)
out = self.linear_conv(out)
if self.with_res_shortcut:
return x + out
else:
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
| 7,005 | 32.521531 | 79 | py |
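# --- Editor's usage sketch (not part of the original file) ---
# The residual shortcut is active only when stride == 1 and
# in_channels == out_channels.
import torch
block = InvertedResidual(in_channels=32, out_channels=32, stride=1,
                         expand_ratio=6)
assert block.use_res_connect
x = torch.randn(2, 32, 28, 28)
y = block(x)
assert y.shape == x.shape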
CP2 | CP2-main/mmseg/models/utils/drop.py | """Modified from https://github.com/rwightman/pytorch-image-
models/blob/master/timm/models/layers/drop.py."""
import torch
from torch import nn
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of
residual blocks).
Args:
drop_prob (float): Drop rate for paths of model. Dropout rate has
to be between 0 and 1. Default: 0.
"""
def __init__(self, drop_prob=0.):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
self.keep_prob = 1 - drop_prob
def forward(self, x):
if self.drop_prob == 0. or not self.training:
return x
shape = (x.shape[0], ) + (1, ) * (
x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = self.keep_prob + torch.rand(
shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(self.keep_prob) * random_tensor
return output
| 1,015 | 30.75 | 78 | py |
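# --- Editor's behavior sketch (not part of the original file) ---
# During training each sample is either zeroed or rescaled by 1 / keep_prob,
# preserving the expectation; at eval time the module is an identity.
import torch
dp = DropPath(drop_prob=0.2).train()
x = torch.ones(8, 64, 4, 4)
y = dp(x)                    # per sample: all zeros or all 1 / 0.8
dp.eval()
assert torch.equal(dp(x), x)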
CP2 | CP2-main/mmseg/models/segmentors/base.py | import logging
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import mmcv
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from mmcv.runner import auto_fp16
class BaseSegmentor(nn.Module, metaclass=ABCMeta):
    """Base class for segmentors."""
def __init__(self):
super(BaseSegmentor, self).__init__()
self.fp16_enabled = False
@property
def with_neck(self):
"""bool: whether the segmentor has neck"""
return hasattr(self, 'neck') and self.neck is not None
@property
def with_auxiliary_head(self):
"""bool: whether the segmentor has auxiliary head"""
return hasattr(self,
'auxiliary_head') and self.auxiliary_head is not None
@property
def with_decode_head(self):
"""bool: whether the segmentor has decode head"""
return hasattr(self, 'decode_head') and self.decode_head is not None
@abstractmethod
def extract_feat(self, imgs):
"""Placeholder for extract features from images."""
pass
@abstractmethod
def encode_decode(self, img, img_metas):
"""Placeholder for encode images with backbone and decode into a
semantic segmentation map of the same size as input."""
pass
@abstractmethod
def forward_train(self, imgs, img_metas, **kwargs):
"""Placeholder for Forward function for training."""
pass
@abstractmethod
def simple_test(self, img, img_meta, **kwargs):
"""Placeholder for single image test."""
pass
@abstractmethod
def aug_test(self, imgs, img_metas, **kwargs):
"""Placeholder for augmentation test."""
pass
def init_weights(self, pretrained=None):
"""Initialize the weights in segmentor.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if pretrained is not None:
logger = logging.getLogger()
logger.info(f'load model from: {pretrained}')
def forward_test(self, imgs, img_metas, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got '
f'{type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) != '
f'num of image meta ({len(img_metas)})')
        # all images in the same aug batch should share the same ori_shape and
        # pad_shape
for img_meta in img_metas:
ori_shapes = [_['ori_shape'] for _ in img_meta]
assert all(shape == ori_shapes[0] for shape in ori_shapes)
img_shapes = [_['img_shape'] for _ in img_meta]
assert all(shape == img_shapes[0] for shape in img_shapes)
pad_shapes = [_['pad_shape'] for _ in img_meta]
assert all(shape == pad_shapes[0] for shape in pad_shapes)
if num_augs == 1:
return self.simple_test(imgs[0], img_metas[0], **kwargs)
else:
return self.aug_test(imgs, img_metas, **kwargs)
@auto_fp16(apply_to=('img', ))
def forward(self, img, img_metas, return_loss=True, **kwargs):
"""Calls either :func:`forward_train` or :func:`forward_test` depending
on whether ``return_loss`` is ``True``.
Note this setting will change the expected inputs. When
``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
        and List[dict]), and when ``return_loss=False``, img and img_meta
should be double nested (i.e. List[Tensor], List[List[dict]]), with
the outer list indicating test time augmentations.
"""
if return_loss:
return self.forward_train(img, img_metas, **kwargs)
else:
return self.forward_test(img, img_metas, **kwargs)
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
            data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
losses = self(**data_batch)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(data_batch['img_metas']))
return outputs
def val_step(self, data_batch, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
output = self(**data_batch, **kwargs)
return output
@staticmethod
def _parse_losses(losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
def show_result(self,
img,
result,
palette=None,
win_name='',
show=False,
wait_time=0,
out_file=None,
opacity=0.5):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (Tensor): The semantic segmentation results to draw over
`img`.
palette (list[list[int]]] | np.ndarray | None): The palette of
segmentation map. If None is given, random palette will be
generated. Default: None
win_name (str): The window name.
wait_time (int): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
opacity(float): Opacity of painted segmentation map.
Default 0.5.
Must be in (0, 1] range.
        Returns:
            img (ndarray): The image with the drawn segmentation result.
"""
img = mmcv.imread(img)
img = img.copy()
seg = result[0]
if palette is None:
if self.PALETTE is None:
palette = np.random.randint(
0, 255, size=(len(self.CLASSES), 3))
else:
palette = self.PALETTE
palette = np.array(palette)
assert palette.shape[0] == len(self.CLASSES)
assert palette.shape[1] == 3
assert len(palette.shape) == 2
assert 0 < opacity <= 1.0
color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
for label, color in enumerate(palette):
color_seg[seg == label, :] = color
# convert to BGR
color_seg = color_seg[..., ::-1]
img = img * (1 - opacity) + color_seg * opacity
img = img.astype(np.uint8)
# if out_file specified, do not show image in window
if out_file is not None:
show = False
if show:
mmcv.imshow(img, win_name, wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
if not (show or out_file):
warnings.warn('show==False and out_file is not specified, only '
'result image will be returned')
return img
| 10,350 | 36.777372 | 79 | py |
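# --- Editor's behavior sketch (not part of the original file) ---
# _parse_losses sums only the keys containing 'loss' into the backpropagated
# total; other entries (e.g. accuracies) are logged but not optimized.
import torch
raw = dict(loss_seg=torch.tensor(0.7), acc_seg=torch.tensor(88.0))
loss, log_vars = BaseSegmentor._parse_losses(raw)
assert abs(loss.item() - 0.7) < 1e-6
assert log_vars['acc_seg'] == 88.0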
CP2 | CP2-main/mmseg/models/segmentors/encoder_decoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.core import add_prefix
from mmseg.ops import resize
from .. import builder
from ..builder import SEGMENTORS
from .base import BaseSegmentor
@SEGMENTORS.register_module()
class EncoderDecoder(BaseSegmentor):
"""Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
    Note that auxiliary_head is only used for deep supervision during training,
    and can be discarded during inference.
"""
def __init__(self,
backbone,
decode_head,
neck=None,
auxiliary_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(EncoderDecoder, self).__init__()
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
self._init_decode_head(decode_head)
self._init_auxiliary_head(auxiliary_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
assert self.with_decode_head
def _init_decode_head(self, decode_head):
"""Initialize ``decode_head``"""
self.decode_head = builder.build_head(decode_head)
self.align_corners = self.decode_head.align_corners
self.num_classes = self.decode_head.num_classes
def _init_auxiliary_head(self, auxiliary_head):
"""Initialize ``auxiliary_head``"""
if auxiliary_head is not None:
if isinstance(auxiliary_head, list):
self.auxiliary_head = nn.ModuleList()
for head_cfg in auxiliary_head:
self.auxiliary_head.append(builder.build_head(head_cfg))
else:
self.auxiliary_head = builder.build_head(auxiliary_head)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone and heads.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(EncoderDecoder, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
# self.decode_head.init_weights()
self.decode_head.init_weights(pretrained=pretrained)
if self.with_auxiliary_head:
if isinstance(self.auxiliary_head, nn.ModuleList):
for aux_head in self.auxiliary_head:
aux_head.init_weights()
else:
self.auxiliary_head.init_weights()
def extract_feat(self, img):
"""Extract features from images."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def encode_decode(self, img, img_metas):
"""Encode images with backbone and decode into a semantic segmentation
map of the same size as input."""
x = self.extract_feat(img)
out = self._decode_head_forward_test(x, img_metas)
out = resize(
input=out,
size=img.shape[2:],
mode='bilinear',
align_corners=self.align_corners)
return out
def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg):
"""Run forward function and calculate loss for decode head in
training."""
losses = dict()
loss_decode = self.decode_head.forward_train(x, img_metas,
gt_semantic_seg,
self.train_cfg)
losses.update(add_prefix(loss_decode, 'decode'))
return losses
def _decode_head_forward_test(self, x, img_metas):
"""Run forward function and calculate loss for decode head in
inference."""
seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg)
return seg_logits
def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg):
"""Run forward function and calculate loss for auxiliary head in
training."""
losses = dict()
if isinstance(self.auxiliary_head, nn.ModuleList):
for idx, aux_head in enumerate(self.auxiliary_head):
loss_aux = aux_head.forward_train(x, img_metas,
gt_semantic_seg,
self.train_cfg)
losses.update(add_prefix(loss_aux, f'aux_{idx}'))
else:
loss_aux = self.auxiliary_head.forward_train(
x, img_metas, gt_semantic_seg, self.train_cfg)
losses.update(add_prefix(loss_aux, 'aux'))
return losses
def forward_dummy(self, img):
"""Dummy forward function."""
seg_logit = self.encode_decode(img, None)
return seg_logit
def forward(self, img, img_metas=None, return_loss=True, **kwargs):
if img_metas is None:
x = self.extract_feat(img)
return self.decode_head.forward(x)
else:
if return_loss:
return self.forward_train(img, img_metas, **kwargs)
else:
return self.forward_test(img, img_metas, **kwargs)
def forward_train(self, img, img_metas, gt_semantic_seg):
"""Forward function for training.
Args:
img (Tensor): Input images.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self.extract_feat(img)
losses = dict()
loss_decode = self._decode_head_forward_train(x, img_metas,
gt_semantic_seg)
losses.update(loss_decode)
if self.with_auxiliary_head:
loss_aux = self._auxiliary_head_forward_train(
x, img_metas, gt_semantic_seg)
losses.update(loss_aux)
return losses
# TODO refactor
def slide_inference(self, img, img_meta, rescale):
"""Inference by sliding-window with overlap.
If h_crop > h_img or w_crop > w_img, the small patch will be used to
decode without padding.
"""
h_stride, w_stride = self.test_cfg.stride
h_crop, w_crop = self.test_cfg.crop_size
batch_size, _, h_img, w_img = img.size()
num_classes = self.num_classes
h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
preds = img.new_zeros((batch_size, num_classes, h_img, w_img))
count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
for h_idx in range(h_grids):
for w_idx in range(w_grids):
y1 = h_idx * h_stride
x1 = w_idx * w_stride
y2 = min(y1 + h_crop, h_img)
x2 = min(x1 + w_crop, w_img)
y1 = max(y2 - h_crop, 0)
x1 = max(x2 - w_crop, 0)
crop_img = img[:, :, y1:y2, x1:x2]
crop_seg_logit = self.encode_decode(crop_img, img_meta)
preds += F.pad(crop_seg_logit,
(int(x1), int(preds.shape[3] - x2), int(y1),
int(preds.shape[2] - y2)))
count_mat[:, :, y1:y2, x1:x2] += 1
assert (count_mat == 0).sum() == 0
if torch.onnx.is_in_onnx_export():
# cast count_mat to constant while exporting to ONNX
count_mat = torch.from_numpy(
count_mat.cpu().detach().numpy()).to(device=img.device)
preds = preds / count_mat
if rescale:
preds = resize(
preds,
size=img_meta[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return preds
def whole_inference(self, img, img_meta, rescale):
"""Inference with full image."""
seg_logit = self.encode_decode(img, img_meta)
if rescale:
# support dynamic shape for onnx
if torch.onnx.is_in_onnx_export():
size = img.shape[2:]
else:
size = img_meta[0]['ori_shape'][:2]
seg_logit = resize(
seg_logit,
size=size,
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return seg_logit
def inference(self, img, img_meta, rescale):
"""Inference with slide/whole style.
Args:
img (Tensor): The input image of shape (N, 3, H, W).
img_meta (dict): Image info dict where each dict has: 'img_shape',
'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
rescale (bool): Whether rescale back to original shape.
Returns:
Tensor: The output segmentation map.
"""
assert self.test_cfg.mode in ['slide', 'whole']
ori_shape = img_meta[0]['ori_shape']
assert all(_['ori_shape'] == ori_shape for _ in img_meta)
if self.test_cfg.mode == 'slide':
seg_logit = self.slide_inference(img, img_meta, rescale)
else:
seg_logit = self.whole_inference(img, img_meta, rescale)
output = F.softmax(seg_logit, dim=1)
flip = img_meta[0]['flip']
if flip:
flip_direction = img_meta[0]['flip_direction']
assert flip_direction in ['horizontal', 'vertical']
if flip_direction == 'horizontal':
output = output.flip(dims=(3, ))
elif flip_direction == 'vertical':
output = output.flip(dims=(2, ))
return output
def simple_test(self, img, img_meta, rescale=True):
"""Simple test with single image."""
seg_logit = self.inference(img, img_meta, rescale)
seg_pred = seg_logit.argmax(dim=1)
if torch.onnx.is_in_onnx_export():
# our inference backend only support 4D output
seg_pred = seg_pred.unsqueeze(0)
return seg_pred
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
def aug_test(self, imgs, img_metas, rescale=True):
"""Test with augmentations.
Only rescale=True is supported.
"""
# aug_test rescale all imgs back to ori_shape for now
assert rescale
# to save memory, we get augmented seg logit inplace
seg_logit = self.inference(imgs[0], img_metas[0], rescale)
for i in range(1, len(imgs)):
cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale)
seg_logit += cur_seg_logit
seg_logit /= len(imgs)
seg_pred = seg_logit.argmax(dim=1)
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
| 11,752 | 36.790997 | 79 | py |
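# --- Editor's behavior sketch (not part of the original file) ---
# The sliding-window grid arithmetic used by slide_inference; the last window
# is clamped to the image border, so edge windows overlap more.
def num_windows(img_size, crop, stride):
    return max(img_size - crop + stride - 1, 0) // stride + 1

assert num_windows(512, 256, 171) == 3   # windows start at y = 0, 171, 256
assert num_windows(256, 256, 171) == 1   # a single crop covers the image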
CP2 | CP2-main/mmseg/models/losses/dice_loss.py | """Modified from https://github.com/LikeLy-Journey/SegmenTron/blob/master/
segmentron/solver/loss.py (Apache-2.0 License)"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight, weighted_loss
@weighted_loss
def dice_loss(pred,
target,
valid_mask,
smooth=1,
exponent=2,
class_weight=None,
ignore_index=255):
assert pred.shape[0] == target.shape[0]
total_loss = 0
num_classes = pred.shape[1]
for i in range(num_classes):
if i != ignore_index:
dice_loss = binary_dice_loss(
pred[:, i],
target[..., i],
valid_mask=valid_mask,
smooth=smooth,
exponent=exponent)
if class_weight is not None:
dice_loss *= class_weight[i]
total_loss += dice_loss
return total_loss / num_classes
@weighted_loss
def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwargs):
assert pred.shape[0] == target.shape[0]
pred = pred.reshape(pred.shape[0], -1)
target = target.reshape(target.shape[0], -1)
valid_mask = valid_mask.reshape(valid_mask.shape[0], -1)
num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth
den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth
return 1 - num / den
@LOSSES.register_module()
class DiceLoss(nn.Module):
"""DiceLoss.
This loss is proposed in `V-Net: Fully Convolutional Neural Networks for
Volumetric Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
smooth (float): A float number to smooth loss, and avoid NaN error.
Default: 1
        exponent (float): A float number to calculate the denominator
value: \\sum{x^exponent} + \\sum{y^exponent}. Default: 2.
        reduction (str, optional): The method used to reduce the loss. Options
            are "none", "mean" and "sum". Default: 'mean'.
class_weight (list[float] | str, optional): Weight of each class. If in
str format, read them from a file. Defaults to None.
loss_weight (float, optional): Weight of the loss. Default to 1.0.
ignore_index (int | None): The label index to be ignored. Default: 255.
"""
def __init__(self,
smooth=1,
exponent=2,
reduction='mean',
class_weight=None,
loss_weight=1.0,
ignore_index=255,
                 **kwargs):
super(DiceLoss, self).__init__()
self.smooth = smooth
self.exponent = exponent
self.reduction = reduction
self.class_weight = get_class_weight(class_weight)
self.loss_weight = loss_weight
self.ignore_index = ignore_index
def forward(self,
pred,
target,
avg_factor=None,
reduction_override=None,
                **kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = pred.new_tensor(self.class_weight)
else:
class_weight = None
pred = F.softmax(pred, dim=1)
num_classes = pred.shape[1]
one_hot_target = F.one_hot(
torch.clamp(target.long(), 0, num_classes - 1),
num_classes=num_classes)
valid_mask = (target != self.ignore_index).long()
loss = self.loss_weight * dice_loss(
pred,
one_hot_target,
valid_mask=valid_mask,
reduction=reduction,
avg_factor=avg_factor,
smooth=self.smooth,
exponent=self.exponent,
class_weight=class_weight,
ignore_index=self.ignore_index)
return loss
| 4,239 | 34.333333 | 79 | py |
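# --- Editor's usage sketch (not part of the original file) ---
# DiceLoss softmaxes the logits and one-hot encodes the targets internally;
# shapes below are illustrative.
import torch
criterion = DiceLoss(smooth=1, exponent=2)
logits = torch.randn(2, 3, 8, 8)          # (N, C, H, W) raw scores
target = torch.randint(0, 3, (2, 8, 8))   # (N, H, W) class indices
loss = criterion(logits, target)          # scalar dice loss
assert loss.dim() == 0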
CP2 | CP2-main/mmseg/models/losses/lovasz_loss.py | """Modified from https://github.com/bermanmaxim/LovaszSoftmax/blob/master/pytor
ch/lovasz_losses.py Lovasz-Softmax and Jaccard hinge loss in PyTorch Maxim
Berman 2018 ESAT-PSI KU Leuven (MIT License)"""
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight, weight_reduce_loss
def lovasz_grad(gt_sorted):
"""Computes gradient of the Lovasz extension w.r.t sorted errors.
See Alg. 1 in paper.
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def flatten_binary_logits(logits, labels, ignore_index=None):
"""Flattens predictions in the batch (binary case) Remove labels equal to
'ignore_index'."""
logits = logits.view(-1)
labels = labels.view(-1)
if ignore_index is None:
return logits, labels
valid = (labels != ignore_index)
vlogits = logits[valid]
vlabels = labels[valid]
return vlogits, vlabels
def flatten_probs(probs, labels, ignore_index=None):
"""Flattens predictions in the batch."""
if probs.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probs.size()
probs = probs.view(B, 1, H, W)
B, C, H, W = probs.size()
    probs = probs.permute(0, 2, 3, 1).contiguous().view(-1, C)  # (B*H*W, C) = (P, C)
labels = labels.view(-1)
if ignore_index is None:
return probs, labels
valid = (labels != ignore_index)
vprobs = probs[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vprobs, vlabels
def lovasz_hinge_flat(logits, labels):
"""Binary Lovasz hinge loss.
Args:
logits (torch.Tensor): [P], logits at each prediction
(between -infty and +infty).
labels (torch.Tensor): [P], binary ground truth labels (0 or 1).
Returns:
torch.Tensor: The calculated loss.
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.
signs = 2. * labels.float() - 1.
errors = (1. - logits * signs)
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), grad)
return loss
def lovasz_hinge(logits,
labels,
classes='present',
per_image=False,
class_weight=None,
reduction='mean',
avg_factor=None,
ignore_index=255):
"""Binary Lovasz hinge loss.
Args:
logits (torch.Tensor): [B, H, W], logits at each pixel
(between -infty and +infty).
labels (torch.Tensor): [B, H, W], binary ground truth masks (0 or 1).
classes (str | list[int], optional): Placeholder, to be consistent with
other loss. Default: None.
per_image (bool, optional): If per_image is True, compute the loss per
image instead of per batch. Default: False.
class_weight (list[float], optional): Placeholder, to be consistent
with other loss. Default: None.
reduction (str, optional): The method used to reduce the loss. Options
are "none", "mean" and "sum". This parameter only works when
per_image is True. Default: 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. This parameter only works when per_image is True.
Default: None.
ignore_index (int | None): The label index to be ignored. Default: 255.
Returns:
torch.Tensor: The calculated loss.
"""
if per_image:
loss = [
lovasz_hinge_flat(*flatten_binary_logits(
logit.unsqueeze(0), label.unsqueeze(0), ignore_index))
for logit, label in zip(logits, labels)
]
loss = weight_reduce_loss(
torch.stack(loss), None, reduction, avg_factor)
else:
loss = lovasz_hinge_flat(
*flatten_binary_logits(logits, labels, ignore_index))
return loss
def lovasz_softmax_flat(probs, labels, classes='present', class_weight=None):
"""Multi-class Lovasz-Softmax loss.
Args:
probs (torch.Tensor): [P, C], class probabilities at each prediction
(between 0 and 1).
labels (torch.Tensor): [P], ground truth labels (between 0 and C - 1).
classes (str | list[int], optional): Classes chosen to calculate loss.
'all' for all classes, 'present' for classes present in labels, or
a list of classes to average. Default: 'present'.
class_weight (list[float], optional): The weight for each class.
Default: None.
Returns:
torch.Tensor: The calculated loss.
"""
if probs.numel() == 0:
# only void pixels, the gradients should be 0
return probs * 0.
C = probs.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
for c in class_to_sum:
fg = (labels == c).float() # foreground for class c
if (classes == 'present' and fg.sum() == 0):
continue
if C == 1:
if len(classes) > 1:
raise ValueError('Sigmoid output possible only with 1 class')
class_pred = probs[:, 0]
else:
class_pred = probs[:, c]
errors = (fg - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
loss = torch.dot(errors_sorted, lovasz_grad(fg_sorted))
if class_weight is not None:
loss *= class_weight[c]
losses.append(loss)
return torch.stack(losses).mean()
def lovasz_softmax(probs,
labels,
classes='present',
per_image=False,
class_weight=None,
reduction='mean',
avg_factor=None,
ignore_index=255):
"""Multi-class Lovasz-Softmax loss.
Args:
probs (torch.Tensor): [B, C, H, W], class probabilities at each
prediction (between 0 and 1).
labels (torch.Tensor): [B, H, W], ground truth labels (between 0 and
C - 1).
classes (str | list[int], optional): Classes chosen to calculate loss.
'all' for all classes, 'present' for classes present in labels, or
a list of classes to average. Default: 'present'.
per_image (bool, optional): If per_image is True, compute the loss per
image instead of per batch. Default: False.
class_weight (list[float], optional): The weight for each class.
Default: None.
reduction (str, optional): The method used to reduce the loss. Options
are "none", "mean" and "sum". This parameter only works when
per_image is True. Default: 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. This parameter only works when per_image is True.
Default: None.
ignore_index (int | None): The label index to be ignored. Default: 255.
Returns:
torch.Tensor: The calculated loss.
"""
if per_image:
loss = [
lovasz_softmax_flat(
*flatten_probs(
prob.unsqueeze(0), label.unsqueeze(0), ignore_index),
classes=classes,
class_weight=class_weight)
for prob, label in zip(probs, labels)
]
loss = weight_reduce_loss(
torch.stack(loss), None, reduction, avg_factor)
else:
loss = lovasz_softmax_flat(
*flatten_probs(probs, labels, ignore_index),
classes=classes,
class_weight=class_weight)
return loss
@LOSSES.register_module()
class LovaszLoss(nn.Module):
"""LovaszLoss.
This loss is proposed in `The Lovasz-Softmax loss: A tractable surrogate
for the optimization of the intersection-over-union measure in neural
networks <https://arxiv.org/abs/1705.08790>`_.
Args:
loss_type (str, optional): Binary or multi-class loss.
Default: 'multi_class'. Options are "binary" and "multi_class".
classes (str | list[int], optional): Classes chosen to calculate loss.
'all' for all classes, 'present' for classes present in labels, or
a list of classes to average. Default: 'present'.
per_image (bool, optional): If per_image is True, compute the loss per
image instead of per batch. Default: False.
reduction (str, optional): The method used to reduce the loss. Options
are "none", "mean" and "sum". This parameter only works when
per_image is True. Default: 'mean'.
class_weight (list[float] | str, optional): Weight of each class. If in
str format, read them from a file. Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
"""
def __init__(self,
loss_type='multi_class',
classes='present',
per_image=False,
reduction='mean',
class_weight=None,
loss_weight=1.0):
super(LovaszLoss, self).__init__()
assert loss_type in ('binary', 'multi_class'), "loss_type should be \
'binary' or 'multi_class'."
if loss_type == 'binary':
self.cls_criterion = lovasz_hinge
else:
self.cls_criterion = lovasz_softmax
assert classes in ('all', 'present') or mmcv.is_list_of(classes, int)
if not per_image:
assert reduction == 'none', "reduction should be 'none' when \
per_image is False."
self.classes = classes
self.per_image = per_image
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = get_class_weight(class_weight)
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function."""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = cls_score.new_tensor(self.class_weight)
else:
class_weight = None
# if multi-class loss, transform logits to probs
if self.cls_criterion == lovasz_softmax:
cls_score = F.softmax(cls_score, dim=1)
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
self.classes,
self.per_image,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_cls
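# Usage sketch (illustrative; shapes are assumptions, not from the original).
# When per_image=False the class requires reduction='none', so the per-image
# variant is shown here; softmax is applied to the logits internally.
# >>> criterion = LovaszLoss(loss_type='multi_class', per_image=True)
# >>> seg_logit = torch.randn(2, 4, 16, 16)
# >>> seg_label = torch.randint(0, 4, (2, 16, 16))
# >>> loss = criterion(seg_logit, seg_label)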
| 11,391 | 36.473684 | 79 | py |
CP2 | CP2-main/mmseg/models/losses/utils.py | import functools
import mmcv
import numpy as np
import torch.nn.functional as F
def get_class_weight(class_weight):
"""Get class weight for loss function.
Args:
class_weight (list[float] | str | None): If class_weight is a str,
take it as a file name and read from it.
"""
if isinstance(class_weight, str):
# take it as a file path
if class_weight.endswith('.npy'):
class_weight = np.load(class_weight)
else:
# pkl, json or yaml
class_weight = mmcv.load(class_weight)
return class_weight
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
assert weight.dim() == loss.dim()
if weight.dim() > 1:
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
loss = loss.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
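# Sanity-check sketch (illustrative values, not part of the original file):
# >>> import torch
# >>> loss = torch.tensor([1., 2., 3., 4.])
# >>> weight_reduce_loss(loss, reduction='mean')
# tensor(2.5000)
# >>> weight_reduce_loss(loss, weight=torch.tensor([1., 1., 0., 0.]),
# ...                    reduction='mean', avg_factor=2)
# tensor(1.5000)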
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
| 3,690 | 29.254098 | 79 | py |
CP2 | CP2-main/mmseg/models/losses/accuracy.py | import torch.nn as nn
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class, ...)
        target (torch.Tensor): The target of each prediction, shape (N, ...)
topk (int | tuple[int], optional): If the predictions in ``topk``
matches the target, the predictions will be regarded as
correct ones. Defaults to 1.
thresh (float, optional): If not None, predictions with scores under
            this threshold are considered incorrect. Defaults to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
accu = [pred.new_tensor(0.) for i in range(len(topk))]
return accu[0] if return_single else accu
assert pred.ndim == target.ndim + 1
assert pred.size(0) == target.size(0)
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
# transpose to shape (maxk, N, ...)
pred_label = pred_label.transpose(0, 1)
correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / target.numel()))
return res[0] if return_single else res
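# Illustrative sketch (values chosen for demonstration, not from the file):
# >>> import torch
# >>> pred = torch.tensor([[0.2, 0.5, 0.3], [0.7, 0.2, 0.1]])  # (N, num_class)
# >>> target = torch.tensor([1, 2])
# >>> accuracy(pred, target, topk=1)  # only the first sample is correct
# tensor([50.])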
class Accuracy(nn.Module):
"""Accuracy calculation module."""
def __init__(self, topk=(1, ), thresh=None):
"""Module to calculate the accuracy.
Args:
topk (tuple, optional): The criterion used to calculate the
accuracy. Defaults to (1,).
thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect. Defaults to None.
"""
super().__init__()
self.topk = topk
self.thresh = thresh
def forward(self, pred, target):
"""Forward function to calculate accuracy.
Args:
pred (torch.Tensor): Prediction of models.
target (torch.Tensor): Target for each prediction.
Returns:
tuple[float]: The accuracies under different topk criterions.
"""
return accuracy(pred, target, self.topk, self.thresh)
| 2,970 | 36.607595 | 79 | py |
CP2 | CP2-main/mmseg/models/losses/cross_entropy_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight, weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
class_weight=None,
reduction='mean',
avg_factor=None,
ignore_index=-100):
"""The wrapper function for :func:`F.cross_entropy`"""
# class_weight is a manual rescaling weight given to each class.
# If given, has to be a Tensor of size C element-wise losses
loss = F.cross_entropy(
pred,
label,
weight=class_weight,
reduction='none',
ignore_index=ignore_index)
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_zeros(target_shape)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(valid_mask, as_tuple=True)
if inds[0].numel() > 0:
if labels.dim() == 3:
bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1
else:
bin_labels[inds[0], labels[valid_mask]] = 1
valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.unsqueeze(1).expand(target_shape)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=255):
"""Calculate the binary CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 1).
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored. Default: 255
Returns:
torch.Tensor: The calculated loss
"""
if pred.dim() != label.dim():
assert (pred.dim() == 2 and label.dim() == 1) or (
pred.dim() == 4 and label.dim() == 3), \
'Only pred shape [N, C], label shape [N] or pred shape [N, C, ' \
'H, W], label shape [N, H, W] are supported'
label, weight = _expand_onehot_labels(label, weight, pred.shape,
ignore_index)
# weighted element-wise losses
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), pos_weight=class_weight, reduction='none')
# do the reduction for the weighted loss
loss = weight_reduce_loss(
loss, weight, reduction=reduction, avg_factor=avg_factor)
return loss
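# Illustrative sketch (shapes are assumptions, not from the original file):
# a [N, C, H, W] prediction with an index label map is expanded to one-hot
# internally, and pixels labeled with the ignore_index are masked out.
# >>> pred = torch.randn(2, 3, 8, 8)          # per-class logits
# >>> label = torch.randint(0, 3, (2, 8, 8))  # index map
# >>> loss = binary_cross_entropy(pred, label)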
def mask_cross_entropy(pred,
target,
label,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=None):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the mask's
            corresponding object. This will be used to select the mask of the
            class which the object belongs to when the mask prediction is not
            class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (None): Placeholder, to be consistent with other loss.
Default: None.
Returns:
torch.Tensor: The calculated loss
"""
assert ignore_index is None, 'BCE loss does not support ignore_index'
# TODO: handle these two reserved arguments
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
"""CrossEntropyLoss.
Args:
use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            or softmax. Defaults to False.
use_mask (bool, optional): Whether to use mask cross entropy loss.
Defaults to False.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum". Defaults to 'mean'.
class_weight (list[float] | str, optional): Weight of each class. If in
str format, read them from a file. Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
"""
def __init__(self,
use_sigmoid=False,
use_mask=False,
reduction='mean',
class_weight=None,
loss_weight=1.0):
super(CrossEntropyLoss, self).__init__()
assert (use_sigmoid is False) or (use_mask is False)
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = get_class_weight(class_weight)
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function."""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = cls_score.new_tensor(self.class_weight)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_cls
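# Usage sketch (illustrative; the shapes and class count are assumptions,
# not from the original file):
# >>> criterion = CrossEntropyLoss(use_sigmoid=False, loss_weight=1.0)
# >>> seg_logit = torch.randn(2, 19, 32, 32)        # e.g. 19 classes
# >>> seg_label = torch.randint(0, 19, (2, 32, 32))
# >>> loss = criterion(seg_logit, seg_label)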
| 7,437 | 36.376884 | 79 | py |
CP2 | CP2-main/mmseg/models/backbones/resnet.py | import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_conv_layer, build_norm_layer, build_plugin_layer,
constant_init, kaiming_init)
from mmcv.runner import load_checkpoint
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmseg.utils import get_root_logger
from ..builder import BACKBONES
from ..utils import ResLayer
class BasicBlock(nn.Module):
"""Basic block for ResNet."""
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
plugins=None):
super(BasicBlock, self).__init__()
assert dcn is None, 'Not implemented yet.'
assert plugins is None, 'Not implemented yet.'
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg, planes, planes, 3, padding=1, bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
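# Illustrative sketch (not part of the original file): a stride-1 BasicBlock
# with matching channels needs no downsample branch and preserves the shape.
# >>> import torch
# >>> block = BasicBlock(inplanes=64, planes=64)
# >>> block(torch.rand(1, 64, 56, 56)).shape
# torch.Size([1, 64, 56, 56])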
class Bottleneck(nn.Module):
"""Bottleneck block for ResNet.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is
"caffe", the stride-two layer is the first 1x1 conv layer.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
plugins=None):
super(Bottleneck, self).__init__()
assert style in ['pytorch', 'caffe']
assert dcn is None or isinstance(dcn, dict)
assert plugins is None or isinstance(plugins, list)
if plugins is not None:
allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
assert all(p['position'] in allowed_position for p in plugins)
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.dcn = dcn
self.with_dcn = dcn is not None
self.plugins = plugins
self.with_plugins = plugins is not None
if self.with_plugins:
# collect plugins for conv1/conv2/conv3
self.after_conv1_plugins = [
plugin['cfg'] for plugin in plugins
if plugin['position'] == 'after_conv1'
]
self.after_conv2_plugins = [
plugin['cfg'] for plugin in plugins
if plugin['position'] == 'after_conv2'
]
self.after_conv3_plugins = [
plugin['cfg'] for plugin in plugins
if plugin['position'] == 'after_conv3'
]
if self.style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
norm_cfg, planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
self.conv2 = build_conv_layer(
conv_cfg,
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
self.conv2 = build_conv_layer(
dcn,
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
conv_cfg,
planes,
planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
if self.with_plugins:
self.after_conv1_plugin_names = self.make_block_plugins(
planes, self.after_conv1_plugins)
self.after_conv2_plugin_names = self.make_block_plugins(
planes, self.after_conv2_plugins)
self.after_conv3_plugin_names = self.make_block_plugins(
planes * self.expansion, self.after_conv3_plugins)
def make_block_plugins(self, in_channels, plugins):
"""make plugins for block.
Args:
in_channels (int): Input channels of plugin.
plugins (list[dict]): List of plugins cfg to build.
Returns:
list[str]: List of the names of plugin.
"""
assert isinstance(plugins, list)
plugin_names = []
for plugin in plugins:
plugin = plugin.copy()
name, layer = build_plugin_layer(
plugin,
in_channels=in_channels,
postfix=plugin.pop('postfix', ''))
assert not hasattr(self, name), f'duplicate plugin {name}'
self.add_module(name, layer)
plugin_names.append(name)
return plugin_names
def forward_plugin(self, x, plugin_names):
"""Forward function for plugins."""
out = x
for name in plugin_names:
            out = getattr(self, name)(out)
return out
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
@property
def norm3(self):
"""nn.Module: normalization layer after the third convolution layer"""
return getattr(self, self.norm3_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
@BACKBONES.register_module()
class ResNet(nn.Module):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
stem_channels (int): Number of stem channels. Default: 64.
base_channels (int): Number of base channels of res layer. Default: 64.
num_stages (int): Resnet stages, normally 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
        deep_stem (bool): Replace the 7x7 conv in the input stem with
            three 3x3 convs.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
plugins (list[dict]): List of plugins for stages, each dict contains:
- cfg (dict, required): Cfg dict to build plugin.
- position (str, required): Position inside block to insert plugin,
options: 'after_conv1', 'after_conv2', 'after_conv3'.
- stages (tuple[bool], optional): Stages to apply plugin, length
should be same as 'num_stages'
multi_grid (Sequence[int]|None): Multi grid dilation rates of last
stage. Default: None
        contract_dilation (bool): Whether to contract the first dilation of
            each layer. Default: False
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity.
Example:
>>> from mmseg.models import ResNet
>>> import torch
>>> self = ResNet(depth=18)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 64, 8, 8)
(1, 128, 4, 4)
(1, 256, 2, 2)
(1, 512, 1, 1)
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
in_channels=3,
stem_channels=64,
base_channels=64,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
style='pytorch',
deep_stem=False,
avg_down=False,
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=False,
dcn=None,
stage_with_dcn=(False, False, False, False),
plugins=None,
multi_grid=None,
contract_dilation=False,
with_cp=False,
zero_init_residual=True):
super(ResNet, self).__init__()
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.stem_channels = stem_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.style = style
self.deep_stem = deep_stem
self.avg_down = avg_down
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.norm_eval = norm_eval
self.dcn = dcn
self.stage_with_dcn = stage_with_dcn
if dcn is not None:
assert len(stage_with_dcn) == num_stages
self.plugins = plugins
self.multi_grid = multi_grid
self.contract_dilation = contract_dilation
self.zero_init_residual = zero_init_residual
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = stem_channels
self._make_stem_layer(in_channels, stem_channels)
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
dcn = self.dcn if self.stage_with_dcn[i] else None
if plugins is not None:
stage_plugins = self.make_stage_plugins(plugins, i)
else:
stage_plugins = None
# multi grid is applied to last layer only
stage_multi_grid = multi_grid if i == len(
self.stage_blocks) - 1 else None
planes = base_channels * 2**i
res_layer = self.make_res_layer(
block=self.block,
inplanes=self.inplanes,
planes=planes,
num_blocks=num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
avg_down=self.avg_down,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
dcn=dcn,
plugins=stage_plugins,
multi_grid=stage_multi_grid,
contract_dilation=contract_dilation)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i+1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
self.feat_dim = self.block.expansion * base_channels * 2**(
len(self.stage_blocks) - 1)
def make_stage_plugins(self, plugins, stage_idx):
"""make plugins for ResNet 'stage_idx'th stage .
Currently we support to insert 'context_block',
'empirical_attention_block', 'nonlocal_block' into the backbone like
ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of
Bottleneck.
An example of plugins format could be :
>>> plugins=[
... dict(cfg=dict(type='xxx', arg1='xxx'),
... stages=(False, True, True, True),
... position='after_conv2'),
... dict(cfg=dict(type='yyy'),
... stages=(True, True, True, True),
... position='after_conv3'),
... dict(cfg=dict(type='zzz', postfix='1'),
... stages=(True, True, True, True),
... position='after_conv3'),
... dict(cfg=dict(type='zzz', postfix='2'),
... stages=(True, True, True, True),
... position='after_conv3')
... ]
>>> self = ResNet(depth=18)
>>> stage_plugins = self.make_stage_plugins(plugins, 0)
>>> assert len(stage_plugins) == 3
Suppose 'stage_idx=0', the structure of blocks in the stage would be:
conv1-> conv2->conv3->yyy->zzz1->zzz2
Suppose 'stage_idx=1', the structure of blocks in the stage would be:
conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2
If stages is missing, the plugin would be applied to all stages.
Args:
plugins (list[dict]): List of plugins cfg to build. The postfix is
required if multiple same type plugins are inserted.
stage_idx (int): Index of stage to build
Returns:
list[dict]: Plugins for current stage
"""
stage_plugins = []
for plugin in plugins:
plugin = plugin.copy()
stages = plugin.pop('stages', None)
assert stages is None or len(stages) == self.num_stages
# whether to insert plugin into current stage
if stages is None or stages[stage_idx]:
stage_plugins.append(plugin)
return stage_plugins
def make_res_layer(self, **kwargs):
"""Pack all blocks in a stage into a ``ResLayer``."""
return ResLayer(**kwargs)
@property
def norm1(self):
"""nn.Module: the normalization layer named "norm1" """
return getattr(self, self.norm1_name)
def _make_stem_layer(self, in_channels, stem_channels):
"""Make stem layer for ResNet."""
if self.deep_stem:
self.stem = nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels,
stem_channels // 2,
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
nn.ReLU(inplace=True),
build_conv_layer(
self.conv_cfg,
stem_channels // 2,
stem_channels // 2,
kernel_size=3,
stride=1,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
nn.ReLU(inplace=True),
build_conv_layer(
self.conv_cfg,
stem_channels // 2,
stem_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, stem_channels)[1],
nn.ReLU(inplace=True))
else:
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
stem_channels,
kernel_size=7,
stride=2,
padding=3,
bias=False)
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, stem_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def _freeze_stages(self):
"""Freeze stages param and norm stats."""
if self.frozen_stages >= 0:
if self.deep_stem:
self.stem.eval()
for param in self.stem.parameters():
param.requires_grad = False
else:
self.norm1.eval()
for m in [self.conv1, self.norm1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.dcn is not None:
for m in self.modules():
if isinstance(m, Bottleneck) and hasattr(
m, 'conv2_offset'):
constant_init(m.conv2_offset, 0)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Forward function."""
if self.deep_stem:
x = self.stem(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def train(self, mode=True):
"""Convert the model into training mode while keep normalization layer
freezed."""
super(ResNet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
                # trick: eval() has an effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
@BACKBONES.register_module()
class ResNetV1c(ResNet):
"""ResNetV1c variant described in [1]_.
Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv
in the input stem with three 3x3 convs.
References:
.. [1] https://arxiv.org/pdf/1812.01187.pdf
"""
def __init__(self, **kwargs):
super(ResNetV1c, self).__init__(
deep_stem=True, avg_down=False, **kwargs)
@BACKBONES.register_module()
class ResNetV1d(ResNet):
"""ResNetV1d variant described in [1]_.
Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in
the input stem with three 3x3 convs. And in the downsampling block, a 2x2
avg_pool with stride 2 is added before conv, whose stride is changed to 1.
"""
def __init__(self, **kwargs):
super(ResNetV1d, self).__init__(
deep_stem=True, avg_down=True, **kwargs)
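# Illustrative comparison sketch (not part of the original file):
# >>> v1c = ResNetV1c(depth=50)  # deep 3x3 stem, strided conv downsampling
# >>> v1d = ResNetV1d(depth=50)  # deep 3x3 stem + 2x2 avg-pool downsampling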
| 24,210 | 34.139332 | 79 | py |
CP2 | CP2-main/mmseg/models/backbones/vit.py | """Modified from https://github.com/rwightman/pytorch-image-
models/blob/master/timm/models/vision_transformer.py."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import (Conv2d, Linear, build_activation_layer, build_norm_layer,
constant_init, kaiming_init, normal_init)
from mmcv.runner import _load_checkpoint, load_checkpoint
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmseg.utils import get_root_logger
from ..builder import BACKBONES
from ..utils import DropPath, trunc_normal_
class Mlp(nn.Module):
"""MLP layer for Encoder block.
Args:
in_features(int): Input dimension for the first fully
connected layer.
hidden_features(int): Output dimension for the first fully
connected layer.
        out_features(int): Output dimension for the second fully
connected layer.
act_cfg(dict): Config dict for activation layer.
Default: dict(type='GELU').
drop(float): Drop rate for the dropout layer. Dropout rate has
to be between 0 and 1. Default: 0.
"""
def __init__(self,
in_features,
hidden_features=None,
out_features=None,
act_cfg=dict(type='GELU'),
drop=0.):
super(Mlp, self).__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = Linear(in_features, hidden_features)
self.act = build_activation_layer(act_cfg)
self.fc2 = Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
"""Attention layer for Encoder block.
Args:
dim (int): Dimension for the input vector.
num_heads (int): Number of parallel attention heads.
qkv_bias (bool): Enable bias for qkv if True. Default: False.
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
attn_drop (float): Drop rate for attention output weights.
Default: 0.
proj_drop (float): Drop rate for output weights. Default: 0.
"""
def __init__(self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.,
proj_drop=0.):
super(Attention, self).__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
b, n, c = x.shape
qkv = self.qkv(x).reshape(b, n, 3, self.num_heads,
c // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(b, n, c)
x = self.proj(x)
x = self.proj_drop(x)
return x
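# Shape sketch (illustrative, not from the original file): self-attention
# preserves the token layout.
# >>> import torch
# >>> attn = Attention(dim=768, num_heads=12)
# >>> tokens = torch.rand(2, 197, 768)  # 196 patch tokens + 1 cls token
# >>> attn(tokens).shape
# torch.Size([2, 197, 768])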
class Block(nn.Module):
"""Implements encoder block with residual connection.
Args:
dim (int): The feature dimension.
num_heads (int): Number of parallel attention heads.
mlp_ratio (int): Ratio of mlp hidden dim to embedding dim.
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
drop (float): Drop rate for mlp output weights. Default: 0.
attn_drop (float): Drop rate for attention output weights.
Default: 0.
proj_drop (float): Drop rate for attn layer output weights.
Default: 0.
drop_path (float): Drop rate for paths of model.
Default: 0.
act_cfg (dict): Config dict for activation layer.
Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN', requires_grad=True).
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
def __init__(self,
dim,
num_heads,
mlp_ratio=4,
qkv_bias=False,
qk_scale=None,
drop=0.,
attn_drop=0.,
proj_drop=0.,
drop_path=0.,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN', eps=1e-6),
with_cp=False):
super(Block, self).__init__()
self.with_cp = with_cp
_, self.norm1 = build_norm_layer(norm_cfg, dim)
self.attn = Attention(dim, num_heads, qkv_bias, qk_scale, attn_drop,
proj_drop)
self.drop_path = DropPath(
drop_path) if drop_path > 0. else nn.Identity()
_, self.norm2 = build_norm_layer(norm_cfg, dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_cfg=act_cfg,
drop=drop)
def forward(self, x):
def _inner_forward(x):
out = x + self.drop_path(self.attn(self.norm1(x)))
out = out + self.drop_path(self.mlp(self.norm2(out)))
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
class PatchEmbed(nn.Module):
"""Image to Patch Embedding.
Args:
img_size (int | tuple): Input image size.
default: 224.
patch_size (int): Width and height for a patch.
default: 16.
in_channels (int): Input channels for images. Default: 3.
embed_dim (int): The embedding dimension. Default: 768.
"""
def __init__(self,
img_size=224,
patch_size=16,
in_channels=3,
embed_dim=768):
super(PatchEmbed, self).__init__()
if isinstance(img_size, int):
self.img_size = (img_size, img_size)
elif isinstance(img_size, tuple):
self.img_size = img_size
else:
raise TypeError('img_size must be type of int or tuple')
h, w = self.img_size
self.patch_size = (patch_size, patch_size)
self.num_patches = (h // patch_size) * (w // patch_size)
self.proj = Conv2d(
in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
return self.proj(x).flatten(2).transpose(1, 2)
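# Shape sketch (illustrative, not from the original file): a 224x224 image
# with 16x16 patches yields 14 * 14 = 196 patch tokens.
# >>> embed = PatchEmbed(img_size=224, patch_size=16, embed_dim=768)
# >>> embed(torch.rand(1, 3, 224, 224)).shape
# torch.Size([1, 196, 768])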
@BACKBONES.register_module()
class VisionTransformer(nn.Module):
"""Vision transformer backbone.
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for
Image Recognition at Scale` - https://arxiv.org/abs/2010.11929
Args:
img_size (tuple): input image size. Default: (224, 224).
patch_size (int, tuple): patch size. Default: 16.
in_channels (int): number of input channels. Default: 3.
embed_dim (int): embedding dimension. Default: 768.
depth (int): depth of transformer. Default: 12.
num_heads (int): number of attention heads. Default: 12.
mlp_ratio (int): ratio of mlp hidden dim to embedding dim.
Default: 4.
out_indices (list | tuple | int): Output from which stages.
Default: -1.
qkv_bias (bool): enable bias for qkv if True. Default: True.
qk_scale (float): override default qk scale of head_dim ** -0.5 if set.
drop_rate (float): dropout rate. Default: 0.
attn_drop_rate (float): attention dropout rate. Default: 0.
drop_path_rate (float): Rate of DropPath. Default: 0.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN', eps=1e-6, requires_grad=True).
act_cfg (dict): Config dict for activation layer.
Default: dict(type='GELU').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
        final_norm (bool): Whether to add an additional layer to normalize
final feature map. Default: False.
        out_shape (str): Select the output format of the feature maps.
            Default: 'NCHW'.
        interpolate_mode (str): Select the interpolate mode for position
            embedding vector resize. Default: bicubic.
        with_cls_token (bool): Whether to concatenate the class token with the
            image tokens as transformer input. Default: True.
with_cp (bool): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
"""
def __init__(self,
img_size=(224, 224),
patch_size=16,
in_channels=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
out_indices=11,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_cfg=dict(type='LN', eps=1e-6, requires_grad=True),
act_cfg=dict(type='GELU'),
norm_eval=False,
final_norm=False,
out_shape='NCHW',
with_cls_token=True,
interpolate_mode='bicubic',
with_cp=False):
super(VisionTransformer, self).__init__()
self.img_size = img_size
self.patch_size = patch_size
self.features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_channels=in_channels,
embed_dim=embed_dim)
self.with_cls_token = with_cls_token
self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
self.pos_embed = nn.Parameter(
torch.zeros(1, self.patch_embed.num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
if isinstance(out_indices, int):
self.out_indices = [out_indices]
elif isinstance(out_indices, list) or isinstance(out_indices, tuple):
self.out_indices = out_indices
else:
raise TypeError('out_indices must be type of int, list or tuple')
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
                drop=drop_rate,
                drop_path=dpr[i],
attn_drop=attn_drop_rate,
act_cfg=act_cfg,
norm_cfg=norm_cfg,
with_cp=with_cp) for i in range(depth)
])
assert out_shape in ['NLC',
'NCHW'], 'output shape must be "NLC" or "NCHW".'
self.out_shape = out_shape
self.interpolate_mode = interpolate_mode
self.final_norm = final_norm
if final_norm:
_, self.norm = build_norm_layer(norm_cfg, embed_dim)
self.norm_eval = norm_eval
self.with_cp = with_cp
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = get_root_logger()
checkpoint = _load_checkpoint(pretrained, logger=logger)
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
if 'pos_embed' in state_dict.keys():
if self.pos_embed.shape != state_dict['pos_embed'].shape:
logger.info(msg=f'Resize the pos_embed shape from \
{state_dict["pos_embed"].shape} to {self.pos_embed.shape}')
h, w = self.img_size
pos_size = int(
math.sqrt(state_dict['pos_embed'].shape[1] - 1))
state_dict['pos_embed'] = self.resize_pos_embed(
state_dict['pos_embed'], (h, w), (pos_size, pos_size),
self.patch_size, self.interpolate_mode)
self.load_state_dict(state_dict, False)
# load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
# We only implement the 'jax_impl' initialization implemented at
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py#L353 # noqa: E501
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
for n, m in self.named_modules():
if isinstance(m, Linear):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
if 'mlp' in n:
normal_init(m.bias, std=1e-6)
else:
constant_init(m.bias, 0)
elif isinstance(m, Conv2d):
kaiming_init(m.weight, mode='fan_in')
if m.bias is not None:
constant_init(m.bias, 0)
elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)):
constant_init(m.bias, 0)
constant_init(m.weight, 1.0)
else:
raise TypeError('pretrained must be a str or None')
    def _pos_embedding(self, img, patched_img, pos_embed):
        """Position embedding method.
Resize the pos_embed, if the input image size doesn't match
the training size.
Args:
img (torch.Tensor): The inference image tensor, the shape
must be [B, C, H, W].
patched_img (torch.Tensor): The patched image, it should be
shape of [B, L1, C].
            pos_embed (torch.Tensor): The pos_embed weights; it should be
                of shape [B, L2, C].
Return:
torch.Tensor: The pos encoded image feature.
"""
assert patched_img.ndim == 3 and pos_embed.ndim == 3, \
'the shapes of patched_img and pos_embed must be [B, L, C]'
x_len, pos_len = patched_img.shape[1], pos_embed.shape[1]
if x_len != pos_len:
if pos_len == (self.img_size[0] // self.patch_size) * (
self.img_size[1] // self.patch_size) + 1:
pos_h = self.img_size[0] // self.patch_size
pos_w = self.img_size[1] // self.patch_size
else:
raise ValueError(
'Unexpected shape of pos_embed, got {}.'.format(
pos_embed.shape))
pos_embed = self.resize_pos_embed(pos_embed, img.shape[2:],
(pos_h, pos_w), self.patch_size,
self.interpolate_mode)
return self.pos_drop(patched_img + pos_embed)
@staticmethod
    def resize_pos_embed(pos_embed, input_shape, pos_shape, patch_size, mode):
"""Resize pos_embed weights.
Resize pos_embed using bicubic interpolate method.
Args:
pos_embed (torch.Tensor): pos_embed weights.
            input_shape (tuple): Tuple for (input_h, input_w).
            pos_shape (tuple): Tuple for (pos_h, pos_w).
            patch_size (int): Patch size.
            mode (str): Algorithm used for interpolation, e.g. 'bicubic'.
Return:
torch.Tensor: The resized pos_embed of shape [B, L_new, C]
"""
assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'
        input_h, input_w = input_shape
pos_h, pos_w = pos_shape
cls_token_weight = pos_embed[:, 0]
pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]
pos_embed_weight = pos_embed_weight.reshape(
1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2)
pos_embed_weight = F.interpolate(
pos_embed_weight,
size=[input_h // patch_size, input_w // patch_size],
align_corners=False,
mode=mode)
cls_token_weight = cls_token_weight.unsqueeze(1)
pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2)
pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1)
return pos_embed
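    # Shape sketch (illustrative, not from the original file): resizing a
    # 14x14 position grid (plus cls token) for 512x512 inputs with 16x16
    # patches yields 32 * 32 + 1 = 1025 tokens.
    # >>> pe = torch.rand(1, 14 * 14 + 1, 768)
    # >>> VisionTransformer.resize_pos_embed(
    # ...     pe, (512, 512), (14, 14), 16, 'bicubic').shape
    # torch.Size([1, 1025, 768])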
def forward(self, inputs):
B = inputs.shape[0]
x = self.patch_embed(inputs)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
        x = self._pos_embedding(inputs, x, self.pos_embed)
if not self.with_cls_token:
# Remove class token for transformer input
x = x[:, 1:]
outs = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if i == len(self.blocks) - 1:
if self.final_norm:
x = self.norm(x)
if i in self.out_indices:
if self.with_cls_token:
# Remove class token and reshape token for decoder head
out = x[:, 1:]
else:
out = x
if self.out_shape == 'NCHW':
B, _, C = out.shape
out = out.reshape(B, inputs.shape[2] // self.patch_size,
inputs.shape[3] // self.patch_size,
C).permute(0, 3, 1, 2).contiguous()
outs.append(out)
return tuple(outs)
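    # Shape sketch (illustrative, not from the original file): with default
    # 16x16 patches and out_shape='NCHW', a 224x224 input yields a 14x14 grid.
    # >>> model = VisionTransformer(img_size=(224, 224))
    # >>> feats = model(torch.rand(1, 3, 224, 224))
    # >>> feats[0].shape
    # torch.Size([1, 768, 14, 14])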
def train(self, mode=True):
super(VisionTransformer, self).train(mode)
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, nn.LayerNorm):
m.eval()
| 18,574 | 38.270613 | 128 | py |
CP2 | CP2-main/mmseg/datasets/custom.py | import os
import os.path as osp
from collections import OrderedDict
from functools import reduce
import mmcv
import numpy as np
from mmcv.utils import print_log
from prettytable import PrettyTable
from torch.utils.data import Dataset
from mmseg.core import eval_metrics
from mmseg.utils import get_root_logger
from .builder import DATASETS
from .pipelines import Compose
@DATASETS.register_module()
class CustomDataset(Dataset):
"""Custom dataset for semantic segmentation. An example of file structure
is as followed.
.. code-block:: none
├── data
│ ├── my_dataset
│ │ ├── img_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{img_suffix}
│ │ │ │ ├── yyy{img_suffix}
│ │ │ │ ├── zzz{img_suffix}
│ │ │ ├── val
│ │ ├── ann_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{seg_map_suffix}
│ │ │ │ ├── yyy{seg_map_suffix}
│ │ │ │ ├── zzz{seg_map_suffix}
│ │ │ ├── val
    The img/gt_semantic_seg pair of CustomDataset should share the same
    prefix, differing only in the suffix. A valid img/gt_semantic_seg filename
    pair should be like ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (the
    extension is also included in the suffix). If split is given, then ``xxx``
    is specified in the txt file. Otherwise, all files in ``img_dir/`` and
    ``ann_dir`` will be loaded.
    Please refer to ``docs/tutorials/new_dataset.md`` for more details.
Args:
pipeline (list[dict]): Processing pipeline
img_dir (str): Path to image directory
img_suffix (str): Suffix of images. Default: '.jpg'
ann_dir (str, optional): Path to annotation directory. Default: None
seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
split (str, optional): Split txt file. If split is specified, only
file with suffix in the splits will be loaded. Otherwise, all
images in img_dir/ann_dir will be loaded. Default: None
data_root (str, optional): Data root for img_dir/ann_dir. Default:
None.
test_mode (bool): If test_mode=True, gt wouldn't be loaded.
ignore_index (int): The label index to be ignored. Default: 255
reduce_zero_label (bool): Whether to mark label zero as ignored.
Default: False
classes (str | Sequence[str], optional): Specify classes to load.
If is None, ``cls.CLASSES`` will be used. Default: None.
palette (Sequence[Sequence[int]]] | np.ndarray | None):
The palette of segmentation map. If None is given, and
self.PALETTE is None, random palette will be generated.
Default: None
"""
CLASSES = None
PALETTE = None
def __init__(self,
pipeline,
img_dir,
img_suffix='.jpg',
ann_dir=None,
seg_map_suffix='.png',
split=None,
data_root=None,
test_mode=False,
ignore_index=255,
reduce_zero_label=False,
classes=None,
palette=None):
self.pipeline = Compose(pipeline)
self.img_dir = img_dir
self.img_suffix = img_suffix
self.ann_dir = ann_dir
self.seg_map_suffix = seg_map_suffix
self.split = split
self.data_root = data_root
self.test_mode = test_mode
self.ignore_index = ignore_index
self.reduce_zero_label = reduce_zero_label
self.label_map = None
self.CLASSES, self.PALETTE = self.get_classes_and_palette(
classes, palette)
# join paths if data_root is specified
if self.data_root is not None:
if not osp.isabs(self.img_dir):
self.img_dir = osp.join(self.data_root, self.img_dir)
if not (self.ann_dir is None or osp.isabs(self.ann_dir)):
self.ann_dir = osp.join(self.data_root, self.ann_dir)
if not (self.split is None or osp.isabs(self.split)):
self.split = osp.join(self.data_root, self.split)
# load annotations
self.img_infos = self.load_annotations(self.img_dir, self.img_suffix,
self.ann_dir,
self.seg_map_suffix, self.split)
def __len__(self):
"""Total number of samples of data."""
return len(self.img_infos)
def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix,
split):
"""Load annotation from directory.
Args:
img_dir (str): Path to image directory
img_suffix (str): Suffix of images.
ann_dir (str|None): Path to annotation directory.
seg_map_suffix (str|None): Suffix of segmentation maps.
split (str|None): Split txt file. If split is specified, only file
with suffix in the splits will be loaded. Otherwise, all images
in img_dir/ann_dir will be loaded. Default: None
Returns:
list[dict]: All image info of dataset.
"""
img_infos = []
if split is not None:
with open(split) as f:
for line in f:
img_name = line.strip()
img_info = dict(filename=img_name + img_suffix)
if ann_dir is not None:
seg_map = img_name + seg_map_suffix
img_info['ann'] = dict(seg_map=seg_map)
img_infos.append(img_info)
else:
for img in mmcv.scandir(img_dir, img_suffix, recursive=True):
img_info = dict(filename=img)
if ann_dir is not None:
seg_map = img.replace(img_suffix, seg_map_suffix)
img_info['ann'] = dict(seg_map=seg_map)
img_infos.append(img_info)
print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())
return img_infos
def get_ann_info(self, idx):
"""Get annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
return self.img_infos[idx]['ann']
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['seg_fields'] = []
results['img_prefix'] = self.img_dir
results['seg_prefix'] = self.ann_dir
if self.custom_classes:
results['label_map'] = self.label_map
def __getitem__(self, idx):
"""Get training/test data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training/test data (with annotation if `test_mode` is set
False).
"""
if self.test_mode:
return self.prepare_test_img(idx)
else:
return self.prepare_train_img(idx)
def prepare_train_img(self, idx):
"""Get training data and annotations after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training data and annotation after pipeline with new keys
introduced by pipeline.
"""
img_info = self.img_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
"""Get testing data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Testing data after pipeline with new keys introduced by
pipeline.
"""
img_info = self.img_infos[idx]
results = dict(img_info=img_info)
self.pre_pipeline(results)
return self.pipeline(results)
def format_results(self, results, **kwargs):
"""Place holder to format result to dataset specific output."""
def get_gt_seg_maps(self, efficient_test=False):
"""Get ground truth segmentation maps for evaluation."""
gt_seg_maps = []
for img_info in self.img_infos:
seg_map = osp.join(self.ann_dir, img_info['ann']['seg_map'])
if efficient_test:
gt_seg_map = seg_map
else:
gt_seg_map = mmcv.imread(
seg_map, flag='unchanged', backend='pillow')
gt_seg_maps.append(gt_seg_map)
return gt_seg_maps
def get_classes_and_palette(self, classes=None, palette=None):
"""Get class names of current dataset.
Args:
classes (Sequence[str] | str | None): If classes is None, use
default CLASSES defined by builtin dataset. If classes is a
string, take it as a file name. The file contains the name of
classes where each line contains one class name. If classes is
a tuple or list, override the CLASSES defined by the dataset.
palette (Sequence[Sequence[int]]] | np.ndarray | None):
The palette of segmentation map. If None is given, random
palette will be generated. Default: None
"""
if classes is None:
self.custom_classes = False
return self.CLASSES, self.PALETTE
self.custom_classes = True
if isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
if self.CLASSES:
            if not set(class_names).issubset(self.CLASSES):
raise ValueError('classes is not a subset of CLASSES.')
# dictionary, its keys are the old label ids and its values
# are the new label ids.
# used for changing pixel labels in load_annotations.
self.label_map = {}
for i, c in enumerate(self.CLASSES):
if c not in class_names:
self.label_map[i] = -1
else:
                    self.label_map[i] = class_names.index(c)
palette = self.get_palette_for_custom_classes(class_names, palette)
return class_names, palette
def get_palette_for_custom_classes(self, class_names, palette=None):
if self.label_map is not None:
# return subset of palette
palette = []
for old_id, new_id in sorted(
self.label_map.items(), key=lambda x: x[1]):
if new_id != -1:
palette.append(self.PALETTE[old_id])
palette = type(self.PALETTE)(palette)
elif palette is None:
if self.PALETTE is None:
palette = np.random.randint(0, 255, size=(len(class_names), 3))
else:
palette = self.PALETTE
return palette
def evaluate(self,
results,
metric='mIoU',
logger=None,
efficient_test=False,
**kwargs):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. 'mIoU',
'mDice' and 'mFscore' are supported.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str, float]: Default metrics.
"""
if isinstance(metric, str):
metric = [metric]
allowed_metrics = ['mIoU', 'mDice', 'mFscore']
if not set(metric).issubset(set(allowed_metrics)):
raise KeyError('metric {} is not supported'.format(metric))
eval_results = {}
gt_seg_maps = self.get_gt_seg_maps(efficient_test)
if self.CLASSES is None:
num_classes = len(
reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
else:
num_classes = len(self.CLASSES)
ret_metrics = eval_metrics(
results,
gt_seg_maps,
num_classes,
self.ignore_index,
metric,
label_map=self.label_map,
reduce_zero_label=self.reduce_zero_label)
if self.CLASSES is None:
class_names = tuple(range(num_classes))
else:
class_names = self.CLASSES
# summary table
ret_metrics_summary = OrderedDict({
ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
for ret_metric, ret_metric_value in ret_metrics.items()
})
# each class table
ret_metrics.pop('aAcc', None)
ret_metrics_class = OrderedDict({
ret_metric: np.round(ret_metric_value * 100, 2)
for ret_metric, ret_metric_value in ret_metrics.items()
})
ret_metrics_class.update({'Class': class_names})
ret_metrics_class.move_to_end('Class', last=False)
# for logger
class_table_data = PrettyTable()
for key, val in ret_metrics_class.items():
class_table_data.add_column(key, val)
summary_table_data = PrettyTable()
for key, val in ret_metrics_summary.items():
if key == 'aAcc':
summary_table_data.add_column(key, [val])
else:
summary_table_data.add_column('m' + key, [val])
print_log('per class results:', logger)
print_log('\n' + class_table_data.get_string(), logger=logger)
print_log('Summary:', logger)
print_log('\n' + summary_table_data.get_string(), logger=logger)
# each metric dict
for key, value in ret_metrics_summary.items():
if key == 'aAcc':
eval_results[key] = value / 100.0
else:
eval_results['m' + key] = value / 100.0
ret_metrics_class.pop('Class', None)
for key, value in ret_metrics_class.items():
eval_results.update({
key + '.' + str(name): value[idx] / 100.0
for idx, name in enumerate(class_names)
})
if mmcv.is_list_of(results, str):
for file_name in results:
os.remove(file_name)
return eval_results
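# Usage sketch (illustrative; the paths, classes and palette below are
# assumptions, not from the original file):
# >>> dataset = CustomDataset(
# ...     pipeline=[],
# ...     img_dir='data/my_dataset/img_dir/train',
# ...     ann_dir='data/my_dataset/ann_dir/train',
# ...     classes=('background', 'foreground'),
# ...     palette=[[0, 0, 0], [255, 255, 255]])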
| 14,628 | 35.481297 | 79 | py |
CP2 | CP2-main/mmseg/datasets/dataset_wrappers.py | from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .builder import DATASETS
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
"""A wrapper of concatenated dataset.
    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but it also keeps
    the ``CLASSES`` and ``PALETTE`` of the first dataset.
Args:
datasets (list[:obj:`Dataset`]): A list of datasets.
"""
def __init__(self, datasets):
super(ConcatDataset, self).__init__(datasets)
self.CLASSES = datasets[0].CLASSES
self.PALETTE = datasets[0].PALETTE
@DATASETS.register_module()
class RepeatDataset(object):
"""A wrapper of repeated dataset.
The length of repeated dataset will be `times` larger than the original
dataset. This is useful when the data loading time is long but the dataset
is small. Using RepeatDataset can reduce the data loading time between
epochs.
Args:
dataset (:obj:`Dataset`): The dataset to be repeated.
times (int): Repeat times.
"""
def __init__(self, dataset, times):
self.dataset = dataset
self.times = times
self.CLASSES = dataset.CLASSES
self.PALETTE = dataset.PALETTE
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
"""Get item from original dataset."""
return self.dataset[idx % self._ori_len]
def __len__(self):
"""The length is multiplied by ``times``"""
return self.times * self._ori_len
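# Illustrative sketch (`small_dataset` is hypothetical): wrapping a dataset
# with times=8 makes one epoch iterate over it eight times.
# >>> repeated = RepeatDataset(small_dataset, times=8)
# >>> len(repeated) == 8 * len(small_dataset)
# True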
| 1,499 | 28.411765 | 78 | py |
CP2 | CP2-main/mmseg/datasets/builder.py | import copy
import platform
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from mmcv.utils.parrots_wrapper import DataLoader, PoolDataLoader
from torch.utils.data import DistributedSampler
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
hard_limit = rlimit[1]
soft_limit = min(4096, hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
def _concat_dataset(cfg, default_args=None):
"""Build :obj:`ConcatDataset by."""
from .dataset_wrappers import ConcatDataset
img_dir = cfg['img_dir']
ann_dir = cfg.get('ann_dir', None)
split = cfg.get('split', None)
num_img_dir = len(img_dir) if isinstance(img_dir, (list, tuple)) else 1
if ann_dir is not None:
num_ann_dir = len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1
else:
num_ann_dir = 0
if split is not None:
num_split = len(split) if isinstance(split, (list, tuple)) else 1
else:
num_split = 0
if num_img_dir > 1:
assert num_img_dir == num_ann_dir or num_ann_dir == 0
assert num_img_dir == num_split or num_split == 0
else:
assert num_split == num_ann_dir or num_ann_dir <= 1
num_dset = max(num_split, num_img_dir)
datasets = []
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
if isinstance(img_dir, (list, tuple)):
data_cfg['img_dir'] = img_dir[i]
if isinstance(ann_dir, (list, tuple)):
data_cfg['ann_dir'] = ann_dir[i]
if isinstance(split, (list, tuple)):
data_cfg['split'] = split[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets)
def build_dataset(cfg, default_args=None):
"""Build datasets."""
from .dataset_wrappers import ConcatDataset, RepeatDataset
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(
cfg.get('split', None), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
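# Dispatch sketch (hypothetical cfg values): a list/tuple cfg, or a list-valued
# 'img_dir'/'split', ends up in a ConcatDataset; type='RepeatDataset' wraps the
# inner dataset; everything else goes through the DATASETS registry, e.g.
#   build_dataset(dict(type='RepeatDataset', times=10, dataset=inner_cfg))
#   build_dataset([cfg_a, cfg_b])  # ConcatDataset of the two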
def build_dataloader(dataset,
samples_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
drop_last=False,
pin_memory=True,
dataloader_type='PoolDataLoader',
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (Dataset): A PyTorch dataset.
samples_per_gpu (int): Number of training samples on each GPU, i.e.,
batch size of each GPU.
workers_per_gpu (int): How many subprocesses to use for data loading
for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed training.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
seed (int | None): Seed to be used. Default: None.
drop_last (bool): Whether to drop the last incomplete batch in epoch.
Default: False
pin_memory (bool): Whether to use pin_memory in DataLoader.
Default: True
dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader'
kwargs: any keyword argument to be used to initialize DataLoader
Returns:
DataLoader: A PyTorch dataloader.
"""
rank, world_size = get_dist_info()
if dist:
sampler = DistributedSampler(
dataset, world_size, rank, shuffle=shuffle)
shuffle = False
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
sampler = None
batch_size = num_gpus * samples_per_gpu
num_workers = num_gpus * workers_per_gpu
init_fn = partial(
worker_init_fn, num_workers=num_workers, rank=rank,
seed=seed) if seed is not None else None
assert dataloader_type in (
'DataLoader',
'PoolDataLoader'), f'unsupported dataloader {dataloader_type}'
if dataloader_type == 'PoolDataLoader':
dataloader = PoolDataLoader
elif dataloader_type == 'DataLoader':
dataloader = DataLoader
data_loader = dataloader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=pin_memory,
shuffle=shuffle,
worker_init_fn=init_fn,
drop_last=drop_last,
**kwargs)
return data_loader
def worker_init_fn(worker_id, num_workers, rank, seed):
"""Worker init func for dataloader.
The seed of each worker equals to num_worker * rank + worker_id + user_seed
Args:
worker_id (int): Worker id.
num_workers (int): Number of workers.
rank (int): The rank of current process.
seed (int): The random seed to use.
"""
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
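if __name__ == "__main__":
    # Illustrative check of the seeding rule documented above: two workers on
    # the same rank get distinct seeds (num_workers * rank + worker_id + seed),
    # so their numpy streams differ (seed=42 is arbitrary).
    worker_init_fn(worker_id=0, num_workers=2, rank=0, seed=42)
    a = np.random.rand()
    worker_init_fn(worker_id=1, num_workers=2, rank=0, seed=42)
    b = np.random.rand()
    assert a != b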
| 5,871 | 33.541176 | 79 | py |
CP2 | CP2-main/mmseg/datasets/pipelines/formating.py | from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
Args:
data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
be converted.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class ToTensor(object):
"""Convert some results to :obj:`torch.Tensor` by given keys.
Args:
keys (Sequence[str]): Keys that need to be converted to Tensor.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function to convert data in results to :obj:`torch.Tensor`.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data converted
to :obj:`torch.Tensor`.
"""
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor(object):
"""Convert image to :obj:`torch.Tensor` by given keys.
The dimension order of input image is (H, W, C). The pipeline will convert
it to (C, H, W). If only 2 dimension (H, W) is given, the output would be
(1, H, W).
Args:
keys (Sequence[str]): Key of images to be converted to Tensor.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function to convert image in results to :obj:`torch.Tensor` and
transpose the channel order.
Args:
results (dict): Result dict contains the image data to convert.
Returns:
dict: The result dict contains the image converted
to :obj:`torch.Tensor` and transposed to (C, H, W) order.
"""
for key in self.keys:
img = results[key]
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
results[key] = to_tensor(img.transpose(2, 0, 1))
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class Transpose(object):
"""Transpose some results by given keys.
Args:
keys (Sequence[str]): Keys of results to be transposed.
order (Sequence[int]): Order of transpose.
"""
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
"""Call function to convert image in results to :obj:`torch.Tensor` and
transpose the channel order.
Args:
results (dict): Result dict contains the image data to convert.
Returns:
dict: The result dict contains the image converted
to :obj:`torch.Tensor` and transposed to (C, H, W) order.
"""
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, order={self.order})'
@PIPELINES.register_module()
class ToDataContainer(object):
"""Convert results to :obj:`mmcv.DataContainer` by given fields.
Args:
fields (Sequence[dict]): Each field is a dict like
``dict(key='xxx', **kwargs)``. The ``key`` in result will
be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
Default: ``(dict(key='img', stack=True),
dict(key='gt_semantic_seg'))``.
"""
def __init__(self,
fields=(dict(key='img',
stack=True), dict(key='gt_semantic_seg'))):
self.fields = fields
def __call__(self, results):
"""Call function to convert data in results to
:obj:`mmcv.DataContainer`.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data converted to
:obj:`mmcv.DataContainer`.
"""
for field in self.fields:
field = field.copy()
key = field.pop('key')
results[key] = DC(results[key], **field)
return results
def __repr__(self):
return self.__class__.__name__ + f'(fields={self.fields})'
@PIPELINES.register_module()
class DefaultFormatBundle(object):
"""Default formatting bundle.
It simplifies the pipeline of formatting common fields, including "img"
and "gt_semantic_seg". These fields are formatted as follows.
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
(3)to DataContainer (stack=True)
"""
def __call__(self, results):
"""Call function to transform and format common fields in results.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data that is formatted with
default bundle.
"""
if 'img' in results:
img = results['img']
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
results['img'] = DC(to_tensor(img), stack=True)
if 'gt_semantic_seg' in results:
# convert to long
results['gt_semantic_seg'] = DC(
to_tensor(results['gt_semantic_seg'][None,
...].astype(np.int64)),
stack=True)
return results
def __repr__(self):
return self.__class__.__name__
@PIPELINES.register_module()
class Collect(object):
"""Collect data from the loader relevant to the specific task.
This is usually the last stage of the data loader pipeline. Typically keys
is set to some subset of "img", "gt_semantic_seg".
The "img_meta" item is always populated. The contents of the "img_meta"
dictionary depends on "meta_keys". By default this includes:
- "img_shape": shape of the image input to the network as a tuple
(h, w, c). Note that images may be zero padded on the bottom/right
if the batch tensor is larger than this shape.
- "scale_factor": a float indicating the preprocessing scale
- "flip": a boolean indicating if image flip transform was used
- "filename": path to the image file
- "ori_shape": original shape of the image as a tuple (h, w, c)
- "pad_shape": image shape after padding
- "img_norm_cfg": a dict of normalization information:
- mean - per channel mean subtraction
- std - per channel std divisor
- to_rgb - bool indicating if bgr was converted to rgb
Args:
keys (Sequence[str]): Keys of results to be collected in ``data``.
meta_keys (Sequence[str], optional): Meta keys to be converted to
``mmcv.DataContainer`` and collected in ``data[img_metas]``.
Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
'pad_shape', 'scale_factor', 'flip', 'flip_direction',
'img_norm_cfg')``
"""
def __init__(self,
keys,
meta_keys=('filename', 'ori_filename', 'ori_shape',
'img_shape', 'pad_shape', 'scale_factor', 'flip',
'flip_direction', 'img_norm_cfg')):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
"""Call function to collect keys in results. The keys in ``meta_keys``
will be converted to :obj:mmcv.DataContainer.
Args:
results (dict): Result dict contains the data to collect.
Returns:
dict: The result dict contains the following keys
- keys in``self.keys``
- ``img_metas``
"""
data = {}
img_meta = {}
for key in self.meta_keys:
img_meta[key] = results[key]
data['img_metas'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, meta_keys={self.meta_keys})'
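if __name__ == "__main__":
    # Minimal sketch on fake data (hypothetical 4x4 RGB image plus mask): the
    # bundle yields a CHW image DataContainer and an int64 mask with a leading
    # channel dimension.
    results = dict(
        img=np.zeros((4, 4, 3), dtype=np.uint8),
        gt_semantic_seg=np.zeros((4, 4), dtype=np.uint8))
    results = DefaultFormatBundle()(results)
    assert results['img'].data.shape == (3, 4, 4)
    assert results['gt_semantic_seg'].data.shape == (1, 4, 4)
    assert results['gt_semantic_seg'].data.dtype == torch.int64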
| 9,228 | 30.934256 | 79 | py |
CP2 | CP2-main/mmseg/ops/wrappers.py | import warnings
import torch.nn as nn
import torch.nn.functional as F
def resize(input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None,
warning=True):
if warning:
if size is not None and align_corners:
input_h, input_w = tuple(int(x) for x in input.shape[2:])
output_h, output_w = tuple(int(x) for x in size)
            if output_h > input_h or output_w > input_w:
if ((output_h > 1 and output_w > 1 and input_h > 1
and input_w > 1) and (output_h - 1) % (input_h - 1)
and (output_w - 1) % (input_w - 1)):
warnings.warn(
f'When align_corners={align_corners}, '
                        'the output would be more aligned if '
f'input size {(input_h, input_w)} is `x+1` and '
f'out size {(output_h, output_w)} is `nx+1`')
return F.interpolate(input, size, scale_factor, mode, align_corners)
class Upsample(nn.Module):
def __init__(self,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None):
super(Upsample, self).__init__()
self.size = size
if isinstance(scale_factor, tuple):
self.scale_factor = tuple(float(factor) for factor in scale_factor)
else:
self.scale_factor = float(scale_factor) if scale_factor else None
self.mode = mode
self.align_corners = align_corners
def forward(self, x):
if not self.size:
size = [int(t * self.scale_factor) for t in x.shape[-2:]]
else:
size = self.size
return resize(x, size, None, self.mode, self.align_corners)
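if __name__ == "__main__":
    import torch
    # Shape sketch (sizes illustrative): Upsample defers size computation to
    # forward(), so one module handles any input; scale_factor=2 maps
    # (1, 3, 16, 16) to (1, 3, 32, 32).
    up = Upsample(scale_factor=2, mode='bilinear', align_corners=False)
    assert tuple(up(torch.randn(1, 3, 16, 16)).shape) == (1, 3, 32, 32)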
| 1,827 | 34.843137 | 79 | py |
CP2 | CP2-main/mmseg/ops/encoding.py | import torch
from torch import nn
from torch.nn import functional as F
class Encoding(nn.Module):
"""Encoding Layer: a learnable residual encoder.
Input is of shape (batch_size, channels, height, width).
Output is of shape (batch_size, num_codes, channels).
Args:
channels: dimension of the features or feature channels
num_codes: number of code words
"""
def __init__(self, channels, num_codes):
super(Encoding, self).__init__()
# init codewords and smoothing factor
self.channels, self.num_codes = channels, num_codes
std = 1. / ((num_codes * channels)**0.5)
# [num_codes, channels]
self.codewords = nn.Parameter(
torch.empty(num_codes, channels,
dtype=torch.float).uniform_(-std, std),
requires_grad=True)
# [num_codes]
self.scale = nn.Parameter(
torch.empty(num_codes, dtype=torch.float).uniform_(-1, 0),
requires_grad=True)
@staticmethod
def scaled_l2(x, codewords, scale):
num_codes, channels = codewords.size()
batch_size = x.size(0)
reshaped_scale = scale.view((1, 1, num_codes))
expanded_x = x.unsqueeze(2).expand(
(batch_size, x.size(1), num_codes, channels))
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
scaled_l2_norm = reshaped_scale * (
expanded_x - reshaped_codewords).pow(2).sum(dim=3)
return scaled_l2_norm
@staticmethod
def aggregate(assignment_weights, x, codewords):
num_codes, channels = codewords.size()
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
batch_size = x.size(0)
expanded_x = x.unsqueeze(2).expand(
(batch_size, x.size(1), num_codes, channels))
encoded_feat = (assignment_weights.unsqueeze(3) *
(expanded_x - reshaped_codewords)).sum(dim=1)
return encoded_feat
def forward(self, x):
assert x.dim() == 4 and x.size(1) == self.channels
# [batch_size, channels, height, width]
batch_size = x.size(0)
# [batch_size, height x width, channels]
x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous()
        # assignment_weights: [batch_size, height x width, num_codes]
assignment_weights = F.softmax(
self.scaled_l2(x, self.codewords, self.scale), dim=2)
# aggregate
encoded_feat = self.aggregate(assignment_weights, x, self.codewords)
return encoded_feat
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(Nx{self.channels}xHxW =>Nx{self.num_codes}' \
f'x{self.channels})'
return repr_str
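if __name__ == "__main__":
    # Shape sketch (sizes illustrative): (2, 32, 8, 8) features encoded against
    # 16 codewords aggregate to one residual per codeword, i.e. (2, 16, 32).
    layer = Encoding(channels=32, num_codes=16)
    assert tuple(layer(torch.randn(2, 32, 8, 8)).shape) == (2, 16, 32)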
| 2,788 | 36.186667 | 78 | py |
CP2 | CP2-main/configs/config_pretrain.py | norm_cfg = dict(type='BN', requires_grad=True)
pretrain_path = None # Please set the path to pretrained weights for Quick Tuning
model = dict(
type='EncoderDecoder',
pretrained=pretrain_path,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 1, 2),
strides=(1, 2, 2, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='ASPPHead',
in_channels=2048,
in_index=3,
channels=512,
contrast=True,
dilations=(1, 6, 12, 18),
dropout_ratio=0.1,
num_classes=21,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=None,
train_cfg=dict(),
test_cfg=dict(mode='whole'))
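# Loading sketch: configs like this one are consumed through mmcv, e.g.
#   from mmcv.utils import Config
#   cfg = Config.fromfile('configs/config_pretrain.py')
#   cfg.model.decode_head.num_classes  # -> 21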
| 952 | 27.029412 | 84 | py |
CP2 | CP2-main/configs/config_finetune.py | # model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
pretrain_path = '' # Please set the path to pretrained model
data_root = ''  # Please set the path to your finetuning dataset (PASCAL VOC 2012)
model = dict(
type='EncoderDecoder',
pretrained=pretrain_path,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 1, 2),
strides=(1, 2, 2, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='ASPPHead',
in_channels=2048,
in_index=3,
channels=512,
dilations=(1, 6, 12, 18),
dropout_ratio=0.1,
num_classes=21,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=None,
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# dataset settings
dataset_type = 'PascalVOCDataset'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassAug',
split='ImageSets/Segmentation/train_aug.txt',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassAug',
split='ImageSets/Segmentation/val.txt',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClassAug',
split='ImageSets/Segmentation/val.txt',
pipeline=test_pipeline))
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=40000)
checkpoint_config = dict(by_epoch=False, interval=4000)
evaluation = dict(interval=4000, metric='mIoU')
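# Poly schedule sketch (as implemented by mmcv's PolyLrUpdaterHook):
#   lr(t) = (base_lr - min_lr) * (1 - t / max_iters) ** power + min_lr
# so halfway through the 40k iterations this config gives
#   (0.003 - 1e-4) * 0.5 ** 0.9 + 1e-4 ~= 1.65e-3.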
| 3,664 | 29.798319 | 85 | py |
SA-UNet | SA-UNet-master/Dropblock.py | import keras
import keras.backend as K
class DropBlock1D(keras.layers.Layer):
"""See: https://arxiv.org/pdf/1810.12890.pdf"""
def __init__(self,
block_size,
keep_prob,
sync_channels=False,
data_format=None,
**kwargs):
"""Initialize the layer.
:param block_size: Size for each mask block.
:param keep_prob: Probability of keeping the original feature.
:param sync_channels: Whether to use the same dropout for all channels.
:param data_format: 'channels_first' or 'channels_last' (default).
:param kwargs: Arguments for parent class.
"""
super(DropBlock1D, self).__init__(**kwargs)
self.block_size = block_size
self.keep_prob = keep_prob
self.sync_channels = sync_channels
self.data_format = K.normalize_data_format(data_format)
self.input_spec = keras.engine.base_layer.InputSpec(ndim=3)
self.supports_masking = True
def get_config(self):
config = {'block_size': self.block_size,
'keep_prob': self.keep_prob,
'sync_channels': self.sync_channels,
'data_format': self.data_format}
base_config = super(DropBlock1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_mask(self, inputs, mask=None):
return mask
def compute_output_shape(self, input_shape):
return input_shape
def _get_gamma(self, feature_dim):
"""Get the number of activation units to drop"""
feature_dim = K.cast(feature_dim, K.floatx())
block_size = K.constant(self.block_size, dtype=K.floatx())
return ((1.0 - self.keep_prob) / block_size) * (feature_dim / (feature_dim - block_size + 1.0))
def _compute_valid_seed_region(self, seq_length):
positions = K.arange(seq_length)
half_block_size = self.block_size // 2
valid_seed_region = K.switch(
K.all(
K.stack(
[
positions >= half_block_size,
positions < seq_length - half_block_size,
],
axis=-1,
),
axis=-1,
),
K.ones((seq_length,)),
K.zeros((seq_length,)),
)
return K.expand_dims(K.expand_dims(valid_seed_region, axis=0), axis=-1)
def _compute_drop_mask(self, shape):
seq_length = shape[1]
mask = K.random_binomial(shape, p=self._get_gamma(seq_length))
mask *= self._compute_valid_seed_region(seq_length)
mask = keras.layers.MaxPool1D(
pool_size=self.block_size,
padding='same',
strides=1,
data_format='channels_last',
)(mask)
return 1.0 - mask
def call(self, inputs, training=None):
def dropped_inputs():
outputs = inputs
if self.data_format == 'channels_first':
outputs = K.permute_dimensions(outputs, [0, 2, 1])
shape = K.shape(outputs)
if self.sync_channels:
mask = self._compute_drop_mask([shape[0], shape[1], 1])
else:
mask = self._compute_drop_mask(shape)
outputs = outputs * mask *\
(K.cast(K.prod(shape), dtype=K.floatx()) / K.sum(mask))
if self.data_format == 'channels_first':
outputs = K.permute_dimensions(outputs, [0, 2, 1])
return outputs
return K.in_train_phase(dropped_inputs, inputs, training=training)
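if __name__ == "__main__":
    # Numeric check of the gamma above (illustrative numbers): keep_prob=0.9
    # and block_size=7 on a length-100 feature map seed roughly 1.5% of
    # positions, each of which zeroes a block of 7 units.
    gamma = ((1.0 - 0.9) / 7.0) * (100.0 / (100.0 - 7.0 + 1.0))
    assert abs(gamma - 0.0152) < 1e-3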
class DropBlock2D(keras.layers.Layer):
"""See: https://arxiv.org/pdf/1810.12890.pdf"""
def __init__(self,
block_size,
keep_prob,
sync_channels=False,
data_format=None,
**kwargs):
"""Initialize the layer.
:param block_size: Size for each mask block.
:param keep_prob: Probability of keeping the original feature.
:param sync_channels: Whether to use the same dropout for all channels.
:param data_format: 'channels_first' or 'channels_last' (default).
:param kwargs: Arguments for parent class.
"""
super(DropBlock2D, self).__init__(**kwargs)
self.block_size = block_size
self.keep_prob = keep_prob
self.sync_channels = sync_channels
self.data_format = K.normalize_data_format(data_format)
self.input_spec = keras.engine.base_layer.InputSpec(ndim=4)
self.supports_masking = True
def get_config(self):
config = {'block_size': self.block_size,
'keep_prob': self.keep_prob,
'sync_channels': self.sync_channels,
'data_format': self.data_format}
base_config = super(DropBlock2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_mask(self, inputs, mask=None):
return mask
def compute_output_shape(self, input_shape):
return input_shape
def _get_gamma(self, height, width):
"""Get the number of activation units to drop"""
height, width = K.cast(height, K.floatx()), K.cast(width, K.floatx())
block_size = K.constant(self.block_size, dtype=K.floatx())
return ((1.0 - self.keep_prob) / (block_size ** 2)) *\
(height * width / ((height - block_size + 1.0) * (width - block_size + 1.0)))
def _compute_valid_seed_region(self, height, width):
positions = K.concatenate([
K.expand_dims(K.tile(K.expand_dims(K.arange(height), axis=1), [1, width]), axis=-1),
K.expand_dims(K.tile(K.expand_dims(K.arange(width), axis=0), [height, 1]), axis=-1),
], axis=-1)
half_block_size = self.block_size // 2
valid_seed_region = K.switch(
K.all(
K.stack(
[
positions[:, :, 0] >= half_block_size,
positions[:, :, 1] >= half_block_size,
positions[:, :, 0] < height - half_block_size,
positions[:, :, 1] < width - half_block_size,
],
axis=-1,
),
axis=-1,
),
K.ones((height, width)),
K.zeros((height, width)),
)
return K.expand_dims(K.expand_dims(valid_seed_region, axis=0), axis=-1)
def _compute_drop_mask(self, shape):
height, width = shape[1], shape[2]
mask = K.random_binomial(shape, p=self._get_gamma(height, width))
mask *= self._compute_valid_seed_region(height, width)
mask = keras.layers.MaxPool2D(
pool_size=(self.block_size, self.block_size),
padding='same',
strides=1,
data_format='channels_last',
)(mask)
return 1.0 - mask
def call(self, inputs, training=None):
def dropped_inputs():
outputs = inputs
if self.data_format == 'channels_first':
outputs = K.permute_dimensions(outputs, [0, 2, 3, 1])
shape = K.shape(outputs)
if self.sync_channels:
mask = self._compute_drop_mask([shape[0], shape[1], shape[2], 1])
else:
mask = self._compute_drop_mask(shape)
outputs = outputs * mask *\
(K.cast(K.prod(shape), dtype=K.floatx()) / K.sum(mask))
if self.data_format == 'channels_first':
outputs = K.permute_dimensions(outputs, [0, 3, 1, 2])
return outputs
return K.in_train_phase(dropped_inputs, inputs, training=training) | 7,815 | 38.474747 | 103 | py |
SA-UNet | SA-UNet-master/Train_chase.py | import os
import numpy as np
import cv2
from keras.callbacks import TensorBoard, ModelCheckpoint
np.random.seed(42)
import scipy.misc as mc
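# NOTE: scipy.misc.imread was removed in SciPy 1.2, so this script needs
# scipy<1.2 with Pillow installed (or a swap to imageio.imread).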
import matplotlib.pyplot as plt
data_location = ''
training_images_loc = data_location + 'CHASE/train/imageS/'
training_label_loc = data_location + 'CHASE/train/labelS/'
validate_images_loc = data_location + 'CHASE/validate/images/'
validate_label_loc = data_location + 'CHASE/validate/labels/'
train_files = os.listdir(training_images_loc)
train_data = []
train_label = []
validate_files = os.listdir(validate_images_loc)
validate_data = []
validate_label = []
desired_size=1008
for i in train_files:
im = mc.imread(training_images_loc + i)
label = mc.imread(training_label_loc + i.split('_')[0]+"_"+i.split('_')[1].split(".")[0] +"_1stHO.png" ,mode="L")
old_size = im.shape[:2] # old_size is in (height, width) format
delta_w = desired_size - old_size[1]
delta_h = desired_size - old_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
color = [0, 0, 0]
color2 = [0]
new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color)
new_label = cv2.copyMakeBorder(label, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color2)
train_data.append(cv2.resize(new_im, (desired_size, desired_size)))
temp = cv2.resize(new_label,
(desired_size, desired_size))
_, temp = cv2.threshold(temp, 127, 255, cv2.THRESH_BINARY)
train_label.append(temp)
for i in validate_files:
im = mc.imread(validate_images_loc + i)
label = mc.imread(validate_label_loc +i.split('_')[0]+'_'+ i.split('_')[1].split(".")[0] +"_1stHO.png" ,mode="L")
old_size = im.shape[:2] # old_size is in (height, width) format
delta_w = desired_size - old_size[1]
delta_h = desired_size - old_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
color = [0, 0, 0]
color2 = [0]
new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color)
new_label = cv2.copyMakeBorder(label, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color2)
validate_data.append(cv2.resize(new_im, (desired_size, desired_size)))
temp = cv2.resize(new_label,
(desired_size, desired_size))
_, temp = cv2.threshold(temp, 127, 255, cv2.THRESH_BINARY)
validate_label.append(temp)
train_data = np.array(train_data)
train_label = np.array(train_label)
validate_data = np.array(validate_data)
validate_label = np.array(validate_label)
x_train = train_data.astype('float32') / 255.
y_train = train_label.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), desired_size, desired_size, 3)) # adapt this if using `channels_first` image data format
y_train = np.reshape(y_train, (len(y_train), desired_size, desired_size, 1)) # adapt this if using `channels_first` im
x_validate = validate_data.astype('float32') / 255.
y_validate = validate_label.astype('float32') / 255.
x_validate = np.reshape(x_validate, (len(x_validate), desired_size, desired_size, 3)) # adapt this if using `channels_first` image data format
y_validate = np.reshape(y_validate, (len(y_validate), desired_size, desired_size, 1)) # adapt this if using `channels_first` im
TensorBoard(log_dir='./autoencoder', histogram_freq=0,
write_graph=True, write_images=True)
from SA_UNet import *
model=SA_UNet(input_size=(desired_size,desired_size,3),start_neurons=16,lr=1e-3,keep_prob=0.87,block_size=7)
weight="Model/CHASE/SA_UNet.h5"
restore=False
if restore and os.path.isfile(weight):
model.load_weights(weight)
model_checkpoint = ModelCheckpoint(weight, monitor='val_accuracy', verbose=1, save_best_only=False)
# plot_model(model, to_file='unet_resnet.png', show_shapes=False, show_layer_names=)
history=model.fit(x_train, y_train,
                  epochs=100,  # first 100 epochs with lr=1e-3, then the last 50 with lr=1e-4
batch_size=2,
# validation_split=0.1,
validation_data=(x_validate, y_validate),
shuffle=True,
callbacks= [TensorBoard(log_dir='./autoencoder'), model_checkpoint])
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('SA-UNet Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validate'], loc='lower right')
plt.show()
| 4,715 | 35.84375 | 143 | py |
SA-UNet | SA-UNet-master/Train_drive.py | import os
import cv2
from keras.callbacks import TensorBoard, ModelCheckpoint
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc.pilutil import *
data_location = ''
training_images_loc = data_location + 'DRIVE/train/images/'
training_label_loc = data_location + 'DRIVE/train/labels/'
validate_images_loc = data_location + 'DRIVE/validate/images/'
validate_label_loc = data_location + 'DRIVE/validate/labels/'
train_files = os.listdir(training_images_loc)
train_data = []
train_label = []
validate_files = os.listdir(validate_images_loc)
validate_data = []
validate_label = []
desired_size = 592
for i in train_files:
im = imread(training_images_loc + i)
label = imread(training_label_loc + i.split('_')[0] + '_manual1.png',mode="L")
old_size = im.shape[:2] # old_size is in (height, width) format
delta_w = desired_size - old_size[1]
delta_h = desired_size - old_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
color = [0, 0, 0]
color2 = [0]
new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color)
new_label = cv2.copyMakeBorder(label, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color2)
train_data.append(cv2.resize(new_im, (desired_size, desired_size)))
temp = cv2.resize(new_label, (desired_size, desired_size))
_, temp = cv2.threshold(temp, 127, 255, cv2.THRESH_BINARY)
train_label.append(temp)
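# Worked padding example: DRIVE images are 565x584 (width x height), so
# delta_w = 592 - 565 = 27 and delta_h = 592 - 584 = 8, i.e. 13/14 px of black
# border left/right and 4 px top/bottom before the (now no-op) resize to
# 592x592.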
for i in validate_files:
im = imread(validate_images_loc + i)
label = imread(validate_label_loc + i.split('_')[0] + '_manual1.png',mode="L")
old_size = im.shape[:2] # old_size is in (height, width) format
delta_w = desired_size - old_size[1]
delta_h = desired_size - old_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
color = [0, 0, 0]
color2 = [0]
new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color)
new_label = cv2.copyMakeBorder(label, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color2)
validate_data.append(cv2.resize(new_im, (desired_size, desired_size)))
temp = cv2.resize(new_label, (desired_size, desired_size))
_, temp = cv2.threshold(temp, 127, 255, cv2.THRESH_BINARY)
validate_label.append(temp)
train_data = np.array(train_data)
train_label = np.array(train_label)
validate_data = np.array(validate_data)
validate_label = np.array(validate_label)
x_train = train_data.astype('float32') / 255.
y_train = train_label.astype('float32') / 255.
x_train = np.reshape(x_train, (
len(x_train), desired_size, desired_size, 3)) # adapt this if using `channels_first` image data format
y_train = np.reshape(y_train, (len(y_train), desired_size, desired_size, 1)) # adapt this if using `channels_first` im
x_validate = validate_data.astype('float32') / 255.
y_validate = validate_label.astype('float32') / 255.
x_validate = np.reshape(x_validate, (
len(x_validate), desired_size, desired_size, 3)) # adapt this if using `channels_first` image data format
y_validate = np.reshape(y_validate,
(len(y_validate), desired_size, desired_size, 1)) # adapt this if using `channels_first` im
TensorBoard(log_dir='./autoencoder', histogram_freq=0,
write_graph=True, write_images=True)
from SA_UNet import *
model=SA_UNet(input_size=(desired_size,desired_size,3),start_neurons=16,lr=1e-3,keep_prob=0.82,block_size=7)
model.summary()
weight="Model/DRIVE/SA_UNet.h5"
restore=True
if restore and os.path.isfile(weight):
model.load_weights(weight)
model_checkpoint = ModelCheckpoint(weight, monitor='val_accuracy', verbose=1, save_best_only=False)
history=model.fit(x_train, y_train,
                  epochs=100,  # first 100 epochs with lr=1e-3, then the last 50 with lr=1e-4
batch_size=4,
# validation_split=0.05,
validation_data=(x_validate, y_validate),
shuffle=True,
callbacks= [TensorBoard(log_dir='./autoencoder'), model_checkpoint])
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('SA-UNet Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validate'], loc='lower right')
plt.show()
| 4,541 | 35.336 | 119 | py |
SA-UNet | SA-UNet-master/SA_UNet.py |
from keras.optimizers import *
from keras.models import Model
from keras.layers import Input,Conv2DTranspose, MaxPooling2D,BatchNormalization,concatenate,Activation
from Spatial_Attention import *
def Backbone(input_size=(512, 512, 3), block_size=7,keep_prob=0.9,start_neurons=16,lr=1e-3):
inputs = Input(input_size)
conv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(inputs)
conv1 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(conv1)
conv1= BatchNormalization()(conv1)
conv1 = Activation('relu')(conv1)
conv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(conv1)
conv1 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(conv1)
conv1 = BatchNormalization()(conv1)
conv1 = Activation('relu')(conv1)
pool1 = MaxPooling2D((2, 2))(conv1)
conv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(pool1)
conv2 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(conv2)
conv2 = BatchNormalization()(conv2)
conv2 = Activation('relu')(conv2)
conv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(conv2)
conv2 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(conv2)
conv2 = BatchNormalization()(conv2)
conv2 = Activation('relu')(conv2)
pool2 = MaxPooling2D((2, 2))(conv2)
conv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(pool2)
conv3 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(conv3)
conv3 = BatchNormalization()(conv3)
conv3 = Activation('relu')(conv3)
conv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(conv3)
conv3 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(conv3)
conv3 = BatchNormalization()(conv3)
conv3 = Activation('relu')(conv3)
pool3 = MaxPooling2D((2, 2))(conv3)
convm = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(pool3)
convm = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(convm)
convm = BatchNormalization()(convm)
convm = Activation('relu')(convm)
convm = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(convm)
convm = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(convm)
convm = BatchNormalization()(convm)
convm = Activation('relu')(convm)
deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(convm)
uconv3 = concatenate([deconv3, conv3])
uconv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv3)
uconv3 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(uconv3)
uconv3 = BatchNormalization()(uconv3)
uconv3 = Activation('relu')(uconv3)
uconv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv3)
uconv3 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(uconv3)
uconv3 = BatchNormalization()(uconv3)
uconv3 = Activation('relu')(uconv3)
deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
uconv2 = concatenate([deconv2, conv2])
uconv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv2)
uconv2 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(uconv2)
uconv2 = BatchNormalization()(uconv2)
uconv2 = Activation('relu')(uconv2)
uconv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv2)
uconv2 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(uconv2)
uconv2 = BatchNormalization()(uconv2)
uconv2 = Activation('relu')(uconv2)
deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
uconv1 = concatenate([deconv1, conv1])
uconv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv1)
uconv1 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(uconv1)
uconv1 = BatchNormalization()(uconv1)
uconv1 = Activation('relu')(uconv1)
uconv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv1)
uconv1 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(uconv1)
uconv1 = BatchNormalization()(uconv1)
uconv1 = Activation('relu')(uconv1)
output_layer_noActi = Conv2D(1, (1, 1), padding="same", activation=None)(uconv1)
output_layer = Activation('sigmoid')(output_layer_noActi)
    model = Model(inputs=inputs, outputs=output_layer)
model.compile(optimizer=Adam(lr=lr), loss='binary_crossentropy', metrics=['accuracy'])
return model
def SA_UNet(input_size=(512, 512, 3), block_size=7,keep_prob=0.9,start_neurons=16,lr=1e-3):
inputs = Input(input_size)
conv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(inputs)
conv1 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(conv1)
conv1= BatchNormalization()(conv1)
conv1 = Activation('relu')(conv1)
conv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(conv1)
conv1 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(conv1)
conv1 = BatchNormalization()(conv1)
conv1 = Activation('relu')(conv1)
pool1 = MaxPooling2D((2, 2))(conv1)
conv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(pool1)
conv2 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(conv2)
conv2 = BatchNormalization()(conv2)
conv2 = Activation('relu')(conv2)
conv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(conv2)
conv2 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(conv2)
conv2 = BatchNormalization()(conv2)
conv2 = Activation('relu')(conv2)
pool2 = MaxPooling2D((2, 2))(conv2)
conv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(pool2)
conv3 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(conv3)
conv3 = BatchNormalization()(conv3)
conv3 = Activation('relu')(conv3)
conv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(conv3)
conv3 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(conv3)
conv3 = BatchNormalization()(conv3)
conv3 = Activation('relu')(conv3)
pool3 = MaxPooling2D((2, 2))(conv3)
convm = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(pool3)
convm = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(convm)
convm = BatchNormalization()(convm)
convm = Activation('relu')(convm)
convm = spatial_attention(convm)
convm = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(convm)
convm = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(convm)
convm = BatchNormalization()(convm)
convm = Activation('relu')(convm)
deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(convm)
uconv3 = concatenate([deconv3, conv3])
uconv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv3)
uconv3 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(uconv3)
uconv3 = BatchNormalization()(uconv3)
uconv3 = Activation('relu')(uconv3)
uconv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv3)
uconv3 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(uconv3)
uconv3 = BatchNormalization()(uconv3)
uconv3 = Activation('relu')(uconv3)
deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
uconv2 = concatenate([deconv2, conv2])
uconv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv2)
uconv2 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(uconv2)
uconv2 = BatchNormalization()(uconv2)
uconv2 = Activation('relu')(uconv2)
uconv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv2)
uconv2 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(uconv2)
uconv2 = BatchNormalization()(uconv2)
uconv2 = Activation('relu')(uconv2)
deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
uconv1 = concatenate([deconv1, conv1])
uconv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv1)
uconv1 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(uconv1)
uconv1 = BatchNormalization()(uconv1)
uconv1 = Activation('relu')(uconv1)
uconv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv1)
uconv1 = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(uconv1)
uconv1 = BatchNormalization()(uconv1)
uconv1 = Activation('relu')(uconv1)
output_layer_noActi = Conv2D(1, (1, 1), padding="same", activation=None)(uconv1)
output_layer = Activation('sigmoid')(output_layer_noActi)
    model = Model(inputs=inputs, outputs=output_layer)
model.compile(optimizer=Adam(lr=lr), loss='binary_crossentropy', metrics=['accuracy'])
return model
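if __name__ == "__main__":
    # Quick shape sanity check (64x64 is illustrative; the training scripts
    # use 592 for DRIVE and 1008 for CHASE).
    m = SA_UNet(input_size=(64, 64, 3), start_neurons=16, lr=1e-3,
                keep_prob=0.9, block_size=7)
    assert m.output_shape == (None, 64, 64, 1)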
| 9,007 | 45.43299 | 102 | py |
SA-UNet | SA-UNet-master/Spatial_Attention.py | from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Reshape, Dense, multiply, Permute, Concatenate, \
Conv2D, Add, Activation, Lambda,Conv1D
from Dropblock import *
def spatial_attention(input_feature):
kernel_size = 7
if K.image_data_format() == "channels_first":
channel = input_feature._keras_shape[1]
cbam_feature = Permute((2, 3, 1))(input_feature)
else:
channel = input_feature._keras_shape[-1]
cbam_feature = input_feature
avg_pool = Lambda(lambda x: K.mean(x, axis=3, keepdims=True))(cbam_feature)
assert avg_pool._keras_shape[-1] == 1
max_pool = Lambda(lambda x: K.max(x, axis=3, keepdims=True))(cbam_feature)
assert max_pool._keras_shape[-1] == 1
concat = Concatenate(axis=3)([avg_pool, max_pool])
assert concat._keras_shape[-1] == 2
cbam_feature = Conv2D(filters=1,
kernel_size=kernel_size,
strides=1,
padding='same',
activation='sigmoid',
kernel_initializer='he_normal',
use_bias=False)(concat)
assert cbam_feature._keras_shape[-1] == 1
if K.image_data_format() == "channels_first":
cbam_feature = Permute((3, 1, 2))(cbam_feature)
return multiply([input_feature, cbam_feature])
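if __name__ == "__main__":
    # Shape sketch (sizes illustrative): the attention map is (H, W, 1) and is
    # broadcast-multiplied onto the input, so the output shape is unchanged.
    from keras.layers import Input
    from keras.models import Model
    inp = Input((32, 32, 8))
    model = Model(inp, spatial_attention(inp))
    assert model.output_shape == (None, 32, 32, 8)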
| 1,364 | 40.363636 | 118 | py |
pegnn | pegnn-master/train_autoencoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch_geometric.loader import DataLoader
import json
from src.datasets import CSVDataset
from src.utils.scaler import LatticeScaler
from src.utils.visualize import get_fig
from src.utils.debug import check_grad
from src.utils.io import AggregateBatch
if __name__ == "__main__":
from torch.utils.tensorboard import SummaryWriter
import argparse
import os
import random
import datetime
    parser = argparse.ArgumentParser(description="train autoencoder model")
parser.add_argument("--hparams", "-H", default=None, help="json file")
parser.add_argument("--tensorboard", "-t", default="./runs_autoencoder")
parser.add_argument("--dataset", "-D", default="./data/mp_20")
parser.add_argument("--device", "-d", default="cuda")
parser.add_argument("--verbose", "-v", default=False, action="store_true")
parser.add_argument("--log-interval", "-l", default=128, type=int)
parser.add_argument("--debug", "-g", default=False, action="store_true")
args = parser.parse_args()
from src.models.operator.autoencoder import AutoEncoder, AutoEncoderMLP
from src.models.operator.loss import get_loss, LossLatticeParameters
from src.models.operator.utils import (
LogSpike,
AggregateMetrics,
training_iterator,
validation_iterator,
testing_iterator,
Checkpoints,
Hparams,
)
# run name
dataset_name = os.path.split(args.dataset)[1]
tday = datetime.datetime.now()
run_name = tday.strftime(
f"training_%Y_%m_%d_%H_%M_%S_{dataset_name}_{random.randint(0,1000):<03d}"
)
print("run name:", run_name)
# basic setup
device = args.device
log_interval = args.log_interval
output_directory = args.tensorboard
# setup hyperparameters
hparams = Hparams()
if args.hparams is not None:
hparams.from_json(args.hparams)
print("hyper-parameters:")
print(json.dumps(hparams.dict(), indent=4))
# setup logs
log_dir = os.path.join(output_directory, run_name)
os.makedirs(output_directory, exist_ok=True)
writer = SummaryWriter(log_dir=log_dir)
hparams.to_json(os.path.join(log_dir, "hparams.json"))
log_spike = LogSpike(log_dir, threshold=0.5, verbose=args.verbose, debug=args.debug)
log_metrics_train = AggregateMetrics(writer, "train")
log_metrics_valid = AggregateMetrics(writer, "valid")
log_metrics_test = AggregateMetrics(writer, "test")
# load data and data scaler
dataset_train = CSVDataset(
os.path.join(args.dataset, "train.csv"), verbose=args.verbose, multithread=True
)
dataset_val = CSVDataset(
os.path.join(args.dataset, "val.csv"), verbose=args.verbose, multithread=True
)
dataset_test = CSVDataset(
os.path.join(args.dataset, "test.csv"), verbose=args.verbose, multithread=True
)
dataloader_train = DataLoader(dataset_train, batch_size=hparams.batch_size)
dataloader_val = DataLoader(dataset_val, batch_size=hparams.batch_size)
dataloader_test = DataLoader(dataset_test, batch_size=hparams.batch_size)
lattice_scaler = LatticeScaler()
lattice_scaler.fit(dataloader_train, args.verbose)
lattice_scaler = lattice_scaler.to(args.device)
# setup model, loss and optimizer
model = AutoEncoder(
features=hparams.features,
knn=hparams.knn,
ops_config=hparams.ops_config,
layers=hparams.mpnn_layers,
scale_limit_weights=hparams.scale_limit_weights,
scale_hidden_dim=hparams.scale_hidden_dim,
scale_limit_actions=hparams.scale_limit_actions,
scale_reduce_rho=hparams.scale_reduce_rho,
).to(device)
if hparams.loss == "parameters_l1":
loss_fn = LossLatticeParameters(lattice_scaler=lattice_scaler, distance="l1")
elif hparams.loss == "parameters_mse":
loss_fn = LossLatticeParameters(lattice_scaler=lattice_scaler, distance="mse")
else:
raise Exception(f"unknown loss {hparams.loss}")
opti = optim.Adam(model.parameters(), lr=hparams.lr, betas=(hparams.beta1, 0.999))
# setup checkpoint and training loop
checkpoints = Checkpoints(log_dir, model, opti)
data_it, tqdm_bar = training_iterator(
dataloader_train, hparams.total_step, verbose=args.verbose
)
for opt_step, batch in data_it:
model.train()
batch = batch.to(args.device)
# training step
opti.zero_grad()
loss, metrics = get_loss(batch, model, loss_fn)
loss.backward()
check_grad(model, verbose=args.verbose, debug=args.debug)
if hparams.grad_clipping is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), hparams.grad_clipping)
opti.step()
# logs
log_spike.log(loss, opt_step, batch, model, opti)
log_metrics_train.append(loss, metrics)
if args.verbose:
tqdm_bar.set_description(log_metrics_train.preview())
# validation
if (opt_step % log_interval) == 0:
log_metrics_train.log(opt_step)
model.eval()
with torch.no_grad():
fig = None
for batch in validation_iterator(dataloader_val, verbose=args.verbose):
batch = batch.to(device)
if fig is None:
fig = get_fig(batch, model, 8, lattice_scaler=lattice_scaler)
loss, metrics = get_loss(batch, model, loss_fn)
log_metrics_valid.append(loss, metrics)
metrics = log_metrics_valid.log(opt_step)
writer.add_figure("reconstruction", fig, opt_step)
checkpoints.step(opt_step, metrics)
# testing from the best checkpoint
model = checkpoints.load_best()
model = model.to(device)
model.eval()
aggregate = AggregateBatch()
with torch.no_grad():
for batch in testing_iterator(dataloader_test, verbose=args.verbose):
batch = batch.to(device)
loss, metrics, full_batch = get_loss(
batch, model, loss_fn, return_batch=True
)
aggregate.append(*full_batch)
log_metrics_test.append(loss, metrics)
metrics = log_metrics_test.log(opt_step, hparams=hparams.dict())
with open(os.path.join(log_dir, "metrics.json"), "w") as fp:
json.dump(metrics, fp, indent=4)
aggregate.write(os.path.join(log_dir, "output/test"), verbose=args.verbose)
print("\ntest metrics:")
print(json.dumps(metrics, indent=4))
| 6,650 | 31.602941 | 88 | py |
pegnn | pegnn-master/train_benchmark.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch_geometric.loader import DataLoader
import json
from src.datasets import CSVDataset
from src.utils.scaler import LatticeScaler
from src.utils.visualize import get_fig
from src.utils.debug import check_grad
from src.utils.io import AggregateBatch
if __name__ == "__main__":
from torch.utils.tensorboard import SummaryWriter
import argparse
import os
import random
import datetime
parser = argparse.ArgumentParser(description="train denoising model")
parser.add_argument("--hparams", "-H", default=None, help="json file")
parser.add_argument("--tensorboard", "-t", default="./runs_benchmark")
parser.add_argument("--dataset", "-D", default="./data/carbon_24")
parser.add_argument("--device", "-d", default="cuda")
parser.add_argument("--verbose", "-v", default=False, action="store_true")
parser.add_argument("--log-interval", "-l", default=128, type=int)
parser.add_argument("--debug", "-g", default=False, action="store_true")
args = parser.parse_args()
from src.models.operator.denoise import Denoise
from src.models.operator.loss import (
get_loss,
LossLatticeParameters,
LossActionMatrixDistance,
LossLatticeMetric
)
from src.models.operator.utils import (
LogSpike,
AggregateMetrics,
training_iterator,
validation_iterator,
testing_iterator,
Checkpoints,
Hparams,
)
# run name
dataset_name = os.path.split(args.dataset)[1]
tday = datetime.datetime.now()
run_name = tday.strftime(
f"training_%Y_%m_%d_%H_%M_%S_{dataset_name}_{random.randint(0,1000):<03d}"
)
print("run name:", run_name)
# basic setup
device = args.device
log_interval = args.log_interval
output_directory = args.tensorboard
# setup hyperparameters
hparams = Hparams()
if args.hparams is not None:
hparams.from_json(args.hparams)
print("hyper-parameters:")
print(json.dumps(hparams.dict(), indent=4))
# setup logs
log_dir = os.path.join(output_directory, run_name)
os.makedirs(output_directory, exist_ok=True)
writer = SummaryWriter(log_dir=log_dir)
hparams.to_json(os.path.join(log_dir, "hparams.json"))
log_spike = LogSpike(log_dir, threshold=0.5,
verbose=args.verbose, debug=args.debug)
log_metrics_train = AggregateMetrics(writer, "train")
log_metrics_valid = AggregateMetrics(writer, "valid")
log_metrics_test = AggregateMetrics(writer, "test")
# load data and data scaler
dataset_train = CSVDataset(os.path.join(
args.dataset, "train.csv"), verbose=args.verbose, knn=hparams.knn, multithread=False)
dataset_val = CSVDataset(os.path.join(
args.dataset, "val.csv"), verbose=args.verbose, knn=hparams.knn, multithread=False)
dataset_test = CSVDataset(os.path.join(
args.dataset, "test.csv"), verbose=args.verbose, knn=hparams.knn, multithread=False)
dataloader_train = DataLoader(dataset_train, batch_size=hparams.batch_size)
dataloader_val = DataLoader(dataset_val, batch_size=hparams.batch_size)
dataloader_test = DataLoader(dataset_test, batch_size=hparams.batch_size)
lattice_scaler = LatticeScaler()
lattice_scaler.fit(dataloader_train, args.verbose)
lattice_scaler = lattice_scaler.to(args.device)
# setup model, loss and optimizer
model = Denoise(
features=hparams.features,
knn=hparams.knn,
ops_config=hparams.ops_config,
mpnn=hparams.mpnn_layers,
steps=hparams.steps,
scale_limit_weights=hparams.scale_limit_weights,
scale_hidden_dim=hparams.scale_hidden_dim,
scale_layers=hparams.scale_layers,
scale_limit_actions=hparams.scale_limit_actions,
scale_reduce_rho=hparams.scale_reduce_rho,
repeated=hparams.repeated,
mlp_lattice=hparams.mlp_lattice,
lattice_scaler=lattice_scaler
).to(device)
if hparams.loss == "metric_l1":
loss_fn = LossLatticeMetric(distance="l1")
elif hparams.loss == "metric_mse":
loss_fn = LossLatticeMetric(distance="mse")
elif hparams.loss == "metric_trace":
loss_fn = LossLatticeMetric(distance="trace")
elif hparams.loss == "actions_l1":
loss_fn = LossActionMatrixDistance(distance="l1")
elif hparams.loss == "actions_mse":
loss_fn = LossActionMatrixDistance(distance="mse")
elif hparams.loss == "actions_trace":
loss_fn = LossActionMatrixDistance(distance="trace")
elif hparams.loss == "parameters_l1":
loss_fn = LossLatticeParameters(
lattice_scaler=lattice_scaler, distance="l1")
elif hparams.loss == "parameters_mse":
loss_fn = LossLatticeParameters(
lattice_scaler=lattice_scaler, distance="mse")
else:
raise Exception(f"unknown loss {hparams.loss}")
opti = optim.Adam(model.parameters(), lr=hparams.lr,
betas=(hparams.beta1, 0.999))
# setup checkpoint and training loop
checkpoints = Checkpoints(log_dir, model, opti)
data_it, tqdm_bar = training_iterator(
dataloader_train, hparams.total_step, verbose=args.verbose
)
for opt_step, batch in data_it:
model.train()
batch = batch.to(args.device)
# training step
opti.zero_grad()
loss, metrics = get_loss(batch, model, loss_fn)
loss.backward()
check_grad(model, verbose=args.verbose, debug=args.debug)
if hparams.grad_clipping is not None:
torch.nn.utils.clip_grad_norm_(
model.parameters(), hparams.grad_clipping)
opti.step()
# logs
log_spike.log(loss, opt_step, batch, model, opti)
log_metrics_train.append(loss, metrics)
if args.verbose:
tqdm_bar.set_description(log_metrics_train.preview())
# validation
if (opt_step % log_interval) == 0:
log_metrics_train.log(opt_step)
model.eval()
with torch.no_grad():
fig = None
for batch in validation_iterator(dataloader_val, verbose=args.verbose):
batch = batch.to(device)
if fig is None:
fig = get_fig(batch, model, 8)
loss, metrics = get_loss(batch, model, loss_fn)
log_metrics_valid.append(loss, metrics)
metrics = log_metrics_valid.log(opt_step)
writer.add_figure("denoising", fig, opt_step)
checkpoints.step(opt_step, metrics)
# testing from the best checkpoint
model = checkpoints.load_best()
model = model.to(device)
model.eval()
aggregate = AggregateBatch()
with torch.no_grad():
for batch in testing_iterator(dataloader_test, verbose=args.verbose):
batch = batch.to(device)
loss, metrics, full_batch = get_loss(
batch, model, loss_fn, return_batch=True)
aggregate.append(*full_batch)
log_metrics_test.append(loss, metrics)
metrics = log_metrics_test.log(opt_step, hparams=hparams.dict())
with open(os.path.join(log_dir, "metrics.json"), "w") as fp:
json.dump(metrics, fp, indent=4)
aggregate.write(os.path.join(log_dir, "output/test"), verbose=args.verbose)
print("\ntest metrics:")
print(json.dumps(metrics, indent=4))
| 7,547 | 32.251101 | 93 | py |
pegnn | pegnn-master/src/models/operator/loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.datasets.data import CrystalData
from src.utils.scaler import LatticeScaler
from src.models.operator.utils import lattice_params_to_matrix_torch
from typing import Dict, Tuple
def get_metrics(batch: CrystalData, reconstructed: torch.FloatTensor, scaler: LatticeScaler) -> Dict[str, torch.FloatTensor]:
lengths_real, angles_real = scaler.get_lattices_parameters(batch.cell)
if isinstance(reconstructed, tuple):
lengths_denoised, angles_denoised = reconstructed
else:
lengths_denoised, angles_denoised = scaler.get_lattices_parameters(
reconstructed)
lengths_dist = torch.abs(lengths_denoised - lengths_real)
angles_dist = torch.abs(angles_denoised - angles_real)
return {
"lengths_error": lengths_dist.mean().detach(),
"angles_error": angles_dist.mean().detach()
}
class LossLattice(nn.Module):
def __init__(self, lattice_scaler: LatticeScaler):
super().__init__()
self.lattice_scaler = lattice_scaler
def forward(self, batch: CrystalData, reconstructed: torch.FloatTensor) -> torch.FloatTensor:
raise NotImplementedError
def get_loss(batch: CrystalData, model: nn.Module, loss_fn: LossLattice, return_batch: bool = False) -> Tuple[torch.FloatTensor, Dict[str, torch.FloatTensor]]:
reconstructed = model(
cell=batch.cell, x=batch.pos, z=batch.z, struct_size=batch.num_atoms
)
loss = loss_fn(batch, reconstructed)
rec = reconstructed
if isinstance(rec, tuple):
rec = loss_fn.lattice_scaler.denormalise(
rec[0], rec[1])
metrics = get_metrics(batch, rec, loss_fn.lattice_scaler)
if isinstance(rec, tuple):
rec = lattice_params_to_matrix_torch(*rec)
if return_batch:
return loss, metrics, (batch.cell, rec, batch.pos, batch.z, batch.num_atoms)
return loss, metrics
class LossLatticeParameters(LossLattice):
def __init__(self, lattice_scaler: LatticeScaler, distance: str = "l1"):
super().__init__(lattice_scaler=lattice_scaler)
assert distance in ["l1", "mse"]
self.distance = distance
def forward(self, batch: CrystalData, reconstructed: torch.FloatTensor) -> torch.FloatTensor:
param_real = self.lattice_scaler.normalise_lattice(batch.cell)
if isinstance(reconstructed, tuple):
param_reconstructed = reconstructed
else:
param_reconstructed = self.lattice_scaler.normalise_lattice(
reconstructed)
param_real = torch.cat(param_real, dim=1)
param_reconstructed = torch.cat(param_reconstructed, dim=1)
if self.distance == "l1":
return F.l1_loss(param_reconstructed, param_real)
elif self.distance == "mse":
return F.mse_loss(param_reconstructed, param_real)
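# Illustrative sanity check (not part of the pipeline): with LatticeScaler at
# its default mean/std, a perfect reconstruction of the input cell gives zero
# parameter loss. The cubic cell and random atoms below are hypothetical.
if __name__ == "__main__":
    scaler = LatticeScaler()
    cell = 5.0 * torch.eye(3).unsqueeze(0)
    batch = CrystalData(cell=cell, pos=torch.rand(4, 3),
                        z=torch.randint(1, 90, (4,)),
                        num_atoms=torch.tensor([4]))
    loss_fn = LossLatticeParameters(lattice_scaler=scaler, distance="l1")
    print(loss_fn(batch, cell).item())  # 0.0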
| 2,878 | 32.476744 | 148 | py |
pegnn | pegnn-master/src/models/operator/utils.py | import torch
import torch.nn as nn
import tqdm
import os
import json
from dataclasses import dataclass
def save_step(spike_dir, batch, model, opti):
os.makedirs(spike_dir, exist_ok=True)
batch_dict = {
"cell": batch.cell.tolist(),
"pos": batch.pos.tolist(),
"z": batch.z.tolist(),
"num_atoms": batch.num_atoms.tolist(),
}
with open(os.path.join(spike_dir, "batch.json"), "w") as fp:
json.dump(batch_dict, fp)
grad_dict = {}
for k, p in model.named_parameters():
if p.grad is not None:
grad_dict[k] = p.grad.tolist()
with open(os.path.join(spike_dir, "grad.json"), "w") as fp:
json.dump(grad_dict, fp)
model_dict = {}
for k, p in model.named_parameters():
model_dict[k] = p.tolist()
with open(os.path.join(spike_dir, "model.json"), "w") as fp:
json.dump(model_dict, fp)
torch.save(opti.state_dict(), os.path.join(spike_dir, "opti.pt"))
class LogSpike:
def __init__(self, log_dir, threshold=0.5, verbose=False, debug=False):
self.log_dir = log_dir
self.prev_loss = None
self.verbose = verbose
self.debug = debug
self.threshold = threshold
def log(self, loss, opt_step, batch, model, opti):
if self.prev_loss is not None:
if abs((loss.item() / prev_loss) - 1.0) > self.threshold:
if self.debug:
spike_dir = os.path.join(
self.log_dir,
"spike",
f"epoch_{opt_step}_loss_{loss.item():.3f}",
)
save_step(spike_dir, batch, model, opti)
if self.verbose:
print(
f"loss spike detected (from {prev_loss:.6f} to {loss.item():.6f})"
)
prev_loss = loss.item()
class AggregateMetrics:
def __init__(self, writer, label):
self.writer = writer
self.label = label
self.loss = []
self.lengths_error = []
self.angles_error = []
def append(self, loss, metrics):
self.loss.append(loss.item())
self.lengths_error.append(metrics["lengths_error"].item())
self.angles_error.append(metrics["angles_error"].item())
def preview(self):
return " ".join(
[
f"loss: {self.loss[-1]:.4f}",
f"lengths error: {self.lengths_error[-1]:.4f}",
f"angles error: {self.angles_error[-1]:.4f}",
]
)
def log(self, opt_step, clear=True, hparams=None):
loss = torch.tensor(self.loss).mean().item()
lengths_error = torch.tensor(self.lengths_error).mean().item()
angles_error = torch.tensor(self.angles_error).mean().item()
if self.writer is not None:
self.writer.add_scalar(f"{self.label}/loss", loss, opt_step)
self.writer.add_scalar(
f"{self.label}/lengths_error", lengths_error, opt_step)
self.writer.add_scalar(
f"{self.label}/angles_error", angles_error, opt_step)
metrics = {
"loss": loss,
"lengths_error": lengths_error,
"angles_error": angles_error
}
if (self.writer is not None) and (hparams is not None):
self.writer.add_hparams(hparams, metrics)
if clear:
self.loss = []
self.lengths_error = []
self.angles_error = []
return metrics
def training_iterator(loader, total_step, verbose=True):
def data_loop(dataloader, total):
it = 0
while it < total:
for _, batch in zip(range(total - it), dataloader):
yield batch
                it += 1  # count yielded batches so exactly `total` are produced
data_it = data_loop(loader, total_step + 1)
if verbose:
tqdm_bar = tqdm.tqdm(data_it, total=total_step)
data_it = iter(tqdm_bar)
else:
tqdm_bar = None
return enumerate(data_it), tqdm_bar
def validation_iterator(loader, verbose=True):
if verbose:
return tqdm.tqdm(loader, desc="validation", position=1, leave=False)
return loader
def testing_iterator(loader, verbose=True):
if verbose:
return tqdm.tqdm(loader, desc="testing")
return loader
class Checkpoints:
def __init__(self, log_dir, model, opti):
self.log_dir = log_dir
self.opti = opti
self.model = model
torch.save(opti.state_dict(), os.path.join(log_dir, "opti.pt"))
torch.save(model.state_dict(), os.path.join(log_dir, "model.pt"))
self.best = float("inf")
self.filename_best = os.path.join(log_dir, "model.pt")
def step(self, opt_step, metrics):
metrics_sum = (
metrics["lengths_error"]
+ metrics["angles_error"]
)
if metrics_sum < self.best:
self.best = metrics_sum
if "best" in self.filename_best:
delete_file = self.filename_best
else:
delete_file = None
backup_filename = (
f"best_model_batch_{opt_step}_val_{self.best:.3f}".replace(
".", "_")
)
self.filename_best = os.path.join(
self.log_dir, backup_filename + ".pt")
torch.save(self.model.state_dict(), self.filename_best)
if delete_file is not None:
os.remove(delete_file)
torch.save(self.model.state_dict(),
os.path.join(self.log_dir, "model.pt"))
torch.save(self.opti.state_dict(),
os.path.join(self.log_dir, "opti.pt"))
def load_best(self):
weights = torch.load(self.filename_best,
map_location=torch.device("cpu"))
self.model.load_state_dict(weights)
return self.model
@dataclass
class Hparams:
batch_size: int = 1 << 8
total_step: int = 1 << 15
lr: float = 1e-4 # included every time in grid search
beta1: float = 0.9
grad_clipping: float = 1.0
loss: str = "parameters_l1"
knn: int = 16
features: int = 128
ops_config_type: str = "grad"
ops_config_normalize: bool = True
ops_config_edges: str = "n_ij"
ops_config_triplets: str = "n_ij|n_ik|angle"
mpnn_layers: int = 8
steps: int = 4
scale_limit_weights: float = 0.0 # included every time in grid search
scale_hidden_dim: int = 256
scale_layers: int = 1
scale_limit_actions: float = 0.5 # included every time in grid search
scale_reduce_rho: str = "mean"
repeated: bool = False
mlp_lattice: bool = False
@property
def ops_config(self):
def split(s, delimiter):
if len(s) > 0:
return s.split(delimiter)
return []
return {
"type": self.ops_config_type,
"normalize": self.ops_config_normalize,
"edges": split(self.ops_config_edges, "|"),
"triplets": split(self.ops_config_triplets, "|"),
}
def from_json(self, file_name):
with open(file_name, "r") as fp:
hparams = json.load(fp)
for key, value in hparams.items():
assert key in self.__dict__
self.__dict__[key] = value
def to_json(self, file_name):
with open(file_name, "w") as fp:
json.dump(self.__dict__, fp, indent=4)
def dict(self):
return self.__dict__
def build_mlp(in_dim, hidden_dim, fc_num_layers, out_dim):
mods = [nn.Linear(in_dim, hidden_dim), nn.ReLU()]
for i in range(fc_num_layers - 1):
mods += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU()]
mods += [nn.Linear(hidden_dim, out_dim)]
return nn.Sequential(*mods)
def lattice_params_to_matrix_torch(lengths, angles):
"""Batched torch version to compute lattice matrix from params.
lengths: torch.Tensor of shape (N, 3), unit A
angles: torch.Tensor of shape (N, 3), unit degree
"""
angles_r = torch.deg2rad(angles)
coses = torch.cos(angles_r)
sins = torch.sin(angles_r)
val = (coses[:, 0] * coses[:, 1] - coses[:, 2]) / (sins[:, 0] * sins[:, 1])
# Sometimes rounding errors result in values slightly > 1.
val = torch.clamp(val, -1., 1.)
gamma_star = torch.arccos(val)
vector_a = torch.stack([
lengths[:, 0] * sins[:, 1],
torch.zeros(lengths.size(0), device=lengths.device),
lengths[:, 0] * coses[:, 1]], dim=1)
vector_b = torch.stack([
-lengths[:, 1] * sins[:, 0] * torch.cos(gamma_star),
lengths[:, 1] * sins[:, 0] * torch.sin(gamma_star),
lengths[:, 1] * coses[:, 0]], dim=1)
vector_c = torch.stack([
torch.zeros(lengths.size(0), device=lengths.device),
torch.zeros(lengths.size(0), device=lengths.device),
lengths[:, 2]], dim=1)
return torch.stack([vector_a, vector_b, vector_c], dim=1)
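# Quick numerical check (illustrative): a cubic cell with a = b = c = 5 and
# all angles at 90 degrees should map to (approximately) 5 * I.
if __name__ == "__main__":
    lengths = torch.tensor([[5.0, 5.0, 5.0]])
    angles = torch.tensor([[90.0, 90.0, 90.0]])
    print(lattice_params_to_matrix_torch(lengths, angles))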
| 8,969 | 28.409836 | 90 | py |
pegnn | pegnn-master/src/models/operator/denoise.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import src.models.layers.operator.gnn as ops
from src.models.operator.utils import build_mlp, lattice_params_to_matrix_torch
from src.utils.geometry import Geometry
from torch_scatter import scatter_mean
class Denoise(nn.Module):
def __init__(
self,
features: int,
knn: int,
ops_config: dict,
mpnn: int,
steps: int,
scale_limit_weights: float,
scale_hidden_dim: int,
scale_layers: int,
scale_limit_actions: float,
scale_reduce_rho: str,
repeated: bool,
mlp_lattice: bool = False,
lattice_scaler=None,
):
super(Denoise, self).__init__()
self.knn = knn
self.steps = steps
self.repeated = repeated
self.mlp_lattice = mlp_lattice
self.embedding = nn.Embedding(100, features)
self.mpnn = nn.ModuleList(
[ops.MPNN(features=features) for _ in range(mpnn)])
self.I = nn.Parameter(torch.eye(3), requires_grad=False)
if self.mlp_lattice:
assert lattice_scaler is not None
self.lattice_scaler = lattice_scaler
self.lattice_pred = build_mlp(features, 128, 4, 6)
elif self.repeated:
self.update = ops.MPNN(features=features)
self.actions = ops.Actions(
features,
knn,
ops_config,
scale_k=scale_limit_weights,
hidden_dim=scale_hidden_dim,
n_layers=scale_layers,
limit_actions=scale_limit_actions,
reduce_rho=scale_reduce_rho,
)
else:
self.update = nn.ModuleList(
[ops.MPNN(features=features) for _ in range(self.steps)]
)
self.actions = nn.ModuleList(
[
ops.Actions(
features,
knn,
ops_config,
scale_k=scale_limit_weights,
hidden_dim=scale_hidden_dim,
n_layers=scale_layers,
limit_actions=scale_limit_actions,
reduce_rho=scale_reduce_rho,
)
for _ in range(self.steps)
]
)
self.it = 0
def actions_init(self, cell: torch.FloatTensor) -> torch.FloatTensor:
return self.I.unsqueeze(0).repeat(cell.shape[0], 1, 1)
@property
def device(self):
return self.embedding.weight.device
def forward(
self,
cell: torch.FloatTensor,
x: torch.FloatTensor,
z: torch.FloatTensor,
struct_size: torch.FloatTensor,
edge_index: torch.LongTensor = None,
edge_attr: torch.LongTensor = None,
step: int = None,
):
geometry = Geometry(cell, struct_size, x % 1, knn=self.knn,
edge_index=edge_index, edge_attr=edge_attr)
if step is None:
step = self.steps
h = self.embedding(z)
for l in self.mpnn:
h = l(geometry, h)
if self.mlp_lattice:
latent = scatter_mean(h, geometry.batch, dim=0)
lattice = self.lattice_pred(latent)
lengths, angles = self.lattice_scaler.denormalise(
lattice[:, :3], lattice[:, 3:]
)
cell_prime = lattice_params_to_matrix_torch(lengths, angles)
action = torch.bmm(cell_prime, torch.inverse(cell))
return cell_prime, [cell_prime], [action]
else:
action_rho = self.actions_init(cell)
rho_list = []
actions_list = []
if self.repeated:
actions = self.actions
update = self.update
for i in range(step):
if not self.repeated:
actions = self.actions[i]
update = self.update[i]
h = update(geometry, h)
edges_weights, triplets_weights = actions(geometry, h)
rho_prime, action = actions.apply(
geometry, edges_weights, triplets_weights
)
action_rho = torch.bmm(action, action_rho)
rho_prime = torch.bmm(action_rho, cell)
rho_list.append(rho_prime)
actions_list.append(action_rho)
geometry.rho = rho_prime
geometry.update_vectors()
return geometry.rho, rho_list, actions_list
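# Shape contract of Denoise.forward (illustrative summary of the code above):
#     cell:        (B, 3, 3) float  -- row-vector lattice matrices
#     x:           (N, 3)    float  -- fractional coordinates (wrapped by x % 1)
#     z:           (N,)      long   -- atomic numbers (< 100, the embedding size)
#     struct_size: (B,)      long   -- atoms per crystal, summing to N
# It returns (final cell, per-step cells, per-step accumulated actions).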
| 4,657 | 29.051613 | 79 | py |
pegnn | pegnn-master/src/models/operator/autoencoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import src.models.layers.operator.gnn as ops
from src.models.operator.utils import build_mlp
from src.utils.geometry import Geometry
from torch_scatter import scatter_mean
from typing import Tuple
class AutoEncoder(nn.Module):
def __init__(
self,
features: int,
knn: int,
ops_config: dict,
layers: int,
scale_limit_weights: float,
scale_hidden_dim: int,
scale_limit_actions: float,
scale_reduce_rho: str,
):
super(AutoEncoder, self).__init__()
self.knn = knn
self.layers = layers
self.embedding = nn.Embedding(100, features)
self.mpnn = nn.ModuleList([ops.MPNN(features=features) for _ in range(layers)])
self.I = nn.Parameter(torch.eye(3), requires_grad=False)
self.update = nn.ModuleList(
[ops.MPNN(features=features) for _ in range(layers)]
)
self.actions = nn.ModuleList(
[
ops.Actions(
features,
knn,
ops_config,
scale_k=scale_limit_weights,
hidden_dim=scale_hidden_dim,
n_layers=1,
limit_actions=scale_limit_actions,
reduce_rho=scale_reduce_rho,
)
for _ in range(layers)
]
)
def actions_init(self, cell: torch.FloatTensor) -> torch.FloatTensor:
return self.I.unsqueeze(0).repeat(cell.shape[0], 1, 1)
@property
def device(self):
return self.embedding.weight.device
def forward(
self,
cell: torch.FloatTensor,
x: torch.FloatTensor,
z: torch.FloatTensor,
struct_size: torch.LongTensor,
):
cell = self.actions_init(cell)
geometry = Geometry(cell, struct_size, x % 1, knn=self.knn)
geometry.filter_triplets(geometry.triplets_sin_ijk.abs() > 1e-3)
h = self.embedding(z)
for l in self.mpnn:
h = l(geometry, h)
action_rho = self.actions_init(cell)
rho_list = []
actions_list = []
for i in range(self.layers):
actions = self.actions[i]
update = self.update[i]
h = update(geometry, h)
edges_weights, triplets_weights = actions(geometry, h)
rho_prime, action = actions.apply(geometry, edges_weights, triplets_weights)
action_rho = torch.bmm(action, action_rho)
rho_prime = torch.bmm(action_rho, cell)
rho_list.append(rho_prime)
actions_list.append(action_rho)
geometry.cell = rho_prime
geometry.update_vectors()
return geometry.cell # , rho_list, actions_list
class AutoEncoderMLP(nn.Module):
def __init__(
self,
features: int,
knn: int,
layers: int,
lattice_scaler=None,
):
super(AutoEncoderMLP, self).__init__()
self.knn = knn
self.layers = layers
self.lattice_scaler = lattice_scaler
self.embedding = nn.Embedding(100, features)
self.I = nn.Parameter(torch.eye(3), requires_grad=False)
self.mpnn = nn.ModuleList([ops.MPNN(features=features) for _ in range(layers)])
self.lattice_pred = build_mlp(features, 128, 4, 6)
@property
def device(self):
return self.embedding.weight.device
def actions_init(self, cell: torch.FloatTensor) -> torch.FloatTensor:
return self.I.unsqueeze(0).repeat(cell.shape[0], 1, 1)
def forward(
self,
cell: torch.FloatTensor,
x: torch.FloatTensor,
z: torch.FloatTensor,
struct_size: torch.LongTensor,
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
cell = self.actions_init(cell)
geometry = Geometry(cell, struct_size, x % 1, knn=self.knn)
h = self.embedding(z)
for l in self.mpnn:
h = l(geometry, h)
latent = scatter_mean(h, geometry.batch, dim=0)
lattice = self.lattice_pred(latent)
return (lattice[:, :3], lattice[:, 3:])
| 4,223 | 25.236025 | 88 | py |
pegnn | pegnn-master/src/models/layers/random.py | import torch
import torch.nn as nn
class RandomMatrixSL3Z(nn.Module):
def __init__(self):
super().__init__()
generators = torch.tensor(
[
[[1, 0, 1], [0, -1, -1], [0, 1, 0]],
[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
[[0, 1, 0], [1, 0, 0], [-1, -1, -1]],
],
dtype=torch.float32,
)
generators = torch.cat((generators, torch.inverse(generators)), dim=0)
self.generators = nn.Parameter(generators, requires_grad=False)
@property
def device(self):
return self.generators.device
def forward(self, batch_size, e=5):
n = 1 << e
g_idx = torch.randint(
0, self.generators.shape[0], (batch_size * n,), device=self.device
)
M = self.generators[g_idx]
for _ in range(e):
M = M.view(2, -1, 3, 3)
M = torch.bmm(M[0], M[1])
return torch.round(M)
def apply_sl3z(g, rho, x, batch):
rho_prime = torch.bmm(rho, torch.inverse(g))
x_prime = (torch.bmm(g[batch], x.unsqueeze(2)) % 1).squeeze(2)
return rho_prime, x_prime
class RandomSLZ(nn.Module):
def __init__(self):
super().__init__()
self.generator = RandomMatrixSL3Z()
def forward(self, rho, x, batch):
g = self.generator(rho.shape[0])
rho_prime = torch.bmm(rho, g)
x_prime = (torch.bmm(torch.inverse(g)[batch], x.unsqueeze(2)) % 1).squeeze(2)
return rho_prime, x_prime
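# Illustrative check: the generators and their inverses all lie in SL(3, Z),
# so any sampled product has integer entries and determinant 1. A small
# exponent keeps the entries small enough for a tight float comparison.
if __name__ == "__main__":
    sampler = RandomMatrixSL3Z()
    g = sampler(batch_size=8, e=2)
    print(torch.det(g))  # all ~ 1.0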
| 1,510 | 25.982143 | 85 | py |
pegnn | pegnn-master/src/models/layers/operator/gnn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import scatter
from typing import Tuple
from src.utils.geometry import Geometry
from src.utils.shape import build_shapes, assert_tensor_match, shape
from src.models.layers.operator.operator import Operator, make_operator
class EdgeProj(nn.Module):
def __init__(
self,
features: int,
hidden_dim: int,
n_layers: int,
output_dim: int,
cutoff: float = 10,
step: float = 0.1,
bias: bool = False,
):
super(EdgeProj, self).__init__()
if output_dim is None:
output_dim = features
self.cutoff = cutoff
self.step = step
self.mu = nn.Parameter(
torch.arange(0, self.cutoff, self.step, dtype=torch.float32)
)
layers = [
nn.Linear(2 * features + self.mu.shape[0], hidden_dim, bias=False),
nn.SiLU(),
]
for i in range(n_layers):
layers.extend([nn.Linear(hidden_dim, hidden_dim, bias=False), nn.SiLU()])
layers.append(nn.Linear(hidden_dim, output_dim, bias=bias))
self.mlp = nn.Sequential(*layers)
self.reset_parameters()
def reset_parameters(self):
for layer in self.mlp:
if isinstance(layer, nn.Linear):
torch.nn.init.xavier_normal_(layer.weight)
def get_last_layer(self):
last_layer = None
for layer in self.mlp:
if isinstance(layer, nn.Linear):
last_layer = layer
return last_layer
def forward(self, h, src, dst, edge_norm):
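        # Gaussian radial-basis expansion of the edge length over the fixed
        # centers `mu` in [0, cutoff), with width controlled by `step`.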
d_ij_emb = torch.exp(
-1 / self.step * (self.mu[None, :] - edge_norm[:, None]).pow(2)
)
inputs = torch.cat((h[src], h[dst], d_ij_emb), dim=-1)
return self.mlp(inputs)
class FaceProj(nn.Module):
def __init__(
self,
features: int,
hidden_dim: int,
n_layers: int,
output_dim: int,
cutoff: float = 10,
step: float = 0.1,
bias: bool = False,
):
super(FaceProj, self).__init__()
if output_dim is None:
output_dim = features
self.cutoff = cutoff
self.step = step
self.mu = nn.Parameter(
torch.arange(0, self.cutoff, self.step, dtype=torch.float32)
)
layers = [
nn.Linear(3 * features + self.mu.shape[0] * 2 + 2, hidden_dim, bias=False),
nn.SiLU(),
]
for i in range(n_layers):
layers.extend([nn.Linear(hidden_dim, hidden_dim, bias=False), nn.SiLU()])
layers.append(nn.Linear(hidden_dim, output_dim, bias=bias))
self.mlp = nn.Sequential(*layers)
self.reset_parameters()
def reset_parameters(self):
for layer in self.mlp:
if isinstance(layer, nn.Linear):
torch.nn.init.xavier_normal_(layer.weight)
def get_last_layer(self):
last_layer = None
for layer in self.mlp:
if isinstance(layer, nn.Linear):
last_layer = layer
return last_layer
def forward(self, h, triplets, norm_ij, norm_ik, cos_ijk, sin_ijk):
d_ij_emb = torch.exp(
-1 / self.step * (self.mu[None, :] - norm_ij[:, None]).pow(2)
)
d_ik_emb = torch.exp(
-1 / self.step * (self.mu[None, :] - norm_ik[:, None]).pow(2)
)
inputs = torch.cat(
(
h[triplets.src],
h[triplets.dst_i],
h[triplets.dst_j],
d_ij_emb,
d_ik_emb,
cos_ijk.unsqueeze(1),
sin_ijk.unsqueeze(1),
),
dim=1,
)
return self.mlp(inputs)
class UpdateFeatures(nn.GRU):
def __init__(self, features: int):
super(UpdateFeatures, self).__init__(features, features, 1, batch_first=False)
def forward(self, h: torch.FloatTensor, mi: torch.FloatTensor):
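        # one GRU step: the aggregated message is the input sequence of
        # length 1, the current node features are the hidden state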
_, h_prime = super().forward(mi.unsqueeze(0), h.unsqueeze(0))
return h_prime.squeeze(0)
class Actions(nn.Module):
def __init__(
self,
features: int,
knn: int,
ops_config: dict,
scale_k: float,
hidden_dim: int,
n_layers: int,
limit_actions: float,
reduce_rho: str,
):
super(Actions, self).__init__()
self.ops = make_operator(ops_config)
self.knn = knn
self.limit_actions = limit_actions
self.reduce_rho = reduce_rho
self.interact_edges = EdgeProj(
features,
output_dim=self.ops.edges_dim,
hidden_dim=hidden_dim,
n_layers=n_layers,
bias=True,
)
self.interact_triplets = FaceProj(
features,
output_dim=self.ops.triplets_dim,
hidden_dim=hidden_dim,
n_layers=n_layers,
bias=True,
)
self.I = nn.Parameter(torch.eye(3), requires_grad=False)
self.scale_k = scale_k
self.act_scale = nn.Tanh()
self.reset_parameters()
def reset_parameters(self):
self.interact_edges.reset_parameters()
self.interact_triplets.reset_parameters()
def apply(
self,
geometry: Geometry,
edges_weights: torch.FloatTensor,
triplets_weights: torch.FloatTensor,
check_tensor: bool = True,
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.LongTensor]:
# type checking and size evaluation
if check_tensor:
shapes = assert_tensor_match(
(geometry.cell, shape("b", 3, 3, dtype=torch.float32)),
(geometry.batch, shape("n", dtype=torch.long)),
(geometry.edges.src, shape("e", dtype=torch.long)),
(geometry.edges.dst, shape("e", dtype=torch.long)),
(geometry.edges.cell, shape("e", 3, dtype=torch.long)),
(geometry.triplets.src, shape("t", dtype=torch.long)),
(geometry.triplets.dst_i, shape("t", dtype=torch.long)),
(geometry.triplets.cell_i, shape("t", 3, dtype=torch.long)),
(geometry.triplets.dst_j, shape("t", dtype=torch.long)),
(geometry.triplets.cell_j, shape("t", 3, dtype=torch.long)),
)
else:
shapes = build_shapes(
{
"b": geometry.cell.shape[0],
"n": geometry.batch.shape[0],
"e": geometry.edges.src.shape[0],
"t": geometry.triplets.src.shape[0],
}
)
# calculating actions
edges_ops, triplets_ops = self.ops.forward(geometry)
# aggregation
if edges_ops is not None:
weighted_ops = (edges_ops * edges_weights[:, :, None, None]).sum(dim=1)
actions_edges = scatter(
weighted_ops,
geometry.batch_edges,
dim=0,
dim_size=shapes.b,
reduce=self.reduce_rho,
)
else:
actions_edges = None
if triplets_ops is not None:
weighted_ops = (triplets_ops * triplets_weights[:, :, None, None]).sum(
dim=1
)
actions_triplets = scatter(
weighted_ops,
geometry.batch_triplets,
dim=0,
dim_size=shapes.b,
reduce=self.reduce_rho,
)
else:
actions_triplets = None
# action
if (actions_edges is not None) and (actions_triplets is not None):
actions_rho = actions_edges + actions_triplets
elif actions_edges is not None:
actions_rho = actions_edges
        elif actions_triplets is not None:
            actions_rho = actions_triplets
        else:
            raise ValueError(
                "operator produced neither edge nor triplet actions")
if self.limit_actions != 0.0:
actions_rho = self.limit_actions * torch.tanh(
actions_rho / self.limit_actions
)
actions_rho = self.I + actions_rho
rho_prime = torch.bmm(actions_rho, geometry.cell)
return rho_prime, actions_rho
def forward(
self,
geometry: Geometry,
h: torch.FloatTensor,
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
if self.ops.edges_dim > 0:
edges_weights = self.interact_edges(
h, geometry.edges.src, geometry.edges.dst, geometry.edges_r_ij
)
else:
edges_weights = None
if self.ops.triplets_dim > 0:
triplets_weights = self.interact_triplets(
h,
geometry.triplets,
geometry.triplets_r_ij,
geometry.triplets_r_ik,
geometry.triplets_cos_ijk,
geometry.triplets_sin_ijk,
)
else:
triplets_weights = None
if self.scale_k != 0.0:
if edges_weights is not None:
edges_weights = self.scale_k * self.act_scale(edges_weights)
if triplets_weights is not None:
triplets_weights = self.scale_k * self.act_scale(triplets_weights)
return edges_weights, triplets_weights
class MPNN(nn.Module):
def __init__(self, features: int):
super(MPNN, self).__init__()
self.message_f = EdgeProj(
features, hidden_dim=features, n_layers=0, output_dim=features
)
self.update_f = UpdateFeatures(features)
self.reset_parameters()
def reset_parameters(self):
self.message_f.reset_parameters()
self.update_f.reset_parameters()
def forward(self, geometry: Geometry, h: torch.FloatTensor):
# message passing
mij = self.message_f(
h, geometry.edges.src, geometry.edges.dst, geometry.edges_r_ij
)
mi = scatter(mij, geometry.edges.src, dim=0, reduce="mean", dim_size=h.shape[0])
h_prime = self.update_f(h, mi)
return h_prime
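# Illustrative smoke test for the edge MLP alone (no Geometry needed): random
# node features and edge lengths in, one vector per edge out.
if __name__ == "__main__":
    proj = EdgeProj(features=8, hidden_dim=16, n_layers=1, output_dim=4)
    h = torch.randn(10, 8)
    src = torch.randint(0, 10, (20,))
    dst = torch.randint(0, 10, (20,))
    d_ij = 10 * torch.rand(20)
    print(proj(h, src, dst, d_ij).shape)  # torch.Size([20, 4])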
| 10,092 | 29.492447 | 88 | py |
pegnn | pegnn-master/src/models/layers/operator/operator.py | import torch
import torch.nn as nn
from src.utils.geometry import Geometry
from src.models.layers.operator.grad import Grad
import enum
from typing import List
import abc
class Operator(nn.Module):
def __init__(self, operators_edges, operators_triplets, normalize: bool = True):
super().__init__()
self.operators_edges = operators_edges
self.operators_triplets = operators_triplets
self.normalize = normalize
@property
def edges_dim(self):
return len(self.operators_edges)
@property
def triplets_dim(self):
return len(self.operators_triplets)
def forward(self, geometry):
raise NotImplementedError
class OpKetBra(Operator):
class AbstractOperator(metaclass=abc.ABCMeta):
        @abc.abstractmethod
def op(
self, u: torch.FloatTensor, v: torch.FloatTensor = None
) -> torch.FloatTensor:
pass
class OperatorVij(AbstractOperator):
def op(self, u, v=None):
return torch.bmm(u.unsqueeze(2), u.unsqueeze(1))
class OperatorVik(AbstractOperator):
def op(self, u, v=None):
return torch.bmm(v.unsqueeze(2), v.unsqueeze(1))
class OperatorVijk(AbstractOperator):
def op(self, u, v=None):
return torch.bmm(u.unsqueeze(2), v.unsqueeze(1))
class OperatorVikj(AbstractOperator):
def op(self, u, v=None):
return torch.bmm(v.unsqueeze(2), u.unsqueeze(1))
class OperatorVijkSym(AbstractOperator):
def op(self, u, v=None):
return 0.5 * (
torch.bmm(u.unsqueeze(2), v.unsqueeze(1))
+ torch.bmm(v.unsqueeze(2), u.unsqueeze(1))
)
def __init__(self, operators_edges, operators_triplets, normalize: bool = True):
assert isinstance(normalize, bool)
assert isinstance(operators_edges, set)
assert isinstance(operators_triplets, set)
for op in operators_edges:
assert isinstance(op, (OpKetBra.OperatorVij,))
for op in operators_triplets:
assert isinstance(op, OpKetBra.AbstractOperator)
super().__init__(
operators_edges=operators_edges,
operators_triplets=operators_triplets,
normalize=normalize,
)
def forward(self, geometry):
edges_ops = []
triplets_ops = []
for op in self.operators_edges:
if self.normalize:
edges_ops.append(op.op(geometry.edges_u_ij))
else:
edges_ops.append(op.op(geometry.edges_v_ij))
for op in self.operators_triplets:
if self.normalize:
triplets_ops.append(
op.op(geometry.triplets_u_ij, geometry.triplets_u_ik)
)
else:
triplets_ops.append(
op.op(geometry.triplets_v_ij, geometry.triplets_v_ik)
)
if len(edges_ops) > 0:
edges_ops = torch.stack(edges_ops, dim=1)
else:
edges_ops = None
if len(triplets_ops) > 0:
triplets_ops = torch.stack(triplets_ops, dim=1)
else:
triplets_ops = None
return edges_ops, triplets_ops
class OpSymSkew(Operator):
class AbstractOperator(metaclass=abc.ABCMeta):
        @abc.abstractmethod
def op(
self, u: torch.FloatTensor, v: torch.FloatTensor = None
) -> torch.FloatTensor:
pass
class OperatorVij(AbstractOperator):
def op(self, u, v=None):
return u[:, :, None]+u[:, None, :]
class OperatorVik(AbstractOperator):
def op(self, u, v=None):
return v[:, :, None]+v[:, None, :]
class OperatorVijk(AbstractOperator):
def op(self, u, v=None):
return u[:, :, None]+v[:, None, :]
class OperatorVikj(AbstractOperator):
def op(self, u, v=None):
return v[:, :, None]+u[:, None, :]
class OperatorVijkSym(AbstractOperator):
def op(self, u, v=None):
return 0.5 * (
u[:, :, None]+v[:, None, :] + v[:, :, None]+u[:, None, :]
)
def __init__(self, operators_edges, operators_triplets, normalize: bool = True):
assert isinstance(normalize, bool)
assert isinstance(operators_edges, set)
assert isinstance(operators_triplets, set)
for op in operators_edges:
assert isinstance(op, (OpSymSkew.OperatorVij,))
for op in operators_triplets:
assert isinstance(op, OpSymSkew.AbstractOperator)
super().__init__(
operators_edges=operators_edges,
operators_triplets=operators_triplets,
normalize=normalize,
)
def forward(self, geometry):
edges_ops = []
triplets_ops = []
for op in self.operators_edges:
if self.normalize:
edges_ops.append(op.op(geometry.edges_u_ij))
else:
edges_ops.append(op.op(geometry.edges_v_ij))
for op in self.operators_triplets:
if self.normalize:
triplets_ops.append(
op.op(geometry.triplets_u_ij, geometry.triplets_u_ik)
)
else:
triplets_ops.append(
op.op(geometry.triplets_v_ij, geometry.triplets_v_ik)
)
if len(edges_ops) > 0:
edges_ops = torch.stack(edges_ops, dim=1)
else:
edges_ops = None
if len(triplets_ops) > 0:
triplets_ops = torch.stack(triplets_ops, dim=1)
else:
triplets_ops = None
return edges_ops, triplets_ops
class OpGrad(Operator):
class AbstractOperator(metaclass=abc.ABCMeta):
def __init__(self):
self.grad = None
def set_grad(self, grad):
self.grad = grad
        @abc.abstractmethod
def op(
self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None
) -> torch.FloatTensor:
pass
class OperatorNormij(AbstractOperator):
def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
return self.grad.grad_distance(cell, x_ij)[0]
class OperatorNormijSym(AbstractOperator):
def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
return self.grad.grad_distance_sym(cell, x_ij)[0]
class OperatorNormik(AbstractOperator):
def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
return self.grad.grad_distance(cell, x_ik)[0]
class OperatorNormikSym(AbstractOperator):
def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
return self.grad.grad_distance_sym(cell, x_ik)[0]
class OperatorAngle(AbstractOperator):
def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
return self.grad.grad_angle(cell, x_ij, x_ik)[0]
class OperatorAngleSym(AbstractOperator):
def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
return self.grad.grad_angle_sym(cell, x_ij, x_ik)[0]
class OperatorArea(AbstractOperator):
def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
return self.grad.grad_area(cell, x_ij, x_ik)[0]
class OperatorAreaSym(AbstractOperator):
def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
return self.grad.grad_area_sym(cell, x_ij, x_ik)[0]
def __init__(self, operators_edges, operators_triplets, normalize: bool = True):
assert isinstance(normalize, bool)
assert isinstance(operators_edges, set)
assert isinstance(operators_triplets, set)
for op in operators_edges:
assert isinstance(
op, (OpGrad.OperatorNormij, OpGrad.OperatorNormijSym))
for op in operators_triplets:
assert isinstance(op, OpGrad.AbstractOperator)
super().__init__(
operators_edges=operators_edges,
operators_triplets=operators_triplets,
normalize=normalize,
)
self.grad = Grad()
for op in self.operators_edges:
op.set_grad(self.grad)
for op in self.operators_triplets:
op.set_grad(self.grad)
def forward(self, geometry):
edges_ops = []
triplets_ops = []
for op in self.operators_edges:
edges_ops.append(
op.op(geometry.cell[geometry.batch_edges], geometry.edges_e_ij))
for op in self.operators_triplets:
triplets_ops.append(
op.op(geometry.cell[geometry.batch_triplets],
geometry.triplets_e_ij, geometry.triplets_e_ik)
)
if len(edges_ops) > 0:
edges_ops = torch.stack(edges_ops, dim=1)
else:
edges_ops = None
if len(triplets_ops) > 0:
triplets_ops = torch.stack(triplets_ops, dim=1)
else:
triplets_ops = None
return edges_ops, triplets_ops
def make_operator(config):
assert "type" in config
assert "normalize" in config
assert "edges" in config
assert "triplets" in config
assert config["type"] in ["ket-bra", "sym-skew", "grad"]
if config["type"] == "ket-bra":
operators_edges = set()
operators_triplets = set()
ops_dict = {
"v_ij": OpKetBra.OperatorVij,
"v_ik": OpKetBra.OperatorVik,
"v_ijk": OpKetBra.OperatorVijk,
"v_ikj": OpKetBra.OperatorVikj,
"v_ijk_sym": OpKetBra.OperatorVijkSym,
}
for op in config["edges"]:
assert op in ["v_ij"]
operators_edges.add(ops_dict[op]())
for op in config["triplets"]:
assert op in ops_dict
operators_triplets.add(ops_dict[op]())
ops = OpKetBra(
operators_edges, operators_triplets, normalize=config["normalize"]
)
elif config["type"] == "sym-skew":
operators_edges = set()
operators_triplets = set()
ops_dict = {
"v_ij": OpSymSkew.OperatorVij,
"v_ik": OpSymSkew.OperatorVik,
"v_ijk": OpSymSkew.OperatorVijk,
"v_ikj": OpSymSkew.OperatorVikj,
"v_ijk_sym": OpSymSkew.OperatorVijkSym,
}
for op in config["edges"]:
assert op in ["v_ij"]
operators_edges.add(ops_dict[op]())
for op in config["triplets"]:
assert op in ops_dict
operators_triplets.add(ops_dict[op]())
ops = OpSymSkew(
operators_edges, operators_triplets, normalize=config["normalize"]
)
elif config["type"] == "grad":
operators_edges = set()
operators_triplets = set()
ops_dict = {
"n_ij": OpGrad.OperatorNormij,
"n_ij_sym": OpGrad.OperatorNormijSym,
"n_ik": OpGrad.OperatorNormik,
"n_ik_sym": OpGrad.OperatorNormikSym,
"angle": OpGrad.OperatorAngle,
"angle_sym": OpGrad.OperatorAngleSym,
"area": OpGrad.OperatorArea,
"area_sym": OpGrad.OperatorAreaSym,
}
for op in config["edges"]:
assert op in ["n_ij", "n_ij_sym"]
operators_edges.add(ops_dict[op]())
for op in config["triplets"]:
assert op in ops_dict
operators_triplets.add(ops_dict[op]())
ops = OpGrad(
operators_edges, operators_triplets, normalize=config["normalize"]
)
return ops
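# Illustrative: the default Hparams configuration ("grad" operators, "n_ij"
# edges, "n_ij|n_ik|angle" triplets) expands to an OpGrad with one edge
# channel and three triplet channels.
if __name__ == "__main__":
    ops = make_operator({
        "type": "grad",
        "normalize": True,
        "edges": ["n_ij"],
        "triplets": ["n_ij", "n_ik", "angle"],
    })
    print(ops.edges_dim, ops.triplets_dim)  # 1 3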
| 11,933 | 31.254054 | 103 | py |
pegnn | pegnn-master/src/models/layers/operator/grad_unittest.py | import torch
import torch.nn as nn
from torch.autograd.functional import jacobian
from .grad import Grad
import unittest
import time
class TestGrad(unittest.TestCase):
batch_size = 1024
verbose = True
def log(self, *args, **kwargs):
if TestGrad.verbose:
print(*args, **kwargs)
def assertAlmostEqualsTensors(self, x, y, places):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x)
if not isinstance(y, torch.Tensor):
y = torch.tensor(y)
max_error = (x-y).abs().max().item()
self.log(f"max error: {max_error:.5f}")
self.assertAlmostEqual(max_error, 0, places)
def batched_jacobian(self, fn, inputs):
lst_grad = [[] for _ in inputs]
for vars in zip(*inputs):
grad_vars = jacobian(fn, vars)
for idx, g in enumerate(grad_vars):
lst_grad[idx].append(g)
for idx, g in enumerate(lst_grad):
lst_grad[idx] = torch.stack(g, dim=0)
return tuple(lst_grad)
def test_norm(self):
torch.manual_seed(0)
x = torch.randn(TestGrad.batch_size, 3)
grad_fn = Grad()
(gt_grad_x,) = self.batched_jacobian(lambda u: u.norm(), (x,))
grad_x = grad_fn.jacobian_norm(x)
self.assertAlmostEqualsTensors(gt_grad_x, grad_x, places=6)
def test_dot(self):
torch.manual_seed(0)
x = torch.randn(TestGrad.batch_size, 3)
y = torch.randn(TestGrad.batch_size, 3)
grad_fn = Grad()
(gt_grad_x, gt_grad_y) = self.batched_jacobian(
lambda x, y: x.dot(y), (x, y))
grad_x, grad_y = grad_fn.jacobian_dot(x, y)
self.assertAlmostEqualsTensors(gt_grad_x, grad_x, places=6)
self.assertAlmostEqualsTensors(gt_grad_y, grad_y, places=6)
def test_cross_norm(self):
torch.manual_seed(0)
x = torch.randn(TestGrad.batch_size, 3)
y = torch.randn(TestGrad.batch_size, 3)
grad_fn = Grad()
(gt_grad_x, gt_grad_y) = self.batched_jacobian(
lambda x, y: torch.cross(x, y).norm(), (x, y))
grad_x, grad_y = grad_fn.jacobian_cross_norm(x, y)
self.assertAlmostEqualsTensors(gt_grad_x, grad_x, places=6)
self.assertAlmostEqualsTensors(gt_grad_y, grad_y, places=6)
def test_matrix_vector(self):
torch.manual_seed(0)
m = torch.randn(TestGrad.batch_size, 3, 3)
u = torch.randn(TestGrad.batch_size, 3)
grad_fn = Grad()
(gt_grad_m, gt_grad_u) = self.batched_jacobian(
lambda x, y: x @ y, (m, u))
grad_m, grad_u = grad_fn.jacobian_mu(m, u)
self.assertAlmostEqualsTensors(gt_grad_m, grad_m, places=6)
self.assertAlmostEqualsTensors(gt_grad_u, grad_u, places=6)
grad_m = grad_fn.jacobian_m(u)
self.assertAlmostEqualsTensors(gt_grad_m, grad_m, places=6)
def test_atan2(self):
torch.manual_seed(0)
x = torch.randn(TestGrad.batch_size)
y = torch.randn(TestGrad.batch_size)
grad_fn = Grad()
(gt_grad_y, gt_grad_x) = self.batched_jacobian(
lambda y, x: torch.atan2(y, x), (y, x))
grad_y, grad_x = grad_fn.jacobian_atan2(y, x)
self.assertAlmostEqualsTensors(gt_grad_x, grad_x, places=5)
self.assertAlmostEqualsTensors(gt_grad_y, grad_y, places=5)
def test_angle_vector(self):
torch.manual_seed(0)
u = torch.randn(TestGrad.batch_size, 3)
v = torch.randn(TestGrad.batch_size, 3)
grad_fn = Grad()
(gt_grad_u, gt_grad_v) = self.batched_jacobian(
lambda x, y: torch.atan2(torch.cross(x, y).norm(), x.dot(y)),
(u, v))
grad_u, grad_v = grad_fn.jacobian_angle_vector(u, v)
self.assertAlmostEqualsTensors(gt_grad_u, grad_u, places=5)
self.assertAlmostEqualsTensors(gt_grad_v, grad_v, places=5)
def test_distance(self):
torch.manual_seed(0)
grad_fn = Grad()
g = torch.randn(TestGrad.batch_size, 3, 3)
rho = torch.matrix_exp(torch.randn(TestGrad.batch_size, 3, 3))
x_ij = torch.randn(TestGrad.batch_size, 3)
def get_distance(g, rho, xij):
u = g @ rho @ xij
return u.norm()
t0 = time.time()
grad_g, grad_x_i, grad_x_j = grad_fn.grad_distance(
rho, x_ij, g=g)
t1 = time.time()
self.log(f"grad distance {t1-t0:.6f}sec")
(gt_grad_g, _, gt_grad_x_ij) = self.batched_jacobian(
lambda g, rho, xij: get_distance(
g, rho, xij), (g, rho, x_ij)
)
gt_grad_x_i = -gt_grad_x_ij
gt_grad_x_j = gt_grad_x_ij
self.assertAlmostEqualsTensors(gt_grad_g, grad_g, places=3)
self.assertAlmostEqualsTensors(gt_grad_x_i, grad_x_i, places=3)
self.assertAlmostEqualsTensors(gt_grad_x_j, grad_x_j, places=3)
def test_distance_sym(self):
torch.manual_seed(0)
grad_fn = Grad()
g = torch.randn(TestGrad.batch_size, 3, 3)
rho = torch.matrix_exp(torch.randn(TestGrad.batch_size, 3, 3))
x_ij = torch.randn(TestGrad.batch_size, 3)
def get_distance(g, rho, xij):
u = (g + g.t()) @ rho @ xij
return u.norm()
t0 = time.time()
grad_g, grad_x_i, grad_x_j = grad_fn.grad_distance_sym(
rho, x_ij, g=g)
t1 = time.time()
self.log(f"grad distance sym {t1-t0:.6f}sec")
(gt_grad_g, _, gt_grad_x_ij) = self.batched_jacobian(
lambda g, rho, xij: get_distance(
g, rho, xij), (g, rho, x_ij)
)
gt_grad_x_i = -gt_grad_x_ij
gt_grad_x_j = gt_grad_x_ij
self.assertAlmostEqualsTensors(gt_grad_g, grad_g, places=3)
self.assertAlmostEqualsTensors(gt_grad_x_i, grad_x_i, places=3)
self.assertAlmostEqualsTensors(gt_grad_x_j, grad_x_j, places=3)
def test_area(self):
torch.manual_seed(0)
grad_fn = Grad()
g = torch.randn(TestGrad.batch_size, 3, 3)
rho = torch.matrix_exp(torch.randn(TestGrad.batch_size, 3, 3))
x_ij = torch.randn(TestGrad.batch_size, 3)
x_ik = torch.randn(TestGrad.batch_size, 3)
def get_area(g, rho, xij, xik):
u = g @ rho @ xij
v = g @ rho @ xik
return 0.5 * torch.cross(u, v).norm()
t0 = time.time()
grad_g, grad_x_i, grad_x_j, grad_x_k = grad_fn.grad_area(
rho, x_ij, x_ik, g=g)
t1 = time.time()
self.log(f"grad area {t1-t0:.6f}sec")
(gt_grad_g, _, gt_grad_x_ij, gt_grad_x_ik) = self.batched_jacobian(
lambda g, rho, xij, xik: get_area(
g, rho, xij, xik), (g, rho, x_ij, x_ik)
)
gt_grad_x_i = -(gt_grad_x_ij+gt_grad_x_ik)
gt_grad_x_j = gt_grad_x_ij
gt_grad_x_k = gt_grad_x_ik
self.assertAlmostEqualsTensors(gt_grad_g, grad_g, places=2)
self.assertAlmostEqualsTensors(gt_grad_x_i, grad_x_i, places=2)
self.assertAlmostEqualsTensors(gt_grad_x_j, grad_x_j, places=2)
self.assertAlmostEqualsTensors(gt_grad_x_k, grad_x_k, places=2)
def test_area_sym(self):
torch.manual_seed(0)
grad_fn = Grad()
g = torch.randn(TestGrad.batch_size, 3, 3)
rho = torch.matrix_exp(torch.randn(TestGrad.batch_size, 3, 3))
x_ij = torch.randn(TestGrad.batch_size, 3)
x_ik = torch.randn(TestGrad.batch_size, 3)
def get_area(g, rho, xij, xik):
u = (g + g.t()) @ rho @ xij
v = (g + g.t()) @ rho @ xik
return 0.5 * torch.cross(u, v).norm()
t0 = time.time()
grad_g, grad_x_i, grad_x_j, grad_x_k = grad_fn.grad_area_sym(
rho, x_ij, x_ik, g=g)
t1 = time.time()
self.log(f"grad area sym {t1-t0:.6f}sec")
(gt_grad_g, _, gt_grad_x_ij, gt_grad_x_ik) = self.batched_jacobian(
lambda g, rho, xij, xik: get_area(
g, rho, xij, xik), (g, rho, x_ij, x_ik)
)
gt_grad_x_i = -(gt_grad_x_ij+gt_grad_x_ik)
gt_grad_x_j = gt_grad_x_ij
gt_grad_x_k = gt_grad_x_ik
self.assertAlmostEqualsTensors(gt_grad_g, grad_g, places=2)
self.assertAlmostEqualsTensors(gt_grad_x_i, grad_x_i, places=2)
self.assertAlmostEqualsTensors(gt_grad_x_j, grad_x_j, places=2)
self.assertAlmostEqualsTensors(gt_grad_x_k, grad_x_k, places=2)
def test_angle(self):
torch.manual_seed(0)
grad_fn = Grad()
g = torch.randn(TestGrad.batch_size, 3, 3)
rho = torch.matrix_exp(torch.randn(TestGrad.batch_size, 3, 3))
x_ij = torch.randn(TestGrad.batch_size, 3)
x_ik = torch.randn(TestGrad.batch_size, 3)
def get_angle(g, rho, xij, xik):
u = g @ rho @ xij
v = g @ rho @ xik
return torch.atan2(torch.cross(u, v).norm(), u.dot(v))
t0 = time.time()
grad_g, grad_x_i, grad_x_j, grad_x_k = grad_fn.grad_angle(
rho, x_ij, x_ik, g=g)
t1 = time.time()
self.log(f"grad angle {t1-t0:.6f}sec")
(gt_grad_g, _, gt_grad_x_ij, gt_grad_x_ik) = self.batched_jacobian(
lambda g, rho, xij, xik: get_angle(
g, rho, xij, xik), (g, rho, x_ij, x_ik)
)
gt_grad_x_i = -(gt_grad_x_ij+gt_grad_x_ik)
gt_grad_x_j = gt_grad_x_ij
gt_grad_x_k = gt_grad_x_ik
self.assertAlmostEqualsTensors(gt_grad_g, grad_g, places=4)
self.assertAlmostEqualsTensors(gt_grad_x_i, grad_x_i, places=4)
self.assertAlmostEqualsTensors(gt_grad_x_j, grad_x_j, places=4)
self.assertAlmostEqualsTensors(gt_grad_x_k, grad_x_k, places=4)
def test_angle_sym(self):
torch.manual_seed(0)
grad_fn = Grad()
g = torch.randn(TestGrad.batch_size, 3, 3)
rho = torch.matrix_exp(torch.randn(TestGrad.batch_size, 3, 3))
x_ij = torch.randn(TestGrad.batch_size, 3)
x_ik = torch.randn(TestGrad.batch_size, 3)
def get_angle(g, rho, xij, xik):
u = (g + g.t()) @ rho @ xij
v = (g + g.t()) @ rho @ xik
return torch.atan2(torch.cross(u, v).norm(), u.dot(v))
t0 = time.time()
grad_g, grad_x_i, grad_x_j, grad_x_k = grad_fn.grad_angle_sym(
rho, x_ij, x_ik, g=g)
t1 = time.time()
self.log(f"grad angle sym {t1-t0:.6f}sec")
(gt_grad_g, _, gt_grad_x_ij, gt_grad_x_ik) = self.batched_jacobian(
lambda g, rho, xij, xik: get_angle(
g, rho, xij, xik), (g, rho, x_ij, x_ik)
)
gt_grad_x_i = -(gt_grad_x_ij+gt_grad_x_ik)
gt_grad_x_j = gt_grad_x_ij
gt_grad_x_k = gt_grad_x_ik
self.assertAlmostEqualsTensors(gt_grad_g, grad_g, places=3)
self.assertAlmostEqualsTensors(gt_grad_x_i, grad_x_i, places=3)
self.assertAlmostEqualsTensors(gt_grad_x_j, grad_x_j, places=3)
self.assertAlmostEqualsTensors(gt_grad_x_k, grad_x_k, places=3)
if __name__ == "__main__":
unittest.main()
| 11,560 | 31.566197 | 75 | py |
pegnn | pegnn-master/src/models/layers/operator/grad.py | import torch
import torch.nn as nn
class Grad(nn.Module):
def __init__(self):
super().__init__()
self.I = nn.Parameter(torch.eye(3), requires_grad=False)
        # K stacks the Levi-Civita tensor along its first index, so that
        # (K[i] @ y)_j = d cross(x, y)_i / d x_j in jacobian_cross_norm.
        self.K = nn.Parameter(torch.tensor([
            [[0, 0, 0], [0, 0, 1], [0, -1, 0]],
            [[0, 0, -1], [0, 0, 0], [1, 0, 0]],
            [[0, 1, 0], [-1, 0, 0], [0, 0, 0]],
        ], dtype=torch.float32), requires_grad=False)
def jacobian_atan2(self, y, x):
diff_x = -y / (x ** 2 + y ** 2)
diff_y = x / (x ** 2 + y ** 2)
return diff_y, diff_x
def jacobian_dot(self, x, y):
return y.clone(), x.clone()
def jacobian_norm(self, x):
return x / x.norm(dim=1)[:, None]
def jacobian_cross_norm(self, x, y):
diff_cross_x = (self.K[None]*y[:, None, None, :]).sum(dim=3)
diff_cross_y = -(self.K[None]*x[:, None, None, :]).sum(dim=3)
diff_norm = self.jacobian_norm(torch.cross(x, y))
diff_x = torch.bmm(diff_norm.unsqueeze(1), diff_cross_x).squeeze(1)
diff_y = torch.bmm(diff_norm.unsqueeze(1), diff_cross_y).squeeze(1)
return diff_x, diff_y
def jacobian_m(self, u):
diff_m = self.I[None, :, :, None]*u[:, None, None, :]
return diff_m
def jacobian_mu(self, m, u):
diff_m = self.I[None, :, :, None]*u[:, None, None, :]
diff_u = m.clone()
return diff_m, diff_u
def jacobian_angle_vector(self, u, v):
diff_atan2_y, diff_atan2_x = self.jacobian_atan2(
torch.cross(u, v).norm(dim=1), (u*v).sum(dim=1))
diff_cross_norm_u, diff_cross_norm_v = self.jacobian_cross_norm(u, v)
diff_dot_u, diff_dot_v = self.jacobian_dot(u, v)
diff_u = diff_atan2_y[:, None] * diff_cross_norm_u + \
diff_atan2_x[:, None] * diff_dot_u
diff_v = diff_atan2_y[:, None] * diff_cross_norm_v + \
diff_atan2_x[:, None] * diff_dot_v
return diff_u, diff_v
def grad_distance(self, rho, x_ij, g=None):
if g is None:
rho_prime = rho
else:
rho_prime = torch.bmm(g, rho)
u = torch.bmm(rho_prime, x_ij.unsqueeze(2)).squeeze(2)
diff_u = self.jacobian_norm(u)
diff_g_u = self.jacobian_m(
torch.bmm(rho, x_ij.unsqueeze(2)).squeeze(2))
diff_g = torch.einsum("bi,bijk->bjk", diff_u, diff_g_u)
diff_x = torch.bmm(diff_u.unsqueeze(1), rho_prime).squeeze(1)
diff_x_i = -diff_x
diff_x_j = diff_x
return diff_g, diff_x_i, diff_x_j
def grad_distance_sym(self, rho, x_ij, g=None):
if g is None:
rho_prime = rho
else:
rho_prime = torch.bmm((g + torch.transpose(g, 1, 2)), rho)
u = torch.bmm(rho_prime, x_ij.unsqueeze(2)).squeeze(2)
diff_u = self.jacobian_norm(u)
diff_g_u = self.jacobian_m(
torch.bmm(rho, x_ij.unsqueeze(2)).squeeze(2))
diff_g_demi = torch.einsum("bi,bijk->bjk", diff_u, diff_g_u)
diff_g = diff_g_demi + torch.transpose(diff_g_demi, 1, 2)
diff_x = torch.bmm(diff_u.unsqueeze(1), rho_prime).squeeze(1)
diff_x_i = -diff_x
diff_x_j = diff_x
return diff_g, diff_x_i, diff_x_j
def grad_area(self, rho, x_ij, x_ik, g=None):
if g is None:
rho_prime = rho
else:
rho_prime = torch.bmm(g, rho)
u = torch.bmm(rho_prime, x_ij.unsqueeze(2)).squeeze(2)
v = torch.bmm(rho_prime, x_ik.unsqueeze(2)).squeeze(2)
diff_u, diff_v = self.jacobian_cross_norm(u, v)
diff_g_u = self.jacobian_m(
torch.bmm(rho, x_ij.unsqueeze(2)).squeeze(2))
diff_g_v = self.jacobian_m(
torch.bmm(rho, x_ik.unsqueeze(2)).squeeze(2))
diff_g = 0.5 * (torch.einsum("bi,bijk->bjk", diff_u, diff_g_u) +
torch.einsum("bi,bijk->bjk", diff_v, diff_g_v))
diff_vect = rho_prime
diff_x_i = -0.5 * (torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1) +
torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1))
diff_x_j = 0.5 * torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1)
diff_x_k = 0.5 * torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1)
return diff_g, diff_x_i, diff_x_j, diff_x_k
def grad_area_sym(self, rho, x_ij, x_ik, g=None):
if g is None:
rho_prime = rho
else:
rho_prime = torch.bmm((g + torch.transpose(g, 1, 2)), rho)
u = torch.bmm(rho_prime, x_ij.unsqueeze(2)).squeeze(2)
v = torch.bmm(rho_prime, x_ik.unsqueeze(2)).squeeze(2)
diff_u, diff_v = self.jacobian_cross_norm(u, v)
diff_g_u = self.jacobian_m(
torch.bmm(rho, x_ij.unsqueeze(2)).squeeze(2))
diff_g_v = self.jacobian_m(
torch.bmm(rho, x_ik.unsqueeze(2)).squeeze(2))
diff_g_demi = torch.einsum(
"bi,bijk->bjk", diff_u, diff_g_u)+torch.einsum("bi,bijk->bjk", diff_v, diff_g_v)
diff_g = 0.5 * (diff_g_demi + torch.transpose(diff_g_demi, 1, 2))
diff_vect = rho_prime
diff_x_i = -0.5 * (torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1) +
torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1))
diff_x_j = 0.5 * torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1)
diff_x_k = 0.5 * torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1)
return diff_g, diff_x_i, diff_x_j, diff_x_k
def grad_angle(self, rho, x_ij, x_ik, g=None):
if g is None:
rho_prime = rho
else:
rho_prime = torch.bmm(g, rho)
u = torch.bmm(rho_prime, x_ij.unsqueeze(2)).squeeze(2)
v = torch.bmm(rho_prime, x_ik.unsqueeze(2)).squeeze(2)
diff_u, diff_v = self.jacobian_angle_vector(u, v)
diff_g_u = self.jacobian_m(
torch.bmm(rho, x_ij.unsqueeze(2)).squeeze(2))
diff_g_v = self.jacobian_m(
torch.bmm(rho, x_ik.unsqueeze(2)).squeeze(2))
diff_g = (torch.einsum("bi,bijk->bjk", diff_u, diff_g_u) +
torch.einsum("bi,bijk->bjk", diff_v, diff_g_v))
diff_vect = rho_prime
diff_x_i = -(torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1) +
torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1))
diff_x_j = torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1)
diff_x_k = torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1)
return diff_g, diff_x_i, diff_x_j, diff_x_k
def grad_angle_sym(self, rho, x_ij, x_ik, g=None):
if g is None:
rho_prime = rho
else:
rho_prime = torch.bmm((g + torch.transpose(g, 1, 2)), rho)
u = torch.bmm(rho_prime, x_ij.unsqueeze(2)).squeeze(2)
v = torch.bmm(rho_prime, x_ik.unsqueeze(2)).squeeze(2)
diff_u, diff_v = self.jacobian_angle_vector(u, v)
diff_g_u = self.jacobian_m(
torch.bmm(rho, x_ij.unsqueeze(2)).squeeze(2))
diff_g_v = self.jacobian_m(
torch.bmm(rho, x_ik.unsqueeze(2)).squeeze(2))
diff_g_demi = (torch.einsum("bi,bijk->bjk", diff_u, diff_g_u) +
torch.einsum("bi,bijk->bjk", diff_v, diff_g_v))
diff_g = diff_g_demi + torch.transpose(diff_g_demi, 1, 2)
diff_vect = rho_prime
diff_x_i = -(torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1) +
torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1))
diff_x_j = torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1)
diff_x_k = torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1)
return diff_g, diff_x_i, diff_x_j, diff_x_k
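# Usage sketch (illustrative, not executed here): the per-edge gradient of
# |g @ rho @ x_ij| with respect to the 3x3 action g, evaluated at the
# default g = I.
#     grad = Grad()
#     rho = torch.eye(3).repeat(4, 1, 1)   # 4 unit cells
#     x_ij = torch.randn(4, 3)             # fractional edge vectors
#     diff_g, diff_x_i, diff_x_j = grad.grad_distance(rho, x_ij)
#     # diff_g: (4, 3, 3), diff_x_i / diff_x_j: (4, 3)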
| 7,661 | 36.014493 | 120 | py |
pegnn | pegnn-master/src/datasets/data.py | from __future__ import annotations
import torch
import torch.nn.functional as F
from torch_geometric.data import Data
class CrystalData(Data):
def __init__(self, *args, **kwargs):
if "pos_cart" in kwargs:
assert isinstance(kwargs["cell"], torch.FloatTensor)
assert isinstance(kwargs["pos_cart"], torch.FloatTensor)
assert isinstance(kwargs["num_atoms"], torch.LongTensor)
cell = kwargs["cell"]
pos_cart = kwargs["pos_cart"]
num_atoms = kwargs["num_atoms"]
batch = torch.arange(
num_atoms.shape[0], dtype=torch.long, device=num_atoms.device
).repeat_interleave(num_atoms)
pos = (
torch.matmul(pos_cart.unsqueeze(1), torch.inverse(cell)[batch]).squeeze(
1
)
% 1
)
kwargs["pos"] = pos
del kwargs["pos_cart"]
super(CrystalData, self).__init__(*args, **kwargs)
self._pos_cart = None
@property
def cell(self) -> torch.FloatTensor:
return super(CrystalData, self).cell
def set_cell(self, cell):
self.cell = cell
self._pos_cart = None
@property
def pos(self) -> torch.FloatTensor:
return super(CrystalData, self).pos
def set_pos(self, pos: torch.FloatTensor):
self.pos = pos % 1
self._pos_cart = None
@property
def device(self) -> torch.device:
return self.cell.device
@property
def cell_lengths(self) -> torch.FloatTensor:
return self.cell.norm(dim=2).t()
@property
def cell_angles(self) -> torch.FloatTensor:
angles = torch.zeros_like(self.cell_lengths)
i = torch.tensor([0, 1, 2], dtype=torch.long, device=self.device)
j = torch.tensor([1, 2, 0], dtype=torch.long, device=self.device)
k = torch.tensor([2, 0, 1], dtype=torch.long, device=self.device)
cross = torch.cross(self.cell[:, j], self.cell[:, k], dim=2)
dot = (self.cell[:, j] * self.cell[:, k]).sum(dim=2)
angles[i, :] = torch.rad2deg(torch.atan2(cross.norm(dim=2), dot).t())
inv_mask = (cross * self.cell[:, i]).sum(dim=2) < 0
angles[inv_mask.t()] *= -1
return angles
@property
def pos_cart(self) -> torch.FloatTensor:
if self._pos_cart is None:
self._pos_cart = torch.matmul(
self.pos.unsqueeze(1), self.cell[self.batch]
).squeeze(1)
return self._pos_cart
def set_pos_cart(self, pos_cart: torch.FloatTensor, keep_inside: bool = True):
pos = torch.matmul(
pos_cart.unsqueeze(1), torch.inverse(self.cell)[self.batch]
).squeeze(1)
if keep_inside:
pos %= 1.0
self.pos = pos
self._pos_cart = None
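# Illustrative round trip (assumes a torch_geometric version that stores and
# dispatches attributes the way the properties above rely on): one cubic cell
# built from hypothetical cartesian positions.
if __name__ == "__main__":
    data = CrystalData(cell=4.0 * torch.eye(3).unsqueeze(0),
                       pos_cart=torch.tensor([[0.0, 0.0, 0.0],
                                              [2.0, 2.0, 2.0]]),
                       z=torch.tensor([11, 17]),
                       num_atoms=torch.tensor([2]))
    print(data.pos)                     # fractional: [[0, 0, 0], [0.5, 0.5, 0.5]]
    print(data.cell_lengths.flatten())  # tensor([4., 4., 4.])
    print(data.cell_angles.flatten())   # ~tensor([90., 90., 90.])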
| 2,855 | 27.848485 | 88 | py |
pegnn | pegnn-master/src/datasets/csv_dataset.py | from typing import Iterator
from torch_geometric.data import InMemoryDataset, Data
from torch_geometric.loader import DataLoader
import torch
import pandas as pd
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.io.ase import AseAtomsAdaptor
from ase.neighborlist import neighbor_list
from tqdm import tqdm
from tqdm.contrib.concurrent import process_map
from .data import CrystalData
from src.models.layers.random import RandomMatrixSL3Z, apply_sl3z
import multiprocessing as mp
import warnings
import os
import json
def process_cif(args):
(cif, warning_queue) = args
with warnings.catch_warnings(record=True) as ws:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
struct = Structure.from_str(cif, fmt="cif")
if warning_queue is not None:
for w in ws:
warning_queue.put((hash(str(w.message)), w))
lengths = np.array(struct.lattice.abc, dtype=np.float32)
angles = np.array(struct.lattice.angles, dtype=np.float32)
atoms = AseAtomsAdaptor.get_atoms(struct)
atoms.set_scaled_positions(atoms.get_scaled_positions(wrap=True))
assert (0 <= atoms.get_scaled_positions()).all() and (
atoms.get_scaled_positions() < 1).all()
cell = atoms.cell.array.astype(np.float32)
    z = np.array(struct.atomic_numbers, dtype=np.int64)
pos = struct.frac_coords.astype(np.float32)
data = {
"lattice": cell,
"lengths": lengths,
"angles": angles,
"z": z,
"pos": pos
}
return data, np.unique(z), pos.shape[0]
class CSVDataset(InMemoryDataset):
def __init__(self, csv_file: str, warn: bool = False, multithread: bool = True, verbose: bool = True, noise_scale: float = 0.1, knn: float = 8, sl3z_aug: bool = False):
super().__init__()
self._raw_file_names = [csv_file]
df = pd.read_csv(csv_file)
if warn:
m = mp.Manager()
warning_queue = m.Queue()
else:
warning_queue = None
iterator = [(row["cif"], warning_queue)
for _, row in df.iterrows()]
if multithread:
if verbose:
result = process_map(
process_cif, iterator, desc=f"loading dataset {csv_file}", chunksize=8)
else:
with mp.Pool(mp.cpu_count()) as p:
result = p.map(process_cif, iterator)
else:
result = []
if verbose:
iterator = tqdm(
iterator, desc=f"loading dataset {csv_file}", total=len(df))
for args in iterator:
result.append(process_cif(args))
if warn:
warnings_type = {}
while not warning_queue.empty():
key, warning = warning_queue.get()
if key not in warnings_type:
warnings_type[key] = warning
for w in warnings_type.values():
warnings.warn_explicit(
w.message, category=w.category, filename=w.filename, lineno=w.lineno
)
self._elements = set(
np.unique(np.concatenate([z for _, z, _ in result])))
size = np.array([s for _, _, s in result])
max_size = np.max(size)
min_size = np.min(size)
self.data = [c for c, _, _ in result]
if verbose:
print(
f"dataset statistics: count={len(self.data)}, min={min_size}, max={max_size}")
    @property
def raw_file_names(self):
return self._raw_file_names
    @property
def processed_file_names(self):
return []
def len(self) -> int:
return len(self.data)
def get_sample_size(self, idx: int) -> int:
return len(self.data[idx]["z"])
def get(self, idx: int) -> Data:
lattice = torch.from_numpy(self.data[idx]["lattice"]).unsqueeze(0)
z = torch.from_numpy(self.data[idx]["z"])
pos = torch.from_numpy(self.data[idx]["pos"])
return CrystalData(
z=z,
pos=pos,
cell=lattice,
num_atoms=z.shape[0]
)
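# Expected input (illustrative): a CSV with one structure per row and a "cif"
# column holding raw CIF text; any extra columns (e.g. a material id) are
# ignored. A typical loading sketch:
#     dataset = CSVDataset("train.csv", multithread=True, verbose=True)
#     loader = DataLoader(dataset, batch_size=64, shuffle=True)
#     batch = next(iter(loader))  # a batched CrystalData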
| 4,203 | 28.194444 | 172 | py |
pegnn | pegnn-master/src/utils/scaler.py | import torch
import torch.nn as nn
import numpy as np
from torch_geometric.loader import DataLoader
import tqdm
from src.utils.geometry import Geometry
from typing import Tuple
class LatticeScaler(nn.Module):
def __init__(self):
super(LatticeScaler, self).__init__()
self.mean = nn.Parameter(
torch.zeros(6, dtype=torch.float32), requires_grad=False
)
self.std = nn.Parameter(torch.ones(
6, dtype=torch.float32), requires_grad=False)
def get_lattices_parameters(
self, lattices: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
lengths = lattices.norm(dim=2)
i = torch.tensor([0, 1, 2], dtype=torch.long, device=lattices.device)
j = torch.tensor([1, 2, 0], dtype=torch.long, device=lattices.device)
k = torch.tensor([2, 0, 1], dtype=torch.long, device=lattices.device)
cross = torch.cross(lattices[:, j], lattices[:, k], dim=2)
dot = (lattices[:, j] * lattices[:, k]).sum(dim=2)
angles = torch.atan2(cross.norm(dim=2), dot) * 180 / torch.pi
inv_mask = (cross * lattices[:, i]).sum(dim=2) < 0
angles[inv_mask] *= -1
return lengths, angles
def get_lattices(
self, lengths: torch.FloatTensor, angles: torch.FloatTensor
) -> torch.FloatTensor:
"""Converts lattice from abc, angles to matrix.
https://github.com/materialsproject/pymatgen/blob/b789d74639aa851d7e5ee427a765d9fd5a8d1079/pymatgen/core/lattice.py#L311
"""
        a, b, c = lengths
        angles_r = torch.deg2rad(torch.as_tensor(angles, dtype=torch.float32))
        cos_alpha, cos_beta, cos_gamma = torch.cos(angles_r)
        sin_alpha, sin_beta, _ = torch.sin(angles_r)
        val = (cos_alpha * cos_beta - cos_gamma) / (sin_alpha * sin_beta)
        # Sometimes rounding errors push the value slightly outside [-1, 1].
        val = torch.clamp(val, -1.0, 1.0)
        gamma_star = torch.arccos(val)
        vector_a = [a * sin_beta, 0.0, a * cos_beta]
        vector_b = [
            -b * sin_alpha * torch.cos(gamma_star),
            b * sin_alpha * torch.sin(gamma_star),
            b * cos_alpha,
        ]
        vector_c = [0.0, 0.0, float(c)]
        return torch.tensor([vector_a, vector_b, vector_c])
@torch.no_grad()
def fit(self, dataloader: DataLoader, verbose: bool = True):
lengths, angles = [], []
if verbose:
iterator = tqdm.tqdm(
                dataloader, desc="calculating normalization parameters")
else:
iterator = dataloader
for batch in iterator:
current_lengths, current_angles = self.get_lattices_parameters(
batch.cell)
lengths.append(current_lengths)
angles.append(current_angles)
lengths = torch.cat(lengths, dim=0)
angles = torch.cat(angles, dim=0)
params = torch.cat((lengths, angles), dim=1)
self.mean.data = params.mean(dim=0)
self.std.data = params.std(dim=0)
def normalise_lattice(
self, lattices: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
lengths, angles = self.get_lattices_parameters(lattices)
lengths_scaled = (lengths - self.mean[:3]) / (self.std[:3]+1e-6)
angles_scaled = (angles - self.mean[3:]) / (self.std[3:]+1e-6)
return lengths_scaled, angles_scaled
def normalise(
self, lengths: torch.FloatTensor, angles: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
lengths_scaled = (lengths - self.mean[:3]) / (self.std[:3]+1e-6)
angles_scaled = (angles - self.mean[3:]) / (self.std[3:]+1e-6)
return lengths_scaled, angles_scaled
def denormalise(
self, lengths: torch.FloatTensor, angles: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
lengths_scaled = lengths * self.std[:3] + self.mean[:3]
angles_scaled = angles * self.std[3:] + self.mean[3:]
return lengths_scaled, angles_scaled
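# Usage sketch (editor's addition; `train_loader` is a hypothetical DataLoader
# whose batches expose a (B, 3, 3) `.cell` attribute):
#     lattice_scaler = LatticeScaler()
#     lattice_scaler.fit(train_loader)
#     lengths_n, angles_n = lattice_scaler.normalise_lattice(batch.cell)
#     lengths, angles = lattice_scaler.denormalise(lengths_n, angles_n)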
class BondsScaler(nn.Module):
def __init__(self, knn: int = 8):
super().__init__()
self.knn = knn
self.edges_mean = nn.Parameter(
torch.zeros(1, dtype=torch.float32), requires_grad=False
)
self.edges_std = nn.Parameter(
torch.ones(1, dtype=torch.float32), requires_grad=False
)
self.triplets_mean = nn.Parameter(
torch.zeros(3, dtype=torch.float32), requires_grad=False
)
self.triplets_std = nn.Parameter(
torch.ones(3, dtype=torch.float32), requires_grad=False
)
@property
def device(self):
return self.edges_mean.data.device
@torch.no_grad()
def fit(self, dataloader: DataLoader):
edges, triplets = [], []
for batch in tqdm.tqdm(dataloader):
batch = batch.to(self.device)
geometry = Geometry(batch.cell, batch.num_atoms,
batch.pos, knn=self.knn)
edges.append(geometry.edges_n_ij)
triplets.append(
torch.stack(
(
geometry.triplets_cos_ijk,
geometry.triplets_n_ij,
geometry.triplets_n_ik,
),
dim=1,
)
)
edges = torch.cat(edges, dim=0)
triplets = torch.cat(triplets, dim=0)
self.edges_mean.data = edges.mean()
self.edges_std.data = edges.std()
self.triplets_mean.data = triplets.mean(dim=0)
self.triplets_std.data = triplets.std(dim=0)
def normalize(
self, edges: torch.FloatTensor, triplets: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
edges_scaled = (
edges - self.edges_mean.data[None]) / self.edges_std.data[None]
triplets_scaled = (
triplets - self.triplets_mean.data[None]
) / self.triplets_std.data[None]
return edges_scaled, triplets_scaled
def denormalize(
self, edges: torch.FloatTensor, triplets: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
edges_scaled = edges * \
self.edges_std.data[None] + self.edges_mean.data[None]
triplets_scaled = (
triplets * self.triplets_std.data[None] +
self.triplets_mean.data[None]
)
return edges_scaled, triplets_scaled
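# Usage sketch (editor's addition; `train_loader` batches must expose .cell,
# .pos and .num_atoms, as CrystalData does):
#     bond_scaler = BondsScaler(knn=8)
#     bond_scaler.fit(train_loader)   # gathers edge/triplet statistics per batch
#     edges_n, triplets_n = bond_scaler.normalize(edges, triplets)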
| 6,553 | 32.269036 | 128 | py |
pegnn | pegnn-master/src/utils/shape.py | import torch
from typing import Tuple, List, Union, Dict
from collections import namedtuple
class shape:
def __init__(self, *dim: Union[int, str], dtype=None):
assert isinstance(dim, tuple)
for d in dim:
assert (type(d) == int and -1 <= d) or type(d) == str
assert (dtype is None) or isinstance(dtype, torch.dtype)
self.dim = dim
self.dtype = dtype
def get_dim(self, dim: List[Union[int, str]], context: Dict[str, int] = {}):
dim_eval = []
for d in dim:
if type(d) == str and (d in context):
dim_eval.append(context[d])
else:
dim_eval.append(d)
return tuple(dim_eval)
def assert_match(self, x: torch.Tensor, context: Dict[str, int] = {}):
assert isinstance(x, torch.Tensor), "x is not a Tensor"
assert x.dim() == len(
self.dim
), f"the dimension of x should match with {self.dim}"
for x_dim, trg_dim in zip(x.shape, self.dim):
if (trg_dim is None) or (trg_dim == -1):
continue
if type(trg_dim) == str:
if trg_dim in context:
trg_dim = context[trg_dim]
else:
context[trg_dim] = x_dim
continue
assert (
x_dim == trg_dim
), f"the shape of x {tuple(x.shape)} should match with {self.get_dim(self.dim,context)}"
if self.dtype is not None:
assert (
x.dtype == self.dtype
), f"the data type of x ({x.dtype}) should match with {self.dtype}"
return context
def build_shapes(context: Dict[str, int]) -> namedtuple("shapes", tuple()):
return namedtuple("shapes", context.keys())(*context.values())
def assert_tensor_match(
*args: Tuple[torch.Tensor, shape]
) -> namedtuple("shapes", tuple()):
context = {}
for x, s in args:
context = s.assert_match(x, context=context)
return build_shapes(context)
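# Example (editor's addition): dimension names ("n", "b") are bound on first
# use and must stay consistent across all checked tensors.
if __name__ == "__main__":
    pos = torch.zeros(5, 3)
    num_atoms = torch.tensor([2, 3], dtype=torch.long)
    shapes = assert_tensor_match(
        (pos, shape("n", 3, dtype=torch.float32)),
        (num_atoms, shape("b", dtype=torch.long)),
    )
    print(shapes.n, shapes.b)  # -> 5 2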
| 2,051 | 27.901408 | 100 | py |
pegnn | pegnn-master/src/utils/polar.py | import torch
import unittest
__all__ = ["polar"]
def polar(a: torch.FloatTensor, side: str = "right"):
if side not in ["right", "left"]:
raise ValueError("`side` must be either 'right' or 'left'")
assert a.ndim == 3 and a.shape[1] == a.shape[2]
w, s, vh = torch.linalg.svd(a, full_matrices=False)
u = torch.bmm(w, vh)
if side == "right":
# a = up
p = torch.bmm(torch.transpose(vh, 1, 2).conj() * s[:, None], vh)
else:
# a = pu
p = torch.bmm(w * s[:, None], torch.transpose(w, 1, 2).conj())
mask = torch.where(torch.det(u) < 0, -1.0, 1.0)
u *= mask[:, None, None]
p *= mask[:, None, None]
return u, p
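# Sanity sketch (editor's addition): for the default side="right", the product
# is preserved, u is orthogonal and det(u) > 0 (the determinant fix may flip
# the sign of p).
#     a = torch.matrix_exp(torch.randn(4, 3, 3))
#     u, p = polar(a)
#     assert torch.allclose(torch.bmm(u, p), a, atol=1e-5)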
class TestPolar(unittest.TestCase):
def test_left(self):
import numpy as np
from scipy.linalg import polar as polat_gt
torch.manual_seed(0)
A = torch.matrix_exp(torch.randn(1 << 10, 3, 3))
R, K = polar(A, side="left")
for i in range(A.shape[0]):
R_gt, K_gt = polat_gt(A[i].numpy(), side="left")
R_gt = torch.from_numpy(R_gt)
K_gt = torch.from_numpy(K_gt)
            if torch.det(R_gt) < 0:
                # match polar()'s det(u) > 0 convention; negating both
                # factors leaves the product unchanged
                R_gt = -R_gt
                K_gt = -K_gt
self.assertAlmostEqual((R[i] - R_gt).abs().sum().item(), 0.0, places=4)
self.assertAlmostEqual((K[i] - K_gt).abs().sum().item(), 0.0, places=4)
def test_right(self):
import numpy as np
from scipy.linalg import polar as polat_gt
torch.manual_seed(0)
A = torch.matrix_exp(torch.randn(1 << 10, 3, 3))
R, K = polar(A, side="right")
for i in range(A.shape[0]):
R_gt, K_gt = polat_gt(A[i].numpy(), side="right")
R_gt = torch.from_numpy(R_gt)
K_gt = torch.from_numpy(K_gt)
            if torch.det(R_gt) < 0:
                # match polar()'s det(u) > 0 convention; negating both
                # factors leaves the product unchanged
                R_gt = -R_gt
                K_gt = -K_gt
self.assertAlmostEqual((R[i] - R_gt).abs().sum().item(), 0.0, places=4)
self.assertAlmostEqual((K[i] - K_gt).abs().sum().item(), 0.0, places=4)
def volume(x: torch.FloatTensor) -> torch.FloatTensor:
return (torch.cross(x[:, :, 0], x[:, :, 1]) * x[:, :, 2]).sum(dim=1).abs()
def volume2(x):
return torch.linalg.svd(x)[1].prod(dim=1).abs().detach()
if __name__ == "__main__":
from torch import tensor
calc_scale = 1.5
rho = tensor(
[
[-1.9330e-01, 3.3560e00, -2.1579e00],
[6.8199e01, -3.8512e02, 2.6373e02],
[-3.6272e01, 2.0426e02, -1.3885e02],
]
)
actions_rho = tensor(
[
[0.9919, 1.0756, -0.5697],
[1.0756, -143.5471, 76.5437],
[-0.5697, 76.5437, -39.5333],
]
)
action_normalize = tensor(
[
[0.1996, 0.2165, -0.1146],
[0.2165, -28.8877, 15.4038],
[-0.1146, 15.4038, -7.9557],
]
)
rho.unsqueeze_(0)
actions_rho.unsqueeze_(0)
action_normalize.unsqueeze_(0)
"""
print(
torch.cross(
tensor([-1.9330e-01, 6.8199e01, -3.6272e01]),
tensor([3.3560e00, -3.8512e02, 2.0426e02]),
)
.dot(tensor([-2.1579e00, 2.6373e02, -1.3885e02]))
.abs()
)
print(volume2(rho))
print(volume(rho))
exit()
"""
print(torch.linalg.matrix_rank(rho))
print(torch.linalg.matrix_rank(actions_rho, hermitian=True))
U = torch.linalg.svd(rho).U[0]
print(
torch.dot(U[:, 0], U[:, 1]),
torch.dot(U[:, 1], U[:, 2]),
torch.dot(U[:, 2], U[:, 0]),
)
U = torch.linalg.svd(actions_rho).U[0]
print(
torch.dot(U[:, 0], U[:, 1]),
torch.dot(U[:, 1], U[:, 2]),
torch.dot(U[:, 2], U[:, 0]),
)
print(volume(action_normalize) * volume(rho))
print(volume(torch.bmm(action_normalize, rho)))
print(volume2(torch.bmm(action_normalize, rho)))
# unittest.main()
| 3,936 | 25.782313 | 83 | py |
pegnn | pegnn-master/src/utils/encoder.py | import torch
import json
import numpy as np
from ase.spacegroup import Spacegroup
__all__ = ["CrystalEncoder"]
class CrystalEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, torch.Tensor):
return obj.tolist()
if isinstance(obj, Spacegroup):
return {"number": obj.no, "symbol": obj.symbol}
# if isinstance(obj, tf.Tensor):
# return obj.numpy().tolist()
return json.JSONEncoder.default(self, obj)
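# Example (editor's addition): serialising mixed numpy/torch payloads.
if __name__ == "__main__":
    payload = {"pos": np.zeros((2, 3)), "z": torch.tensor([6, 8])}
    print(json.dumps(payload, cls=CrystalEncoder))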
| 561 | 27.1 | 59 | py |
pegnn | pegnn-master/src/utils/replay.py | import torch
class Replay:
def __init__(self, batch_size: int, max_depth: int = 32, proba_in: float = 0.1):
self.batch_size = batch_size
self.max_depth = max_depth
self.proba_in = proba_in
self.cell = torch.zeros(0, 3, 3, dtype=torch.float32)
self.pos = torch.zeros(0, 3, dtype=torch.float32)
self.z = torch.zeros(0, dtype=torch.float32)
self.num_atoms = torch.zeros(0, dtype=torch.long)
self.depth = 0
def push(self, cell, pos, z, num_atoms):
if torch.rand(1) < self.proba_in:
cell = cell.clone().detach().cpu()
pos = pos.clone().detach().cpu()
z = z.clone().detach().cpu()
num_atoms = num_atoms.clone().detach().cpu()
if self.depth < self.max_depth:
self.cell = torch.cat((self.cell, cell))
self.pos = torch.cat((self.pos, pos))
self.z = torch.cat((self.z, z))
self.num_atoms = torch.cat((self.num_atoms, num_atoms))
self.depth += 1
else:
struct_idx = torch.arange(
self.num_atoms.shape[0], device=self.num_atoms.device
)
batch = struct_idx.repeat_interleave(self.num_atoms)
remove = torch.randint(self.depth, (1,)) * self.batch_size
mask = (batch < remove) | ((remove + self.batch_size) <= batch)
self.cell = torch.cat(
(self.cell[:remove], self.cell[(remove + self.batch_size) :], cell)
)
self.pos = torch.cat((self.pos[mask], pos))
self.z = torch.cat((self.z[mask], z))
self.num_atoms = torch.cat(
(
self.num_atoms[:remove],
self.num_atoms[(remove + self.batch_size) :],
num_atoms,
)
)
def random(self, device="cpu"):
assert self.num_atoms.shape[0] > 0
struct_idx = torch.arange(self.num_atoms.shape[0], device=self.num_atoms.device)
batch = struct_idx.repeat_interleave(self.num_atoms)
idx = torch.randperm(self.num_atoms.shape[0])[: self.batch_size]
mask = (batch[:, None] == idx[None, :]).any(dim=1)
return (
self.cell[idx].to(device),
self.pos[mask].to(device),
self.z[mask].to(device),
self.num_atoms[idx].to(device),
)
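# Smoke test (editor's addition): push a few toy two-structure batches with
# proba_in=1.0 so every push is stored, then draw a random sample.
if __name__ == "__main__":
    replay = Replay(batch_size=2, max_depth=4, proba_in=1.0)
    for _ in range(8):
        replay.push(torch.eye(3).repeat(2, 1, 1), torch.rand(5, 3),
                    torch.rand(5), torch.tensor([3, 2]))
    cell, pos, z, num_atoms = replay.random()
    print(cell.shape, pos.shape, z.shape, num_atoms.shape)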
| 2,510 | 36.477612 | 88 | py |
pegnn | pegnn-master/src/utils/geometry.py | import torch
import torch.nn.functional as F
from .shape import build_shapes, assert_tensor_match, shape
from .timeout import timeout
from dataclasses import dataclass
import crystallographic_graph
@dataclass(init=False)
class Geometry:
batch: torch.LongTensor
batch_edges: torch.LongTensor
batch_triplets: torch.LongTensor
num_atoms: torch.LongTensor
cell: torch.FloatTensor
x: torch.FloatTensor
lengths: torch.FloatTensor
angles: torch.FloatTensor
edges: crystallographic_graph.Edges
edges_e_ij: torch.FloatTensor
edges_v_ij: torch.FloatTensor
edges_u_ij: torch.FloatTensor
edges_r_ij: torch.FloatTensor
triplets: crystallographic_graph.Triplets
triplets_e_ij: torch.FloatTensor
triplets_e_ik: torch.FloatTensor
triplets_v_ij: torch.FloatTensor
triplets_v_ik: torch.FloatTensor
triplets_u_ij: torch.FloatTensor
triplets_u_ik: torch.FloatTensor
triplets_r_ij: torch.FloatTensor
triplets_r_ik: torch.FloatTensor
triplets_angle_ijk: torch.FloatTensor
triplets_cos_ijk: torch.FloatTensor
triplets_sin_ijk: torch.FloatTensor
def __init__(
self,
cell: torch.FloatTensor,
num_atoms: torch.LongTensor,
x: torch.FloatTensor,
mask: torch.BoolTensor = None,
knn: int = 0,
cutoff: float = 0,
check_tensor: bool = True,
edges: bool = True,
triplets: bool = True,
edges_idx: torch.LongTensor = None,
edges_attr: torch.LongTensor = None,
):
assert knn > 0 or cutoff > 0
if check_tensor:
shapes = assert_tensor_match(
(cell, shape("b", 3, 3, dtype=torch.float32)),
(num_atoms, shape("b", dtype=torch.long)),
(x, shape("n", 3, dtype=torch.float32)),
)
else:
shapes = build_shapes(
{
"b": cell.shape[0],
"n": x.shape[0],
}
)
assert (edges_idx is None) == (edges_attr is None)
self.num_atoms = num_atoms
self.cell = cell
self.x = x
self.edges = None
self.batch_edges = None
self.triplets = None
self.batch_triplets = None
struct_idx = torch.arange(shapes.b, device=x.device)
self.batch = struct_idx.repeat_interleave(num_atoms)
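        # `batch` maps each atom to the index of its parent structure, so
        # per-structure cells can be gathered for edges and triplets below.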
if edges:
if edges_idx is None:
self.edges = crystallographic_graph.make_graph(
self.cell, self.x, self.num_atoms, knn=knn, cutoff=cutoff
)
self.batch_edges = self.batch[self.edges.src]
else:
self.edges = crystallographic_graph.Edges(
src=edges_idx[0], dst=edges_idx[1], cell=edges_attr
)
self.batch_edges = self.batch[self.edges.src]
if triplets:
self.triplets = crystallographic_graph.make_triplets(
self.num_atoms, self.edges, check_tensor=check_tensor
)
self.batch_triplets = self.batch[self.triplets.src]
self.update_vectors()
def get_cell_parameters(self, cell=None):
if cell is None:
cell = self.cell
lengths = cell.norm(dim=2)
cross = torch.cross(cell[:, [1, 2, 0]], cell[:, [2, 0, 1]], dim=2)
dot = (cell[:, [1, 2, 0]] * cell[:, [2, 0, 1]]).sum(dim=2)
angles = torch.atan2(cross.norm(dim=2), dot)
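        # note: angles are in radians here, unlike LatticeScaler, which
        # reports degrees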
return lengths, angles
def filter_edges(self, mask: torch.BoolTensor):
assert mask.shape == self.edges.src.shape
self.batch_edges = self.batch_edges[mask]
self.edges.src = self.edges.src[mask]
self.edges.dst = self.edges.dst[mask]
self.edges.cell = self.edges.cell[mask]
self.edges_e_ij = self.edges_e_ij[mask]
self.edges_v_ij = self.edges_v_ij[mask]
self.edges_r_ij = self.edges_r_ij[mask]
self.edges_u_ij = self.edges_u_ij[mask]
def filter_triplets(self, mask: torch.BoolTensor):
assert mask.shape == self.triplets.src.shape
self.batch_triplets = self.batch_triplets[mask]
self.triplets.src = self.triplets.src[mask]
self.triplets.dst_i = self.triplets.dst_i[mask]
self.triplets.cell_i = self.triplets.cell_i[mask]
self.triplets.dst_j = self.triplets.dst_j[mask]
self.triplets.cell_j = self.triplets.cell_j[mask]
self.triplets_e_ij = self.triplets_e_ij[mask]
self.triplets_v_ij = self.triplets_v_ij[mask]
self.triplets_r_ij = self.triplets_r_ij[mask]
self.triplets_u_ij = self.triplets_u_ij[mask]
self.triplets_e_ik = self.triplets_e_ik[mask]
self.triplets_v_ik = self.triplets_v_ik[mask]
self.triplets_r_ik = self.triplets_r_ik[mask]
self.triplets_u_ik = self.triplets_u_ik[mask]
self.triplets_cos_ijk = self.triplets_cos_ijk[mask]
self.triplets_sin_ijk = self.triplets_sin_ijk[mask]
self.triplets_angle_ijk = self.triplets_angle_ijk[mask]
def update_vectors(self, cell=None, x=None):
if cell is None:
cell = self.cell
if x is None:
x = self.x
        self.lengths, self.angles = self.get_cell_parameters(cell)
if self.edges is not None:
self.edges_e_ij = (
x[self.edges.dst, :] - x[self.edges.src, :] + self.edges.cell
)
edges_batch = self.batch[self.edges.src]
self.edges_v_ij = torch.bmm(
cell[edges_batch], self.edges_e_ij.unsqueeze(2)
).squeeze(2)
self.edges_r_ij = self.edges_v_ij.norm(dim=1)
self.edges_u_ij = self.edges_v_ij / self.edges_r_ij[:, None]
if self.edges_r_ij.isinf().any():
raise Exception("infinite edges")
else:
empty_scalar = torch.empty(
(0,), dtype=torch.float32, device=self.cell.device
)
empty_vector = torch.empty(
(0, 3), dtype=torch.float32, device=self.cell.device
)
self.edges_e_ij = empty_vector
self.edges_v_ij = empty_vector
self.edges_r_ij = empty_scalar
self.edges_u_ij = empty_vector
if (self.triplets is not None) and (self.triplets.src.shape[0] > 0):
self.triplets_e_ij = (
x[self.triplets.dst_i, :]
- x[self.triplets.src, :]
+ self.triplets.cell_i
)
self.triplets_e_ik = (
x[self.triplets.dst_j, :]
- x[self.triplets.src, :]
+ self.triplets.cell_j
)
triplets_batch = self.batch[self.triplets.src]
self.triplets_v_ij = torch.bmm(
cell[triplets_batch], self.triplets_e_ij.unsqueeze(2)
).squeeze(2)
self.triplets_v_ik = torch.bmm(
cell[triplets_batch], self.triplets_e_ik.unsqueeze(2)
).squeeze(2)
self.triplets_r_ij = self.triplets_v_ij.norm(dim=1)
self.triplets_r_ik = self.triplets_v_ik.norm(dim=1)
self.triplets_u_ij = self.triplets_v_ij / (
self.triplets_r_ij[:, None] + 1e-12
)
self.triplets_u_ik = self.triplets_v_ik / (
self.triplets_r_ik[:, None] + 1e-12
)
self.triplets_cos_ijk = (
self.triplets_u_ij * self.triplets_u_ik).sum(dim=1)
            self.triplets_sin_ijk = torch.cross(
                self.triplets_u_ij, self.triplets_u_ik, dim=1
            ).norm(dim=1)
self.triplets_angle_ijk = torch.atan2(
self.triplets_sin_ijk, self.triplets_cos_ijk
)
else:
empty_scalar = torch.empty(
(0,), dtype=torch.float32, device=self.cell.device
)
empty_vector = torch.empty(
(0, 3), dtype=torch.float32, device=self.cell.device
)
self.triplets_e_ij = empty_vector
self.triplets_e_ik = empty_vector
self.triplets_v_ij = empty_vector
self.triplets_v_ik = empty_vector
self.triplets_u_ij = empty_vector
self.triplets_u_ik = empty_vector
self.triplets_r_ij = empty_scalar
self.triplets_r_ik = empty_scalar
self.triplets_cos_ijk = empty_scalar
self.triplets_sin_ijk = empty_scalar
self.triplets_angle_ijk = empty_scalar
if __name__ == "__main__":
from ase.neighborlist import neighbor_list
from ase.spacegroup import crystal
import torch.nn as nn
import numpy as np
class RandomCrystal(nn.Module):
def __init__(
self,
size_pdf: torch.FloatTensor,
std_lattice: float = 0.2,
scale_lattice: float = 5.0,
features: int = 128,
):
super().__init__()
self.features = features
self.scale_lattice = scale_lattice
self.std_lattice = std_lattice
size_cdf = torch.cumsum(size_pdf, dim=0)
assert size_cdf[-1] >= 1.0
self.size_cdf = nn.Parameter(size_cdf, requires_grad=False)
@property
def device(self):
return self.size_cdf.data.device
def forward(self, batch_size: int):
size = torch.bucketize(
torch.rand(batch_size, device=self.device), self.size_cdf
)
num_atoms = size.sum()
cells = self.scale_lattice * torch.matrix_exp(
self.std_lattice *
torch.randn(batch_size, 3, 3, device=self.device)
)
x = torch.rand(num_atoms, 3, device=self.device)
z = torch.rand(num_atoms, self.features, device=self.device)
return cells, x, z, size
batch_size = 256
pdf = torch.tensor([0.0, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
rand = RandomCrystal(pdf).to("cuda")
cell, x, z, size = rand(batch_size=batch_size)
batch = torch.arange(batch_size, device=size.device)
batch = batch.repeat_interleave(size)
geometry = Geometry(cell, size, x, knn=128, triplets=False)
distance = []
lengths = []
angles = []
for i in range(batch_size):
mask = batch == i
size_i = size[i].item()
cell_i = cell[i].clone().detach().cpu().numpy()
x_i = x[mask].clone().detach().cpu().numpy()
cry = crystal("C" * size_i, [tuple(x) for x in x_i], cell=cell_i)
[a, b, c, alpha, beta, gamma] = cry.get_cell_lengths_and_angles()
lengths.append([a, b, c])
angles.append([alpha, beta, gamma])
dist = neighbor_list("d", cry, cutoff=5.0)
distance.append(dist)
distance = np.concatenate(distance)
lengths = np.array(lengths)
angles = np.array(angles)
    print(
        "lengths max absolute error",
        np.max(np.abs(geometry.lengths.cpu().numpy() - lengths)),
    )
    print(
        "angles max absolute error",
        np.max(np.abs(geometry.angles.cpu().numpy() - (angles * np.pi / 180))),
    )
mask = geometry.edges_r_ij <= 5.0
geom_dist = geometry.edges_r_ij[mask].detach().cpu().numpy()
import matplotlib.pyplot as plt
hist1, bins = np.histogram(distance, bins=32, range=(0.0, 5.0))
hist2, _ = np.histogram(geom_dist, bins=32, range=(0.0, 5.0))
bins = 0.5 * (bins[1:] + bins[:-1])
plt.plot(bins, hist1)
plt.plot(bins, hist2)
plt.savefig("out.png")
| 11,649 | 30.233244 | 79 | py |
pegnn | pegnn-master/src/utils/io.py | import torch
import torch.nn.functional as F
from ase.spacegroup import crystal
import ase.io as io
import pandas as pd
from src.utils.visualize import select
import os
def write_cif(file_name, idx, cell, pos, z, num_atoms):
cell, pos, z = select(idx, cell, pos, z, num_atoms)
c = crystal(z, basis=pos, cell=cell)
c.write(file_name, format="cif")
def get_atoms(idx, cell, pos, z, num_atoms):
cell, pos, z = select(idx, cell, pos, z, num_atoms)
return crystal(z, basis=pos, cell=cell)
class AggregateBatch:
def __init__(self):
self.reset()
def reset(self):
self.cell = []
self.cell_noisy = []
self.cell_denoised = []
self.pos = []
self.z = []
self.num_atoms = []
def append(self, cell, cell_denoised, pos, z, num_atoms):
self.cell.append(cell.clone().detach().cpu())
self.cell_denoised.append(cell_denoised.clone().detach().cpu())
self.pos.append(pos.clone().detach().cpu())
self.z.append(z.clone().detach().cpu())
self.num_atoms.append(num_atoms.clone().detach().cpu())
def cat(self):
z = torch.cat(self.z, dim=0)
if z.ndim == 1:
z = F.one_hot(z, num_classes=100)
return (
torch.cat(self.cell, dim=0),
torch.cat(self.cell_denoised, dim=0),
torch.cat(self.pos, dim=0),
z,
torch.cat(self.num_atoms, dim=0)
)
def write(self, path, verbose=False):
cell, cell_denoised, pos, z, num_atoms = self.cat()
os.makedirs(path, exist_ok=True)
iterator = range(cell.shape[0])
if verbose:
import tqdm
iterator = tqdm.tqdm(iterator, desc=f"saving cif to {path}")
struct_original = []
struct_denoised = []
for idx in iterator:
struct_original.append(get_atoms(idx, cell, pos, z, num_atoms))
struct_denoised.append(
get_atoms(idx, cell_denoised, pos, z, num_atoms))
io.write(os.path.join(path, "original.cif"), struct_original)
io.write(os.path.join(path, "generated.cif"), struct_denoised)
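# Usage sketch (editor's addition; the tensors come from a denoising model and
# the names are illustrative):
#     agg = AggregateBatch()
#     agg.append(cell, cell_denoised, pos, z, num_atoms)   # once per batch
#     agg.write("out/cifs", verbose=True)   # writes original.cif and generated.cif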
| 2,197 | 27.179487 | 75 | py |
pegnn | pegnn-master/src/utils/visualize.py | import torch
from ase.spacegroup import crystal
from ase.visualize.plot import plot_atoms
import matplotlib.pyplot as plt
from src.utils.elements import elements
from src.models.operator.utils import lattice_params_to_matrix_torch
def select(idx, cell, pos, z, num_atoms):
struct_idx = torch.arange(num_atoms.shape[0], device=num_atoms.device)
batch = struct_idx.repeat_interleave(num_atoms)
mask = idx == batch
return (
cell[idx].clone().detach().cpu().numpy(),
pos[mask].clone().detach().cpu().numpy(),
z[mask].argmax(dim=1).clone().detach().cpu().numpy(),
)
def plot(ax, idx, cell, pos, z, num_atoms, radii=0.3, rotation=("90x,45y,0z")):
cell, pos, z = select(idx, cell, pos, z, num_atoms)
c = crystal(z, basis=pos, cell=cell)
plot_atoms(c, ax, radii=radii, rotation=rotation)
def plot_grid(
cell, pos, z, num_atoms, rows=2, cols=3, radii=0.3, rotation=("30x,30y,30z")
):
fig, axs = plt.subplots(rows, cols)
for i in range(rows):
for j in range(cols):
plot(
axs[i][j],
j + i * cols,
cell,
pos,
z,
num_atoms,
radii=radii,
rotation=rotation,
)
return fig
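# Usage sketch (editor's addition): render a rows x cols grid from one batch.
#     fig = plot_grid(batch.cell, batch.pos, batch.z, batch.num_atoms)
#     fig.savefig("grid.png")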
def generate_fig(original, denoised, n):
import matplotlib.pyplot as plt
from ase.visualize.plot import plot_atoms
from ase.spacegroup import crystal
L_o, x_o, z_o, atoms_count_o = original
batch_o = torch.arange(L_o.shape[0], device=L_o.device)
batch_atoms_o = batch_o.repeat_interleave(atoms_count_o)
L_t, x_t, z_t, atoms_count_t = denoised
batch_t = torch.arange(atoms_count_t.shape[0], device=atoms_count_t.device)
batch_atoms_t = batch_t.repeat_interleave(atoms_count_t)
elems = ["" for _ in range(128)]
for s, e in elements.items():
elems[e] = s
fig, axarr = plt.subplots(n, 2, figsize=(15, n * 5))
for i in range(n):
mask_o = batch_atoms_o == i
mask_t = batch_atoms_t == i
for k, (L, x, z, title) in enumerate(
zip(
[L_o[i], L_t[i]],
[x_o[mask_o], x_t[mask_t]],
[z_o[mask_o], z_t[mask_t]],
["original", "denoised"],
)
):
cell_i = L.clone().detach().cpu().numpy()
x_i = x.clone().detach().cpu().numpy()
z_i = z.clone().detach().cpu().numpy()
sym_i = [elems[max(e, 1)] for e in z_i]
cry = crystal(sym_i, [tuple(x) for x in x_i], cell=cell_i)
axarr[i][k].set_title(title)
axarr[i][k].set_axis_off()
            try:
                plot_atoms(cry, axarr[i][k], rotation=("45x,45y,0z"))
            except Exception:
                # plotting can fail on degenerate cells; leave the panel blank
                pass
# fig.savefig(os.path.join(output_directory, f"gen_{n_iter}.png"))
return fig
def get_fig(batch, model, n, lattice_scaler=None):
# get data from the batch
L_real = batch.cell
x_real = batch.pos
z_real = batch.z
struct_size = batch.num_atoms
# denoise
L_denoised = model(L_real, x_real, z_real, struct_size)
if isinstance(L_denoised, tuple):
lengths_scaled, angles_scaled = lattice_scaler.denormalise(
L_denoised[0], L_denoised[1])
L_denoised = lattice_params_to_matrix_torch(
lengths_scaled, angles_scaled)
original = (L_real, x_real, z_real, struct_size)
denoised = (L_denoised, x_real, z_real, struct_size)
return generate_fig(original, denoised, n)
| 3,558 | 28.172131 | 80 | py |
T2TL | T2TL-main/src/T2TL.py |
import argparse
import time
import datetime
import torch
import torch_ac
import tensorboardX
import sys
import glob
from math import floor
import utils
from model import ACModel
from recurrent_model import RecurrentACModel
from context_model import ContextACModel
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser()
## General parameters
parser.add_argument("--algo", default='ppo',
help="algorithm to use: a2c | ppo (REQUIRED)")
parser.add_argument("--env", default='Zones-25-v1',
help="name of the environment to train on (REQUIRED)")
parser.add_argument("--ltl-sampler", default="Until_1_2_1_1",
help="the ltl formula template to sample from (default: DefaultSampler)")
parser.add_argument("--model", default=None,
help="name of the model (default: {ENV}_{SAMPLER}_{ALGO}_{TIME})")
parser.add_argument("--seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--log-interval", type=int, default=1,
help="number of updates between two logs (default: 10)")
parser.add_argument("--save-interval", type=int, default=2,
help="number of updates between two saves (default: 10, 0 means no saving)")
parser.add_argument("--procs", type=int, default=16,
help="number of processes (default: 16)")
parser.add_argument("--frames", type=int, default=1*10**7,
help="number of frames of training (default: 2*10e8)")
parser.add_argument("--checkpoint-dir", default=None)
## Evaluation parameters
parser.add_argument("--eval", action="store_true", default=False,
help="evaluate the saved model (default: False)")
parser.add_argument("--eval-episodes", type=int, default=5,
help="number of episodes to evaluate on (default: 5)")
parser.add_argument("--eval-env", default=None,
help="name of the environment to train on (default: use the same \"env\" as training)")
parser.add_argument("--ltl-samplers-eval", default=None, nargs='+',
help="the ltl formula templates to sample from for evaluation (default: use the same \"ltl-sampler\" as training)")
parser.add_argument("--eval-procs", type=int, default=1,
help="number of processes (default: use the same \"procs\" as training)")
## Parameters for main algorithm
parser.add_argument("--epochs", type=int, default=10,
help="number of epochs for PPO (default: 4)")
parser.add_argument("--batch-size", type=int, default=1024,
help="batch size for PPO (default: 256)")
parser.add_argument("--frames-per-proc", type=int, default=4096,
help="number of frames per process before update (default: 5 for A2C and 128 for PPO)")
parser.add_argument("--discount", type=float, default=0.998,
help="discount factor (default: 0.99)")
parser.add_argument("--lr", type=float, default=0.0003,
help="learning rate (default: 0.0003)")
parser.add_argument("--gae-lambda", type=float, default=0.95,
help="lambda coefficient in GAE formula (default: 0.95, 1 means no gae)")
parser.add_argument("--entropy-coef", type=float, default=0.003,
help="entropy term coefficient (default: 0.01)")
parser.add_argument("--value-loss-coef", type=float, default=0.5,
help="value loss term coefficient (default: 0.5)")
parser.add_argument("--max-grad-norm", type=float, default=0.5,
help="maximum norm of gradient (default: 0.5)")
parser.add_argument("--optim-eps", type=float, default=1e-8,
help="Adam and RMSprop optimizer epsilon (default: 1e-8)")
parser.add_argument("--optim-alpha", type=float, default=0.99,
help="RMSprop optimizer alpha (default: 0.99)")
parser.add_argument("--clip-eps", type=float, default=0.2,
help="clipping epsilon for PPO (default: 0.2)")
parser.add_argument("--ignoreLTL", action="store_true", default=False,
help="the network ignores the LTL input")
parser.add_argument("--noLTL", action="store_true", default=False,
help="the environment no longer has an LTL goal. --ignoreLTL must be specified concurrently.")
parser.add_argument("--progression-mode", default="full",
help="Full: uses LTL progression; partial: shows the propositions which progress or falsify the formula; none: only original formula is seen. ")
parser.add_argument("--recurrence", type=int, default=1,
help="number of time-steps gradient is backpropagated (default: 1). If > 1, a LSTM is added to the model to have memory.")
parser.add_argument("--gnn", default="Transformer", help="use gnn to model the LTL (only if ignoreLTL==True)")
parser.add_argument("--trans_layer", type=int, default=1, help="the number of Transformer layers need to use")
parser.add_argument("--int-reward", type=float, default=0.0, help="the intrinsic reward for LTL progression (default: 0.0)")
parser.add_argument("--pretrained-gnn", action="store_true", default=False, help="load a pre-trained LTL module.")
parser.add_argument("--dumb-ac", action="store_true", default=False,help="Use a single-layer actor-critic")
parser.add_argument("--freeze-ltl", action="store_true", default=False,help="Freeze the gradient updates of the LTL module")
# Transformer special parameters
parser.add_argument("--d_model", type=int, default=64, help="")
parser.add_argument("--nhead", type=int, default=8, help="")
parser.add_argument("--num_encoder_layers", type=int, default=4, help="")
parser.add_argument("--pool", type=str, default='mean', help="")
parser.add_argument("--dim_feedforward", type=int, default=256, help="")
parser.add_argument("--dropout", type=float, default=0.0, help="")
parser.add_argument("--d_out", type=int, default=16, help="")
parser.add_argument("--layer_norm_eps", type=float, default=1e-5, help="")
parser.add_argument("--TFixup", type=bool, default=True, help="")
# Context Special variable
parser.add_argument("--ContType", type=str, default='Transformer',
help="To choose which model to encode CONTEXT VARIABLE (e.g., RNN, Transformer)")
parser.add_argument("--use_cont", type=bool, default=True, help="")
parser.add_argument("--hist_length", type=int, default=8, help="")
parser.add_argument("--cont_dim", type=int, default=16, help="")
parser.add_argument("--cont_d_model", type=int, default=64, help="")
parser.add_argument("--cont_nhead", type=int, default=8, help="")
parser.add_argument("--cont_num_encoder_layers", type=int, default=2, help="")
parser.add_argument("--cont_pool", type=str, default='mean', help="")
parser.add_argument("--cont_dim_feedforward", type=int, default=256, help="")
parser.add_argument("--cont_d_out", type=int, default=16, help="")
# device
parser.add_argument("--cuda", type=str, default='cuda:0', help="")
parser.add_argument("--device", type=str, default='cuda:0', help="")
    # additional description for test runs
parser.add_argument("--sth", type=str, default='None', help="")
args = parser.parse_args()
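    # Example invocation (editor's addition; a plausible run, not a prescribed
    # configuration):
    #     python T2TL.py --env Zones-25-v1 --ltl-sampler Until_1_2_1_1 \
    #         --seed 1 --frames 10000000 --eval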
use_mem = args.recurrence > 1 # whether use memory or not
# Set run dir
date = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S") # '21-08-21-22-36-39'
gnn_name = args.gnn
if args.ignoreLTL:
gnn_name = "IgnoreLTL"
if args.dumb_ac:
gnn_name = gnn_name + "-dumb_ac"
if args.pretrained_gnn:
gnn_name = gnn_name + "-pretrained"
if args.freeze_ltl:
gnn_name = gnn_name + "-freeze_ltl"
if use_mem:
gnn_name = gnn_name + "-recurrence:%d"%(args.recurrence)
if args.gnn == 'Transformer':
default_model_name = f"{gnn_name}+TL2_{args.env}_seed:{args.seed}_bs:{args.batch_size}_n_encoder:{args.num_encoder_layers}_nhead:{args.nhead}_d_model:{args.d_model}_FFD:{args.dim_feedforward}_hist:{args.hist_length}_L_Cont:{args.cont_num_encoder_layers}_Init:{args.TFixup}_sth:{args.sth}"
else:
# 'RGCN_8x32_ROOT_SHARED_Until_1_2_1_1_Zones-5-v0_seed:1_epochs:10_bs:2048_fpp:4096_dsc:0.998_lr:0.0003_ent:0.003_clip:0.2_prog:full'
default_model_name = f"{gnn_name}_{args.ltl_sampler}_{args.env}_seed:{args.seed}_epochs:{args.epochs}_bs:{args.batch_size}_fpp:{args.frames_per_proc}_dsc:{args.discount}_lr:{args.lr}_ent:{args.entropy_coef}_clip:{args.clip_eps}_prog:{args.progression_mode}"
model_name = args.model or default_model_name
storage_dir = "storage" if args.checkpoint_dir is None else args.checkpoint_dir
model_dir = utils.get_model_dir(model_name, storage_dir)
pretrained_model_dir = None
if args.pretrained_gnn:
assert(args.progression_mode == "full")
# default_dir = f"symbol-storage/{args.gnn}-dumb_ac_{args.ltl_sampler}_Simple-LTL-Env-v0_seed:{args.seed}_*_prog:{args.progression_mode}/train"
default_dir = f"{args.gnn}-dumb_ac_{args.ltl_sampler}_Simple-LTL-Env-v0_seed:{args.seed}_*_prog:{args.progression_mode}/train"
print(default_dir)
model_dirs = glob.glob(default_dir)
if len(model_dirs) == 0:
raise Exception("Pretraining directory not found.")
elif len(model_dirs) > 1:
raise Exception("More than 1 candidate pretraining directory found.")
pretrained_model_dir = model_dirs[0]
# Load loggers and Tensorboard writer
txt_logger = utils.get_txt_logger(model_dir + "/train")
csv_file, csv_logger = utils.get_csv_logger(model_dir + "/train")
tb_writer = tensorboardX.SummaryWriter(model_dir + "/train")
utils.save_config(model_dir + "/train", args)
# Log command and all script arguments
txt_logger.info("{}\n".format(" ".join(sys.argv)))
txt_logger.info("{}\n".format(args)) # It will output the context of Namespace
# Set seed for all randomness sources
utils.seed(args.seed)
# Set device
device = torch.device(args.cuda if torch.cuda.is_available() else "cpu")
# device = torch.device('cpu')
txt_logger.info(f"Device: {device}\n") # Output the device (default is cpu)
# Load environments
envs = []
progression_mode = args.progression_mode
for i in range(args.procs): # load the env & progression_mode & LTL formula
        # see utils/env.py
envs.append(utils.make_env(args.env, progression_mode, args.ltl_sampler, args.seed, args.int_reward, args.noLTL))
# Sync environments
envs[0].reset() # Add the agent to map & translate the LTL formula
txt_logger.info("Environments loaded\n")
# Load training status
try:
status = utils.get_status(model_dir + "/train", args)
except OSError:
status = {"num_frames": 0, "update": 0} # ??? the state of algorithm ?
txt_logger.info("Training status loaded.\n")
if pretrained_model_dir is not None:
try:
pretrained_status = utils.get_status(pretrained_model_dir, args)
except:
txt_logger.info("Failed to load pretrained model.\n")
exit(1)
    # Load the observation preprocessor (builds the LTL AST when a GNN is used)
    using_gnn = (args.gnn != "GRU" and args.gnn != "LSTM" and args.gnn != "Transformer")
    # see env/format.py
obs_space, preprocess_obss = utils.get_obss_preprocessor(envs[0], using_gnn, progression_mode)
if "vocab" in status and preprocess_obss.vocab is not None:
preprocess_obss.vocab.load_vocab(status["vocab"])
txt_logger.info("Observations preprocessor loaded.\n")
# Load model
if use_mem:
acmodel = RecurrentACModel(envs[0].env, obs_space, envs[0].action_space, args.ignoreLTL, args.gnn, args.dumb_ac, args.freeze_ltl)
else:
acmodel = ContextACModel(envs[0].env, obs_space, envs[0].action_space, args.ignoreLTL, args.gnn, args.dumb_ac, args.freeze_ltl, args)
if "model_state" in status:
acmodel.load_state_dict(status["model_state"])
txt_logger.info("Loading model from existing run.\n")
elif args.pretrained_gnn:
acmodel.load_pretrained_gnn(pretrained_status["model_state"])
txt_logger.info("Pretrained model loaded.\n")
acmodel.to(device)
txt_logger.info("Model loaded.\n")
txt_logger.info("{}\n".format(acmodel))
# Load algo
if args.algo == "a2c":
algo = torch_ac.A2CAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
args.optim_alpha, args.optim_eps, preprocess_obss)
elif args.algo == "ppo":
algo = torch_ac.PPOAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
args.optim_eps, args.clip_eps, args.epochs, args.batch_size, preprocess_obss,
history_length=args.hist_length)
else:
raise ValueError("Incorrect algorithm name: {}".format(args.algo))
if "optimizer_state" in status:
algo.optimizer.load_state_dict(status["optimizer_state"])
txt_logger.info("Loading optimizer from existing run.\n")
txt_logger.info("Optimizer loaded.\n")
# init the evaluator
if args.eval:
eval_samplers = args.ltl_samplers_eval if args.ltl_samplers_eval else [args.ltl_sampler]
eval_env = args.eval_env if args.eval_env else args.env
eval_procs = args.eval_procs if args.eval_procs else args.procs
evals = []
for eval_sampler in eval_samplers:
evals.append(utils.Eval(eval_env, model_name, eval_sampler,
seed=args.seed, device=device, num_procs=eval_procs, ignoreLTL=args.ignoreLTL, progression_mode=progression_mode, gnn=args.gnn, dumb_ac = args.dumb_ac))
# Train model
num_frames = status["num_frames"] # num_frames:0
update = status["update"] # update:0
start_time = time.time()
while num_frames < args.frames:
# Update model parameters
update_start_time = time.time()
exps, logs1 = algo.collect_experiences()
        # interact with the environments (very important)
logs2 = algo.update_parameters(exps)
logs = {**logs1, **logs2}
update_end_time = time.time()
num_frames += logs["num_frames"]
update += 1
# Print logs
if update % args.log_interval == 0:
fps = logs["num_frames"]/(update_end_time - update_start_time)
duration = int(time.time() - start_time)
return_per_episode = utils.synthesize(logs["return_per_episode"])
rreturn_per_episode = utils.synthesize(logs["reshaped_return_per_episode"])
average_reward_per_step = utils.average_reward_per_step(logs["return_per_episode"], logs["num_frames_per_episode"])
average_discounted_return = utils.average_discounted_return(logs["return_per_episode"], logs["num_frames_per_episode"], args.discount)
num_frames_per_episode = utils.synthesize(logs["num_frames_per_episode"])
header = ["update", "frames", "FPS", "duration"]
data = [update, num_frames, fps, duration]
header += ["rreturn_" + key for key in rreturn_per_episode.keys()]
data += rreturn_per_episode.values()
header += ["average_reward_per_step", "average_discounted_return"]
data += [average_reward_per_step, average_discounted_return]
header += ["num_frames_" + key for key in num_frames_per_episode.keys()]
data += num_frames_per_episode.values()
header += ["entropy", "value", "policy_loss", "value_loss", "grad_norm"]
data += [logs["entropy"], logs["value"], logs["policy_loss"], logs["value_loss"], logs["grad_norm"]]
txt_logger.info(
"U {} | F {:06} | FPS {:04.0f} | D {} | rR:μσmM {:.2f} {:.2f} {:.2f} {:.2f} | ARPS: {:.3f} | ADR: {:.3f} | F:μσmM {:.1f} {:.1f} {} {} | H {:.3f} | V {:.3f} | pL {:.3f} | vL {:.3f} | ∇ {:.3f}"
.format(*data))
header += ["return_" + key for key in return_per_episode.keys()]
data += return_per_episode.values()
if status["num_frames"] == 0:
csv_logger.writerow(header)
csv_logger.writerow(data)
csv_file.flush()
for field, value in zip(header, data):
tb_writer.add_scalar(field, value, num_frames)
# Save status
if args.save_interval > 0 and update % args.save_interval == 0:
status = {"num_frames": num_frames, "update": update,
"model_state": algo.acmodel.state_dict(), "optimizer_state": algo.optimizer.state_dict()}
if hasattr(preprocess_obss, "vocab") and preprocess_obss.vocab is not None:
status["vocab"] = preprocess_obss.vocab.vocab
utils.save_status(status, model_dir + "/train")
txt_logger.info("Status saved")
if args.eval:
# we send the num_frames to align the eval curves with the training curves on TB
for evalu in evals:
evalu.eval(num_frames, episodes=args.eval_episodes)
| 17,759 | 50.32948 | 296 | py |
T2TL | T2TL-main/src/T1TL_pretrain.py |
import argparse
import time
import datetime
import torch
import torch_ac
import tensorboardX
import sys
import glob
from math import floor
import utils
from model import ACModel
from recurrent_model import RecurrentACModel
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser()
## General parameters
parser.add_argument("--algo", default='ppo',
help="algorithm to use: a2c | ppo (REQUIRED)")
parser.add_argument("--env", default='Zones-25-v1',
help="name of the environment to train on (REQUIRED)")
parser.add_argument("--ltl-sampler", default="Until_1_2_1_1",
help="the ltl formula template to sample from (default: DefaultSampler)")
parser.add_argument("--model", default=None,
help="name of the model (default: {ENV}_{SAMPLER}_{ALGO}_{TIME})")
parser.add_argument("--seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--log-interval", type=int, default=1,
help="number of updates between two logs (default: 10)")
parser.add_argument("--save-interval", type=int, default=2,
help="number of updates between two saves (default: 10, 0 means no saving)")
parser.add_argument("--procs", type=int, default=16,
help="number of processes (default: 16)")
parser.add_argument("--frames", type=int, default=1*10**7,
help="number of frames of training (default: 2*10e8)")
parser.add_argument("--checkpoint-dir", default=None)
## Evaluation parameters
parser.add_argument("--eval", action="store_true", default=False,
help="evaluate the saved model (default: False)")
parser.add_argument("--eval-episodes", type=int, default=5,
help="number of episodes to evaluate on (default: 5)")
parser.add_argument("--eval-env", default=None,
help="name of the environment to train on (default: use the same \"env\" as training)")
parser.add_argument("--ltl-samplers-eval", default=None, nargs='+',
help="the ltl formula templates to sample from for evaluation (default: use the same \"ltl-sampler\" as training)")
parser.add_argument("--eval-procs", type=int, default=1,
help="number of processes (default: use the same \"procs\" as training)")
## Parameters for main algorithm
parser.add_argument("--epochs", type=int, default=10,
help="number of epochs for PPO (default: 4)")
parser.add_argument("--batch-size", type=int, default=1024,
help="batch size for PPO (default: 256)")
parser.add_argument("--frames-per-proc", type=int, default=4096,
help="number of frames per process before update (default: 5 for A2C and 128 for PPO)")
parser.add_argument("--discount", type=float, default=0.998,
help="discount factor (default: 0.99)")
parser.add_argument("--lr", type=float, default=0.0003,
help="learning rate (default: 0.0003)")
parser.add_argument("--gae-lambda", type=float, default=0.95,
help="lambda coefficient in GAE formula (default: 0.95, 1 means no gae)")
parser.add_argument("--entropy-coef", type=float, default=0.003,
help="entropy term coefficient (default: 0.01)")
parser.add_argument("--value-loss-coef", type=float, default=0.5,
help="value loss term coefficient (default: 0.5)")
parser.add_argument("--max-grad-norm", type=float, default=0.5,
help="maximum norm of gradient (default: 0.5)")
parser.add_argument("--optim-eps", type=float, default=1e-8,
help="Adam and RMSprop optimizer epsilon (default: 1e-8)")
parser.add_argument("--optim-alpha", type=float, default=0.99,
help="RMSprop optimizer alpha (default: 0.99)")
parser.add_argument("--clip-eps", type=float, default=0.2,
help="clipping epsilon for PPO (default: 0.2)")
parser.add_argument("--ignoreLTL", action="store_true", default=False,
help="the network ignores the LTL input")
parser.add_argument("--noLTL", action="store_true", default=False,
help="the environment no longer has an LTL goal. --ignoreLTL must be specified concurrently.")
parser.add_argument("--progression-mode", default="full",
help="Full: uses LTL progression; partial: shows the propositions which progress or falsify the formula; none: only original formula is seen. ")
parser.add_argument("--recurrence", type=int, default=1,
help="number of time-steps gradient is backpropagated (default: 1). If > 1, a LSTM is added to the model to have memory.")
parser.add_argument("--gnn", default="Transformer", help="use gnn to model the LTL (only if ignoreLTL==True)")
parser.add_argument("--trans_layer", type=int, default=1, help="the number of Transformer layers need to use")
parser.add_argument("--int-reward", type=float, default=0.0, help="the intrinsic reward for LTL progression (default: 0.0)")
parser.add_argument("--pretrained-gnn", action="store_true", default=True, help="load a pre-trained LTL module.")
parser.add_argument("--dumb-ac", action="store_true", default=False, help="Use a single-layer actor-critic")
parser.add_argument("--freeze-ltl", action="store_true", default=False, help="Freeze the gradient updates of the LTL module")
# Transformer special parameters
parser.add_argument("--d_model", type=int, default=64, help="")
parser.add_argument("--nhead", type=int, default=8, help="")
parser.add_argument("--num_encoder_layers", type=int, default=4, help="")
parser.add_argument("--pool", type=str, default='mean', help="")
parser.add_argument("--dim_feedforward", type=int, default=256, help="")
parser.add_argument("--dropout", type=float, default=0.0, help="")
parser.add_argument("--d_out", type=int, default=16, help="")
parser.add_argument("--layer_norm_eps", type=float, default=1e-5, help="")
parser.add_argument("--TFixup", type=bool, default=True, help="")
parser.add_argument("--cuda", type=str, default='cuda:0', help="")
    # additional description for test runs
parser.add_argument("--sth", type=str, default='None', help="")
args = parser.parse_args()
use_mem = args.recurrence > 1 # whether use memory or not
# Set run dir
date = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S") # '21-08-21-22-36-39'
gnn_name = args.gnn
if args.ignoreLTL:
gnn_name = "IgnoreLTL"
if args.dumb_ac:
gnn_name = gnn_name + "-dumb_ac"
if args.pretrained_gnn:
gnn_name = gnn_name + "-pretrained"
if args.freeze_ltl:
gnn_name = gnn_name + "-freeze_ltl"
if use_mem:
gnn_name = gnn_name + "-recurrence:%d"%(args.recurrence)
if args.gnn == 'Transformer':
default_model_name = f"{gnn_name}_{args.ltl_sampler}_{args.env}_seed:{args.seed}_n_encoder:{args.num_encoder_layers}_nhead:{args.nhead}_d_model:{args.d_model}_FFD:{args.dim_feedforward}_d_out:{args.d_out}_sth:{args.sth}"
else:
# 'RGCN_8x32_ROOT_SHARED_Until_1_2_1_1_Zones-5-v0_seed:1_epochs:10_bs:2048_fpp:4096_dsc:0.998_lr:0.0003_ent:0.003_clip:0.2_prog:full'
default_model_name = f"{gnn_name}_{args.ltl_sampler}_{args.env}_seed:{args.seed}_epochs:{args.epochs}_bs:{args.batch_size}_fpp:{args.frames_per_proc}_dsc:{args.discount}_lr:{args.lr}_ent:{args.entropy_coef}_clip:{args.clip_eps}_prog:{args.progression_mode}"
model_name = args.model or default_model_name
storage_dir = "storage" if args.checkpoint_dir is None else args.checkpoint_dir
model_dir = utils.get_model_dir(model_name, storage_dir)
pretrained_model_dir = None
if args.pretrained_gnn:
assert(args.progression_mode == "full")
# default_dir = f"symbol-storage/{args.gnn}-dumb_ac_{args.ltl_sampler}_Simple-LTL-Env-v0_seed:{args.seed}_*_prog:{args.progression_mode}/train"
# default_dir = f"../src/pretrain/{args.gnn}-dumb_ac_*_Simple-LTL-Env-v0_seed:{args.seed}_*_d_out:{args.d_out}/train"
default_dir = f"../src/pretrain/{args.gnn}-dumb_ac_*_Simple-LTL-Env-v0_seed:{args.seed}_n_encoder:{args.num_encoder_layers}_nhead:{args.nhead}_d_model:{args.d_model}_dim_feedforward:{args.dim_feedforward}_d_out:{args.d_out}_None/train"
print(default_dir)
model_dirs = glob.glob(default_dir)
if len(model_dirs) == 0:
raise Exception("Pretraining directory not found.")
elif len(model_dirs) > 1:
raise Exception("More than 1 candidate pretraining directory found.")
pretrained_model_dir = model_dirs[0]
# Load loggers and Tensorboard writer
txt_logger = utils.get_txt_logger(model_dir + "/train")
csv_file, csv_logger = utils.get_csv_logger(model_dir + "/train")
tb_writer = tensorboardX.SummaryWriter(model_dir + "/train")
utils.save_config(model_dir + "/train", args)
# Log command and all script arguments
txt_logger.info("{}\n".format(" ".join(sys.argv)))
txt_logger.info("{}\n".format(args)) # It will output the context of Namespace
# Set seed for all randomness sources
utils.seed(args.seed)
# Set device
device = torch.device(args.cuda if torch.cuda.is_available() else "cpu")
# device = torch.device('cpu')
txt_logger.info(f"Device: {device}\n") # Output the device (default is cpu)
# Load environments
envs = []
progression_mode = args.progression_mode
for i in range(args.procs): # load the env & progression_mode & LTL formula
        # see utils/env.py
envs.append(utils.make_env(args.env, progression_mode, args.ltl_sampler, args.seed, args.int_reward, args.noLTL))
# Sync environments
envs[0].reset() # Add the agent to map & translate the LTL formula
txt_logger.info("Environments loaded\n")
# Load training status
try:
status = utils.get_status(model_dir + "/train", args)
except OSError:
status = {"num_frames": 0, "update": 0} # ??? the state of algorithm ?
txt_logger.info("Training status loaded.\n")
if pretrained_model_dir is not None:
try:
pretrained_status = utils.get_status(pretrained_model_dir, args)
except:
txt_logger.info("Failed to load pretrained model.\n")
exit(1)
    # Load the observation preprocessor (builds the LTL AST when a GNN is used)
    using_gnn = (args.gnn != "GRU" and args.gnn != "LSTM" and args.gnn != "Transformer")
    # see env/format.py
obs_space, preprocess_obss = utils.get_obss_preprocessor(envs[0], using_gnn, progression_mode)
if "vocab" in status and preprocess_obss.vocab is not None:
preprocess_obss.vocab.load_vocab(status["vocab"])
txt_logger.info("Observations preprocessor loaded.\n")
# Load model
if use_mem:
acmodel = RecurrentACModel(envs[0].env, obs_space, envs[0].action_space, args.ignoreLTL, args.gnn, args.dumb_ac, args.freeze_ltl)
else:
acmodel = ACModel(envs[0].env, obs_space, envs[0].action_space, args.ignoreLTL, args.gnn, args.dumb_ac, args.freeze_ltl, args)
if "model_state" in status:
acmodel.load_state_dict(status["model_state"])
txt_logger.info("Loading model from existing run.\n")
elif args.pretrained_gnn:
acmodel.load_pretrained_gnn(pretrained_status["model_state"])
txt_logger.info("Pretrained model loaded.\n")
acmodel.to(device)
txt_logger.info("Model loaded.\n")
txt_logger.info("{}\n".format(acmodel))
# Load algo
if args.algo == "a2c":
algo = torch_ac.A2CAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
args.optim_alpha, args.optim_eps, preprocess_obss)
elif args.algo == "ppo":
algo = torch_ac.PPOAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
args.optim_eps, args.clip_eps, args.epochs, args.batch_size, preprocess_obss)
else:
raise ValueError("Incorrect algorithm name: {}".format(args.algo))
if "optimizer_state" in status:
algo.optimizer.load_state_dict(status["optimizer_state"])
txt_logger.info("Loading optimizer from existing run.\n")
txt_logger.info("Optimizer loaded.\n")
# init the evaluator
if args.eval:
eval_samplers = args.ltl_samplers_eval if args.ltl_samplers_eval else [args.ltl_sampler]
eval_env = args.eval_env if args.eval_env else args.env
eval_procs = args.eval_procs if args.eval_procs else args.procs
evals = []
for eval_sampler in eval_samplers:
evals.append(utils.Eval(eval_env, model_name, eval_sampler,
seed=args.seed, device=device, num_procs=eval_procs, ignoreLTL=args.ignoreLTL, progression_mode=progression_mode, gnn=args.gnn, dumb_ac = args.dumb_ac))
# Train model
num_frames = status["num_frames"] # num_frames:0
update = status["update"] # update:0
start_time = time.time()
while num_frames < args.frames:
# Update model parameters
update_start_time = time.time()
exps, logs1 = algo.collect_experiences()
        # interact with the environments (very important)
logs2 = algo.update_parameters(exps)
logs = {**logs1, **logs2}
update_end_time = time.time()
num_frames += logs["num_frames"]
update += 1
# Print logs
if update % args.log_interval == 0:
fps = logs["num_frames"]/(update_end_time - update_start_time)
duration = int(time.time() - start_time)
return_per_episode = utils.synthesize(logs["return_per_episode"])
rreturn_per_episode = utils.synthesize(logs["reshaped_return_per_episode"])
average_reward_per_step = utils.average_reward_per_step(logs["return_per_episode"], logs["num_frames_per_episode"])
average_discounted_return = utils.average_discounted_return(logs["return_per_episode"], logs["num_frames_per_episode"], args.discount)
num_frames_per_episode = utils.synthesize(logs["num_frames_per_episode"])
header = ["update", "frames", "FPS", "duration"]
data = [update, num_frames, fps, duration]
header += ["rreturn_" + key for key in rreturn_per_episode.keys()]
data += rreturn_per_episode.values()
header += ["average_reward_per_step", "average_discounted_return"]
data += [average_reward_per_step, average_discounted_return]
header += ["num_frames_" + key for key in num_frames_per_episode.keys()]
data += num_frames_per_episode.values()
header += ["entropy", "value", "policy_loss", "value_loss", "grad_norm"]
data += [logs["entropy"], logs["value"], logs["policy_loss"], logs["value_loss"], logs["grad_norm"]]
txt_logger.info(
"U {} | F {:06} | FPS {:04.0f} | D {} | rR:μσmM {:.2f} {:.2f} {:.2f} {:.2f} | ARPS: {:.3f} | ADR: {:.3f} | F:μσmM {:.1f} {:.1f} {} {} | H {:.3f} | V {:.3f} | pL {:.3f} | vL {:.3f} | ∇ {:.3f}"
.format(*data))
header += ["return_" + key for key in return_per_episode.keys()]
data += return_per_episode.values()
if status["num_frames"] == 0:
csv_logger.writerow(header)
csv_logger.writerow(data)
csv_file.flush()
for field, value in zip(header, data):
tb_writer.add_scalar(field, value, num_frames)
# Save status
if args.save_interval > 0 and update % args.save_interval == 0:
status = {"num_frames": num_frames, "update": update,
"model_state": algo.acmodel.state_dict(), "optimizer_state": algo.optimizer.state_dict()}
if hasattr(preprocess_obss, "vocab") and preprocess_obss.vocab is not None:
status["vocab"] = preprocess_obss.vocab.vocab
utils.save_status(status, model_dir + "/train")
txt_logger.info("Status saved")
if args.eval:
# we send the num_frames to align the eval curves with the training curves on TB
for evalu in evals:
evalu.eval(num_frames, episodes=args.eval_episodes)
| 16,899 | 50.057402 | 265 | py |
T2TL | T2TL-main/src/context_model.py | """
This is the description of the deep NN currently being used.
It is a small CNN for the features with a GRU encoding of the LTL task.
The features and LTL are preprocessed by the utils.format.get_obss_preprocessor(...) function:
    - In that function, the LTL tuple representation is transformed into a text representation:
    - Input: ('until',('not','a'),('and', 'b', ('until',('not','c'),'d')))
    - Output: ['until', 'not', 'a', 'and', 'b', 'until', 'not', 'c', 'd']
Each of those tokens gets a one-hot embedding representation from the utils.format.Vocabulary class.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
import torch_ac
import copy
from gym.spaces import Box, Discrete
from gnns.graphs.GCN import *
from gnns.graphs.GNN import GNNMaker
from env_model import getEnvModel
from policy_network import PolicyNetwork
from transEncoder import ContextTransformer, TransfomerSyn  # TransfomerSyn is used below; assumed to live in transEncoder
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
def init_params(m):
classname = m.__class__.__name__
if classname.find("Linear") != -1:
m.weight.data.normal_(0, 1)
m.weight.data *= 1 / torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True))
if m.bias is not None:
m.bias.data.fill_(0)
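# Editor's note: init_params draws each Linear weight from N(0, 1) and rescales
# every row to unit L2 norm (a variance-preserving initialisation); it is
# applied model-wide via `self.apply(init_params)` in the constructor below.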
class ContextACModel(nn.Module):
def __init__(self, env, obs_space, action_space, ignoreLTL, gnn_type, dumb_ac, freeze_ltl, args):
super().__init__()
# Decide which components are enabled
self.use_progression_info = "progress_info" in obs_space
self.use_text = not ignoreLTL and (gnn_type == "GRU" or gnn_type == "LSTM") and "text" in obs_space
self.use_ast = not ignoreLTL and ("GCN" in gnn_type) and "text" in obs_space # True
self.use_trans = not ignoreLTL and ("Transformer" in gnn_type) and "text" in obs_space # True
self.gnn_type = gnn_type
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.action_space = action_space
self.dumb_ac = dumb_ac
self.recurrent = False
self.context = True
self.cont_dim = args.cont_dim # the dimension for context variable
self.freeze_pretrained_params = freeze_ltl
if self.freeze_pretrained_params:
print("Freezing the LTL module.")
self.env_model = getEnvModel(env, obs_space)
# Define text embedding
if self.use_progression_info:
self.text_embedding_size = 32
self.simple_encoder = nn.Sequential(
nn.Linear(obs_space["progress_info"], 64),
nn.Tanh(),
nn.Linear(64, self.text_embedding_size),
nn.Tanh()
).to(self.device)
print("Linear encoder Number of parameters:",
sum(p.numel() for p in self.simple_encoder.parameters() if p.requires_grad))
elif self.use_text:
self.word_embedding_size = 32
self.text_embedding_size = args.gnn_out
if self.gnn_type == "GRU":
self.text_rnn = GRUModel(obs_space["text"], self.word_embedding_size, 16, self.text_embedding_size).to(
self.device)
else:
assert (self.gnn_type == "LSTM")
self.text_rnn = LSTMModel(obs_space["text"], self.word_embedding_size, 16, self.text_embedding_size).to(
self.device)
print("RNN Number of parameters:", sum(p.numel() for p in self.text_rnn.parameters() if p.requires_grad))
elif self.use_ast:
hidden_dim = 32
self.text_embedding_size = 32
self.gnn = GNNMaker(self.gnn_type, obs_space["text"], self.text_embedding_size).to(self.device)
# for param in self.gnn.parameters():
# param.requires_grad = False
print("GNN Number of parameters:", sum(p.numel() for p in self.gnn.parameters() if p.requires_grad))
elif self.use_trans:
self.word_embedding_size = 512
self.text_embedding_size = args.d_out
self.ltl2transformer = TransfomerSyn(obs_space["text"], self.word_embedding_size, self.text_embedding_size,
'mean', args)
# for param in self.ltl2transformer.parameters():
# param.requires_grad = False
print("Transformer Number of parameters:",
sum(p.numel() for p in self.ltl2transformer.parameters() if p.requires_grad))
# Resize image embedding
self.embedding_size = self.env_model.size() # 64
# Context specific code
reward_dim = 1
action_dim = self.action_space.shape[0] # action dim
input_dim_context = action_dim + reward_dim + self.embedding_size
# self.context = Context(hidden_sizes=[args.cont_dim],
# input_dim=input_dim_context,
# history_length=args.hist_length,
# action_dim=action_dim,
# obsr_dim=self.embedding_size,
# device=args.device) # todo: update
self.context = ContextTransformer(input_dim_context,
self.embedding_size,
self.word_embedding_size,
self.text_embedding_size,
'mean',
args,
context=True)
print("embedding size:", self.embedding_size)
if self.use_text or self.use_ast or self.use_progression_info or self.use_trans:
self.embedding_size += self.text_embedding_size # 96
if self.dumb_ac:
# Define actor's model
self.actor = PolicyNetwork(self.embedding_size, self.action_space)
# Define critic's model
self.critic = nn.Sequential(
nn.Linear(self.embedding_size, 1)
)
else:
# Define actor's model
self.actor = PolicyNetwork(self.embedding_size + self.cont_dim,
self.action_space,
hiddens=[64, 64, 64],
activation=nn.ReLU())
# Define critic's model
self.critic = nn.Sequential(
nn.Linear(self.embedding_size + self.cont_dim, 64),
nn.Tanh(),
nn.Linear(64, 64),
nn.Tanh(),
nn.Linear(64, 1)
)
# Initialize parameters correctly
self.apply(init_params)
if self.use_trans and args.TFixup:
self.ltl2transformer.init_by_TFixup(args)
print("Transformer Module has been initialized")
self.context.init_by_TFixup(args)
print("Context Module has been initialized")
def forward(self, obs, context):
embedding_ = self.env_model(obs) # shape = torch.Size([16, 64])
cont = self.context(context)
if self.use_progression_info:
embed_ltl = self.simple_encoder(obs.progress_info)
embedding = torch.cat((embedding_, embed_ltl), dim=1) if embedding_ is not None else embed_ltl
# Adding Text
elif self.use_text:
embed_text = self.text_rnn(obs.text)
embedding = torch.cat((embedding_, embed_text), dim=1) if embedding_ is not None else embed_text
# Adding GNN
elif self.use_ast:
embed_gnn = self.gnn(obs.text) # shape = torch.Size([16, 32])
embedding = torch.cat((embedding_, embed_gnn),
dim=1) if embedding_ is not None else embed_gnn # shape = torch.Size([16, 96])
elif self.use_trans:
embed_transformer = self.ltl2transformer(obs.text)
embedding = torch.cat((embedding_, embed_transformer), dim=1) if embedding_ is not None else embed_transformer
embedding = torch.cat((embedding, cont), dim=1)
# Actor
dist = self.actor(embedding)
# Critic
x = self.critic(embedding)
value = x.squeeze(1)
return dist, value, embedding_
def load_pretrained_gnn(self, model_state):
# We delete all keys relating to the actor/critic.
new_model_state = model_state.copy()
for key in model_state.keys():
if key.find("actor") != -1 or key.find("critic") != -1: # ??? key.find()?
del new_model_state[key]
self.load_state_dict(new_model_state, strict=False)
if self.freeze_pretrained_params:
target = self.text_rnn if self.gnn_type == "GRU" or self.gnn_type == "LSTM" else self.gnn
for param in target.parameters():
param.requires_grad = False
class TransfomerSyn(nn.Module):
def __init__(self, obs_size, d_model, d_out, pool, args, context=False):
super(TransfomerSyn, self).__init__()
self.context = context
if self.context:
self.embed_linear = nn.Linear(obs_size, args.d_model)
self.transformer = TransformerEncoderModel(d_model=args.cont_d_model//2, nhead=args.cont_nhead,
num_encoder_layers=args.cont_num_encoder_layers//2,
pool=args.cont_pool, dim_feedforward=args.cont_dim_feedforward//2,
dropout=args.dropout, d_out=args.cont_d_out,
layer_norm_eps=args.layer_norm_eps)
else:
self.embedded = nn.Embedding(obs_size, args.d_model)
self.transformer = TransformerEncoderModel(d_model=args.d_model, nhead=args.nhead,
num_encoder_layers=args.num_encoder_layers,
pool=args.pool, dim_feedforward=args.dim_feedforward,
dropout=args.dropout, d_out=args.d_out,
layer_norm_eps=args.layer_norm_eps)
def forward(self, text):
if self.context:
embed_text = self.embed_linear(text)
else:
embed_text = self.embedded(text)
feature = self.transformer(embed_text)
return feature
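    # The method below appears to implement T-Fixup initialization (Huang et al., 2020):
    # embeddings are drawn from N(0, d_model^(-1/2)) and scaled by (9N)^(-1/4), and the
    # encoder's attention/FFN projection weights are scaled by 0.67 * N^(-1/4), where N
    # is the number of encoder layers; this removes the need for learning-rate warmup.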
def init_by_TFixup(self, args): # todo:debug
# for k, v in self.transformer.named_parameters():
# print(k, v, v.shape)
for p in self.embedded.parameters():
if p.dim() > 1:
torch.nn.init.normal_(p, 0, args.d_model ** (- 1. / 2.))
temp_state_dic = {}
for name, param in self.embedded.named_parameters():
if 'weight' in name:
temp_state_dic[name] = ((9 * args.num_encoder_layers) ** (- 1. / 4.)) * param
for name in self.embedded.state_dict():
if name not in temp_state_dic:
temp_state_dic[name] = self.embedded.state_dict()[name]
self.embedded.load_state_dict(temp_state_dic)
temp_state_dic = {}
for name, param in self.transformer.named_parameters():
if any(s in name for s in ["linear1.weight", "linear2.weight", "self_attn.out_proj.weight"]):
temp_state_dic[name] = (0.67 * (args.num_encoder_layers) ** (- 1. / 4.)) * param
elif "self_attn.in_proj_weight" in name:
temp_state_dic[name] = (0.67 * (args.num_encoder_layers) ** (- 1. / 4.)) * (param * (2 ** 0.5))
for name in self.transformer.state_dict():
if name not in temp_state_dic:
temp_state_dic[name] = self.transformer.state_dict()[name]
self.transformer.load_state_dict(temp_state_dic)
class LSTMModel(nn.Module):
def __init__(self, obs_size, word_embedding_size=32, hidden_dim=32, text_embedding_size=32):
super().__init__()
# For all our experiments we want the embedding to be a fixed size so we can "transfer".
self.word_embedding = nn.Embedding(obs_size, word_embedding_size)
self.lstm = nn.LSTM(word_embedding_size, hidden_dim, num_layers=2, batch_first=True, bidirectional=True)
self.output_layer = nn.Linear(2 * hidden_dim, text_embedding_size)
def forward(self, text):
hidden, _ = self.lstm(self.word_embedding(text))
return self.output_layer(hidden[:, -1, :])
class GRUModel(nn.Module):
def __init__(self, obs_size, word_embedding_size=32, hidden_dim=32, text_embedding_size=32):
super().__init__()
self.word_embedding = nn.Embedding(obs_size, word_embedding_size)
# word_embedding_size = 32, hidden_dim = 16
self.gru = nn.GRU(word_embedding_size, hidden_dim, num_layers=2, batch_first=True, bidirectional=True)
self.output_layer = nn.Linear(2 * hidden_dim, text_embedding_size)
def forward(self, text):
# hidden_shape: [16, 9, 32] _shape: [4, 16, 16]
hidden, _ = self.gru(self.word_embedding(text))
return self.output_layer(hidden[:, -1, :])
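# Illustrative usage of the RNN encoders above (hypothetical vocabulary size and sequence length):
#   rnn = GRUModel(obs_size=15, hidden_dim=16)
#   tokens = torch.randint(0, 15, (16, 9))  # batch of 16 token sequences, 9 tokens each
#   rnn(tokens).shape                       # -> torch.Size([16, 32])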
class TransformerEncoderModel(nn.Module):
def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 1, pool: str = 'mean',
dim_feedforward: int = 2048, dropout: float = 0.1, d_out: int = 8, activation=F.relu,
layer_norm_eps: float = 1e-5, batch_first: bool = True, norm_first: bool = False):
"""
:param d_model: the number of expected features in the encoder/decoder inputs (default=512).
:param nhead: the number of heads in the multiheadattention models (default=8).
:param num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
:param dim_feedforward: the dimension of the feedforward network model (default=2048).
:param dropout: the dropout value (default=0.1).
:param activation: the activation function of encoder/decoder intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
:param layer_norm_eps: the eps value in layer normalization components (default=1e-5).
:param batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
:param norm_first: if ``True``, encoder and decoder layers will perform LayerNorms before
other attention and feedforward operations, otherwise after. Default: ``False``
        Examples::
            >>> model = TransformerEncoderModel(d_model=512, nhead=8, num_encoder_layers=2, d_out=16)
            >>> src = torch.rand(32, 10, 512)  # (batch, seq, feature) since batch_first=True
            >>> out = model(src)  # shape: (32, 16)
"""
super(TransformerEncoderModel, self).__init__()
self.d_model = d_model
self.nhead = nhead
self.batch_first = batch_first
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
activation, layer_norm_eps, batch_first, norm_first)
encoder_norm = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
self.pool = pool
self.to_latent = nn.Identity()
self.mlp_head = nn.Sequential(
nn.LayerNorm(d_model),
nn.Linear(d_model, d_out)
)
self._reset_parameters()
def forward(self, src, src_mask=None, src_key_padding_mask=None):
"""
Args:
src: the sequence to the encoder (required).
src_mask: the additive mask for the src sequence (optional).
src_key_padding_mask: the ByteTensor mask for src keys per batch (optional).
Shape:
- src: :math:`(S, N, E)`, `(N, S, E)` if batch_first.
- src_mask: :math:`(S, S)`.
- src_key_padding_mask: :math:`(N, S)`.
            where S is the source sequence length, N is the batch size, and E is the feature number
"""
memory = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask)
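        # pool over the sequence dimension: mean of all tokens ('mean') or the last token only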
memory = memory.mean(dim=1) if self.pool == 'mean' else memory[:, -1, :]
memory = self.to_latent(memory)
memory = torch.tanh(self.mlp_head(memory))
return memory
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
torch.nn.init.xavier_uniform_(p)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
r"""TransformerEncoder is a stack of N encoder layers
Args:
encoder_layer: an instance of the TransformerEncoderLayer() class (required).
num_layers: the number of sub-encoder-layers in the encoder (required).
norm: the layer normalization component (optional).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
>>> src = torch.rand(10, 32, 512)
>>> out = transformer_encoder(src)
"""
super(TransformerEncoder, self).__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src, mask=None, src_key_padding_mask=None):
r"""Pass the input through the encoder layers in turn.
Args:
src: the sequence to the encoder (required).
mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
output = src
for mod in self.layers:
output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu,
layer_norm_eps=1e-5, batch_first=False, norm_first=False):
"""
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of the intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False``.
norm_first: if ``True``, layer norm is done prior to attention and feedforward
                operations, respectively. Otherwise it's done after. Default: ``False`` (after).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
>>> src = torch.rand(32, 10, 512)
>>> out = encoder_layer(src)
"""
super(TransformerEncoderLayer, self).__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm_first = norm_first
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = activation
def forward(self, src, src_mask=None, src_key_padding_mask=None):
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
x = src
if self.norm_first:
x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)
x = x + self._ff_block(self.norm2(x))
else:
x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))
x = self.norm2(x + self._ff_block(x))
return x
# self-attention block
def _sa_block(self, x, attn_mask, key_padding_mask):
x = self.self_attn(x, x, x,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False)[0]
return self.dropout1(x)
# feed forward block
def _ff_block(self, x):
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout2(x)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
class Context(nn.Module):
"""
This layer just does non-linear transformation(s)
"""
def __init__(self,
hidden_sizes=[50],
input_dim=None,
history_length=1,
action_dim=None,
obsr_dim=None,
device='cpu'
):
super(Context, self).__init__()
self.hidden_sizes = hidden_sizes
self.input_dim = input_dim
self.hist_length = history_length
self.device = device
self.action_dim = action_dim
self.obsr_dim = obsr_dim
self.recurrent = nn.GRU(self.input_dim,
self.hidden_sizes[0],
bidirectional=False,
batch_first=True,
num_layers=1)
def init_recurrent(self, bsize=None):
        '''
        Initialize the hidden state.
        Batch size can't be None.
        '''
# The order is (num_layers, minibatch_size, hidden_dim)
# LSTM ==> return (torch.zeros(1, bsize, self.hidden_sizes[0]),
# torch.zeros(1, bsize, self.hidden_sizes[0]))
return torch.zeros(1, bsize, self.hidden_sizes[0]).to(self.device)
def forward(self, data):
        '''
        previous_action : B * (H * A) where B is the batch size, H the history length, A the action dim
        previous_reward : B * (H * 1)
        pre_x           : B * (H * D) where D is the observation embedding dim (obsr_dim)
        '''
previous_action, previous_reward, pre_x = data[0], data[1], data[2]
# first prepare data for LSTM
bsize, dim = previous_action.shape # previous_action is B* (history_len * D)
pacts = previous_action.view(bsize, -1, self.action_dim) # view(bsize, self.hist_length, -1)
prews = previous_reward.view(bsize, -1, 1) # reward dim is 1, view(bsize, self.hist_length, 1)
pxs = pre_x.view(bsize, -1, self.obsr_dim) # view(bsize, self.hist_length, -1)
        pre_act_rew = torch.cat([pacts, prews, pxs], dim=-1)  # input to the GRU is [action, reward, observation]
# init lstm/gru
hidden = self.init_recurrent(bsize=bsize)
# lstm/gru
_, hidden = self.recurrent(pre_act_rew, hidden) # hidden is (1, B, hidden_size)
out = hidden.squeeze(0) # (1, B, hidden_size) ==> (B, hidden_size)
return out
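# Illustrative usage sketch (hypothetical dimensions, not part of the original module):
#   ctx = Context(hidden_sizes=[16], input_dim=2 + 1 + 64, history_length=8,
#                 action_dim=2, obsr_dim=64, device='cpu')
#   B, H = 32, 8
#   prev_actions = torch.zeros(B, H * 2)    # flattened action history
#   prev_rewards = torch.zeros(B, H * 1)    # flattened reward history
#   prev_obs     = torch.zeros(B, H * 64)   # flattened observation embeddings
#   ctx([prev_actions, prev_rewards, prev_obs]).shape  # -> torch.Size([32, 16])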
| 24,186 | 42.817029 | 122 | py |
T2TL | T2TL-main/src/T2TL_pretrain.py |
import argparse
import time
import datetime
import torch
import torch_ac
import tensorboardX
import sys
import glob
from math import floor
import utils
from model import ACModel
from context_model import ContextACModel
from recurrent_model import RecurrentACModel  # needed below when recurrence > 1 (use_mem)
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser()
## General parameters
parser.add_argument("--algo", default='ppo',
help="algorithm to use: a2c | ppo (REQUIRED)")
parser.add_argument("--env", default='Zones-25-v1',
help="name of the environment to train on (REQUIRED)")
parser.add_argument("--ltl-sampler", default="Until_1_2_1_1",
help="the ltl formula template to sample from (default: DefaultSampler)")
parser.add_argument("--model", default=None,
help="name of the model (default: {ENV}_{SAMPLER}_{ALGO}_{TIME})")
parser.add_argument("--seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--log-interval", type=int, default=1,
help="number of updates between two logs (default: 10)")
parser.add_argument("--save-interval", type=int, default=2,
help="number of updates between two saves (default: 10, 0 means no saving)")
parser.add_argument("--procs", type=int, default=16,
help="number of processes (default: 16)")
parser.add_argument("--frames", type=int, default=1*10**7,
help="number of frames of training (default: 2*10e8)")
parser.add_argument("--checkpoint-dir", default=None)
## Evaluation parameters
parser.add_argument("--eval", action="store_true", default=False,
help="evaluate the saved model (default: False)")
parser.add_argument("--eval-episodes", type=int, default=5,
help="number of episodes to evaluate on (default: 5)")
parser.add_argument("--eval-env", default=None,
help="name of the environment to train on (default: use the same \"env\" as training)")
parser.add_argument("--ltl-samplers-eval", default=None, nargs='+',
help="the ltl formula templates to sample from for evaluation (default: use the same \"ltl-sampler\" as training)")
parser.add_argument("--eval-procs", type=int, default=1,
help="number of processes (default: use the same \"procs\" as training)")
## Parameters for main algorithm
parser.add_argument("--epochs", type=int, default=10,
help="number of epochs for PPO (default: 4)")
parser.add_argument("--batch-size", type=int, default=1024,
help="batch size for PPO (default: 256)")
parser.add_argument("--frames-per-proc", type=int, default=4096,
help="number of frames per process before update (default: 5 for A2C and 128 for PPO)")
parser.add_argument("--discount", type=float, default=0.998,
help="discount factor (default: 0.99)")
parser.add_argument("--lr", type=float, default=0.0003,
help="learning rate (default: 0.0003)")
parser.add_argument("--gae-lambda", type=float, default=0.95,
help="lambda coefficient in GAE formula (default: 0.95, 1 means no gae)")
parser.add_argument("--entropy-coef", type=float, default=0.003,
help="entropy term coefficient (default: 0.01)")
parser.add_argument("--value-loss-coef", type=float, default=0.5,
help="value loss term coefficient (default: 0.5)")
parser.add_argument("--max-grad-norm", type=float, default=0.5,
help="maximum norm of gradient (default: 0.5)")
parser.add_argument("--optim-eps", type=float, default=1e-8,
help="Adam and RMSprop optimizer epsilon (default: 1e-8)")
parser.add_argument("--optim-alpha", type=float, default=0.99,
help="RMSprop optimizer alpha (default: 0.99)")
parser.add_argument("--clip-eps", type=float, default=0.2,
help="clipping epsilon for PPO (default: 0.2)")
parser.add_argument("--ignoreLTL", action="store_true", default=False,
help="the network ignores the LTL input")
parser.add_argument("--noLTL", action="store_true", default=False,
help="the environment no longer has an LTL goal. --ignoreLTL must be specified concurrently.")
parser.add_argument("--progression-mode", default="full",
help="Full: uses LTL progression; partial: shows the propositions which progress or falsify the formula; none: only original formula is seen. ")
parser.add_argument("--recurrence", type=int, default=1,
help="number of time-steps gradient is backpropagated (default: 1). If > 1, a LSTM is added to the model to have memory.")
parser.add_argument("--gnn", default="Transformer", help="use gnn to model the LTL (only if ignoreLTL==True)")
parser.add_argument("--trans_layer", type=int, default=1, help="the number of Transformer layers need to use")
parser.add_argument("--int-reward", type=float, default=0.0, help="the intrinsic reward for LTL progression (default: 0.0)")
parser.add_argument("--pretrained-gnn", action="store_true", default=True, help="load a pre-trained LTL module.")
parser.add_argument("--dumb-ac", action="store_true", default=False, help="Use a single-layer actor-critic")
parser.add_argument("--freeze-ltl", action="store_true", default=False, help="Freeze the gradient updates of the LTL module")
# Transformer special parameters
parser.add_argument("--d_model", type=int, default=64, help="")
parser.add_argument("--nhead", type=int, default=8, help="")
parser.add_argument("--num_encoder_layers", type=int, default=4, help="")
parser.add_argument("--pool", type=str, default='mean', help="")
parser.add_argument("--dim_feedforward", type=int, default=256, help="")
parser.add_argument("--dropout", type=float, default=0.0, help="")
parser.add_argument("--d_out", type=int, default=16, help="")
parser.add_argument("--layer_norm_eps", type=float, default=1e-5, help="")
parser.add_argument("--TFixup", type=bool, default=True, help="")
# Context Special variable
parser.add_argument("--ContType", type=str, default='Transformer',
help="To choose which model to encode CONTEXT VARIABLE (e.g., RNN, Transformer)")
parser.add_argument("--use_cont", type=bool, default=True, help="")
parser.add_argument("--hist_length", type=int, default=8, help="")
parser.add_argument("--cont_dim", type=int, default=16, help="")
parser.add_argument("--cont_d_model", type=int, default=64, help="")
parser.add_argument("--cont_nhead", type=int, default=8, help="")
parser.add_argument("--cont_num_encoder_layers", type=int, default=2, help="")
parser.add_argument("--cont_pool", type=str, default='mean', help="")
parser.add_argument("--cont_dim_feedforward", type=int, default=256, help="")
parser.add_argument("--cont_d_out", type=int, default=16, help="")
# device
parser.add_argument("--cuda", type=str, default='cuda:0', help="")
parser.add_argument("--device", type=str, default='cuda:0', help="")
    # additional description for test
parser.add_argument("--sth", type=str, default='None', help="")
args = parser.parse_args()
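    # Example invocation (illustrative; every flag is defined in the parser above):
    #   python T2TL_pretrain.py --env Zones-25-v1 --ltl-sampler Until_1_2_1_1 \
    #          --gnn Transformer --seed 1 --frames 10000000 --eval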
    use_mem = args.recurrence > 1  # whether to use memory (recurrent model) or not
# Set run dir
date = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S") # '21-08-21-22-36-39'
gnn_name = args.gnn
if args.ignoreLTL:
gnn_name = "IgnoreLTL"
if args.dumb_ac:
gnn_name = gnn_name + "-dumb_ac"
if args.pretrained_gnn:
gnn_name = gnn_name + "-pretrained"
if args.freeze_ltl:
gnn_name = gnn_name + "-freeze_ltl"
if use_mem:
gnn_name = gnn_name + "-recurrence:%d"%(args.recurrence)
if args.gnn == 'Transformer':
default_model_name = f"{gnn_name}+TL2_{args.ltl_sampler}_{args.env}_seed:{args.seed}_n_encoder:{args.num_encoder_layers}_nhead:{args.nhead}_d_model:{args.d_model}_FFD:{args.dim_feedforward}_d_out:{args.d_out}_hist:{args.hist_length}_L_Cont:{args.cont_num_encoder_layers}_Init:{args.TFixup}_sth:{args.sth}"
else:
# 'RGCN_8x32_ROOT_SHARED_Until_1_2_1_1_Zones-5-v0_seed:1_epochs:10_bs:2048_fpp:4096_dsc:0.998_lr:0.0003_ent:0.003_clip:0.2_prog:full'
default_model_name = f"{gnn_name}_{args.ltl_sampler}_{args.env}_seed:{args.seed}_epochs:{args.epochs}_bs:{args.batch_size}_fpp:{args.frames_per_proc}_dsc:{args.discount}_lr:{args.lr}_ent:{args.entropy_coef}_clip:{args.clip_eps}_prog:{args.progression_mode}"
model_name = args.model or default_model_name
storage_dir = "storage" if args.checkpoint_dir is None else args.checkpoint_dir
model_dir = utils.get_model_dir(model_name, storage_dir)
pretrained_model_dir = None
if args.pretrained_gnn:
assert(args.progression_mode == "full")
# default_dir = f"symbol-storage/{args.gnn}-dumb_ac_{args.ltl_sampler}_Simple-LTL-Env-v0_seed:{args.seed}_*_prog:{args.progression_mode}/train"
# default_dir = f"../src/pretrain/{args.gnn}-dumb_ac_*_Simple-LTL-Env-v0_seed:{args.seed}_*_d_out:{args.d_out}/train"
default_dir = f"../src/pretrain/{args.gnn}-dumb_ac_*_Simple-LTL-Env-v0_seed:{args.seed}_n_encoder:{args.num_encoder_layers}_nhead:{args.nhead}_d_model:{args.d_model}_dim_feedforward:{args.dim_feedforward}_d_out:{args.d_out}_None/train"
print(default_dir)
model_dirs = glob.glob(default_dir)
if len(model_dirs) == 0:
raise Exception("Pretraining directory not found.")
elif len(model_dirs) > 1:
raise Exception("More than 1 candidate pretraining directory found.")
pretrained_model_dir = model_dirs[0]
# Load loggers and Tensorboard writer
txt_logger = utils.get_txt_logger(model_dir + "/train")
csv_file, csv_logger = utils.get_csv_logger(model_dir + "/train")
tb_writer = tensorboardX.SummaryWriter(model_dir + "/train")
utils.save_config(model_dir + "/train", args)
# Log command and all script arguments
txt_logger.info("{}\n".format(" ".join(sys.argv)))
txt_logger.info("{}\n".format(args)) # It will output the context of Namespace
# Set seed for all randomness sources
utils.seed(args.seed)
# Set device
device = torch.device(args.cuda if torch.cuda.is_available() else "cpu")
# device = torch.device('cpu')
txt_logger.info(f"Device: {device}\n") # Output the device (default is cpu)
# Load environments
envs = []
progression_mode = args.progression_mode
for i in range(args.procs): # load the env & progression_mode & LTL formula
        # see utils/env.py
envs.append(utils.make_env(args.env, progression_mode, args.ltl_sampler, args.seed, args.int_reward, args.noLTL))
# Sync environments
envs[0].reset() # Add the agent to map & translate the LTL formula
txt_logger.info("Environments loaded\n")
# Load training status
try:
status = utils.get_status(model_dir + "/train", args)
except OSError:
status = {"num_frames": 0, "update": 0} # ??? the state of algorithm ?
txt_logger.info("Training status loaded.\n")
if pretrained_model_dir is not None:
try:
pretrained_status = utils.get_status(pretrained_model_dir, args)
        except Exception:
txt_logger.info("Failed to load pretrained model.\n")
exit(1)
# Load observations preprocessor-- build AST
using_gnn = (args.gnn != "GRU" and args.gnn != "LSTM" and args.gnn != "Transformer")
    # see utils/format.py
obs_space, preprocess_obss = utils.get_obss_preprocessor(envs[0], using_gnn, progression_mode)
if "vocab" in status and preprocess_obss.vocab is not None:
preprocess_obss.vocab.load_vocab(status["vocab"])
txt_logger.info("Observations preprocessor loaded.\n")
# Load model
if use_mem:
acmodel = RecurrentACModel(envs[0].env, obs_space, envs[0].action_space, args.ignoreLTL, args.gnn, args.dumb_ac, args.freeze_ltl)
else:
acmodel = ContextACModel(envs[0].env, obs_space, envs[0].action_space, args.ignoreLTL, args.gnn, args.dumb_ac, args.freeze_ltl, args)
if "model_state" in status:
acmodel.load_state_dict(status["model_state"])
txt_logger.info("Loading model from existing run.\n")
elif args.pretrained_gnn:
acmodel.load_pretrained_gnn(pretrained_status["model_state"])
txt_logger.info("Pretrained model loaded.\n")
acmodel.to(device)
txt_logger.info("Model loaded.\n")
txt_logger.info("{}\n".format(acmodel))
# Load algo
if args.algo == "a2c":
algo = torch_ac.A2CAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
args.optim_alpha, args.optim_eps, preprocess_obss)
elif args.algo == "ppo":
algo = torch_ac.PPOAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
args.optim_eps, args.clip_eps, args.epochs, args.batch_size, preprocess_obss,
history_length=args.hist_length)
else:
raise ValueError("Incorrect algorithm name: {}".format(args.algo))
if "optimizer_state" in status:
algo.optimizer.load_state_dict(status["optimizer_state"])
txt_logger.info("Loading optimizer from existing run.\n")
txt_logger.info("Optimizer loaded.\n")
# init the evaluator
if args.eval:
eval_samplers = args.ltl_samplers_eval if args.ltl_samplers_eval else [args.ltl_sampler]
eval_env = args.eval_env if args.eval_env else args.env
eval_procs = args.eval_procs if args.eval_procs else args.procs
evals = []
for eval_sampler in eval_samplers:
evals.append(utils.Eval(eval_env, model_name, eval_sampler,
seed=args.seed, device=device, num_procs=eval_procs, ignoreLTL=args.ignoreLTL, progression_mode=progression_mode, gnn=args.gnn, dumb_ac = args.dumb_ac))
# Train model
num_frames = status["num_frames"] # num_frames:0
update = status["update"] # update:0
start_time = time.time()
while num_frames < args.frames:
# Update model parameters
update_start_time = time.time()
        exps, logs1 = algo.collect_experiences()  # interact with the environments (very important)
        logs2 = algo.update_parameters(exps)
logs = {**logs1, **logs2}
update_end_time = time.time()
num_frames += logs["num_frames"]
update += 1
# Print logs
if update % args.log_interval == 0:
fps = logs["num_frames"]/(update_end_time - update_start_time)
duration = int(time.time() - start_time)
return_per_episode = utils.synthesize(logs["return_per_episode"])
rreturn_per_episode = utils.synthesize(logs["reshaped_return_per_episode"])
average_reward_per_step = utils.average_reward_per_step(logs["return_per_episode"], logs["num_frames_per_episode"])
average_discounted_return = utils.average_discounted_return(logs["return_per_episode"], logs["num_frames_per_episode"], args.discount)
num_frames_per_episode = utils.synthesize(logs["num_frames_per_episode"])
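            # utils.synthesize aggregates the per-episode values into mean/std/min/max
            # statistics, which become the "μσmM" columns in the log line below.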
header = ["update", "frames", "FPS", "duration"]
data = [update, num_frames, fps, duration]
header += ["rreturn_" + key for key in rreturn_per_episode.keys()]
data += rreturn_per_episode.values()
header += ["average_reward_per_step", "average_discounted_return"]
data += [average_reward_per_step, average_discounted_return]
header += ["num_frames_" + key for key in num_frames_per_episode.keys()]
data += num_frames_per_episode.values()
header += ["entropy", "value", "policy_loss", "value_loss", "grad_norm"]
data += [logs["entropy"], logs["value"], logs["policy_loss"], logs["value_loss"], logs["grad_norm"]]
txt_logger.info(
"U {} | F {:06} | FPS {:04.0f} | D {} | rR:μσmM {:.2f} {:.2f} {:.2f} {:.2f} | ARPS: {:.3f} | ADR: {:.3f} | F:μσmM {:.1f} {:.1f} {} {} | H {:.3f} | V {:.3f} | pL {:.3f} | vL {:.3f} | ∇ {:.3f}"
.format(*data))
header += ["return_" + key for key in return_per_episode.keys()]
data += return_per_episode.values()
if status["num_frames"] == 0:
csv_logger.writerow(header)
csv_logger.writerow(data)
csv_file.flush()
for field, value in zip(header, data):
tb_writer.add_scalar(field, value, num_frames)
# Save status
if args.save_interval > 0 and update % args.save_interval == 0:
status = {"num_frames": num_frames, "update": update,
"model_state": algo.acmodel.state_dict(), "optimizer_state": algo.optimizer.state_dict()}
if hasattr(preprocess_obss, "vocab") and preprocess_obss.vocab is not None:
status["vocab"] = preprocess_obss.vocab.vocab
utils.save_status(status, model_dir + "/train")
txt_logger.info("Status saved")
if args.eval:
# we send the num_frames to align the eval curves with the training curves on TB
for evalu in evals:
evalu.eval(num_frames, episodes=args.eval_episodes)
| 18,009 | 51.354651 | 313 | py |
T2TL | T2TL-main/src/env_model.py | import torch
import torch.nn as nn
from envs import *
from gym.envs.classic_control import PendulumEnv
def getEnvModel(env, obs_space):
env = env.unwrapped
if isinstance(env, ZonesEnv):
return ZonesEnvModel(obs_space)
# Add your EnvModel here...
# The default case (No environment observations) - SimpleLTLEnv uses this
return EnvModel(obs_space)
"""
This class is in charge of embedding the environment part of the observations.
Every environment has its own set of observations ('image', 'direction', etc) which is handeled
here by associated EnvModel subclass.
How to subclass this:
1. Call the super().__init__() from your init
2. In your __init__ after building the compute graph set the self.embedding_size appropriately
3. In your forward() method call the super().forward as the default case.
4. Add the if statement in the getEnvModel() method
"""
class EnvModel(nn.Module):
def __init__(self, obs_space):
super().__init__()
self.embedding_size = 0
def forward(self, obs):
return None
def size(self):
return self.embedding_size
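# Illustrative only: a minimal EnvModel subclass following the recipe above,
# assuming a hypothetical flat observation vector (not part of the original module).
class FlatEnvModelExample(EnvModel):
    def __init__(self, obs_space):
        super().__init__(obs_space)  # step 1
        if "image" in obs_space.keys():
            self.net_ = nn.Sequential(
                nn.Linear(obs_space["image"][0], 32),
                nn.ReLU()
            )
            self.embedding_size = 32  # step 2: set the embedding size
    def forward(self, obs):
        if "image" in obs.keys():
            return self.net_(obs.image)
        return super().forward(obs)  # step 3: fall back to the default case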
class LetterEnvModel(EnvModel):
def __init__(self, obs_space):
super().__init__(obs_space)
if "image" in obs_space.keys():
n = obs_space["image"][0]
m = obs_space["image"][1]
k = obs_space["image"][2]
self.image_conv = nn.Sequential(
nn.Conv2d(k, 16, (2, 2)),
nn.ReLU(),
nn.Conv2d(16, 32, (2, 2)),
nn.ReLU(),
nn.Conv2d(32, 64, (2, 2)),
nn.ReLU()
)
self.embedding_size = (n-3)*(m-3)*64
def forward(self, obs):
if "image" in obs.keys():
x = obs.image.transpose(1, 3).transpose(2, 3)
x = self.image_conv(x)
x = x.reshape(x.shape[0], -1)
return x
return super().forward(obs)
class MinigridEnvModel(EnvModel):
def __init__(self, obs_space):
super().__init__(obs_space)
if "image" in obs_space.keys():
n = obs_space["image"][0]
m = obs_space["image"][1]
k = obs_space["image"][2]
self.image_conv = nn.Sequential(
nn.Conv2d(k, 16, (2, 2)),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Conv2d(16, 32, (2, 2)),
nn.ReLU(),
nn.Conv2d(32, 64, (2, 2)),
nn.ReLU()
)
self.embedding_size = ((n-1)//2-2)*((m-1)//2-2)*64
def forward(self, obs):
if "image" in obs.keys():
x = obs.image.transpose(1, 3).transpose(2, 3)
x = self.image_conv(x)
x = x.reshape(x.shape[0], -1)
return x
return super().forward(obs)
class ZonesEnvModel(EnvModel):
def __init__(self, obs_space):
super().__init__(obs_space)
if "image" in obs_space.keys():
n = obs_space["image"][0]
lidar_num_bins = 16
self.embedding_size = 64 #(n-12)//lidar_num_bins + 4
self.net_ = nn.Sequential(
nn.Linear(n, 128),
nn.ReLU(),
nn.Linear(128, self.embedding_size),
nn.ReLU()
)
# embedding_size = number of propositional lidars + 4 normal sensors
def forward(self, obs):
if "image" in obs.keys():
return self.net_(obs.image)
return super().forward(obs)
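# Illustrative shapes for ZonesEnvModel (hypothetical observation width):
#   model = ZonesEnvModel({"image": (76,)})
#   model.size()  # -> 64; forward maps obs.image of shape (B, 76) to (B, 64)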
class PendulumEnvModel(EnvModel):
def __init__(self, obs_space):
super().__init__(obs_space)
if "image" in obs_space.keys():
self.net_ = nn.Sequential(
nn.Linear(obs_space["image"][0], 3),
nn.Tanh(),
# nn.Linear(3, 3),
# nn.Tanh()
)
self.embedding_size = 3
def forward(self, obs):
if "image" in obs.keys():
x = obs.image
# x = torch.cat((x, x * x), 1)
x = self.net_(x)
return x
return super().forward(obs)
| 4,146 | 28.204225 | 98 | py |
T2TL | T2TL-main/src/model.py | """
This is the description of the deep NN currently being used.
It is a small CNN for the features with a GRU encoding of the LTL task.
The features and LTL are preprocessed by the utils.format.get_obss_preprocessor(...) function:
- In that function, I transformed the LTL tuple representation into a text representation:
    - Input: ('until',('not','a'),('and', 'b', ('until',('not','c'),'d')))
    - Output: ['until', 'not', 'a', 'and', 'b', 'until', 'not', 'c', 'd']
Each of those tokens gets a one-hot embedding representation from the utils.format.Vocabulary class.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
import torch_ac
import copy
from gym.spaces import Box, Discrete
from gnns.graphs.GCN import *
from gnns.graphs.GNN import GNNMaker
from env_model import getEnvModel
from policy_network import PolicyNetwork
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
def init_params(m):
classname = m.__class__.__name__
if classname.find("Linear") != -1:
m.weight.data.normal_(0, 1)
m.weight.data *= 1 / torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True))
if m.bias is not None:
m.bias.data.fill_(0)
class BasicACModel(nn.Module, torch_ac.ACModel):
def __init__(self, env, obs_space, action_space, ignoreLTL, gnn_type, dumb_ac, freeze_ltl, args):
super().__init__()
# Decide which components are enabled
self.use_progression_info = "progress_info" in obs_space
self.use_text = not ignoreLTL and (gnn_type == "GRU" or gnn_type == "LSTM") and "text" in obs_space
self.use_ast = not ignoreLTL and ("GCN" in gnn_type) and "text" in obs_space # True
self.use_trans = not ignoreLTL and ("Transformer" in gnn_type) and "text" in obs_space # True
self.use_dfa = not ignoreLTL and ("DFA" in gnn_type) and "text" in obs_space # True
self.gnn_type = gnn_type
self.device = torch.device(args.cuda)
self.action_space = action_space
self.dumb_ac = dumb_ac
self.context = False
self.freeze_pretrained_params = freeze_ltl
if self.freeze_pretrained_params:
print("Freezing the LTL module.")
self.env_model = getEnvModel(env, obs_space)
# Resize image embedding
self.embedding_size = self.env_model.size() # 64
print("embedding size:", self.embedding_size)
if self.use_text or self.use_ast or self.use_progression_info or self.use_trans or self.use_dfa:
self.embedding_size += args.d_out # 96
if self.dumb_ac:
# Define actor's model
self.actor = PolicyNetwork(self.embedding_size, self.action_space)
# Define critic's model
self.critic = nn.Sequential(
nn.Linear(self.embedding_size, 1)
)
else:
# Define actor's model
self.actor = PolicyNetwork(self.embedding_size, self.action_space, hiddens=[64, 64, 64],
activation=nn.ReLU())
# Define critic's model
self.critic = nn.Sequential(
nn.Linear(self.embedding_size, 64),
nn.Tanh(),
nn.Linear(64, 64),
nn.Tanh(),
nn.Linear(64, 1)
)
# Initialize parameters correctly
self.apply(init_params)
def forward(self, obs):
embedding = self.env_model(obs) # shape = torch.Size([16, 64])
embedding = torch.cat((embedding, obs.text), dim=-1)
# print(embedding[:, -4:])
# Actor
dist = self.actor(embedding)
# Critic
x = self.critic(embedding)
value = x.squeeze(1)
return dist, value
def load_pretrained_gnn(self, model_state):
# We delete all keys relating to the actor/critic.
new_model_state = model_state.copy()
for key in model_state.keys():
if key.find("actor") != -1 or key.find("critic") != -1:
del new_model_state[key]
self.load_state_dict(new_model_state, strict=False)
if self.freeze_pretrained_params:
target = self.text_rnn if self.gnn_type == "GRU" or self.gnn_type == "LSTM" else self.gnn
for param in target.parameters():
param.requires_grad = False
class ACModel(nn.Module, torch_ac.ACModel):
def __init__(self, env, obs_space, action_space, ignoreLTL, gnn_type, dumb_ac, freeze_ltl, args):
super().__init__()
# Decide which components are enabled
self.use_progression_info = "progress_info" in obs_space
self.use_text = not ignoreLTL and (gnn_type == "GRU" or gnn_type == "LSTM") and "text" in obs_space
self.use_ast = not ignoreLTL and ("GCN" in gnn_type) and "text" in obs_space # True
self.use_trans = not ignoreLTL and ("Transformer" in gnn_type) and "text" in obs_space # True
self.gnn_type = gnn_type
self.device = torch.device(args.cuda)
self.action_space = action_space
self.dumb_ac = dumb_ac
self.context = False
self.freeze_pretrained_params = freeze_ltl
if self.freeze_pretrained_params:
print("Freezing the LTL module.")
self.env_model = getEnvModel(env, obs_space)
# Define text embedding
if self.use_progression_info:
self.text_embedding_size = 32
self.simple_encoder = nn.Sequential(
nn.Linear(obs_space["progress_info"], 64),
nn.Tanh(),
nn.Linear(64, self.text_embedding_size),
nn.Tanh()
).to(self.device)
print("Linear encoder Number of parameters:", sum(p.numel() for p in self.simple_encoder.parameters() if p.requires_grad))
elif self.use_text:
self.word_embedding_size = 32
self.text_embedding_size = args.gnn_out
if self.gnn_type == "GRU":
self.text_rnn = GRUModel(obs_space["text"], self.word_embedding_size, 16, self.text_embedding_size).to(self.device)
else:
assert(self.gnn_type == "LSTM")
self.text_rnn = LSTMModel(obs_space["text"], self.word_embedding_size, 16, self.text_embedding_size).to(self.device)
print("RNN Number of parameters:", sum(p.numel() for p in self.text_rnn.parameters() if p.requires_grad))
elif self.use_ast:
hidden_dim = 32
self.text_embedding_size = 32
self.gnn = GNNMaker(self.gnn_type, obs_space["text"], self.text_embedding_size).to(self.device)
print("GNN Number of parameters:", sum(p.numel() for p in self.gnn.parameters() if p.requires_grad))
elif self.use_trans:
self.word_embedding_size = 512
self.text_embedding_size = args.d_out
self.ltl2transformer = TransfomerSyn(obs_space["text"], self.word_embedding_size, self.text_embedding_size, 'mean' , args)
print("Transformer Number of parameters:", sum(p.numel() for p in self.ltl2transformer.parameters() if p.requires_grad))
# Resize image embedding
self.embedding_size = self.env_model.size() # 64
print("embedding size:", self.embedding_size)
if self.use_text or self.use_ast or self.use_progression_info or self.use_trans:
self.embedding_size += self.text_embedding_size # 96
if self.dumb_ac:
# Define actor's model
self.actor = PolicyNetwork(self.embedding_size, self.action_space)
# Define critic's model
self.critic = nn.Sequential(
nn.Linear(self.embedding_size, 1)
)
else:
# Define actor's model
self.actor = PolicyNetwork(self.embedding_size, self.action_space, hiddens=[64, 64, 64], activation=nn.ReLU())
# Define critic's model
self.critic = nn.Sequential(
nn.Linear(self.embedding_size, 64),
nn.Tanh(),
nn.Linear(64, 64),
nn.Tanh(),
nn.Linear(64, 1)
)
# Initialize parameters correctly
self.apply(init_params)
if self.use_trans and args.TFixup:
self.ltl2transformer.init_by_TFixup(args)
def forward(self, obs):
embedding = self.env_model(obs) # shape = torch.Size([16, 64])
if self.use_progression_info:
embed_ltl = self.simple_encoder(obs.progress_info)
embedding = torch.cat((embedding, embed_ltl), dim=1) if embedding is not None else embed_ltl
# Adding Text
elif self.use_text:
embed_text = self.text_rnn(obs.text)
embedding = torch.cat((embedding, embed_text), dim=1) if embedding is not None else embed_text
# Adding GNN
elif self.use_ast:
embed_gnn = self.gnn(obs.text) # shape = torch.Size([16, 32])
embedding = torch.cat((embedding, embed_gnn), dim=1) if embedding is not None else embed_gnn # shape = torch.Size([16, 96])
elif self.use_trans:
embed_transformer = self.ltl2transformer(obs.text)
embedding = torch.cat((embedding, embed_transformer), dim=1) if embedding is not None else embed_transformer
# Actor
dist = self.actor(embedding)
# Critic
x = self.critic(embedding)
value = x.squeeze(1)
return dist, value
def load_pretrained_gnn(self, model_state):
# We delete all keys relating to the actor/critic.
new_model_state = model_state.copy()
for key in model_state.keys():
if key.find("actor") != -1 or key.find("critic") != -1: # ??? key.find()?
del new_model_state[key]
self.load_state_dict(new_model_state, strict=False)
if self.freeze_pretrained_params:
target = self.text_rnn if self.gnn_type == "GRU" or self.gnn_type == "LSTM" else self.gnn
for param in target.parameters():
param.requires_grad = False
class TransfomerSyn(nn.Module):
def __init__(self, obs_size, d_model, d_out, pool, args):
super(TransfomerSyn, self).__init__()
self.embedded = nn.Embedding(obs_size, args.d_model)
self.transformer = TransformerEncoderModel(d_model=args.d_model, nhead=args.nhead,
num_encoder_layers=args.num_encoder_layers,
pool=args.pool, dim_feedforward=args.dim_feedforward,
dropout=args.dropout, d_out=args.d_out,
layer_norm_eps=args.layer_norm_eps)
def forward(self, text):
embed_text = self.embedded(text)
feature = self.transformer(embed_text)
return feature
def init_by_TFixup(self, args): # todo:debug
# for k, v in self.transformer.named_parameters():
# print(k, v, v.shape)
for p in self.embedded.parameters():
if p.dim() > 1:
torch.nn.init.normal_(p, 0, args.d_model ** (- 1. / 2.))
temp_state_dic = {}
for name, param in self.embedded.named_parameters():
if 'weight' in name:
temp_state_dic[name] = ((9 * args.num_encoder_layers) ** (- 1. / 4.)) * param
for name in self.embedded.state_dict():
if name not in temp_state_dic:
temp_state_dic[name] = self.embedded.state_dict()[name]
self.embedded.load_state_dict(temp_state_dic)
temp_state_dic = {}
for name, param in self.transformer.named_parameters():
if any(s in name for s in ["linear1.weight", "linear2.weight", "self_attn.out_proj.weight"]):
temp_state_dic[name] = (0.67 * (args.num_encoder_layers) ** (- 1. / 4.)) * param
elif "self_attn.in_proj_weight" in name:
temp_state_dic[name] = (0.67 * (args.num_encoder_layers) ** (- 1. / 4.)) * (param * (2 ** 0.5))
for name in self.transformer.state_dict():
if name not in temp_state_dic:
temp_state_dic[name] = self.transformer.state_dict()[name]
self.transformer.load_state_dict(temp_state_dic)
class LSTMModel(nn.Module):
def __init__(self, obs_size, word_embedding_size=32, hidden_dim=32, text_embedding_size=32):
super().__init__()
# For all our experiments we want the embedding to be a fixed size so we can "transfer".
self.word_embedding = nn.Embedding(obs_size, word_embedding_size)
self.lstm = nn.LSTM(word_embedding_size, hidden_dim, num_layers=2, batch_first=True, bidirectional=True)
self.output_layer = nn.Linear(2*hidden_dim, text_embedding_size)
def forward(self, text):
hidden, _ = self.lstm(self.word_embedding(text))
return self.output_layer(hidden[:, -1, :])
class GRUModel(nn.Module):
def __init__(self, obs_size, word_embedding_size=32, hidden_dim=32, text_embedding_size=32):
super().__init__()
self.word_embedding = nn.Embedding(obs_size, word_embedding_size)
# word_embedding_size = 32, hidden_dim = 16
self.gru = nn.GRU(word_embedding_size, hidden_dim, num_layers=2, batch_first=True, bidirectional=True)
self.output_layer = nn.Linear(2*hidden_dim, text_embedding_size)
def forward(self, text):
# hidden_shape: [16, 9, 32] _shape: [4, 16, 16]
hidden, _ = self.gru(self.word_embedding(text))
return self.output_layer(hidden[:, -1, :])
class TransformerEncoderModel(nn.Module):
def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 1, pool: str = 'mean',
dim_feedforward: int = 2048, dropout: float = 0.1, d_out: int = 8, activation = F.relu,
layer_norm_eps: float = 1e-5, batch_first: bool = True, norm_first: bool = False):
"""
:param d_model: the number of expected features in the encoder/decoder inputs (default=512).
:param nhead: the number of heads in the multiheadattention models (default=8).
:param num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
:param dim_feedforward: the dimension of the feedforward network model (default=2048).
:param dropout: the dropout value (default=0.1).
:param activation: the activation function of encoder/decoder intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
:param layer_norm_eps: the eps value in layer normalization components (default=1e-5).
:param batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
:param norm_first: if ``True``, encoder and decoder layers will perform LayerNorms before
other attention and feedforward operations, otherwise after. Default: ``False``
        Examples::
            >>> model = TransformerEncoderModel(d_model=512, nhead=8, num_encoder_layers=2, d_out=16)
            >>> src = torch.rand(32, 10, 512)  # (batch, seq, feature) since batch_first=True
            >>> out = model(src)  # shape: (32, 16)
"""
super(TransformerEncoderModel, self).__init__()
self.d_model = d_model
self.nhead = nhead
self.batch_first = batch_first
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
activation, layer_norm_eps, batch_first, norm_first)
encoder_norm = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
self.pool = pool
self.to_latent = nn.Identity()
self.mlp_head = nn.Sequential(
nn.LayerNorm(d_model),
nn.Linear(d_model, d_out)
)
self._reset_parameters()
def forward(self, src, src_mask=None, src_key_padding_mask=None):
"""
Args:
src: the sequence to the encoder (required).
src_mask: the additive mask for the src sequence (optional).
src_key_padding_mask: the ByteTensor mask for src keys per batch (optional).
Shape:
- src: :math:`(S, N, E)`, `(N, S, E)` if batch_first.
- src_mask: :math:`(S, S)`.
- src_key_padding_mask: :math:`(N, S)`.
            where S is the source sequence length, N is the batch size, and E is the feature number
"""
memory = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask)
memory = memory.mean(dim=1) if self.pool == 'mean' else memory[:, -1, :]
memory = self.to_latent(memory)
memory = torch.tanh(self.mlp_head(memory))
return memory
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
torch.nn.init.xavier_uniform_(p)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
r"""TransformerEncoder is a stack of N encoder layers
Args:
encoder_layer: an instance of the TransformerEncoderLayer() class (required).
num_layers: the number of sub-encoder-layers in the encoder (required).
norm: the layer normalization component (optional).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
>>> src = torch.rand(10, 32, 512)
>>> out = transformer_encoder(src)
"""
super(TransformerEncoder, self).__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src, mask=None, src_key_padding_mask=None):
r"""Pass the input through the encoder layers in turn.
Args:
src: the sequence to the encoder (required).
mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
output = src
for mod in self.layers:
output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu,
layer_norm_eps=1e-5, batch_first=False, norm_first=False):
"""
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of the intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False``.
norm_first: if ``True``, layer norm is done prior to attention and feedforward
                operations, respectively. Otherwise it's done after. Default: ``False`` (after).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
>>> src = torch.rand(32, 10, 512)
>>> out = encoder_layer(src)
"""
super(TransformerEncoderLayer, self).__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm_first = norm_first
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = activation
def forward(self, src, src_mask=None, src_key_padding_mask=None):
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
x = src
if self.norm_first:
x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)
x = x + self._ff_block(self.norm2(x))
else:
x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))
x = self.norm2(x + self._ff_block(x))
return x
# self-attention block
def _sa_block(self, x, attn_mask, key_padding_mask):
x = self.self_attn(x, x, x,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False)[0]
return self.dropout1(x)
# feed forward block
def _ff_block(self, x):
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout2(x)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
| 22,185 | 42.247563 | 136 | py |
T2TL | T2TL-main/src/train_PreGNNAgent.py |
import argparse
import time
import datetime
import torch
import torch_ac
import tensorboardX
import sys
import glob
from math import floor
import utils
from model import ACModel
from recurrent_model import RecurrentACModel
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser()
## General parameters
parser.add_argument("--algo", default='ppo',
help="algorithm to use: a2c | ppo (REQUIRED)")
parser.add_argument("--env", default='Zones-25-v1',
help="name of the environment to train on (REQUIRED)")
parser.add_argument("--ltl-sampler", default="Until_1_2_1_1",
help="the ltl formula template to sample from (default: DefaultSampler)")
parser.add_argument("--model", default=None,
help="name of the model (default: {ENV}_{SAMPLER}_{ALGO}_{TIME})")
parser.add_argument("--seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--log-interval", type=int, default=1,
help="number of updates between two logs (default: 10)")
parser.add_argument("--save-interval", type=int, default=2,
help="number of updates between two saves (default: 10, 0 means no saving)")
parser.add_argument("--procs", type=int, default=16,
help="number of processes (default: 16)")
parser.add_argument("--frames", type=int, default=1*10**7,
help="number of frames of training (default: 2*10e8)")
parser.add_argument("--checkpoint-dir", default=None)
## Evaluation parameters
parser.add_argument("--eval", action="store_true", default=False,
help="evaluate the saved model (default: False)")
parser.add_argument("--eval-episodes", type=int, default=5,
help="number of episodes to evaluate on (default: 5)")
parser.add_argument("--eval-env", default=None,
help="name of the environment to train on (default: use the same \"env\" as training)")
parser.add_argument("--ltl-samplers-eval", default=None, nargs='+',
help="the ltl formula templates to sample from for evaluation (default: use the same \"ltl-sampler\" as training)")
parser.add_argument("--eval-procs", type=int, default=1,
help="number of processes (default: use the same \"procs\" as training)")
## Parameters for main algorithm
parser.add_argument("--epochs", type=int, default=10,
help="number of epochs for PPO (default: 4)")
parser.add_argument("--batch-size", type=int, default=1024,
help="batch size for PPO (default: 256)")
parser.add_argument("--frames-per-proc", type=int, default=4096,
help="number of frames per process before update (default: 5 for A2C and 128 for PPO)")
parser.add_argument("--discount", type=float, default=0.998,
help="discount factor (default: 0.99)")
parser.add_argument("--lr", type=float, default=0.0003,
help="learning rate (default: 0.0003)")
parser.add_argument("--gae-lambda", type=float, default=0.95,
help="lambda coefficient in GAE formula (default: 0.95, 1 means no gae)")
parser.add_argument("--entropy-coef", type=float, default=0.003,
help="entropy term coefficient (default: 0.01)")
parser.add_argument("--value-loss-coef", type=float, default=0.5,
help="value loss term coefficient (default: 0.5)")
parser.add_argument("--max-grad-norm", type=float, default=0.5,
help="maximum norm of gradient (default: 0.5)")
parser.add_argument("--optim-eps", type=float, default=1e-8,
help="Adam and RMSprop optimizer epsilon (default: 1e-8)")
parser.add_argument("--optim-alpha", type=float, default=0.99,
help="RMSprop optimizer alpha (default: 0.99)")
parser.add_argument("--clip-eps", type=float, default=0.2,
help="clipping epsilon for PPO (default: 0.2)")
parser.add_argument("--ignoreLTL", action="store_true", default=False,
help="the network ignores the LTL input")
parser.add_argument("--noLTL", action="store_true", default=False,
help="the environment no longer has an LTL goal. --ignoreLTL must be specified concurrently.")
parser.add_argument("--progression-mode", default="full",
help="Full: uses LTL progression; partial: shows the propositions which progress or falsify the formula; none: only original formula is seen. ")
parser.add_argument("--recurrence", type=int, default=1,
help="number of time-steps gradient is backpropagated (default: 1). If > 1, a LSTM is added to the model to have memory.")
parser.add_argument("--gnn", default="RGCN_8x32_ROOT_SHARED", help="use gnn to model the LTL (only if ignoreLTL==True)")
parser.add_argument("--trans_layer", type=int, default=1, help="the number of Transformer layers need to use")
parser.add_argument("--int-reward", type=float, default=0.0, help="the intrinsic reward for LTL progression (default: 0.0)")
parser.add_argument("--pretrained-gnn", action="store_true", default=True, help="load a pre-trained LTL module.")
parser.add_argument("--dumb-ac", action="store_true", default=False, help="Use a single-layer actor-critic")
parser.add_argument("--freeze-ltl", action="store_true", default=False, help="Freeze the gradient updates of the LTL module")
# Transformer special parameters
parser.add_argument("--d_model", type=int, default=64, help="")
parser.add_argument("--nhead", type=int, default=8, help="")
parser.add_argument("--num_encoder_layers", type=int, default=2, help="")
parser.add_argument("--pool", type=str, default='mean', help="")
parser.add_argument("--dim_feedforward", type=int, default=256, help="")
parser.add_argument("--dropout", type=float, default=0.0, help="")
parser.add_argument("--d_out", type=int, default=16, help="")
parser.add_argument("--layer_norm_eps", type=float, default=1e-5, help="")
parser.add_argument("--cuda", type=str, default='cuda:0', help="")
    # additional description for test
parser.add_argument("--sth", type=str, default='None', help="")
args = parser.parse_args()
    use_mem = args.recurrence > 1  # whether to use a recurrent (memory) model
# Set run dir
date = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S") # '21-08-21-22-36-39'
gnn_name = args.gnn
if args.ignoreLTL:
gnn_name = "IgnoreLTL"
if args.dumb_ac:
gnn_name = gnn_name + "-dumb_ac"
if args.pretrained_gnn:
gnn_name = gnn_name + "-pretrained"
if args.freeze_ltl:
gnn_name = gnn_name + "-freeze_ltl"
if use_mem:
gnn_name = gnn_name + "-recurrence:%d"%(args.recurrence)
if args.gnn == 'Transformer':
default_model_name = f"{gnn_name}_{args.ltl_sampler}_{args.env}_seed:{args.seed}_n_encoder:{args.num_encoder_layers}_nhead:{args.nhead}_d_model:{args.d_model}_dim_feedforward:{args.dim_feedforward}_dropout:{args.dropout}_sth:{args.sth}"
else:
# 'RGCN_8x32_ROOT_SHARED_Until_1_2_1_1_Zones-5-v0_seed:1_epochs:10_bs:2048_fpp:4096_dsc:0.998_lr:0.0003_ent:0.003_clip:0.2_prog:full'
default_model_name = f"{gnn_name}_{args.ltl_sampler}_{args.env}_seed:{args.seed}_epochs:{args.epochs}_bs:{args.batch_size}_fpp:{args.frames_per_proc}_dsc:{args.discount}_lr:{args.lr}_ent:{args.entropy_coef}_clip:{args.clip_eps}_prog:{args.progression_mode}"
model_name = args.model or default_model_name
storage_dir = "storage" if args.checkpoint_dir is None else args.checkpoint_dir
model_dir = utils.get_model_dir(model_name, storage_dir)
pretrained_model_dir = None
if args.pretrained_gnn:
assert(args.progression_mode == "full")
default_dir = f"../src/pretrain/{args.gnn}-dumb_ac_*_Simple-LTL-Env-v0_seed:{args.seed}_*_prog:{args.progression_mode}_*/train"
# default_dir = f"../src/pretrain/{args.gnn}-dumb_ac_*_Simple-LTL-Env-v0_seed:{args.seed}_*_d_out:{args.d_out}/train"
print(default_dir)
model_dirs = glob.glob(default_dir)
if len(model_dirs) == 0:
raise Exception("Pretraining directory not found.")
elif len(model_dirs) > 1:
raise Exception("More than 1 candidate pretraining directory found.")
pretrained_model_dir = model_dirs[0]
# Load loggers and Tensorboard writer
txt_logger = utils.get_txt_logger(model_dir + "/train")
csv_file, csv_logger = utils.get_csv_logger(model_dir + "/train")
tb_writer = tensorboardX.SummaryWriter(model_dir + "/train")
utils.save_config(model_dir + "/train", args)
# Log command and all script arguments
txt_logger.info("{}\n".format(" ".join(sys.argv)))
txt_logger.info("{}\n".format(args)) # It will output the context of Namespace
# Set seed for all randomness sources
utils.seed(args.seed)
# Set device
device = torch.device(args.cuda if torch.cuda.is_available() else "cpu")
# device = torch.device('cpu')
txt_logger.info(f"Device: {device}\n") # Output the device (default is cpu)
# Load environments
envs = []
progression_mode = args.progression_mode
for i in range(args.procs): # load the env & progression_mode & LTL formula
# turn to utils/env.py
envs.append(utils.make_env(args.env, progression_mode, args.ltl_sampler, args.seed, args.int_reward, args.noLTL))
# Sync environments
envs[0].reset() # Add the agent to map & translate the LTL formula
txt_logger.info("Environments loaded\n")
# Load training status
try:
status = utils.get_status(model_dir + "/train", args)
except OSError:
status = {"num_frames": 0, "update": 0} # ??? the state of algorithm ?
txt_logger.info("Training status loaded.\n")
if pretrained_model_dir is not None:
try:
pretrained_status = utils.get_status(pretrained_model_dir, args)
except:
txt_logger.info("Failed to load pretrained model.\n")
exit(1)
# Load observations preprocessor-- build AST
using_gnn = (args.gnn != "GRU" and args.gnn != "LSTM" and args.gnn != "Transformer")
# turn to env/format.py
obs_space, preprocess_obss = utils.get_obss_preprocessor(envs[0], using_gnn, progression_mode)
if "vocab" in status and preprocess_obss.vocab is not None:
preprocess_obss.vocab.load_vocab(status["vocab"])
txt_logger.info("Observations preprocessor loaded.\n")
# Load model
if use_mem:
acmodel = RecurrentACModel(envs[0].env, obs_space, envs[0].action_space, args.ignoreLTL, args.gnn, args.dumb_ac, args.freeze_ltl)
else:
acmodel = ACModel(envs[0].env, obs_space, envs[0].action_space, args.ignoreLTL, args.gnn, args.dumb_ac, args.freeze_ltl, args)
if "model_state" in status:
acmodel.load_state_dict(status["model_state"])
txt_logger.info("Loading model from existing run.\n")
elif args.pretrained_gnn:
acmodel.load_pretrained_gnn(pretrained_status["model_state"])
txt_logger.info("Pretrained model loaded.\n")
acmodel.to(device)
txt_logger.info("Model loaded.\n")
txt_logger.info("{}\n".format(acmodel))
# Load algo
if args.algo == "a2c":
algo = torch_ac.A2CAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
args.optim_alpha, args.optim_eps, preprocess_obss)
elif args.algo == "ppo":
algo = torch_ac.PPOAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
args.optim_eps, args.clip_eps, args.epochs, args.batch_size, preprocess_obss)
else:
raise ValueError("Incorrect algorithm name: {}".format(args.algo))
if "optimizer_state" in status:
algo.optimizer.load_state_dict(status["optimizer_state"])
txt_logger.info("Loading optimizer from existing run.\n")
txt_logger.info("Optimizer loaded.\n")
# init the evaluator
if args.eval:
eval_samplers = args.ltl_samplers_eval if args.ltl_samplers_eval else [args.ltl_sampler]
eval_env = args.eval_env if args.eval_env else args.env
eval_procs = args.eval_procs if args.eval_procs else args.procs
evals = []
for eval_sampler in eval_samplers:
evals.append(utils.Eval(eval_env, model_name, eval_sampler,
seed=args.seed, device=device, num_procs=eval_procs, ignoreLTL=args.ignoreLTL, progression_mode=progression_mode, gnn=args.gnn, dumb_ac = args.dumb_ac))
# Train model
num_frames = status["num_frames"] # num_frames:0
update = status["update"] # update:0
start_time = time.time()
while num_frames < args.frames:
# Update model parameters
update_start_time = time.time()
        exps, logs1 = algo.collect_experiences()  # interact with the environments (very important)
        logs2 = algo.update_parameters(exps)
logs = {**logs1, **logs2}
update_end_time = time.time()
num_frames += logs["num_frames"]
update += 1
# Print logs
if update % args.log_interval == 0:
fps = logs["num_frames"]/(update_end_time - update_start_time)
duration = int(time.time() - start_time)
return_per_episode = utils.synthesize(logs["return_per_episode"])
rreturn_per_episode = utils.synthesize(logs["reshaped_return_per_episode"])
average_reward_per_step = utils.average_reward_per_step(logs["return_per_episode"], logs["num_frames_per_episode"])
average_discounted_return = utils.average_discounted_return(logs["return_per_episode"], logs["num_frames_per_episode"], args.discount)
num_frames_per_episode = utils.synthesize(logs["num_frames_per_episode"])
header = ["update", "frames", "FPS", "duration"]
data = [update, num_frames, fps, duration]
header += ["rreturn_" + key for key in rreturn_per_episode.keys()]
data += rreturn_per_episode.values()
header += ["average_reward_per_step", "average_discounted_return"]
data += [average_reward_per_step, average_discounted_return]
header += ["num_frames_" + key for key in num_frames_per_episode.keys()]
data += num_frames_per_episode.values()
header += ["entropy", "value", "policy_loss", "value_loss", "grad_norm"]
data += [logs["entropy"], logs["value"], logs["policy_loss"], logs["value_loss"], logs["grad_norm"]]
txt_logger.info(
"U {} | F {:06} | FPS {:04.0f} | D {} | rR:μσmM {:.2f} {:.2f} {:.2f} {:.2f} | ARPS: {:.3f} | ADR: {:.3f} | F:μσmM {:.1f} {:.1f} {} {} | H {:.3f} | V {:.3f} | pL {:.3f} | vL {:.3f} | ∇ {:.3f}"
.format(*data))
header += ["return_" + key for key in return_per_episode.keys()]
data += return_per_episode.values()
if status["num_frames"] == 0:
csv_logger.writerow(header)
csv_logger.writerow(data)
csv_file.flush()
for field, value in zip(header, data):
tb_writer.add_scalar(field, value, num_frames)
# Save status
if args.save_interval > 0 and update % args.save_interval == 0:
status = {"num_frames": num_frames, "update": update,
"model_state": algo.acmodel.state_dict(), "optimizer_state": algo.optimizer.state_dict()}
if hasattr(preprocess_obss, "vocab") and preprocess_obss.vocab is not None:
status["vocab"] = preprocess_obss.vocab.vocab
utils.save_status(status, model_dir + "/train")
txt_logger.info("Status saved")
if args.eval:
# we send the num_frames to align the eval curves with the training curves on TB
for evalu in evals:
evalu.eval(num_frames, episodes=args.eval_episodes)
| 16,595 | 49.443769 | 265 | py |
T2TL | T2TL-main/src/transEncoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
class ContextTransformer(nn.Module):
def __init__(self, obs_size, obsr_dim, d_model, d_out, pool, args, context=False):
super(ContextTransformer, self).__init__()
self.context = context
self.obsr_dim = obsr_dim
self.action_dim = 2 # action dim
self.device = torch.device(args.device)
if self.context:
self.embed_linear = nn.Linear(obs_size, args.cont_d_model//2)
self.transformer = TransformerEncoderModel(d_model=args.cont_d_model//2, nhead=args.cont_nhead,
num_encoder_layers=args.cont_num_encoder_layers//2,
pool=args.cont_pool, dim_feedforward=args.cont_dim_feedforward//2,
dropout=args.dropout, d_out=args.cont_d_out,
layer_norm_eps=args.layer_norm_eps)
else:
self.embedded = nn.Embedding(obs_size, args.d_model)
self.transformer = TransformerEncoderModel(d_model=args.d_model, nhead=args.nhead,
num_encoder_layers=args.num_encoder_layers,
pool=args.pool, dim_feedforward=args.dim_feedforward,
dropout=args.dropout, d_out=args.d_out,
layer_norm_eps=args.layer_norm_eps)
def forward(self, data):
previous_action, previous_reward, pre_x = data[0], data[1], data[2]
bsize, dim = previous_action.shape # previous_action is B* (history_len * D)
pacts = previous_action.view(bsize, -1, self.action_dim) # view(bsize, self.hist_length, -1)
prews = previous_reward.view(bsize, -1, 1) # reward dim is 1, view(bsize, self.hist_length, 1)
pxs = pre_x.view(bsize, -1, self.obsr_dim) # view(bsize, self.hist_length, -1)
pre_act_rew = torch.cat([pacts, prews, pxs], dim=-1).to(self.device)
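        # pre_act_rew: (batch, history_len, action_dim + 1 + obsr_dim)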
if self.context:
embed_text = self.embed_linear(pre_act_rew)
        else:
            # non-context mode: `data` is expected to be a batch of token ids
            embed_text = self.embedded(data)
feature = self.transformer(embed_text)
return feature
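    # T-Fixup-style initialization (the scaling below appears to follow
    # Huang et al., 2020, "Improving Transformer Optimization Through Better
    # Initialization"): embedding weights are drawn from N(0, d_model^(-1/2))
    # and the encoder's attention/FFN weights are shrunk by ~N^(-1/4) so that
    # the model trains stably without a learning-rate warmup.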
def init_by_TFixup(self, args):
for p in self.embed_linear.parameters():
if p.dim() > 1:
torch.nn.init.normal_(p, 0, args.cont_d_model ** (- 1. / 2.))
temp_state_dic = {}
for name, param in self.embed_linear.named_parameters():
if 'weight' in name:
temp_state_dic[name] = ((9 * args.cont_num_encoder_layers) ** (- 1. / 4.)) * param
for name in self.embed_linear.state_dict():
if name not in temp_state_dic:
temp_state_dic[name] = self.embed_linear.state_dict()[name]
self.embed_linear.load_state_dict(temp_state_dic)
temp_state_dic = {}
for name, param in self.transformer.named_parameters():
if any(s in name for s in ["linear1.weight", "linear2.weight", "self_attn.out_proj.weight"]):
temp_state_dic[name] = (0.67 * (args.cont_num_encoder_layers) ** (- 1. / 4.)) * param
elif "self_attn.in_proj_weight" in name:
temp_state_dic[name] = (0.67 * (args.cont_num_encoder_layers) ** (- 1. / 4.)) * (param * (2 ** 0.5))
for name in self.transformer.state_dict():
if name not in temp_state_dic:
temp_state_dic[name] = self.transformer.state_dict()[name]
self.transformer.load_state_dict(temp_state_dic)
class LSTMModel(nn.Module):
def __init__(self, obs_size, word_embedding_size=32, hidden_dim=32, text_embedding_size=32):
super().__init__()
# For all our experiments we want the embedding to be a fixed size so we can "transfer".
self.word_embedding = nn.Embedding(obs_size, word_embedding_size)
self.lstm = nn.LSTM(word_embedding_size, hidden_dim, num_layers=2, batch_first=True, bidirectional=True)
self.output_layer = nn.Linear(2 * hidden_dim, text_embedding_size)
def forward(self, text):
hidden, _ = self.lstm(self.word_embedding(text))
return self.output_layer(hidden[:, -1, :])
class GRUModel(nn.Module):
def __init__(self, obs_size, word_embedding_size=32, hidden_dim=32, text_embedding_size=32):
super().__init__()
self.word_embedding = nn.Embedding(obs_size, word_embedding_size)
# word_embedding_size = 32, hidden_dim = 16
self.gru = nn.GRU(word_embedding_size, hidden_dim, num_layers=2, batch_first=True, bidirectional=True)
self.output_layer = nn.Linear(2 * hidden_dim, text_embedding_size)
def forward(self, text):
# hidden_shape: [16, 9, 32] _shape: [4, 16, 16]
hidden, _ = self.gru(self.word_embedding(text))
return self.output_layer(hidden[:, -1, :])
class TransformerEncoderModel(nn.Module):
def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 1, pool: str = 'mean',
dim_feedforward: int = 2048, dropout: float = 0.1, d_out: int = 8, activation=F.relu,
layer_norm_eps: float = 1e-5, batch_first: bool = True, norm_first: bool = False):
"""
:param d_model: the number of expected features in the encoder/decoder inputs (default=512).
:param nhead: the number of heads in the multiheadattention models (default=8).
        :param num_encoder_layers: the number of sub-encoder-layers in the encoder (default=1).
:param dim_feedforward: the dimension of the feedforward network model (default=2048).
:param dropout: the dropout value (default=0.1).
:param activation: the activation function of encoder/decoder intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
:param layer_norm_eps: the eps value in layer normalization components (default=1e-5).
        :param batch_first: If ``True``, then the input and output tensors are provided
            as (batch, seq, feature). Default: ``True``.
:param norm_first: if ``True``, encoder and decoder layers will perform LayerNorms before
other attention and feedforward operations, otherwise after. Default: ``False``
        Examples::
            >>> model = TransformerEncoderModel(d_model=512, nhead=8, num_encoder_layers=2)
            >>> src = torch.rand((10, 32, 512))  # (batch, seq, feature) since batch_first=True
            >>> out = model(src)  # (batch, d_out)
"""
super(TransformerEncoderModel, self).__init__()
self.d_model = d_model
self.nhead = nhead
self.batch_first = batch_first
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
activation, layer_norm_eps, batch_first, norm_first)
encoder_norm = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
self.pool = pool
self.to_latent = nn.Identity()
self.mlp_head = nn.Sequential(
nn.LayerNorm(d_model),
nn.Linear(d_model, d_out)
)
self._reset_parameters()
def forward(self, src, src_mask=None, src_key_padding_mask=None):
"""
Args:
src: the sequence to the encoder (required).
src_mask: the additive mask for the src sequence (optional).
src_key_padding_mask: the ByteTensor mask for src keys per batch (optional).
Shape:
- src: :math:`(S, N, E)`, `(N, S, E)` if batch_first.
- src_mask: :math:`(S, S)`.
- src_key_padding_mask: :math:`(N, S)`.
            where S is the source sequence length, N is the batch size, and E is the
            feature number
"""
memory = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask)
memory = memory.mean(dim=1) if self.pool == 'mean' else memory[:, -1, :]
memory = self.to_latent(memory)
memory = torch.tanh(self.mlp_head(memory))
return memory
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
torch.nn.init.xavier_uniform_(p)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
r"""TransformerEncoder is a stack of N encoder layers
Args:
encoder_layer: an instance of the TransformerEncoderLayer() class (required).
num_layers: the number of sub-encoder-layers in the encoder (required).
norm: the layer normalization component (optional).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
>>> src = torch.rand(10, 32, 512)
>>> out = transformer_encoder(src)
"""
super(TransformerEncoder, self).__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src, mask=None, src_key_padding_mask=None):
r"""Pass the input through the encoder layers in turn.
Args:
src: the sequence to the encoder (required).
mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
output = src
for mod in self.layers:
output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu,
layer_norm_eps=1e-5, batch_first=True, norm_first=False):
"""
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of the intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
            batch_first: If ``True``, then the input and output tensors are provided
                as (batch, seq, feature). Default: ``True``.
            norm_first: if ``True``, layer norm is done prior to attention and feedforward
                operations, respectively. Otherwise it's done after. Default: ``False`` (after).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
>>> src = torch.rand(32, 10, 512)
>>> out = encoder_layer(src)
"""
super(TransformerEncoderLayer, self).__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm_first = norm_first
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = activation
def forward(self, src, src_mask=None, src_key_padding_mask=None):
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
x = src
if self.norm_first:
x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)
x = x + self._ff_block(self.norm2(x))
else:
x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))
x = self.norm2(x + self._ff_block(x))
return x
# self-attention block
def _sa_block(self, x, attn_mask, key_padding_mask):
x = self.self_attn(x, x, x,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False)[0]
return self.dropout1(x)
# feed forward block
def _ff_block(self, x):
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout2(x)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
class Context(nn.Module):
"""
This layer just does non-linear transformation(s)
"""
def __init__(self,
hidden_sizes=[50],
input_dim=None,
history_length=1,
action_dim=None,
obsr_dim=None,
device='cpu'
):
super(Context, self).__init__()
self.hidden_sizes = hidden_sizes
self.input_dim = input_dim
self.hist_length = history_length
self.device = device
self.action_dim = action_dim
self.obsr_dim = obsr_dim
self.recurrent = nn.GRU(self.input_dim,
self.hidden_sizes[0],
bidirectional=False,
batch_first=True,
num_layers=1)
def init_recurrent(self, bsize=None):
        '''
        Initialize the GRU hidden state.
        `bsize` (the batch size) must not be None.
        '''
# The order is (num_layers, minibatch_size, hidden_dim)
# LSTM ==> return (torch.zeros(1, bsize, self.hidden_sizes[0]),
# torch.zeros(1, bsize, self.hidden_sizes[0]))
return torch.zeros(1, bsize, self.hidden_sizes[0]).to(self.device)
def forward(self, data):
'''
        pre_x : B * D where B is the batch size and D is the observation dim
        pre_a : B * A where B is the batch size and A is the action dim
        previous_reward : B * 1 where B is the batch size
'''
previous_action, previous_reward, pre_x = data[0], data[1], data[2]
        # first prepare data for the GRU
bsize, dim = previous_action.shape # previous_action is B* (history_len * D)
pacts = previous_action.view(bsize, -1, self.action_dim) # view(bsize, self.hist_length, -1)
prews = previous_reward.view(bsize, -1, 1) # reward dim is 1, view(bsize, self.hist_length, 1)
pxs = pre_x.view(bsize, -1, self.obsr_dim) # view(bsize, self.hist_length, -1)
        pre_act_rew = torch.cat([pacts, prews, pxs], dim=-1)  # input to the GRU is [action, reward, obs]
# init lstm/gru
hidden = self.init_recurrent(bsize=bsize)
# lstm/gru
_, hidden = self.recurrent(pre_act_rew, hidden) # hidden is (1, B, hidden_size)
out = hidden.squeeze(0) # (1, B, hidden_size) ==> (B, hidden_size)
return out
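if __name__ == "__main__":
    # Hedged smoke test (not part of the original module): runs a tiny encoder
    # on random data with assumed sizes to illustrate the expected shapes.
    model = TransformerEncoderModel(d_model=32, nhead=4, num_encoder_layers=2,
                                    pool='mean', dim_feedforward=64,
                                    dropout=0.0, d_out=8)
    src = torch.rand(16, 10, 32)  # (batch, seq, d_model) since batch_first=True
    print(model(src).shape)       # torch.Size([16, 8])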
| 15,896 | 43.90678 | 121 | py |
T2TL | T2TL-main/src/test_safety.py | import argparse
import time
import sys
import numpy as np
import glfw
import utils
import torch
import gym
import safety_gym
import ltl_wrappers
import ltl_progression
from gym import wrappers, logger
from envs.safety import safety_wrappers
class RandomAgent(object):
"""This agent picks actions randomly"""
def __init__(self, action_space):
self.action_space = action_space
def get_action(self, obs):
return self.action_space.sample()
class PlayAgent(object):
"""
This agent allows user to play with Safety's Point agent.
Use the UP and DOWN arrows to move forward and back and
use '<' and '>' to rotate the agent.
"""
def __init__(self, env):
self.env = env
self.action_space = env.action_space
self.prev_act = np.array([0, 0])
self.last_obs = None
def get_action(self, obs):
# obs = obs["features"]
key = self.env.key_pressed
if(key == glfw.KEY_COMMA):
current = np.array([0, 0.4])
elif(key == glfw.KEY_PERIOD):
current = np.array([0, -0.4])
elif(key == glfw.KEY_UP):
current = np.array([0.1, 0])
elif(key == glfw.KEY_DOWN):
current = np.array([-0.1, 0])
elif(key == -1): # This is glfw.RELEASE
current = np.array([0, 0])
self.prev_act = np.array([0, 0])
else:
current = np.array([0, 0])
self.prev_act = np.clip(self.prev_act + current, -1, 1)
return self.prev_act
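    # (From the key mapping above, the two action dimensions appear to be
    # forward/backward thrust and turning rate, accumulated across key presses
    # and clipped to [-1, 1].)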
def run_policy(agent, env, max_ep_len=None, num_episodes=100, render=True):
env = wrappers.Monitor(env, directory=outdir, force=True)
    env.seed(1)  # fixed seed for reproducibility
o, r, d, ep_ret, ep_cost, ep_len, n = env.reset(), 0, False, 0, 0, 0, 0
while n < num_episodes:
if render:
env.render()
time.sleep(1e-3)
ltl_goal = ltl_progression.spotify(env.ltl_goal)
env.show_text(ltl_goal.to_str())
if("progress_info" in o.keys()):
env.show_prog_info(o["progress_info"])
a = agent.get_action(o)
a = np.clip(a, env.action_space.low, env.action_space.high)
o, r, d, info = env.step(a)
ep_ret += r
ep_cost += info.get('cost', 0)
ep_len += 1
if d or (ep_len == max_ep_len):
o, r, d, ep_ret, ep_cost, ep_len = env.reset(), 0, False, 0, 0, 0
n += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=None)
subparsers = parser.add_subparsers(dest='command')
parser.add_argument('env_id', default='SafexpTest-v0', help='Select the environment to run')
parser_play = subparsers.add_parser('play', help='A playable agent that can be controlled.')
parser_random = subparsers.add_parser('random', help='An agent that picks actions at random (for testing).')
parser_viz = subparsers.add_parser('viz', help='Load the agent model from a file and visualize its action on the env.')
parser_viz.add_argument('model_path', type=str, help='The path to the model to load.')
parser_viz.add_argument("--ltl-sampler", default="Default",
help="the ltl formula template to sample from (default: DefaultSampler)")
args = vars(parser.parse_args()) # make it a dictionary
outdir = './storage/random-agent-results'
if (args["command"] == "play"):
env = gym.make(args["env_id"])
env.num_steps = 10000000
env = safety_wrappers.Play(env)
env = ltl_wrappers.LTLEnv(env, ltl_sampler="Default")
agent = PlayAgent(env)
elif (args["command"] == "random"):
env = gym.make(args["env_id"])
env.num_steps = 10000
env = safety_wrappers.Play(env)
env = ltl_wrappers.LTLEnv(env, ltl_sampler="Default")
agent = RandomAgent(env.action_space)
elif (args["command"] == "viz"):
        # If the config is available (from training) then just load it here instead of asking the user of this script to provide all training-time configs
config = vars(utils.load_config(args["model_path"]))
args.update(config)
env = gym.make(args["env_id"])
env = safety_wrappers.Play(env)
env = ltl_wrappers.LTLEnv(env, ltl_sampler=args["ltl_sampler"], progression_mode=args["progression_mode"])
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
agent = utils.Agent(env, env.observation_space, env.action_space, args["model_path"],
args["ignoreLTL"], args["progression_mode"], args["gnn"], device=device, dumb_ac=args["dumb_ac"])
else:
print("Incorrect command: ", args["command"])
exit(1)
run_policy(agent, env, max_ep_len=30000, num_episodes=1000)
| 4,800 | 33.292857 | 153 | py |
T2TL | T2TL-main/src/recurrent_model.py | """
This is the description of the deep NN currently being used.
It is a small CNN for the features with a GRU encoding of the LTL task.
The features and LTL are preprocessed by utils.format.get_obss_preprocessor(...) function:
- In that function, I transformed the LTL tuple representation into a text representation:
- Input: ('until',('not','a'),('and', 'b', ('until',('not','c'),'d')))
- output: ['until', 'not', 'a', 'and', 'b', 'until', 'not', 'c', 'd']
Each of those tokens gets a one-hot embedding representation from the utils.format.Vocabulary class.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
import torch_ac
from gym.spaces import Box, Discrete
from gnns.graphs.GCN import *
from gnns.graphs.GNN import GNNMaker
from env_model import getEnvModel
from policy_network import PolicyNetwork
from model import LSTMModel, GRUModel, init_params
class RecurrentACModel(nn.Module, torch_ac.RecurrentACModel):
def __init__(self, env, obs_space, action_space, ignoreLTL, gnn_type, dumb_ac, freeze_ltl):
super().__init__()
# Decide which components are enabled
self.use_progression_info = "progress_info" in obs_space
self.use_text = not ignoreLTL and (gnn_type == "GRU" or gnn_type == "LSTM") and "text" in obs_space
self.use_ast = not ignoreLTL and ("GCN" in gnn_type) and "text" in obs_space
self.gnn_type = gnn_type
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.action_space = action_space
self.dumb_ac = dumb_ac
self.freeze_pretrained_params = freeze_ltl
if self.freeze_pretrained_params:
print("Freezing the LTL module.")
self.env_model = getEnvModel(env, obs_space)
# Define text embedding
if self.use_progression_info:
self.text_embedding_size = 32
self.simple_encoder = nn.Sequential(
nn.Linear(obs_space["progress_info"], 64),
nn.Tanh(),
nn.Linear(64, self.text_embedding_size),
nn.Tanh()
).to(self.device)
print("Linear encoder Number of parameters:", sum(p.numel() for p in self.simple_encoder.parameters() if p.requires_grad))
elif self.use_text:
self.word_embedding_size = 32
self.text_embedding_size = 32
if self.gnn_type == "GRU":
self.text_rnn = GRUModel(obs_space["text"], self.word_embedding_size, 16, self.text_embedding_size).to(self.device)
else:
assert(self.gnn_type == "LSTM")
self.text_rnn = LSTMModel(obs_space["text"], self.word_embedding_size, 16, self.text_embedding_size).to(self.device)
print("RNN Number of parameters:", sum(p.numel() for p in self.text_rnn.parameters() if p.requires_grad))
elif self.use_ast:
hidden_dim = 32
self.text_embedding_size = 32
self.gnn = GNNMaker(self.gnn_type, obs_space["text"], self.text_embedding_size).to(self.device)
print("GNN Number of parameters:", sum(p.numel() for p in self.gnn.parameters() if p.requires_grad))
# Memory specific code.
self.image_embedding_size = self.env_model.size()
self.memory_rnn = nn.LSTMCell(self.image_embedding_size, self.semi_memory_size)
self.embedding_size = self.semi_memory_size
print("embedding size:", self.embedding_size)
if self.use_text or self.use_ast or self.use_progression_info:
self.embedding_size += self.text_embedding_size
if self.dumb_ac:
# Define actor's model
self.actor = PolicyNetwork(self.embedding_size, self.action_space)
# Define critic's model
self.critic = nn.Sequential(
nn.Linear(self.embedding_size, 1)
)
else:
# Define actor's model
self.actor = PolicyNetwork(self.embedding_size, self.action_space, hiddens=[64, 64, 64], activation=nn.ReLU())
# Define critic's model
self.critic = nn.Sequential(
nn.Linear(self.embedding_size, 64),
nn.Tanh(),
nn.Linear(64, 64),
nn.Tanh(),
nn.Linear(64, 1)
)
# Initialize parameters correctly
self.apply(init_params)
@property
def memory_size(self):
return 2*self.semi_memory_size
@property
def semi_memory_size(self):
return self.image_embedding_size
def forward(self, obs, memory):
x = self.env_model(obs)
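        # `memory` packs the LSTM hidden and cell states side by side:
        # memory = [h | c], each half of size semi_memory_size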
hidden = (memory[:, :self.semi_memory_size], memory[:, self.semi_memory_size:])
hidden = self.memory_rnn(x, hidden)
embedding = hidden[0]
memory = torch.cat(hidden, dim=1)
if self.use_progression_info:
embed_ltl = self.simple_encoder(obs.progress_info)
embedding = torch.cat((embedding, embed_ltl), dim=1) if embedding is not None else embed_ltl
# Adding Text
elif self.use_text:
embed_text = self.text_rnn(obs.text)
embedding = torch.cat((embedding, embed_text), dim=1) if embedding is not None else embed_text
# Adding GNN
elif self.use_ast:
embed_gnn = self.gnn(obs.text)
embedding = torch.cat((embedding, embed_gnn), dim=1) if embedding is not None else embed_gnn
# Actor
dist = self.actor(embedding)
# Critic
x = self.critic(embedding)
value = x.squeeze(1)
return dist, value, memory
def load_pretrained_gnn(self, model_state):
# We delete all keys relating to the actor/critic.
new_model_state = model_state.copy()
for key in model_state.keys():
if key.find("actor") != -1 or key.find("critic") != -1:
del new_model_state[key]
self.load_state_dict(new_model_state, strict=False)
if self.freeze_pretrained_params:
target = self.text_rnn if self.gnn_type == "GRU" or self.gnn_type == "LSTM" else self.gnn
for param in target.parameters():
param.requires_grad = False
| 6,302 | 37.2 | 134 | py |
T2TL | T2TL-main/src/policy_network.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
from gym.spaces import Box, Discrete
class PolicyNetwork(nn.Module):
def __init__(self, in_dim, action_space, hiddens=[], scales=None, activation=nn.Tanh()):
super().__init__()
        layer_dims = [in_dim] + hiddens  # e.g. [96, 64, 64, 64]
self.action_space = action_space
self.num_layers = len(layer_dims)
self.enc_ = nn.Sequential(*[fc(in_dim, out_dim, activation=activation)
for (in_dim, out_dim) in zip(layer_dims, layer_dims[1:])])
if (isinstance(self.action_space, Discrete)):
action_dim = self.action_space.n
self.discrete_ = nn.Sequential(
nn.Linear(layer_dims[-1], action_dim)
)
elif (isinstance(self.action_space, Box)):
action_dim = self.action_space.shape[0]
self.mu_ = nn.Sequential(
fc(layer_dims[-1], action_dim)
)
self.std_ = nn.Sequential(
fc(layer_dims[-1], action_dim)
)
self.softplus = nn.Softplus()
# self.scales = [1] * action_dim if scales==None else scales
else:
print("Unsupported action_space type: ", self.action_space)
exit(1)
def forward(self, obs):
if (isinstance(self.action_space, Discrete)):
x = self.enc_(obs)
x = self.discrete_(x)
return Categorical(logits=F.log_softmax(x, dim=1))
elif (isinstance(self.action_space, Box)):
x = self.enc_(obs)
mu = 2 * self.mu_(x)# * self.scales
std = self.softplus(self.std_(x)) + 1e-3
return Normal(mu, std)
else:
print("Unsupported action_space type: ", self.action_space)
exit(1)
def fc(in_dim, out_dim, activation=nn.Tanh()):
return nn.Sequential(
nn.Linear(in_dim, out_dim),
activation
)
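if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): exercises the
    # continuous-action branch with assumed dimensions.
    import numpy as np
    space = Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)
    policy = PolicyNetwork(in_dim=96, action_space=space, hiddens=[64, 64, 64])
    dist = policy(torch.zeros(16, 96))  # Normal with batch shape (16, 2)
    print(dist.sample().shape)          # torch.Size([16, 2])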
| 2,026 | 33.355932 | 92 | py |
T2TL | T2TL-main/src/T1TL.py |
import argparse
import time
import datetime
import torch
import torch_ac
import tensorboardX
import sys
import glob
from math import floor
import utils
from model import ACModel
from recurrent_model import RecurrentACModel
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser()
## General parameters
parser.add_argument("--algo", default='ppo',
help="algorithm to use: a2c | ppo (REQUIRED)")
parser.add_argument("--env", default='Zones-25-v1',
help="name of the environment to train on (REQUIRED)")
parser.add_argument("--ltl-sampler", default="Until_1_2_1_1",
help="the ltl formula template to sample from (default: DefaultSampler)")
parser.add_argument("--model", default=None,
help="name of the model (default: {ENV}_{SAMPLER}_{ALGO}_{TIME})")
parser.add_argument("--seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--log-interval", type=int, default=1,
help="number of updates between two logs (default: 10)")
parser.add_argument("--save-interval", type=int, default=2,
help="number of updates between two saves (default: 10, 0 means no saving)")
parser.add_argument("--procs", type=int, default=16,
help="number of processes (default: 16)")
parser.add_argument("--frames", type=int, default=10200000,
help="number of frames of training (default: 2*10e8)")
parser.add_argument("--checkpoint-dir", default=None)
## Evaluation parameters
parser.add_argument("--eval", action="store_true", default=False,
help="evaluate the saved model (default: False)")
parser.add_argument("--eval-episodes", type=int, default=5,
help="number of episodes to evaluate on (default: 5)")
parser.add_argument("--eval-env", default=None,
help="name of the environment to train on (default: use the same \"env\" as training)")
parser.add_argument("--ltl-samplers-eval", default=None, nargs='+',
help="the ltl formula templates to sample from for evaluation (default: use the same \"ltl-sampler\" as training)")
parser.add_argument("--eval-procs", type=int, default=1,
help="number of processes (default: use the same \"procs\" as training)")
## Parameters for main algorithm
parser.add_argument("--epochs", type=int, default=10,
help="number of epochs for PPO (default: 4)")
parser.add_argument("--batch-size", type=int, default=1024,
help="batch size for PPO (default: 256)")
parser.add_argument("--frames-per-proc", type=int, default=4096,
help="number of frames per process before update (default: 5 for A2C and 128 for PPO)")
parser.add_argument("--discount", type=float, default=0.998,
help="discount factor (default: 0.99)")
parser.add_argument("--lr", type=float, default=0.0003,
help="learning rate (default: 0.0003)")
parser.add_argument("--gae-lambda", type=float, default=0.95,
help="lambda coefficient in GAE formula (default: 0.95, 1 means no gae)")
parser.add_argument("--entropy-coef", type=float, default=0.003,
help="entropy term coefficient (default: 0.01)")
parser.add_argument("--value-loss-coef", type=float, default=0.5,
help="value loss term coefficient (default: 0.5)")
parser.add_argument("--max-grad-norm", type=float, default=0.5,
help="maximum norm of gradient (default: 0.5)")
parser.add_argument("--optim-eps", type=float, default=1e-8,
help="Adam and RMSprop optimizer epsilon (default: 1e-8)")
parser.add_argument("--optim-alpha", type=float, default=0.99,
help="RMSprop optimizer alpha (default: 0.99)")
parser.add_argument("--clip-eps", type=float, default=0.2,
help="clipping epsilon for PPO (default: 0.2)")
parser.add_argument("--ignoreLTL", action="store_true", default=False,
help="the network ignores the LTL input")
parser.add_argument("--noLTL", action="store_true", default=False,
help="the environment no longer has an LTL goal. --ignoreLTL must be specified concurrently.")
parser.add_argument("--progression-mode", default="full",
help="Full: uses LTL progression; partial: shows the propositions which progress or falsify the formula; none: only original formula is seen. ")
parser.add_argument("--recurrence", type=int, default=1,
help="number of time-steps gradient is backpropagated (default: 1). If > 1, a LSTM is added to the model to have memory.")
parser.add_argument("--gnn", default="Transformer", help="use gnn to model the LTL (only if ignoreLTL==True)")
parser.add_argument("--trans_layer", type=int, default=1, help="the number of Transformer layers need to use")
parser.add_argument("--int-reward", type=float, default=0.0, help="the intrinsic reward for LTL progression (default: 0.0)")
parser.add_argument("--pretrained-gnn", action="store_true", default=False, help="load a pre-trained LTL module.")
parser.add_argument("--dumb-ac", action="store_true", default=False, help="Use a single-layer actor-critic")
parser.add_argument("--freeze-ltl", action="store_true", default=False, help="Freeze the gradient updates of the LTL module")
# Transformer special parameters
parser.add_argument("--d_model", type=int, default=64, help="")
parser.add_argument("--nhead", type=int, default=8, help="")
parser.add_argument("--num_encoder_layers", type=int, default=4, help="")
parser.add_argument("--pool", type=str, default='mean', help="")
parser.add_argument("--dim_feedforward", type=int, default=256, help="")
parser.add_argument("--dropout", type=float, default=0.0, help="")
parser.add_argument("--d_out", type=int, default=16, help="")
parser.add_argument("--layer_norm_eps", type=float, default=1e-5, help="")
parser.add_argument("--TFixup", type=bool, default=True, help="")
parser.add_argument("--cuda", type=str, default='cuda:0', help="")
    # additional description for test
parser.add_argument("--sth", type=str, default='None', help="")
args = parser.parse_args()
    use_mem = args.recurrence > 1  # whether to use a recurrent (memory) model
# Set run dir
date = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S") # '21-08-21-22-36-39'
gnn_name = args.gnn
if args.ignoreLTL:
gnn_name = "IgnoreLTL"
if args.dumb_ac:
gnn_name = gnn_name + "-dumb_ac"
if args.pretrained_gnn:
gnn_name = gnn_name + "-pretrained"
if args.freeze_ltl:
gnn_name = gnn_name + "-freeze_ltl"
if use_mem:
gnn_name = gnn_name + "-recurrence:%d"%(args.recurrence)
if args.gnn == 'Transformer':
default_model_name = f"{gnn_name}_{args.ltl_sampler}_{args.env}_seed:{args.seed}_bs:{args.batch_size}_n_encoder:{args.num_encoder_layers}_nhead:{args.nhead}_d_model:{args.d_model}_FFD:{args.dim_feedforward}_d_out:{args.d_out}_Init:{args.TFixup}_sth:{args.sth}"
else:
# 'RGCN_8x32_ROOT_SHARED_Until_1_2_1_1_Zones-5-v0_seed:1_epochs:10_bs:2048_fpp:4096_dsc:0.998_lr:0.0003_ent:0.003_clip:0.2_prog:full'
default_model_name = f"{gnn_name}_{args.ltl_sampler}_{args.env}_seed:{args.seed}_epochs:{args.epochs}_bs:{args.batch_size}_fpp:{args.frames_per_proc}_dsc:{args.discount}_lr:{args.lr}_ent:{args.entropy_coef}_clip:{args.clip_eps}_prog:{args.progression_mode}"
model_name = args.model or default_model_name
storage_dir = "storage" if args.checkpoint_dir is None else args.checkpoint_dir
model_dir = utils.get_model_dir(model_name, storage_dir)
pretrained_model_dir = None
if args.pretrained_gnn:
assert(args.progression_mode == "full")
# default_dir = f"symbol-storage/{args.gnn}-dumb_ac_{args.ltl_sampler}_Simple-LTL-Env-v0_seed:{args.seed}_*_prog:{args.progression_mode}/train"
default_dir = f"{args.gnn}-dumb_ac_{args.ltl_sampler}_Simple-LTL-Env-v0_seed:{args.seed}_*_prog:{args.progression_mode}/train"
print(default_dir)
model_dirs = glob.glob(default_dir)
if len(model_dirs) == 0:
raise Exception("Pretraining directory not found.")
elif len(model_dirs) > 1:
raise Exception("More than 1 candidate pretraining directory found.")
pretrained_model_dir = model_dirs[0]
# Load loggers and Tensorboard writer
txt_logger = utils.get_txt_logger(model_dir + "/train")
csv_file, csv_logger = utils.get_csv_logger(model_dir + "/train")
tb_writer = tensorboardX.SummaryWriter(model_dir + "/train")
utils.save_config(model_dir + "/train", args)
# Log command and all script arguments
txt_logger.info("{}\n".format(" ".join(sys.argv)))
txt_logger.info("{}\n".format(args)) # It will output the context of Namespace
# Set seed for all randomness sources
utils.seed(args.seed)
# Set device
device = torch.device(args.cuda)
# device = torch.device('cpu')
txt_logger.info(f"Device: {device}\n") # Output the device (default is cpu)
# Load environments
envs = []
progression_mode = args.progression_mode
for i in range(args.procs): # load the env & progression_mode & LTL formula
# turn to utils/env.py
envs.append(utils.make_env(args.env, progression_mode, args.ltl_sampler, args.seed, args.int_reward, args.noLTL))
# Sync environments
envs[0].reset() # Add the agent to map & translate the LTL formula
txt_logger.info("Environments loaded\n")
# Load training status
try:
status = utils.get_status(model_dir + "/train", args)
except OSError:
status = {"num_frames": 0, "update": 0} # ??? the state of algorithm ?
txt_logger.info("Training status loaded.\n")
if pretrained_model_dir is not None:
try:
pretrained_status = utils.get_status(pretrained_model_dir, args)
except:
txt_logger.info("Failed to load pretrained model.\n")
exit(1)
# Load observations preprocessor-- build AST
using_gnn = (args.gnn != "GRU" and args.gnn != "LSTM" and args.gnn != "Transformer")
# turn to env/format.py
obs_space, preprocess_obss = utils.get_obss_preprocessor(envs[0], using_gnn, progression_mode)
if "vocab" in status and preprocess_obss.vocab is not None:
preprocess_obss.vocab.load_vocab(status["vocab"])
txt_logger.info("Observations preprocessor loaded.\n")
# Load model
if use_mem:
acmodel = RecurrentACModel(envs[0].env, obs_space, envs[0].action_space, args.ignoreLTL, args.gnn, args.dumb_ac, args.freeze_ltl)
else:
acmodel = ACModel(envs[0].env, obs_space, envs[0].action_space, args.ignoreLTL, args.gnn, args.dumb_ac, args.freeze_ltl, args)
if "model_state" in status:
acmodel.load_state_dict(status["model_state"])
txt_logger.info("Loading model from existing run.\n")
elif args.pretrained_gnn:
acmodel.load_pretrained_gnn(pretrained_status["model_state"])
txt_logger.info("Pretrained model loaded.\n")
acmodel.to(device)
txt_logger.info("Model loaded.\n")
txt_logger.info("{}\n".format(acmodel))
# Load algo
if args.algo == "a2c":
algo = torch_ac.A2CAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
args.optim_alpha, args.optim_eps, preprocess_obss)
elif args.algo == "ppo":
algo = torch_ac.PPOAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
args.optim_eps, args.clip_eps, args.epochs, args.batch_size, preprocess_obss)
else:
raise ValueError("Incorrect algorithm name: {}".format(args.algo))
if "optimizer_state" in status:
algo.optimizer.load_state_dict(status["optimizer_state"])
txt_logger.info("Loading optimizer from existing run.\n")
txt_logger.info("Optimizer loaded.\n")
# init the evaluator
if args.eval:
eval_samplers = args.ltl_samplers_eval if args.ltl_samplers_eval else [args.ltl_sampler]
eval_env = args.eval_env if args.eval_env else args.env
eval_procs = args.eval_procs if args.eval_procs else args.procs
evals = []
for eval_sampler in eval_samplers:
evals.append(utils.Eval(eval_env, model_name, eval_sampler,
seed=args.seed, device=device, num_procs=eval_procs, ignoreLTL=args.ignoreLTL, progression_mode=progression_mode, gnn=args.gnn, dumb_ac = args.dumb_ac))
# Train model
num_frames = status["num_frames"] # num_frames:0
update = status["update"] # update:0
start_time = time.time()
while num_frames < args.frames:
# Update model parameters
update_start_time = time.time()
        exps, logs1 = algo.collect_experiences()  # interact with the environments (very important)
        logs2 = algo.update_parameters(exps)
logs = {**logs1, **logs2}
update_end_time = time.time()
num_frames += logs["num_frames"]
update += 1
# Print logs
if update % args.log_interval == 0:
fps = logs["num_frames"]/(update_end_time - update_start_time)
duration = int(time.time() - start_time)
return_per_episode = utils.synthesize(logs["return_per_episode"])
rreturn_per_episode = utils.synthesize(logs["reshaped_return_per_episode"])
average_reward_per_step = utils.average_reward_per_step(logs["return_per_episode"], logs["num_frames_per_episode"])
average_discounted_return = utils.average_discounted_return(logs["return_per_episode"], logs["num_frames_per_episode"], args.discount)
num_frames_per_episode = utils.synthesize(logs["num_frames_per_episode"])
header = ["update", "frames", "FPS", "duration"]
data = [update, num_frames, fps, duration]
header += ["rreturn_" + key for key in rreturn_per_episode.keys()]
data += rreturn_per_episode.values()
header += ["average_reward_per_step", "average_discounted_return"]
data += [average_reward_per_step, average_discounted_return]
header += ["num_frames_" + key for key in num_frames_per_episode.keys()]
data += num_frames_per_episode.values()
header += ["entropy", "value", "policy_loss", "value_loss", "grad_norm"]
data += [logs["entropy"], logs["value"], logs["policy_loss"], logs["value_loss"], logs["grad_norm"]]
txt_logger.info(
"U {} | F {:06} | FPS {:04.0f} | D {} | rR:μσmM {:.2f} {:.2f} {:.2f} {:.2f} | ARPS: {:.3f} | ADR: {:.3f} | F:μσmM {:.1f} {:.1f} {} {} | H {:.3f} | V {:.3f} | pL {:.3f} | vL {:.3f} | ∇ {:.3f}"
.format(*data))
header += ["return_" + key for key in return_per_episode.keys()]
data += return_per_episode.values()
if status["num_frames"] == 0:
csv_logger.writerow(header)
csv_logger.writerow(data)
csv_file.flush()
for field, value in zip(header, data):
tb_writer.add_scalar(field, value, num_frames)
# Save status
if args.save_interval > 0 and update % args.save_interval == 0:
status = {"num_frames": num_frames, "update": update,
"model_state": algo.acmodel.state_dict(), "optimizer_state": algo.optimizer.state_dict()}
if hasattr(preprocess_obss, "vocab") and preprocess_obss.vocab is not None:
status["vocab"] = preprocess_obss.vocab.vocab
utils.save_status(status, model_dir + "/train")
txt_logger.info("Status saved")
if args.eval:
# we send the num_frames to align the eval curves with the training curves on TB
for evalu in evals:
evalu.eval(num_frames, episodes=args.eval_episodes)
| 16,666 | 49.506061 | 268 | py |
T2TL | T2TL-main/src/torch_ac/format.py | import torch
def default_preprocess_obss(obss, device=None):
return torch.tensor(obss, device=device) | 106 | 25.75 | 47 | py |
T2TL | T2TL-main/src/torch_ac/model.py | from abc import abstractmethod, abstractproperty
import torch.nn as nn
import torch.nn.functional as F
class ACModel:
recurrent = False
@abstractmethod
def __init__(self, obs_space, action_space):
pass
@abstractmethod
def forward(self, obs):
pass
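# A concrete ACModel subclass (assumed sketch, not part of torch_ac) is
# expected to return a torch.distributions.Distribution over actions and a
# value tensor from forward(), e.g.:
#   def forward(self, obs):
#       embedding = self.encoder(obs)
#       return self.actor(embedding), self.critic(embedding).squeeze(1)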
class RecurrentACModel(ACModel):
recurrent = True
@abstractmethod
def forward(self, obs, memory):
pass
@property
@abstractmethod
def memory_size(self):
pass | 485 | 17.692308 | 48 | py |
T2TL | T2TL-main/src/torch_ac/__init__.py | from torch_ac.algos import A2CAlgo, PPOAlgo
from torch_ac.model import ACModel, RecurrentACModel
from torch_ac.utils import DictList | 132 | 43.333333 | 52 | py |
T2TL | T2TL-main/src/torch_ac/algos/base.py | from abc import ABC, abstractmethod
import torch
from torch_ac.format import default_preprocess_obss
from torch_ac.utils import DictList, ParallelEnv
import numpy as np
from collections import deque
class BaseAlgo(ABC):
"""The base class for RL algorithms."""
def __init__(self, envs, acmodel, device, num_frames_per_proc, discount, lr, gae_lambda, entropy_coef,
value_loss_coef, max_grad_norm, recurrence, preprocess_obss, reshape_reward,
history_length):
"""
Initializes a `BaseAlgo` instance.
Parameters:
----------
envs : list
a list of environments that will be run in parallel
        acmodel : torch.nn.Module
the model
num_frames_per_proc : int
the number of frames collected by every process for an update
discount : float
the discount for future rewards
lr : float
the learning rate for optimizers
gae_lambda : float
the lambda coefficient in the GAE formula
([Schulman et al., 2015](https://arxiv.org/abs/1506.02438))
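            concretely, advantages are computed backwards in time as
            A_t = delta_t + discount * gae_lambda * mask_{t+1} * A_{t+1},
            where delta_t = r_t + discount * V(s_{t+1}) * mask_{t+1} - V(s_t)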
entropy_coef : float
the weight of the entropy cost in the final objective
value_loss_coef : float
the weight of the value loss in the final objective
max_grad_norm : float
gradient will be clipped to be at most this value
recurrence : int
the number of steps the gradient is propagated back in time
preprocess_obss : function
a function that takes observations returned by the environment
and converts them into the format that the model can handle
reshape_reward : function
a function that shapes the reward, takes an
(observation, action, reward, done) tuple as an input
"""
# Store parameters
self.env = ParallelEnv(envs)
self.acmodel = acmodel
self.device = device
self.num_frames_per_proc = num_frames_per_proc # 4096
self.discount = discount # 0.998
self.lr = lr # 0.0003
        self.gae_lambda = gae_lambda # 0.95
        self.entropy_coef = entropy_coef # 0.003
self.value_loss_coef = value_loss_coef # 0.5
self.max_grad_norm = max_grad_norm # 0.5
self.recurrence = recurrence # 1
self.preprocess_obss = preprocess_obss or default_preprocess_obss
self.reshape_reward = reshape_reward # None
self.action_space_shape = envs[0].action_space.shape # 2
self.use_cont = self.acmodel.context
# Control parameters
assert self.acmodel.recurrent or self.recurrence == 1
assert self.num_frames_per_proc % self.recurrence == 0
# Configure acmodel
self.acmodel.to(self.device)
self.acmodel.train()
# Store helpers values
self.num_procs = len(envs) # 16
self.num_frames = self.num_frames_per_proc * self.num_procs # 4096*16=65536
# Initialize experience values
shape = (self.num_frames_per_proc, self.num_procs) # shape: (4096, 16)
act_shape = shape + self.action_space_shape # act_shape: (4096, 16, 2)
self.obs = self.env.reset() # all 16 envs will be reset (in this, each env has its own original ltl)
self.obss = [None]*(shape[0]) # [None,...4096..., None]
if self.acmodel.recurrent:
self.memory = torch.zeros(shape[1], self.acmodel.memory_size, device=self.device)
self.memories = torch.zeros(*shape, self.acmodel.memory_size, device=self.device)
self.mask = torch.ones(shape[1], device=self.device) # [1., ...16..., 1.]
self.masks = torch.zeros(*shape, device=self.device) # torch.Size([4096, 16])
        self.actions = torch.zeros(*act_shape, device=self.device)  # torch.Size([4096, 16, 2])
self.values = torch.zeros(*shape, device=self.device) # torch.Size([4096, 16])
self.rewards = torch.zeros(*shape, device=self.device) # torch.Size([4096, 16])
self.advantages = torch.zeros(*shape, device=self.device) # torch.Size([4096, 16])
self.log_probs = torch.zeros(*act_shape, device=self.device) # torch.Size([4096, 16, 2])
if self.use_cont:
# Initialize Context Variable Setup # todo: check
### history ####
with torch.no_grad():
preprocess_obs = self.preprocess_obss(self.obs, device=self.device)
preprocess_obs = self.acmodel.env_model(preprocess_obs)
self.history_length = history_length
self.rewards_hist = deque(maxlen=history_length)
self.actions_hist = deque(maxlen=history_length)
self.obsvs_hist = deque(maxlen=history_length)
self.next_hrews = deque(maxlen=history_length)
self.next_hacts = deque(maxlen=history_length)
self.next_hobvs = deque(maxlen=history_length)
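            # Each deque holds the last `history_length` steps; appending to a
            # full deque evicts the oldest entry, so these always contain the
            # most recent window of (reward, action, observation) tensors.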
zero_action = torch.zeros(*(self.num_procs, self.action_space_shape[0]), device=self.device)
zero_obs = torch.zeros(*(self.num_procs, (self.acmodel.embedding_size-self.acmodel.text_embedding_size)), device=self.device)
for _ in range(history_length):
self.rewards_hist.append(torch.zeros(*(shape[1], 1), device=self.device))
self.actions_hist.append(zero_action.clone())
self.obsvs_hist.append(zero_obs.clone())
# same thing for next_h*
self.next_hrews.append(torch.zeros(*(shape[1], 1), device=self.device))
self.next_hacts.append(zero_action.clone())
self.next_hobvs.append(zero_obs.clone())
self.rewards_hist.append(torch.zeros(*(shape[1], 1), device=self.device))
self.obsvs_hist.append(preprocess_obs.clone())
rand_action = torch.FloatTensor(envs[0].action_space.sample()).unsqueeze(0)
for m in range(len(envs)-1):
rand_action = torch.concat([rand_action,
torch.FloatTensor(envs[m+1].action_space.sample()).unsqueeze(0)], dim=0)
self.actions_hist.append(rand_action.to(self.device).clone())
self.rewards_hist_pro = torch.zeros(*(shape[0], history_length*shape[1]), device=self.device)
self.actions_hist_pro = torch.zeros(*(shape[0], history_length*self.action_space_shape[0]*shape[1]), device=self.device)
self.obsvs_hist_pro = torch.zeros(*(shape[0],
history_length*shape[1]*(self.acmodel.embedding_size-self.acmodel.text_embedding_size)),
device=self.device)
self.next_hrews_pro = torch.zeros(*(shape[0], history_length*shape[1]), device=self.device)
self.next_hacts_pro = torch.zeros(*(shape[0], history_length*self.action_space_shape[0]*shape[1]), device=self.device)
self.next_hobvs_pro = torch.zeros(*(shape[0],
history_length*shape[1]*(self.acmodel.embedding_size-self.acmodel.text_embedding_size)),
device=self.device)
# Initialize log values
self.log_episode_return = torch.zeros(self.num_procs, device=self.device) # shape = (16,)
self.log_episode_reshaped_return = torch.zeros(self.num_procs, device=self.device) # shape = (16,)
self.log_episode_num_frames = torch.zeros(self.num_procs, device=self.device) # shape = (16,)
self.log_done_counter = 0
self.log_return = [0] * self.num_procs
self.log_reshaped_return = [0] * self.num_procs
self.log_num_frames = [0] * self.num_procs
def collect_experiences(self):
"""
Collects rollouts and computes advantages.
Runs several environments concurrently. The next actions are computed
in a batch mode for all environments at the same time. The rollouts
and advantages from all environments are concatenated together.
Returns
-------
exps : DictList
Contains actions, rewards, advantages etc as attributes.
Each attribute, e.g. `exps.reward` has a shape
(self.num_frames_per_proc * num_envs, ...). k-th block
of consecutive `self.num_frames_per_proc` frames contains
data obtained from the k-th environment. Be careful not to mix
data from different environments!
logs : dict
Useful stats about the training process, including the average
reward, policy loss, value loss, etc.
"""
for i in range(self.num_frames_per_proc): # range(4096)
# Do one agent-environment interaction
if self.use_cont:
# previous context variable
np_pre_actions, np_pre_rewards, np_pre_obsers = self.actions_hist[0], self.rewards_hist[0], self.obsvs_hist[0]
for k in range(self.history_length-1):
np_pre_actions = torch.concat([np_pre_actions, self.actions_hist[k + 1]], dim=1)
np_pre_rewards = torch.concat([np_pre_rewards, self.rewards_hist[k + 1]], dim=1)
np_pre_obsers = torch.concat([np_pre_obsers, self.obsvs_hist[k + 1]], dim=1)
self.actions_hist_pro[i] = np_pre_actions.flatten().unsqueeze(0)
self.rewards_hist_pro[i] = np_pre_rewards.flatten().unsqueeze(0)
self.obsvs_hist_pro[i] = np_pre_obsers.flatten().unsqueeze(0)
preprocessed_obs = self.preprocess_obss(self.obs, device=self.device)
with torch.no_grad():
if self.acmodel.recurrent:
dist, value, memory = self.acmodel(preprocessed_obs, self.memory * self.mask.unsqueeze(1))
elif self.acmodel.context:
dist, value, embedding = self.acmodel(preprocessed_obs,
[np_pre_actions, np_pre_rewards, np_pre_obsers],
)
else:
dist, value = self.acmodel(preprocessed_obs) # dist = Normal(loc: torch.Size([16, 2]), scale: torch.Size([16, 2])); shape(value)=16
action = dist.sample() # shape = torch.Size([16, 2])
obs, reward, done, _ = self.env.step(action.cpu().numpy())
if self.use_cont:
###############
self.next_hrews.append(torch.FloatTensor(reward).view(self.num_procs, 1).to(self.device))
self.next_hacts.append(action.clone())
self.next_hobvs.append(embedding.clone()) # todo: check
# np_next_hacts and np_next_hrews are required for TD3 alg
np_next_hacts, np_next_hrews, np_next_hobvs = self.next_hacts[0], self.next_hrews[0], self.next_hobvs[0]
for k in range(self.history_length - 1):
np_next_hacts = torch.concat([np_next_hacts, self.next_hacts[k + 1]], dim=1)
np_next_hrews = torch.concat([np_next_hrews, self.next_hrews[k + 1]], dim=1)
np_next_hobvs = torch.concat([np_next_hobvs, self.next_hobvs[k + 1]], dim=1)
# np_next_hacts = np.asarray(self.next_hacts, dtype=np.float32).flatten() # (hist, action_dim) => (hist *action_dim,)
# np_next_hrews = np.asarray(self.next_hrews, dtype=np.float32) # (hist, )
# np_next_hobvs = np.asarray(self.next_hobvs, dtype=np.float32).flatten() # (hist, )
self.next_hacts_pro[i] = np_next_hacts.flatten().unsqueeze(0)
self.next_hrews_pro[i] = np_next_hrews.flatten().unsqueeze(0)
self.next_hobvs_pro[i] = np_next_hobvs.flatten().unsqueeze(0)
# new becomes old
self.rewards_hist.append(torch.FloatTensor(reward).view(self.num_procs, 1).to(self.device))
self.actions_hist.append(action.clone())
self.obsvs_hist.append(embedding.clone()) # todo: check
# Update experiences values
self.obss[i] = self.obs # each i = {list: 16}
self.obs = obs
if self.acmodel.recurrent:
self.memories[i] = self.memory
self.memory = memory
self.masks[i] = self.mask
self.mask = 1 - torch.tensor(done, device=self.device, dtype=torch.float)
self.actions[i] = action
self.values[i] = value
if self.reshape_reward is not None:
self.rewards[i] = torch.tensor([
self.reshape_reward(obs_, action_, reward_, done_)
for obs_, action_, reward_, done_ in zip(obs, action, reward, done)
], device=self.device)
else:
self.rewards[i] = torch.tensor(reward, device=self.device)
self.log_probs[i] = dist.log_prob(action)
# Update log values
self.log_episode_return += torch.tensor(reward, device=self.device, dtype=torch.float)
self.log_episode_reshaped_return += self.rewards[i]
self.log_episode_num_frames += torch.ones(self.num_procs, device=self.device)
for j, done_ in enumerate(done): # j indexes processes; avoids shadowing the frame index i
if done_:
self.log_done_counter += 1
self.log_return.append(self.log_episode_return[j].item())
self.log_reshaped_return.append(self.log_episode_reshaped_return[j].item())
self.log_num_frames.append(self.log_episode_num_frames[j].item())
self.log_episode_return *= self.mask
self.log_episode_reshaped_return *= self.mask
self.log_episode_num_frames *= self.mask
# Add advantage and return to experiences
preprocessed_obs = self.preprocess_obss(self.obs, device=self.device)
with torch.no_grad():
if self.acmodel.recurrent:
_, next_value, _ = self.acmodel(preprocessed_obs, self.memory * self.mask.unsqueeze(1))
elif self.acmodel.context:
_, next_value, _ = self.acmodel(preprocessed_obs,
[np_next_hacts, np_next_hrews, np_next_hobvs],
)
else:
_, next_value = self.acmodel(preprocessed_obs)
for i in reversed(range(self.num_frames_per_proc)):
next_mask = self.masks[i+1] if i < self.num_frames_per_proc - 1 else self.mask
next_value = self.values[i+1] if i < self.num_frames_per_proc - 1 else next_value
next_advantage = self.advantages[i+1] if i < self.num_frames_per_proc - 1 else 0
delta = self.rewards[i] + self.discount * next_value * next_mask - self.values[i]
self.advantages[i] = delta + self.discount * self.gae_lambda * next_advantage * next_mask
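# GAE recurrence used above (for reference), with gamma = self.discount and
# lam = self.gae_lambda, and next_mask zeroing terms across episode boundaries:
#   delta_t = r_t + gamma * V(s_{t+1}) * next_mask - V(s_t)
#   A_t     = delta_t + gamma * lam * next_mask * A_{t+1}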
# Define experiences:
# the whole experience is the concatenation of the experience
# of each process.
# In comments below:
# - T is self.num_frames_per_proc,
# - P is self.num_procs,
# - D is the dimensionality.
exps = DictList()
exps.obs = [self.obss[i][j]
for j in range(self.num_procs)
for i in range(self.num_frames_per_proc)]
if self.acmodel.recurrent:
# T x P x D -> P x T x D -> (P * T) x D
exps.memory = self.memories.transpose(0, 1).reshape(-1, *self.memories.shape[2:])
# T x P -> P x T -> (P * T) x 1
exps.mask = self.masks.transpose(0, 1).reshape(-1).unsqueeze(1)
# for all tensors below, T x P -> P x T -> P * T
exps.action = self.actions.transpose(0, 1).reshape((-1, ) + self.action_space_shape)
exps.value = self.values.transpose(0, 1).reshape(-1)
exps.reward = self.rewards.transpose(0, 1).reshape(-1)
exps.advantage = self.advantages.transpose(0, 1).reshape(-1)
exps.returnn = exps.value + exps.advantage
exps.log_prob = self.log_probs.transpose(0, 1).reshape((-1, ) + self.action_space_shape)
if self.use_cont:
exps.actions_hist = self.actions_hist_pro.reshape(-1, self.history_length*self.action_space_shape[0])
exps.rewards_hist = self.rewards_hist_pro.reshape(-1, self.history_length)
exps.obsvs_hist = self.obsvs_hist_pro.reshape(-1, self.history_length*(self.acmodel.embedding_size-self.acmodel.text_embedding_size))
# Preprocess experiences
exps.obs = self.preprocess_obss(exps.obs, device=self.device)
# Log some values
keep = max(self.log_done_counter, self.num_procs)
logs = {
"return_per_episode": self.log_return[-keep:],
"reshaped_return_per_episode": self.log_reshaped_return[-keep:],
"num_frames_per_episode": self.log_num_frames[-keep:],
"num_frames": self.num_frames
}
self.log_done_counter = 0
self.log_return = self.log_return[-self.num_procs:]
self.log_reshaped_return = self.log_reshaped_return[-self.num_procs:]
self.log_num_frames = self.log_num_frames[-self.num_procs:]
return exps, logs
@abstractmethod
def update_parameters(self):
pass
| 17,512 | 49.469741 | 152 | py |
T2TL | T2TL-main/src/torch_ac/algos/a2c.py | import numpy
import torch
import torch.nn.functional as F
from torch_ac.algos.base import BaseAlgo
class A2CAlgo(BaseAlgo):
"""The Advantage Actor-Critic algorithm."""
def __init__(self, envs, acmodel, device=None, num_frames_per_proc=None, discount=0.99, lr=0.01, gae_lambda=0.95,
entropy_coef=0.01, value_loss_coef=0.5, max_grad_norm=0.5, recurrence=4,
rmsprop_alpha=0.99, rmsprop_eps=1e-8, preprocess_obss=None, reshape_reward=None):
num_frames_per_proc = num_frames_per_proc or 8
super().__init__(envs, acmodel, device, num_frames_per_proc, discount, lr, gae_lambda, entropy_coef,
value_loss_coef, max_grad_norm, recurrence, preprocess_obss, reshape_reward)
self.optimizer = torch.optim.RMSprop(self.acmodel.parameters(), lr,
alpha=rmsprop_alpha, eps=rmsprop_eps)
def update_parameters(self, exps):
# Compute starting indexes
inds = self._get_starting_indexes()
# Initialize update values
update_entropy = 0
update_value = 0
update_policy_loss = 0
update_value_loss = 0
update_loss = 0
# Initialize memory
if self.acmodel.recurrent:
memory = exps.memory[inds]
for i in range(self.recurrence):
# Create a sub-batch of experience
sb = exps[inds + i]
# Compute loss
if self.acmodel.recurrent:
dist, value, memory = self.acmodel(sb.obs, memory * sb.mask)
else:
dist, value = self.acmodel(sb.obs)
entropy = dist.entropy().mean()
policy_loss = -(dist.log_prob(sb.action) * sb.advantage).mean()
value_loss = (value - sb.returnn).pow(2).mean()
loss = policy_loss - self.entropy_coef * entropy + self.value_loss_coef * value_loss
# Update batch values
update_entropy += entropy.item()
update_value += value.mean().item()
update_policy_loss += policy_loss.item()
update_value_loss += value_loss.item()
update_loss += loss
# Average the accumulated values over the recurrence steps
update_entropy /= self.recurrence
update_value /= self.recurrence
update_policy_loss /= self.recurrence
update_value_loss /= self.recurrence
update_loss /= self.recurrence
# Update actor-critic
self.optimizer.zero_grad()
update_loss.backward()
update_grad_norm = sum(p.grad.data.norm(2) ** 2 for p in self.acmodel.parameters() if p.grad is not None) ** 0.5
torch.nn.utils.clip_grad_norm_(self.acmodel.parameters(), self.max_grad_norm)
self.optimizer.step()
# Log some values
logs = {
"entropy": update_entropy,
"value": update_value,
"policy_loss": update_policy_loss,
"value_loss": update_value_loss,
"grad_norm": update_grad_norm
}
return logs
def _get_starting_indexes(self):
"""Gives the indexes of the observations given to the model and the
experiences used to compute the loss at first.
The indexes are the integers from 0 to `self.num_frames` with a step of
`self.recurrence`. If the model is not recurrent, they are all the
integers from 0 to `self.num_frames`.
Returns
-------
starting_indexes : list of int
the indexes of the experiences to be used at first
"""
starting_indexes = numpy.arange(0, self.num_frames, self.recurrence)
return starting_indexes
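# Example (for reference): num_frames=16, recurrence=4 -> array([0, 4, 8, 12]);
# with recurrence=1 every frame index 0..15 is a starting index.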
| 3,659 | 31.972973 | 117 | py |
T2TL | T2TL-main/src/torch_ac/algos/ppo.py | import numpy
import torch
import torch.nn.functional as F
from torch_ac.algos.base import BaseAlgo
class PPOAlgo(BaseAlgo):
"""The Proximal Policy Optimization algorithm
([Schulman et al., 2017](https://arxiv.org/abs/1707.06347))."""
def __init__(self, envs, acmodel, device=None, num_frames_per_proc=None, discount=0.99, lr=0.001, gae_lambda=0.95,
entropy_coef=0.01, value_loss_coef=0.5, max_grad_norm=0.5, recurrence=4,
adam_eps=1e-8, clip_eps=0.2, epochs=4, batch_size=256, preprocess_obss=None,
reshape_reward=None, history_length=16):
num_frames_per_proc = num_frames_per_proc or 128
super().__init__(envs, acmodel, device, num_frames_per_proc, discount, lr, gae_lambda, entropy_coef,
value_loss_coef, max_grad_norm, recurrence, preprocess_obss, reshape_reward, history_length)
self.clip_eps = clip_eps # 0.2
self.epochs = epochs # 10
self.batch_size = batch_size # 2048
self.act_shape = envs[0].action_space.shape # e.g. (2,) for 2-D continuous actions
assert self.batch_size % self.recurrence == 0
self.optimizer = torch.optim.Adam(self.acmodel.parameters(), lr, eps=adam_eps)
self.batch_num = 0
def update_parameters(self, exps):
# Optimize over the collected experiences for several epochs
for _ in range(self.epochs):
# Initialize log values
log_entropies = []
log_values = []
log_policy_losses = []
log_value_losses = []
log_grad_norms = []
for inds in self._get_batches_starting_indexes():
# Initialize batch values
batch_entropy = 0
batch_value = 0
batch_policy_loss = 0
batch_value_loss = 0
batch_loss = 0
# Initialize memory
if self.acmodel.recurrent:
memory = exps.memory[inds]
for i in range(self.recurrence):
# Create a sub-batch of experience
sb = exps[inds + i]
# Compute loss
if self.acmodel.recurrent:
dist, value, memory = self.acmodel(sb.obs, memory * sb.mask)
elif self.acmodel.context:
dist, value, _ = self.acmodel(sb.obs,
[sb.actions_hist, sb.rewards_hist, sb.obsvs_hist])
else:
dist, value = self.acmodel(sb.obs)
entropy = dist.entropy().mean()
# ratio = torch.exp(dist.log_prob(sb.action) - sb.log_prob)
delta_log_prob = dist.log_prob(sb.action) - sb.log_prob
if (len(self.act_shape) == 1): # multivariate (vector) actions: sum log-probs over action dims
delta_log_prob = torch.sum(delta_log_prob, dim=1)
ratio = torch.exp(delta_log_prob)
surr1 = ratio * sb.advantage
surr2 = torch.clamp(ratio, 1.0 - self.clip_eps, 1.0 + self.clip_eps) * sb.advantage
policy_loss = -torch.min(surr1, surr2).mean()
value_clipped = sb.value + torch.clamp(value - sb.value, -self.clip_eps, self.clip_eps)
surr1 = (value - sb.returnn).pow(2)
surr2 = (value_clipped - sb.returnn).pow(2)
value_loss = torch.max(surr1, surr2).mean()
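# For reference, the clipped PPO objective computed above:
#   L_policy = -E[ min(r_t * A_t, clip(r_t, 1-eps, 1+eps) * A_t) ]
# and the value loss is similarly clipped around the old value estimate,
# taking the elementwise max of the clipped and unclipped squared errors.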
loss = policy_loss - self.entropy_coef * entropy + self.value_loss_coef * value_loss
# Update batch values
batch_entropy += entropy.item()
batch_value += value.mean().item()
batch_policy_loss += policy_loss.item()
batch_value_loss += value_loss.item()
batch_loss += loss
# Update memories for next epoch
if self.acmodel.recurrent and i < self.recurrence - 1:
exps.memory[inds + i + 1] = memory.detach()
# Average the accumulated batch values over the recurrence steps
batch_entropy /= self.recurrence
batch_value /= self.recurrence
batch_policy_loss /= self.recurrence
batch_value_loss /= self.recurrence
batch_loss /= self.recurrence
# Update actor-critic
self.optimizer.zero_grad()
batch_loss.backward()
grad_norm = sum(p.grad.data.norm(2).item() ** 2 for p in self.acmodel.parameters() if p.requires_grad) ** 0.5
torch.nn.utils.clip_grad_norm_([p for p in self.acmodel.parameters() if p.requires_grad], self.max_grad_norm)
self.optimizer.step()
# Update log values
log_entropies.append(batch_entropy)
log_values.append(batch_value)
log_policy_losses.append(batch_policy_loss)
log_value_losses.append(batch_value_loss)
log_grad_norms.append(grad_norm)
# Log some values
logs = {
"entropy": numpy.mean(log_entropies),
"value": numpy.mean(log_values),
"policy_loss": numpy.mean(log_policy_losses),
"value_loss": numpy.mean(log_value_losses),
"grad_norm": numpy.mean(log_grad_norms)
}
return logs
def _get_batches_starting_indexes(self):
"""Gives, for each batch, the indexes of the observations given to
the model and the experiences used to compute the loss at first.
First, the indexes are the integers from 0 to `self.num_frames` with a step of
`self.recurrence`, shifted by `self.recurrence//2` on every other call to get
more diverse batches. Then, the indexes are split into the different batches.
Returns
-------
batches_starting_indexes : list of list of int
the indexes of the experiences to be used at first for each batch
"""
indexes = numpy.arange(0, self.num_frames, self.recurrence)
indexes = numpy.random.permutation(indexes)
# Shift starting indexes by self.recurrence//2 half the time
if self.batch_num % 2 == 1:
indexes = indexes[(indexes + self.recurrence) % self.num_frames_per_proc != 0]
indexes += self.recurrence // 2
self.batch_num += 1
num_indexes = self.batch_size // self.recurrence
batches_starting_indexes = [indexes[i:i+num_indexes] for i in range(0, len(indexes), num_indexes)]
return batches_starting_indexes
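if __name__ == "__main__":
# Standalone sketch of the index shuffling above, with hypothetical toy sizes:
# num_frames=16, recurrence=4, batch_size=8 -> batches of 2 starting indexes.
import numpy
indexes = numpy.random.permutation(numpy.arange(0, 16, 4))
print([indexes[i:i+2] for i in range(0, len(indexes), 2)])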
| 6,682 | 39.50303 | 125 | py |
T2TL | T2TL-main/src/torch_ac/algos/__init__.py | from torch_ac.algos.a2c import A2CAlgo
from torch_ac.algos.ppo import PPOAlgo | 77 | 38 | 38 | py |
T2TL | T2TL-main/src/torch_ac/utils/__init__.py | from torch_ac.utils.dictlist import DictList
from torch_ac.utils.penv import ParallelEnv | 88 | 43.5 | 44 | py |
T2TL | T2TL-main/src/utils/ast_builder.py | import ring
import numpy as np
import torch
import dgl
import networkx as nx
from sklearn.preprocessing import OneHotEncoder
edge_types = {k:v for (v, k) in enumerate(["self", "arg", "arg1", "arg2"])}
"""
A class that can take an LTL formula and generate the Abstract Syntax Tree (AST) of it. This
code can generate trees in either Networkx or DGL formats. And uses caching to remember recently
generated trees.
"""
class ASTBuilder(object):
def __init__(self, propositions):
super(ASTBuilder, self).__init__()
self.props = propositions
terminals = ['True', 'False'] + self.props
## Pad terminals with dummy propositions to get a fixed encoding size
for i in range(15 - len(terminals)):
terminals.append("dummy_"+str(i)) # terminals = ['True', 'False', 'J', 'W', 'R', 'Y', 'dummy_0', 'dummy_1', 'dummy_2', 'dummy_3', 'dummy_4', 'dummy_5', 'dummy_6', 'dummy_7', 'dummy_8']
self._enc = OneHotEncoder(handle_unknown='ignore', dtype=np.int64) # np.int was removed in NumPy >= 1.24
self._enc.fit([['next'], ['until'], ['and'], ['or'], ['eventually'],
['always'], ['not']] + np.array(terminals).reshape((-1, 1)).tolist())
# To make the caching work.
def __ring_key__(self):
return "ASTBuilder"
@ring.lru(maxsize=30000)
def __call__(self, formula, library="dgl"):
nxg = self._to_graph(formula)
nx.set_node_attributes(nxg, 0., "is_root")
nxg.nodes[0]["is_root"] = 1.
if (library == "networkx"): return nxg
# convert the Networkx graph to dgl graph and pass the 'feat' attribute
# g = dgl.DGLGraph()
# g.from_networkx(nxg, node_attrs=["feat", "is_root"], edge_attrs=["type"]) # dgl does not support string attributes (i.e., token)
g = dgl.from_networkx(nxg, node_attrs=["feat", "is_root"], edge_attrs=["type"])
return g
def _one_hot(self, token):
return torch.LongTensor(self._enc.transform([[token]])[0][0].toarray())
def _get_edge_type(self, operator, parameter_num=None):
operator = operator.lower()
if (operator in ["next", "until", "and", "or"]):
# Uncomment to make "and" and "or" permutation invariant
# parameter_num = 1 if operator in ["and", "or"] else operator
return edge_types[operator + f"_{parameter_num}"]
return edge_types[operator]
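# e.g. _get_edge_type("self") == 0 and _get_edge_type("arg1") == 2,
# following the insertion order of `edge_types` above.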
# A helper function that recursively builds up the AST of the LTL formula
@ring.lru(maxsize=60000) # Caching the formula->tree pairs in a Last Recently Used fashion
def _to_graph(self, formula, shift=0):
head = formula[0]
rest = formula[1:]
nxg = nx.DiGraph()
if head in ["until", "and", "or"]:
nxg.add_node(shift, feat=self._one_hot(head), token=head)
nxg.add_edge(shift, shift, type=self._get_edge_type("self"))
l = self._to_graph(rest[0], shift+1)
nxg = nx.compose(nxg, l)
nxg.add_edge(shift+1, shift, type=self._get_edge_type("arg1"))
index = nxg.number_of_nodes()
r = self._to_graph(rest[1], shift+index)
nxg = nx.compose(nxg, r)
nxg.add_edge(shift+index, shift, type=self._get_edge_type("arg2"))
return nxg
if head in ["next", "eventually", "always", "not"]:
nxg.add_node(shift, feat=self._one_hot(head), token=head)
nxg.add_edge(shift, shift, type=self._get_edge_type("self"))
l = self._to_graph(rest[0], shift+1)
nxg = nx.compose(nxg, l)
nxg.add_edge(shift+1, shift, type=self._get_edge_type("arg"))
return nxg
if formula in ["True", "False"]:
nxg.add_node(shift, feat=self._one_hot(formula), token=formula)
nxg.add_edge(shift, shift, type=self._get_edge_type("self"))
return nxg
if formula in self.props:
nxg.add_node(shift, feat=self._one_hot(formula.replace("'",'')), token=formula)
nxg.add_edge(shift, shift, type=self._get_edge_type("self"))
return nxg
assert False, "Format error in ast_builder.ASTBuilder._to_graph()"
return None
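# Example (for reference): _to_graph(("until", "a", "b")) yields three nodes
# {0: "until", 1: "a", 2: "b"}, each with a self-loop, plus edges
# 1 -> 0 (type "arg1") and 2 -> 0 (type "arg2").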
def draw(G, formula):
from networkx.drawing.nx_agraph import graphviz_layout
import matplotlib.pyplot as plt
# colors = ["black", "red"]
# edge_color = [colors[i] for i in nx.get_edge_attributes(G,'type').values()]
plt.title(formula)
pos=graphviz_layout(G, prog='dot')
labels = nx.get_node_attributes(G,'token')
nx.draw(G, pos, with_labels=True, arrows=True, labels=labels, node_shape='s', edgelist=list(nx.get_edge_attributes(G,'type')), node_size=500, node_color="white") #edge_color=edge_color
plt.show()
"""
A simple test to check if the ASTBuilder works fine. We do a preorder DFS traversal of the resulting
tree and convert it to a simplified formula and compare the result with the simplified version of the
original formula. They should match.
"""
if __name__ == '__main__':
import re
import sys
import itertools
import matplotlib.pyplot as plt
sys.path.insert(0, '../../')
from ltl_samplers import getLTLSampler
for sampler_id, _ in itertools.product(["Default", "Sequence_2_20"], range(20)):
props = "abcdefghijklmnopqrst"
sampler = getLTLSampler(sampler_id, props)
builder = ASTBuilder(list(set(list(props))))
formula = sampler.sample()
tree = builder(formula, library="networkx")
pre = list(nx.dfs_preorder_nodes(tree, source=0))
draw(tree, formula)
u_tree = tree.to_undirected()
pre = list(nx.dfs_preorder_nodes(u_tree, source=0))
original = re.sub('[,\')(]', '', str(formula))
observed = " ".join([u_tree.nodes[i]["token"] for i in pre])
assert original == observed, f"Test Faield: Expected: {original}, Got: {observed}"
print("Test Passed!")
| 5,910 | 37.383117 | 197 | py |
T2TL | T2TL-main/src/utils/storage.py | import csv
import os
import torch
import logging
import sys
import pickle
import utils
def create_folders_if_necessary(path):
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def get_storage_dir():
if "RL_STORAGE" in os.environ:
return os.environ["RL_STORAGE"]
return "storage"
def get_model_dir(model_name, storage_dir="storage"):
return os.path.join(storage_dir, model_name)
def get_status_path(model_dir):
return os.path.join(model_dir, "status.pt")
def get_status(model_dir, args=None):
# args is optional so callers without CLI args (e.g. get_vocab, Agent) default to CPU
path = get_status_path(model_dir)
device = torch.device(args.cuda if args is not None and torch.cuda.is_available() else "cpu")
return torch.load(path, map_location=device)
def save_status(status, model_dir):
path = get_status_path(model_dir)
utils.create_folders_if_necessary(path)
torch.save(status, path)
def get_vocab(model_dir):
return get_status(model_dir)["vocab"]
def get_model_state(model_dir):
return get_status(model_dir)["model_state"]
def get_txt_logger(model_dir):
path = os.path.join(model_dir, "log.txt")
utils.create_folders_if_necessary(path)
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
handlers=[
logging.FileHandler(filename=path),
logging.StreamHandler(sys.stdout)
]
)
return logging.getLogger()
def get_csv_logger(model_dir):
csv_path = os.path.join(model_dir, "log.csv")
utils.create_folders_if_necessary(csv_path)
csv_file = open(csv_path, "a")
return csv_file, csv.writer(csv_file)
def load_config(model_dir):
path = os.path.join(model_dir, "config.pickle")
if (not os.path.exists(path)):
print(f"No config file found at: {path}")
return None
return pickle.load(open(path, "rb"))
def save_config(model_dir, config):
path = os.path.join(model_dir, "config.pickle")
utils.create_folders_if_necessary(path)
pickle.dump(config, open(path, "wb"))
| 1,978 | 22.282353 | 105 | py |
T2TL | T2TL-main/src/utils/format.py | """
These functions preprocess the observations.
When trying more sophisticated encoding for LTL, we might have to modify this code.
"""
import os
import json
import re
import torch
import torch_ac
import gym
import numpy as np
import utils
from envs import *
from ltl_wrappers import LTLEnv
def get_obss_preprocessor(env, gnn, progression_mode, gnn_type=None):
obs_space = env.observation_space
vocab_space = env.get_propositions()
vocab = None
if isinstance(env, LTLEnv): # LTLEnv Wrapped env
env = env.unwrapped
if isinstance(env, ZonesEnv):
if progression_mode == "partial":
obs_space = {"image": obs_space.spaces["features"].shape, "progress_info": len(vocab_space)}
def preprocess_obss(obss, device=None):
return torch_ac.DictList({
"image": preprocess_images([obs["features"] for obs in obss], device=device),
"progress_info": torch.stack([torch.tensor(obs["progress_info"], dtype=torch.float) for obs in obss], dim=0).to(device)
})
else:
obs_space = {"image": obs_space.spaces["features"].shape, "text": max(22, len(vocab_space) + 10)}
vocab_space = {"max_size": obs_space["text"], "tokens": vocab_space}
vocab = Vocabulary(vocab_space)
tree_builder = utils.ASTBuilder(vocab_space["tokens"])
def preprocess_obss(obss, device=None):
return torch_ac.DictList({
"image": preprocess_images([obs["features"] for obs in obss], device=device),
"text": preprocess_texts([obs["text"] for obs in obss], vocab, vocab_space, gnn=gnn, gnn_type=gnn_type, device=device, ast=tree_builder)
})
preprocess_obss.vocab = vocab
else:
raise ValueError("Unknown observation space: " + str(obs_space))
# Check if obs_space is an image space
elif isinstance(obs_space, gym.spaces.Box):
obs_space = {"image": obs_space.shape}
def preprocess_obss(obss, device=None):
return torch_ac.DictList({
"image": preprocess_images(obss, device=device)
})
else:
raise ValueError("Unknown observation space: " + str(obs_space))
return obs_space, preprocess_obss
def preprocess_images(images, device=None):
# Bug of Pytorch: very slow if not first converted to numpy array
images = np.array(images) # list:16 (each list : len(list)=76) -> {ndarray: (16, 76)}
return torch.tensor(images, device=device, dtype=torch.float)
def preprocess_texts(texts, vocab, vocab_space, gnn=False, gnn_type=None, device=None, **kwargs):
if (gnn):
return preprocess4gnn(texts, kwargs["ast"], device)
return preprocess4rnn(texts, vocab, device)
def preprocess4rnn(texts, vocab, device=None):
var_indexed_texts = []
max_text_len = 25
for text in texts:
text = str(text) # transforming the ltl formula into a string
tokens = re.findall("([a-z]+)", text.lower())
var_indexed_text = np.array([vocab[token] for token in tokens])
var_indexed_texts.append(var_indexed_text)
max_text_len = max(len(var_indexed_text), max_text_len)
indexed_texts = np.zeros((len(texts), max_text_len))
for i, indexed_text in enumerate(var_indexed_texts):
indexed_texts[i, :len(indexed_text)] = indexed_text
return torch.tensor(indexed_texts, device=device, dtype=torch.long)
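# Example (for reference): str(("until", "a", "b")) tokenizes via the regex
# above to ["until", "a", "b"], which are mapped to vocab ids and zero-padded
# to max_text_len.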
def preprocess4gnn(texts, ast, device=None):
"""
This function receives the LTL formulas and converts them into inputs for a GNN
"""
return np.array([[ast(text).to(device)] for text in texts])
class Vocabulary:
"""A mapping from tokens to ids with a capacity of `max_size` words.
It can be saved in a `vocab.json` file."""
def __init__(self, vocab_space):
self.max_size = vocab_space["max_size"]
self.vocab = {}
# populate the vocab with the LTL operators
for item in ['next', 'until', 'and', 'or', 'eventually', 'always', 'not', 'True', 'False']:
self.__getitem__(item)
for item in vocab_space["tokens"]:
self.__getitem__(item)
def load_vocab(self, vocab):
self.vocab = vocab
def __getitem__(self, token):
if token not in self.vocab:
if len(self.vocab) >= self.max_size:
raise ValueError("Maximum vocabulary capacity reached")
self.vocab[token] = len(self.vocab) + 1
return self.vocab[token]
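if __name__ == "__main__":
# Minimal sketch (assumes the repo's `envs` package is importable, since this
# module imports it at load time): operators are pre-registered with ids 1..9,
# then tokens get fresh ids on first lookup.
v = Vocabulary({"max_size": 22, "tokens": ["a", "b"]})
print(v["until"], v["a"], v["zzz"]) # -> 2 10 12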
| 4,698 | 35.710938 | 161 | py |
T2TL | T2TL-main/src/utils/evaluator.py | import time
import torch
from torch_ac.utils.penv import ParallelEnv
from envs import * # provides LetterEnv used below
#import tensorboardX
import utils
import argparse
import datetime
class Eval:
def __init__(self, env, model_name, ltl_sampler,
seed=0, device="cpu", argmax=False,
num_procs=1, ignoreLTL=False, progression_mode=True, gnn=None, recurrence=1, dumb_ac = False, discount=0.99):
self.env = env
self.device = device
self.argmax = argmax
self.num_procs = num_procs
self.ignoreLTL = ignoreLTL
self.progression_mode = progression_mode
self.gnn = gnn
self.recurrence = recurrence
self.dumb_ac = dumb_ac
self.discount = discount
self.model_dir = utils.get_model_dir(model_name, storage_dir="")
#self.tb_writer = tensorboardX.SummaryWriter(self.model_dir + "/eval-" + ltl_sampler)
# Load environments for evaluation
eval_envs = []
for i in range(self.num_procs):
eval_envs.append(utils.make_env(env, progression_mode, ltl_sampler, seed, 0, False))
eval_envs[0].reset()
if isinstance(eval_envs[0].env, LetterEnv):
for env in eval_envs:
env.env.map = eval_envs[0].env.map
self.eval_envs = ParallelEnv(eval_envs)
def eval(self, num_frames, episodes=100, stdout=True):
# Load agent
agent = utils.Agent(self.eval_envs.envs[0], self.eval_envs.observation_space, self.eval_envs.action_space, self.model_dir + "/train",
self.ignoreLTL, self.progression_mode, self.gnn, recurrence = self.recurrence, dumb_ac = self.dumb_ac, device=self.device, argmax=self.argmax, num_envs=self.num_procs)
# Run agent
start_time = time.time()
obss = self.eval_envs.reset()
log_counter = 0
log_episode_return = torch.zeros(self.num_procs, device=self.device)
log_episode_num_frames = torch.zeros(self.num_procs, device=self.device)
# Initialize logs
logs = {"num_frames_per_episode": [], "return_per_episode": []}
while log_counter < episodes:
actions = agent.get_actions(obss)
obss, rewards, dones, _ = self.eval_envs.step(actions)
agent.analyze_feedbacks(rewards, dones)
log_episode_return += torch.tensor(rewards, device=self.device, dtype=torch.float)
log_episode_num_frames += torch.ones(self.num_procs, device=self.device)
for i, done in enumerate(dones):
if done:
log_counter += 1
logs["return_per_episode"].append(log_episode_return[i].item())
logs["num_frames_per_episode"].append(log_episode_num_frames[i].item())
mask = 1 - torch.tensor(dones, device=self.device, dtype=torch.float)
log_episode_return *= mask
log_episode_num_frames *= mask
end_time = time.time()
return logs["return_per_episode"], logs["num_frames_per_episode"]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--ltl-sampler", default="Default",
help="the ltl formula template to sample from (default: DefaultSampler)")
parser.add_argument("--seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--model-paths", required=True, nargs="+",
help="path of the model, or a regular expression")
parser.add_argument("--procs", type=int, default=1,
help="number of processes (default: 1)")
parser.add_argument("--eval-episodes", type=int, default=5,
help="number of episodes to evaluate on (default: 5)")
parser.add_argument("--env", default="Letter-7x7-v3",
help="name of the environment to train on (REQUIRED)")
parser.add_argument("--discount", type=float, default=0.99,
help="discount factor (default: 0.99)")
parser.add_argument("--ignoreLTL", action="store_true", default=False,
help="the network ignores the LTL input")
parser.add_argument("--progression-mode", default="full",
help="Full: uses LTL progression; partial: shows the propositions which progress or falsify the formula; none: only original formula is seen. ")
parser.add_argument("--recurrence", type=int, default=1,
help="number of time-steps gradient is backpropagated (default: 1). If > 1, a LSTM is added to the model to have memory.")
parser.add_argument("--gnn", default="RGCN_8x32_ROOT_SHARED", help="use gnn to model the LTL (only if ignoreLTL==True)")
args = parser.parse_args()
logs_returns_per_episode = []
logs_num_frames_per_episode = []
for model_path in args.model_paths:
idx = model_path.find("seed:") + 5
seed = int(model_path[idx:idx+2].strip("_"))
eval = utils.Eval(args.env, model_path, args.ltl_sampler,
seed=seed, device=torch.device("cpu"), argmax=False,
num_procs=args.procs, ignoreLTL=args.ignoreLTL, progression_mode=args.progression_mode, gnn=args.gnn, recurrence=args.recurrence, dumb_ac=False, discount=args.discount)
rpe, nfpe = eval.eval(-1, episodes=args.eval_episodes, stdout=True)
logs_returns_per_episode += rpe
logs_num_frames_per_episode += nfpe
print(sum(rpe), seed, model_path)
print(logs_num_frames_per_episode)
print(logs_returns_per_episode)
num_frame_pe = sum(logs_num_frames_per_episode)
return_per_episode = utils.synthesize(logs_returns_per_episode)
num_frames_per_episode = utils.synthesize(logs_num_frames_per_episode)
average_discounted_return, error = utils.average_discounted_return(logs_returns_per_episode, logs_num_frames_per_episode, args.discount, include_error=True)
header = ["frames"]
data = [num_frame_pe]
header += ["num_frames_" + key for key in num_frames_per_episode.keys()]
data += num_frames_per_episode.values()
header += ["average_discounted_return", "err"]
data += [average_discounted_return, error]
header += ["return_" + key for key in return_per_episode.keys()]
data += return_per_episode.values()
for field, value in zip(header, data):
print(field, value)
| 6,373 | 42.067568 | 189 | py |
T2TL | T2TL-main/src/utils/agent.py | import torch
import utils
from model import ACModel
from recurrent_model import RecurrentACModel
class Agent:
"""An agent.
It is able:
- to choose an action given an observation,
- to analyze the feedback (i.e. reward and done state) of its action."""
def __init__(self, env, obs_space, action_space, model_dir, ignoreLTL, progression_mode,
gnn, recurrence = 1, dumb_ac = False, device=None, argmax=False, num_envs=1):
try:
print(model_dir)
status = utils.get_status(model_dir)
except OSError:
status = {"num_frames": 0, "update": 0}
using_gnn = (gnn != "GRU" and gnn != "LSTM")
obs_space, self.preprocess_obss = utils.get_obss_preprocessor(env, using_gnn, progression_mode)
if "vocab" in status and self.preprocess_obss.vocab is not None:
self.preprocess_obss.vocab.load_vocab(status["vocab"])
if recurrence > 1:
self.acmodel = RecurrentACModel(env, obs_space, action_space, ignoreLTL, gnn, dumb_ac, True)
self.memories = torch.zeros(num_envs, self.acmodel.memory_size, device=device)
else:
self.acmodel = ACModel(env, obs_space, action_space, ignoreLTL, gnn, dumb_ac, True)
self.device = device
self.argmax = argmax
self.num_envs = num_envs
self.acmodel.load_state_dict(utils.get_model_state(model_dir))
self.acmodel.to(self.device)
self.acmodel.eval()
def get_actions(self, obss):
preprocessed_obss = self.preprocess_obss(obss, device=self.device)
with torch.no_grad():
if self.acmodel.recurrent:
dist, _, self.memories = self.acmodel(preprocessed_obss, self.memories)
else:
dist, _ = self.acmodel(preprocessed_obss)
if self.argmax:
actions = dist.probs.max(1, keepdim=True)[1]
else:
actions = dist.sample()
return actions.cpu().numpy()
def get_action(self, obs):
return self.get_actions([obs])[0]
def analyze_feedbacks(self, rewards, dones):
if self.acmodel.recurrent:
masks = 1 - torch.tensor(dones, dtype=torch.float).unsqueeze(1)
self.memories *= masks
def analyze_feedback(self, reward, done):
return self.analyze_feedbacks([reward], [done]) | 2,374 | 33.926471 | 104 | py |
T2TL | T2TL-main/src/utils/other.py | import random
import numpy
import torch
import collections
def seed(seed):
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def synthesize(array):
d = collections.OrderedDict()
d["mean"] = numpy.mean(array)
d["std"] = numpy.std(array)
d["min"] = numpy.amin(array)
d["max"] = numpy.amax(array)
return d
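# e.g. synthesize([1., 2., 3.]) -> OrderedDict with mean=2.0, std~=0.816, min=1.0, max=3.0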
def average_reward_per_step(returns, num_frames):
avgs = []
assert(len(returns) == len(num_frames))
for i in range(len(returns)):
avgs.append(returns[i] / num_frames[i])
return numpy.mean(avgs)
def average_discounted_return(returns, num_frames, disc, include_error=False):
discounted_returns = []
assert(len(returns) == len(num_frames))
for i in range(len(returns)):
discounted_returns.append(returns[i] * (disc ** (num_frames[i]-1)))
if include_error:
# error is taken here as the standard error of the mean (an assumption;
# evaluator.py expects a (mean, error) pair when include_error=True)
error = numpy.std(discounted_returns) / numpy.sqrt(len(discounted_returns))
return numpy.mean(discounted_returns), error
return numpy.mean(discounted_returns) | 941 | 21.97561 | 75 | py |
T2TL | T2TL-main/src/gnns/graphs/GCN.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch.conv import GraphConv
from gnns.graphs.GNN import GNN
class GCN(GNN):
def __init__(self, input_dim, output_dim, **kwargs):
super().__init__(input_dim, output_dim)
hidden_dims = kwargs.get('hidden_dims', [32])
self.num_layers = len(hidden_dims)
hidden_plus_input_dims = [hd + input_dim for hd in hidden_dims]
self.convs = nn.ModuleList([GraphConv(in_dim, out_dim, activation=F.relu) for (in_dim, out_dim)
in zip([input_dim] + hidden_plus_input_dims[:-1], hidden_dims)])
self.g_embed = nn.Linear(hidden_dims[-1], output_dim)
# Uses the base implementation which averages hidden representations of all nodes
def forward(self, g):
g = np.array(g).reshape((1, -1)).tolist()[0]
g = dgl.batch(g)
h_0 = g.ndata["feat"].float()
h = h_0
for i in range(self.num_layers):
if i != 0:
h = self.convs[i](g, torch.cat([h, h_0], dim=1))
else:
h = self.convs[i](g, h)
g.ndata['h'] = h
# Calculate graph representation by averaging all the hidden node representations.
hg = dgl.mean_nodes(g, 'h')
return self.g_embed(hg).squeeze(1)
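# Note on the forward pass above: every layer after the first receives
# torch.cat([h, h_0]), i.e. the hidden state concatenated with the raw input
# features -- a simple skip connection keeping token encodings visible at depth.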
# GCN, but the graph representation is only the representation of the root node.
class GCNRoot(GCN):
def __init__(self, input_dim, output_dim, **kwargs):
super().__init__(input_dim, output_dim, **kwargs)
def forward(self, g):
g = np.array(g).reshape((1, -1)).tolist()[0]
g = dgl.batch(g)
h_0 = g.ndata["feat"].float()
h = h_0
for i in range(self.num_layers):
if i != 0:
h = self.convs[i](g, torch.cat([h, h_0], dim=1))
else:
h = self.convs[i](g, h)
g.ndata['h'] = h
hg = dgl.sum_nodes(g, 'h', weight='is_root')
return self.g_embed(hg).squeeze(1)
class GCNRootShared(GNN):
def __init__(self, input_dim, output_dim, **kwargs):
super().__init__(input_dim, output_dim)
hidden_dim = kwargs.get('hidden_dim', 32)
num_layers = kwargs.get('num_layers', 2)
self.num_layers = num_layers
self.linear_in = nn.Linear(input_dim, hidden_dim)
self.conv = GraphConv(2*hidden_dim, hidden_dim, activation=F.relu)
self.g_embed = nn.Linear(hidden_dim, output_dim)
def forward(self, g):
g = np.array(g).reshape((1, -1)).tolist()[0]
g = dgl.batch(g)
h_0 = self.linear_in(g.ndata["feat"].float())
h = h_0
# Apply convolution layers
for i in range(self.num_layers):
h = self.conv(g, torch.cat([h, h_0], dim=1))
g.ndata['h'] = h
hg = dgl.sum_nodes(g, 'h', weight='is_root')
return self.g_embed(hg).squeeze(1)
| 2,927 | 31.898876 | 103 | py |
T2TL | T2TL-main/src/gnns/graphs/RGCN.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch.conv import RelGraphConv
from gnns.graphs.GNN import GNN
from utils.ast_builder import edge_types
class RGCN(GNN):
def __init__(self, input_dim, output_dim, **kwargs):
super().__init__(input_dim, output_dim)
hidden_dims = kwargs.get('hidden_dims', [32])
self.num_layers = len(hidden_dims)
hidden_plus_input_dims = [hd + input_dim for hd in hidden_dims]
self.convs = nn.ModuleList([RelGraphConv(in_dim, out_dim, len(edge_types), activation=F.relu)
for (in_dim, out_dim) in zip([input_dim] + hidden_plus_input_dims[:-1], hidden_dims)])
self.g_embed = nn.Linear(hidden_dims[-1], output_dim)
def forward(self, g):
g = np.array(g).reshape((1, -1)).tolist()[0]
g = dgl.batch(g)
h_0 = g.ndata["feat"].float()
h = h_0
etypes = g.edata["type"].float()
for i in range(self.num_layers):
if i != 0:
h = self.convs[i](g, torch.cat([h, h_0], dim=1), etypes)
else:
h = self.convs[i](g, h, etypes)
g.ndata['h'] = h
# Calculate graph representation by averaging all the hidden node representations.
hg = dgl.mean_nodes(g, 'h')
return self.g_embed(hg).squeeze(1)
class RGCNRoot(RGCN):
def __init__(self, input_dim, output_dim, **kwargs):
super().__init__(input_dim, output_dim, **kwargs)
def forward(self, g):
g = np.array(g).reshape((1, -1)).tolist()[0]
g = dgl.batch(g)
h_0 = g.ndata["feat"].float().squeeze()
h = h_0
etypes = g.edata["type"]
for i in range(self.num_layers):
if i != 0:
h = self.convs[i](g, torch.cat([h, h_0], dim=1), etypes)
else:
h = self.convs[i](g, h, etypes)
g.ndata['h'] = h # TODO: Check if this is redundant
hg = dgl.sum_nodes(g, 'h', weight='is_root')
return self.g_embed(hg).squeeze(1)
class RGCNRootShared(GNN):
def __init__(self, input_dim, output_dim, **kwargs): # 22; 32; {'hidden_dim': 32, 'num_layers': 8}
super().__init__(input_dim, output_dim)
hidden_dim = kwargs.get('hidden_dim', 32)
num_layers = kwargs.get('num_layers', 2)
self.num_layers = num_layers
self.linear_in = nn.Linear(input_dim, hidden_dim)
self.conv = RelGraphConv(2*hidden_dim, hidden_dim, len(edge_types), activation=torch.tanh)
self.g_embed = nn.Linear(hidden_dim, output_dim)
def forward(self, g):
g = np.array(g).reshape((1, -1)).tolist()[0]
g = dgl.batch(g)
h_0 = self.linear_in(g.ndata["feat"].float().squeeze())
h = h_0
etypes = g.edata["type"]
# Apply convolution layers
for i in range(self.num_layers):
h = self.conv(g, torch.cat([h, h_0], dim=1), etypes)
g.ndata['h'] = h
g.ndata['is_root'] = g.ndata['is_root'].unsqueeze(1)
hg = dgl.sum_nodes(g, 'h', weight='is_root')
return self.g_embed(hg).squeeze(1)
| 3,153 | 32.913978 | 103 | py |
T2TL | T2TL-main/src/gnns/graphs/GNN.py | import torch
import torch.nn as nn
from gnns import *
class GNN(nn.Module):
def __init__(self, input_dim, output_dim):
super().__init__()
def forward(self, g):
raise NotImplementedError
def GNNMaker(gnn_type, input_dim, output_dim): # 'RGCN_8x32_ROOT_SHARED'; 22; 33
clazz = lookup(gnn_type)
return clazz["class"](input_dim, output_dim, **clazz["kwargs"])
| 393 | 23.625 | 81 | py |
toulbar2 | toulbar2-master/web/TUTORIALS/sudoku/MNIST_train.py | from __future__ import print_function
import argparse
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
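# Shape check for Net.forward (for reference): input (N, 1, 28, 28) -> conv1 ->
# (N, 32, 26, 26) -> conv2 -> (N, 64, 24, 24) -> max_pool2d(2) -> (N, 64, 12, 12),
# flattened to N x 9216, which matches fc1's input size.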
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=14, metavar='N',
help='number of epochs to train (default: 14)')
parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
help='learning rate (default: 1.0)')
parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--save-test-marginals', action='store_true', default=False,
help='For Saving the marginal scores of the Model on the test set')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('./data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_set = datasets.MNIST('./data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
test_loader = torch.utils.data.DataLoader(test_set,
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
scheduler.step()
if args.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
if args.save_test_marginals:
img_logits = [[] for _ in range(10)] # one bucket per digit class
img_indces = [[] for _ in range(10)]
test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False)
for idx, (data, target) in enumerate(test_loader):
with torch.no_grad():
label = target.data.numpy()[0]
logits = model.forward(data)
img_logits[label].append(-logits.data.numpy().squeeze())
img_indces[label].append(idx)
with open("MNIST_test_marginal","wb") as f:
pickle.dump( img_logits , f)
# restore with img_logits = pickle.load(open("MNIST_test_marginal", "rb" ))
with open("MNIST_test_indices","wb") as f:
pickle.dump( img_indces , f)
# restore with img_indces = pickle.load(open("MNIST_test_indices", "rb" ))
data = iter(torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False))
images = list(map(lambda x: x[0].reshape(28,28), data))
white = np.zeros((28,28))
mpl.rcParams['toolbar'] = 'None'
plt.style.use('dark_background')
fig, axs = plt.subplots(9, 9,figsize=(5,5))
for i in range(9):
for j in range(9):
axs[i][j].set_axis_off()
if (i==j):
axs[i][j].imshow(white,cmap=plt.get_cmap('Greys'))
else:
axs[i][j].imshow(images[i*9+j],cmap=plt.get_cmap('Greys'))
fig.tight_layout(pad=0.2,h_pad=0.2,w_pad=0.2)
plt.show()
if __name__ == '__main__':
main()
| 6,939 | 40.065089 | 97 | py |
toulbar2 | toulbar2-master/web/TUTORIALS/sudoku/MNIST_sudoku.py | import pytoulbar2
import math, numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pickle
import torch
from torchvision import datasets, transforms
import itertools
import pandas as pd
import hashlib
##########################################################################
# Image output routines
##########################################################################
def fillImage(fig,axsf,g,ph,cs):
for v,h in enumerate(g):
axsf[v].set_axis_off()
if h:
if (ph[v]):
if h != int(cs[v]):
h = int(cs[v])
mycmap = plt.get_cmap('Purples_r')
elif ph[v] != int(cs[v]):
mycmap = plt.get_cmap('Greens_r')
else:
mycmap = plt.get_cmap('Greys_r')
else:
if h != int(cs[v]):
mycmap = plt.get_cmap('Purples')
else:
mycmap = plt.get_cmap('Greys')
axsf[v].imshow(MNIST_image(cs,v,h),cmap=mycmap)
else:
axsf[v].imshow(np.zeros((28,28)))
fig.tight_layout(pad=0.2,h_pad=0.2,w_pad=0.2)
# Prepare figure with flat axis for easier access
mpl.rcParams['toolbar'] = 'None'
plt.style.use('dark_background')
figs, axss = plt.subplots(9, 9,figsize=(5,5))
axssf = axss.flatten()
##########################################################################
# Loads MNIST test images and the saved network outputs for each of them
##########################################################################
test_set = datasets.MNIST('./data', download = True, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
data = iter(torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False))
images = list(map(lambda x: x[0].reshape(28,28), data))
# Load MNIST outputs and image indices for every MNIST test digit
logits = pickle.load(open("MNIST_test_marginal", "rb"))
logits_len = list(map(lambda x: len(x), logits))
img_indces = pickle.load(open("MNIST_test_indices", "rb"))
def myhash(str):
return int(hashlib.sha512(str.encode('utf-8')).hexdigest(), 16)
def MNIST_output(cg,p,val):
h = myhash(cg+str(p))
return logits[val][h % logits_len[val]]
def MNIST_image(cg,p,val):
h = myhash(cg+str(p))
return images[img_indces[val][h % logits_len[val]]]
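# The sha512 hash makes the digit-to-image assignment deterministic: the same
# (solution string, cell index) pair always selects the same MNIST test image
# and the same stored network outputs.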
##########################################################################
# Sudoku grids loading
##########################################################################
# Load grid/solution pairs from the validation set of the RRN paper
valid = pd.read_csv("valid.csv.xz",sep=",", header=None).values
hints = valid[:][:,0]
sols = valid[:][:,1]
size = math.isqrt(len(sols[0]))
par = math.isqrt(size)
def MNIST_fails(lg):
lf = []
for i,cg in enumerate(lg):
mygrid = [int(h) for h in cg]
ok = True
for v,h in enumerate(mygrid):
if h and (np.argmin(MNIST_output(cg,v,h)) != h):
ok = False
if (not ok): lf.append(i)
return lf
##########################################################################
# Auxiliary CFN functions for Sudoku
##########################################################################
# Adds a clique of differences with violation "cost" on "varList"
def addCliqueAllDiff(theCFN, varList, cost):
different = (cost*np.identity(size, dtype=np.int64)).flatten()
for vp in itertools.combinations(varList,2):
theCFN.AddFunction(vp,different)
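# e.g. for size=3 and violation cost c, `different` flattens the 3x3 table
# [[c,0,0],[0,c,0],[0,0,c]]: equal values in a pair cost c, distinct values cost 0.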
# Sets the value of variable with index "vIdx" to "value" using a unary function
def setHint(theCFN,vIdx,value):
costs = theCFN.GetUB()*np.ones(size, dtype = np.int64)
costs[value-1] = 0
theCFN.AddFunction([vIdx], costs)
# Adds the MNIST negative log-probabilities as unary costs on vIdx.
# mlp[0] (digit 0) is skipped since Sudoku values range over 1..9.
# Uncalibrated, yet decent in practice.
def setProbHint(theCFN,vIdx,mlp):
theCFN.AddFunction([vIdx], mlp[1:])
##########################################################################
# Build and solve the CFN model for one grid (grids 6 and 13 are examples)
##########################################################################
CP_mode = False
grid_number = 13
cgrid = hints[grid_number]
csol = sols[grid_number]
grid = [int(h) for h in cgrid]
# list of row, column and cells variable indices
rows = [ [] for _ in range(size) ]
columns = [ [] for _ in range(size) ]
cells = [ [] for _ in range(size) ]
myCFN = pytoulbar2.CFN(1) if CP_mode else pytoulbar2.CFN(1000000,6)
# create variables and keep indices in row, columns and cells
for i in range(size):
for j in range(size):
vIdx = myCFN.AddVariable("X"+str(i+1)+"."+str(j+1),range(1,size+1))
columns[j].append(vIdx)
rows[i].append(vIdx)
cells[(i//par)*par+(j//par)].append(vIdx)
# add the clique constraints on rows, columns and cells
for scope in rows+columns+cells:
addCliqueAllDiff(myCFN,scope, myCFN.GetUB())
# assign/bias variables
pgrid = []
for v,h in enumerate(grid):
if h:
prediction = np.argmin(MNIST_output(csol,v,h))
pgrid.append(prediction)
if (prediction != h):
row = v//size
col = v % size
print("Erreur MNIST on cell",row+1,col+1,", a", h,"has been predicted as", prediction)
if (CP_mode):
setHint(myCFN,v,prediction)
else:
setProbHint(myCFN,v,MNIST_output(csol,v,h))
else:
pgrid.append(0)
sol = myCFN.Solve()
if (sol):
fillImage(figs,axssf,sol[0],pgrid,csol)
else:
fillImage(figs,axssf,pgrid,pgrid,csol)
print("No solution found")
plt.show()
| 5,697 | 33.325301 | 98 | py |
toulbar2 | toulbar2-master/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
docs_path = os.path.normpath(os.path.abspath('.'))
# python source code path
pytoulbar2_code_path = os.path.normpath(os.path.join(docs_path, "..", "..",
"pytoulbar2"))
sys.path.insert(0, pytoulbar2_code_path)
# -- Project information -----------------------------------------------------
project = 'toulbar2'
copyright = '2022, INRAE'
author = 'INRAE'
# The short X.Y version
version = '1.0'
# The full version, including alpha/beta/rc tags
release = '1.0.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_rtd_theme',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
#'sphinx.ext.imgmath',
#'sphinx.ext.mathjax',
'sphinx.ext.githubpages', # => .nojekyll file
'sphinx.ext.graphviz',
'breathe',
'myst_parser',
'sphinx.ext.autosectionlabel',
]
# Breathe
breathe_default_project = "toulbar2cpp"
breathe_projects = {
"toulbar2cpp" : os.path.normpath(os.path.join(docs_path, "..", "..",
"build", "xml")),
}
#breathe_implementation_filename_extensions = ['.c', '.cc', '.cpp']
# Prefix document path to section labels, to use:
# `path/to/file:heading` instead of just `heading`
autosectionlabel_prefix_document = True
# Add any paths that contain templates here, relative to this directory.
templates_path = [os.path.join(docs_path,'_templates')]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['Thumbs.db', '.DS_Store',
#'_build', _pyvenv', 'README.md',
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
#'logo_only': False,
#'display_version': True,
#'prev_next_buttons_location': 'bottom',
#'style_external_links': False,
#'vcs_pageview_mode': '',
#'style_nav_header_background': 'FireBrick',
## Toc options
#'collapse_navigation': True,
#'sticky_navigation': True,
'navigation_depth': 4,
#'includehidden': True, # False,
#'titles_only': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [os.path.join(docs_path,'_static')]
html_style = 'css/toulbar2.css'
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
#html_logo =
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'toulbar2doc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# remove blank pages (between the title page and the TOC, etc.)
'classoptions': ',openany,oneside',
'babel' : '\\usepackage[english]{babel}',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
# extracts
('examples/tutorials_pdf', 'tutorials.tex',
'', # to keep .rst title
'INRAE', 'manual'),
('examples/usecases_pdf', 'usecases.tex',
'', # to keep .rst title
'INRAE', 'manual'),
('refman', 'refman.tex',
'toulbar2 Reference Manual',
'INRAE', 'manual'),
('userdoc', 'userdoc.tex',
'toulbar2 User Guide',
'INRAE', 'manual'),
('formats/wcspformat', 'WCSP_format.tex',
'', # to keep .rst title
'INRAE', 'manual'),
('formats/cfnformat', 'CFN_format.tex',
'', # to keep .rst title
'INRAE', 'manual'),
# api ref
('ref/ref_cpp', 'cpp_library.tex',
'C++ Library of toulbar2',
'INRAE', 'manual'),
('ref/ref_python', 'python_library.tex',
'Python Library of toulbar2',
'INRAE', 'manual'),
# main
('index_pdf', 'toulbar2.tex',
'toulbar2 Documentation',
'INRAE', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'toulbar2', 'toulbar2 Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'toulbar2', 'toulbar2 Documentation',
author, 'toulbar2', 'Exact solver for cost function networks (WCSP/CFN).',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| 7,750 | 28.471483 | 79 | py |
StyleFusion | StyleFusion-master/src/model.py | from shared import *
from tf_lib import *
from dataset import *
from decode import *
from evaluate import *
"""
AUTHOR: Xiang Gao (xiag@microsoft.com) at Microsoft Research
"""
class ModelBase:
def __init__(self):
self.fld = None # str
self.n_trained = None # int
self.max_n_trained = None # int
self.dataset = None # Dataset obj
self.extra = None # list of str
self.vali_data = None # dict of list
self.layers = None
def init_log(self, new, args):
# deal with existing fld
if new and os.path.exists(self.fld):
if PHILLY:
suffix = 0
while True:
fld = self.fld + '_%i'%suffix
if not os.path.exists(fld):
self.fld = fld
break
else:
if not PHILLY and not self.debug:
print('%s\nalready exists, do you want to delete the folder? (y/n)'%self.fld)
ans = input()
if not ans.lower() == 'y':
exit()
print('deleting fld: '+self.fld)
shutil.rmtree(self.fld)
time.sleep(0.1)
print('fld deleted')
self.log_train = self.fld + '/train.txt'
if new or PHILLY or hostname != 'MININT-3LHNLKS':
makedirs(os.path.join(self.fld, 'models'))
open(self.log_train, 'w').close() # create/truncate the training log
if not os.path.exists(self.fld + '/vocab.txt'):
shutil.copyfile(self.dataset.path_vocab, self.fld + '/vocab.txt')
ss = []
for k in sorted(args.__dict__.keys()):
ss.append('%s = %s'%(k, args.__dict__[k]))
with open(self.fld + '/args.txt', 'w') as f:
f.write('\n'.join(ss))
if PHILLY:
with open(self.log_train, 'a') as f:
f.write('hostname: %s\n'%hostname)
f.write('data_path: %s\n'%DATA_PATH)
f.write('out_path: %s\n'%OUT_PATH)
def train(self, batch_per_load=100):
self.vali()
while self.n_trained < self.max_n_trained:
s = '\n***** trained %.3f M'%(self.n_trained/1e6)
for tp in self.dataset.n_reset['train']:
s += ', %s = %i'%(tp, self.dataset.n_reset['train'][tp])
s += ' *****'
write_log(self.log_train, s)
self.train_a_load(batch_per_load)
if self.debug:
exit()
def load_weights(self, path):
self.prev_wt_fuse = None
print('loading weights from %s'%path)
npz = np.load(path, encoding='latin1', allow_pickle=True)
print(npz.files)
weights = npz['layers'].item()
for k in weights:
s = ' '*(20-len(k)) + k + ': %i params: '%len(weights[k])
for wt in weights[k]:
s += str(wt.shape) + ', '
print(s)
for attr in self.extra:
if attr in npz:
if attr not in ['name']:
setattr(self, attr, npz[attr])
else:
print('WARNING! attr %s not in npz'%attr)
self.build_model(weights)
self.build_model_test()
def extract_weights(self):
weights = dict()
if self.layers is None:
return weights
for k in self.layers:
weights[k] = self.layers[k].get_weights()
return weights
def save_weights(self):
path = self.fld + '/models/%.1fM.npz'%(self.n_trained/1e6)
weights = self.extract_weights()
to_save = {'layers':weights}
for attr in self.extra:
to_save[attr] = getattr(self, attr)
n_try = 0
while n_try < 3:
try:
np.savez(path, **to_save)
print('saved to: '+path)
break
except:
n_try += 1
print('cannot save, try %i'%n_try)
return path
def build_model_test(self):
pass
def build_model(self, weights=dict()):
pass
def train_a_load(self, batch_per_load):
pass
def set_extra(self, npz):
pass
class Seq2SeqBase(ModelBase):
def __init__(self, dataset, fld, args, new=False, allowed_words=None):
self.dataset = dataset
self.fld = fld
self.allowed_words = allowed_words
self.layers = None
self.history = LossHistory()
self.vali_data = None
self.classifiers = []
self.n_batch = 0
self.prev_n_batch = 0
self.dn_batch_vali = 100
self.bias_conv = False # hasattr(self.dataset, 'files') and ('bias_conv' in self.dataset.files['train'])
self.debug = args.debug
self.token_embed_dim = args.token_embed_dim
self.rnn_units = args.rnn_units
self.encoder_depth = args.encoder_depth
self.decoder_depth = args.decoder_depth
self.lr = args.lr
self.max_n_trained = args.max_n_trained
self.randmix = False
self.mix_ratio = (args.conv_mix_ratio, args.nonc_mix_ratio)
if not self.bias_conv:
assert(args.conv_mix_ratio == 0.)
self.extra = ['name']
self.init_extra(args)
if hasattr(args, 'skip'):
skip = int(1e6*args.skip)
else:
skip = 0
self.dataset.skip(skip, self.mix_ratio, conv_only=(self.name=='s2s'))
self.n_trained = skip
self.init_log(new, args)
self.build_model()
def get_mix_ratio(self):
if self.randmix:
ret = []
for ratio in self.mix_ratio:
p = [1. - ratio, ratio]
ret.append(np.random.choice([0.,1.], 1, p=p)[0])
return tuple(ret)
else:
return self.mix_ratio
def fit(self, inputs, outputs):
n_try = 0
if self.debug:
self.model.fit(
inputs,
outputs,
batch_size=BATCH_SIZE,
callbacks=[self.history],
verbose=FIT_VERBOSE)
return
while n_try < 3:
try:
self.model.fit(
inputs,
outputs,
batch_size=BATCH_SIZE,
callbacks=[self.history],
verbose=FIT_VERBOSE)
return
except Exception as e:
print('got error, sleeping')
print('E'*20)
print(e)
print('E'*20)
time.sleep(1)
n_try += 1
def _stacked_rnn(self, rnns, inputs, initial_states=None):
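# Chain the given RNN layers: the full output sequence of layer i feeds
# layer i+1, and the final hidden state of every layer is collected (later
# used as the per-layer initial states of the decoder).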
if initial_states is None:
initial_states = [None] * len(rnns)
outputs, state = rnns[0](inputs, initial_state=initial_states[0])
states = [state]
for i in range(1, len(rnns)):
outputs, state = rnns[i](outputs, initial_state=initial_states[i])
states.append(state)
return outputs, states
def _build_encoder(self, inputs, prefix):
_, encoder_states = self._stacked_rnn(
[self.layers['%s_encoder_rnn_%i'%(prefix, i)] for i in range(self.encoder_depth)],
self.layers['embedding'](inputs))
latent = encoder_states[-1]
return latent
def _build_decoder(self, input_seqs, input_states):
"""
for auto-regressive, states are returned and used as input for the generation of the next token
for teacher-forcing, token already given, so only need init states
"""
decoder_outputs, decoder_states = self._stacked_rnn(
[self.layers['decoder_rnn_%i'%i] for i in range(self.decoder_depth)],
self.layers['embedding'](input_seqs),
input_states)
decoder_outputs = self.layers['decoder_softmax'](decoder_outputs)
return decoder_outputs, decoder_states
def _create_layers(self, weights=dict()):
layers = dict()
name = 'embedding'
params = _params(name, weights, {'mask_zero':True})
layers[name] = Embedding(
self.dataset.num_tokens + 1, # +1 as mask_zero
self.token_embed_dim,
**params)
for i in range(self.decoder_depth):
name = 'decoder_rnn_%i'%i
params = _params(name, weights, {'return_state':True, 'return_sequences':True})
layers[name] = GRU(
self.rnn_units,
**params)
for prefix in self.prefix:
for i in range(self.encoder_depth):
name = '%s_encoder_rnn_%i'%(prefix, i)
params = _params(name, weights, {'return_state':True, 'return_sequences':True})
layers[name] = GRU(
self.rnn_units,
**params)
name = 'decoder_softmax'
params = _params(name, weights, {'activation':'softmax'})
layers[name] = Dense(
self.dataset.num_tokens + 1, # +1 as mask_zero
**params)
return layers
def build_model_test(self):
#self.refresh_session()
decoder_inputs = Input(shape=(None,), name='decoder_inputs')
# encoder
self.model_encoder = dict()
self.model_tf = dict()
self.tf_history = dict()
for prefix in self.prefix:
encoder_inputs = Input(shape=(None,), name=prefix+'_encoder_inputs')
latent = self._build_encoder(encoder_inputs, prefix=prefix)
self.model_encoder[prefix] = Model(encoder_inputs, latent)
self.model_encoder[prefix]._make_predict_function()
decoder_outputs, _ = self._build_decoder(decoder_inputs, [latent]*self.decoder_depth)
self.model_tf[prefix] = Model([encoder_inputs, decoder_inputs], decoder_outputs)
for layer in self.model_tf[prefix].layers:
layer.trainable = False
self.model_tf[prefix].compile(Adam(lr=0.), loss=_dec_loss) # lr = 0 so '.fit' (which supports callbacks) can serve as '.evaluate'
self.tf_history[prefix] = LossHistory()
# decoder: autoregressive
decoder_inital_states = []
for i in range(self.decoder_depth):
decoder_inital_states.append(Input(shape=(self.rnn_units,), name="decoder_inital_state_%i"%i))
decoder_outputs, decoder_states = self._build_decoder(decoder_inputs, decoder_inital_states)
model_decoder = Model(
[decoder_inputs] + decoder_inital_states,
[decoder_outputs] + decoder_states)
model_decoder._make_predict_function()
self.decoder = Decoder(self.dataset, model_decoder,
self.decoder_depth, self.rnn_units, allowed_words=self.allowed_words)
def get_vali_data(self):
if self.vali_data is not None:
#print('returning self.vali_data', self.vali_data)
return self.vali_data
print('getting vali data...')
def _feed_vali(k):
self.dataset.reset('vali')
d = self.dataset.feed_data('vali', max_n=vali_size, check_src=True, mix_ratio=k, conv_only=(self.name=='s2s'))
self.dataset.reset('vali')
return d
if self.debug:
vali_size = BATCH_SIZE
else:
vali_size = 1000
self.vali_data = _feed_vali((0, 1))
"""
self.vali_data['base'] = _feed_vali((0, 0))
self.vali_data['mix'] = _feed_vali(self.mix_ratio)
if self.bias_conv:
self.vali_data['bias'] = _feed_vali((1, 1))
else:
self.vali_data['bias'] = _feed_vali((0, 1))
"""
return self.vali_data
def vali(self):
self.build_model_test()
ss = []
for inp in ['who is he ?', 'do you like this game ?', 'good morning .']:
ss.append(infer_comb(inp, self))
write_log(self.log_train, '\n'.join(ss))
"""
data = self.get_vali_data()
if self.name.startswith('fuse'):
r_rand = 0.1 * np.sqrt(self.rnn_units)
else:
r_rand = 0.
#s_decoded = ''#eval_decoded(self, data, self.classifiers, r_rand=r_rand)[0]
#s_surrogate = eval_surrogate(self, data)[0]
#write_log(self.log_train, '\n' + s_decoded + '\n\n' + s_surrogate + '\n')
"""
self.prev_n_batch = self.n_batch
# save --------------------
self.save_weights()
def init_extra(self, args):
pass
def train_a_load(self, batch_per_load):
mix_ratio = self.get_mix_ratio()
data = self.dataset.feed_data('train', BATCH_SIZE * batch_per_load, mix_ratio=mix_ratio, conv_only=(self.name == 's2s'))
n_sample, inputs, outputs = self._inp_out_data(data)
t0 = datetime.datetime.now()
t0_str = str(t0).split('.')[0]
write_log(self.log_train, 'start: %s'%t0_str + ', mix_ratio = '+str(mix_ratio))
print('fitting...')
self.fit(inputs, outputs)
self.n_trained += n_sample
self.n_batch += batch_per_load
dt = (datetime.datetime.now() - t0).seconds
loss = np.mean(self.history.losses)
write_log(self.log_train, 'n_batch: %i, prev %i'%(self.n_batch, self.prev_n_batch))
ss = ['spent: %i sec'%dt, 'train: %.4f'%loss]
write_log(self.log_train, '\n'.join(ss))
if not self.debug and (self.n_batch - self.prev_n_batch < self.dn_batch_vali):
return
# vali --------------------
self.vali()
def print_loss(self, loss_weights):
s = 'loss: '+'-'*20 + '\n'
for i in range(len(self.loss)):
loss_name = str(self.loss[i])
if loss_name.startswith('<func'):
loss_name = loss_name.split()[1]
s += '%6.2f '%loss_weights[i] + loss_name + '\n'
s += '-'*20 + '\n'
write_log(self.log_train, s)
class Seq2Seq(Seq2SeqBase):
def init_extra(self, args):
self.name = 's2s'
self.prefix = ['S2S']
def build_model(self, weights=dict()):
self.layers = self._create_layers(weights) # create new
encoder_inputs = Input(shape=(None,), name='encoder_inputs')
decoder_inputs = Input(shape=(None,), name='decoder_inputs')
# connections: teacher forcing
latent = self._build_encoder(encoder_inputs, self.prefix[0])
decoder_outputs, _ = self._build_decoder(decoder_inputs, [latent]*self.decoder_depth)
# models
self.model = Model(
[encoder_inputs, decoder_inputs], # [input sentences, ground-truth target sentences],
decoder_outputs) # shifted ground-truth sentences
self.model.compile(Adam(lr=self.lr), loss=_dec_loss)
def _inp_out_data(self, data):
inputs = [data['inp_enc']['ctxt'], data['inp_dec']['resp']]
outputs = data['out_dec']['resp']
return data['n_sample'], inputs, outputs
class VanillaMTask(Seq2SeqBase):
def init_extra(self, args):
self.name = 'mtask'
self.loss = [
_dec_loss, # logP(resp | S2S), just the seq2seq loss
_dec_loss, # logP(resp | AE_resp)
_dec_loss, # logP(nonc | AE_nonc)
]
self.prefix = ['AE','S2S']
def build_model(self, weights=dict()):
loss_weights = [1., 0.5, 0.5]
self.layers = self._create_layers(weights) # create new
# inputs
inp_enc_ctxt = Input(shape=(None,), name='inp_enc_ctxt')
inp_enc_resp = Input(shape=(None,), name='inp_enc_resp')
inp_dec_resp = Input(shape=(None,), name='inp_dec_resp')
inp_enc_nonc = Input(shape=(None,), name='inp_enc_nonc')
inp_dec_nonc = Input(shape=(None,), name='inp_dec_nonc')
inps_enc = [inp_enc_ctxt, inp_enc_resp, inp_enc_nonc]
inps_dec = [inp_dec_resp, inp_dec_nonc]
inputs = inps_enc + inps_dec
# hiddens
vec_s2s = self._build_encoder(inp_enc_ctxt, prefix='S2S')
vec_ae_resp = self._build_encoder(inp_enc_resp, prefix='AE')
vec_ae_nonc = self._build_encoder(inp_enc_nonc, prefix='AE')
# outputs
out_s2s, _ = self._build_decoder(inp_dec_resp, [vec_s2s]*self.decoder_depth)
out_ae_resp, _ = self._build_decoder(inp_dec_resp, [vec_ae_resp]*self.decoder_depth) # teacher-force with resp tokens to match the resp target below
out_ae_nonc, _ = self._build_decoder(inp_dec_nonc, [vec_ae_nonc]*self.decoder_depth)
outputs = [out_s2s, out_ae_resp, out_ae_nonc]
# compile
self.print_loss(loss_weights)
self.model = Model(inputs, outputs)
self.model.compile(Adam(lr=self.lr), loss=self.loss, loss_weights=loss_weights)
def _inp_out_data(self, data, u=None):
n_sample = data['n_sample']
if n_sample == 0:
return n_sample, [], []
inps_enc = [data['inp_enc']['ctxt'], data['inp_enc']['resp'], data['inp_enc']['nonc']]
inps_dec = [data['inp_dec']['resp'], data['inp_dec']['nonc']]
outs_dec = [data['out_dec']['resp'], data['out_dec']['resp'], data['out_dec']['nonc']]
return n_sample, inps_enc + inps_dec, outs_dec
class StyleFusion(Seq2SeqBase):
def init_extra(self, args):
self.name = args.model_class.lower()
assert(self.name in ['fuse','fuse1'])
self.max_wt_dist = args.wt_dist
self.stddev = args.stddev
self.v1 = (self.name == 'fuse1')
self.ablation = args.ablation
if self.v1:
# roughly (not exactly) follows SpaceFusion v1, as in https://arxiv.org/abs/1902.11205
_dec_loss_ae = _dec_loss
_dist_loss = _absdiff_dist_v1
else:
# v2, consider fuse with nonc
_dec_loss_ae = _dec_loss_u # interp(ae_resp, ae_nonc)
if args.reld:
_dist_loss = _relative_dist # consider all these terms d(s2s,resp), d(s2s,nonc), d(resp), d(nonc), d(s2s)
else:
_dist_loss = _absdiff_dist
self.randmix = True # binary batch mix
self.loss = [
_dec_loss, # logP(resp | S2S), just the seq2seq loss
_dec_loss, # logP(resp | interp), interp is between ctxt and resp, i.e. the 3rd term in Eq.3 in NAACL
_dec_loss_ae,
_dist_loss]
self.prefix = ['AE','S2S']
"""
def refresh_session(self):
K.clear_session() # avoid building graph over and over to slow down everything
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
for clf in self.classifiers:
clf.load()
"""
def build_model(self, weights=dict()):
loss_weights = [1., 1., 1., 1.]
if self.ablation:
loss_weights = [1., 1., 0., 1.] # disable L_{smooth,style}
self.layers = self._create_layers(weights) # create new
noisy = Lambda(_add_noise,
arguments={'stddev':self.stddev},
name='noisy')
concat = Concatenate(name='concat_1', axis=-1)
# inputs
inp_enc_ctxt = Input(shape=(None,), name='inp_enc_ctxt')
inp_enc_resp = Input(shape=(None,), name='inp_enc_resp')
inp_dec_resp = Input(shape=(None,), name='inp_dec_resp')
inps_enc = [inp_enc_ctxt, inp_enc_resp]
inps_dec = [inp_dec_resp]
inp_enc_nonc = Input(shape=(None,), name='inp_enc_nonc')
inp_dec_nonc = Input(shape=(None,), name='inp_dec_nonc')
inps_enc.append(inp_enc_nonc)
inps_dec.append(inp_dec_nonc)
inp_u = [Input(shape=(None,), name='inp_u')] # rand drawn from U(0,1). each batch has the same value, see _inp_out_data
inputs = inps_enc + inps_dec + inp_u # match _inp_out_data
# hiddens
vec_s2s = self._build_encoder(inp_enc_ctxt, prefix='S2S')
vec_ae_resp = self._build_encoder(inp_enc_resp, prefix='AE')
vec_ae_nonc = self._build_encoder(inp_enc_nonc, prefix='AE')
vec_interp_resp = noisy(Lambda(_interp, name='interp_resp')([vec_s2s, vec_ae_resp] + inp_u))
# outputs
out_s2s, _ = self._build_decoder(inp_dec_resp, [vec_s2s]*self.decoder_depth)
out_interp_resp, _ = self._build_decoder(inp_dec_resp, [vec_interp_resp]*self.decoder_depth)
if self.v1:
out_ae, _ = self._build_decoder(inp_dec_nonc, [vec_ae_nonc]*self.decoder_depth)
else:
vec_interp_ae = noisy(Lambda(_interp, name='interp_ae')([vec_ae_resp, vec_ae_nonc] + inp_u))
out_interp_ae_resp, _ = self._build_decoder(inp_dec_resp, [vec_interp_ae]*self.decoder_depth)
out_interp_ae_nonc, _ = self._build_decoder(inp_dec_nonc, [vec_interp_ae]*self.decoder_depth)
out_ae = concat([out_interp_ae_resp, out_interp_ae_nonc])
outs_dec = [out_s2s, out_interp_resp, out_ae]
outs_dist = concat([vec_s2s, vec_ae_resp, vec_ae_nonc])
outputs = outs_dec + [outs_dist]
# compile
self.print_loss(loss_weights)
self.model = Model(inputs, outputs)
self.model.compile(Adam(lr=self.lr), loss=self.loss, loss_weights=loss_weights)
def _inp_out_data(self, data, u=None):
n_sample = data['n_sample']
if n_sample == 0:
return n_sample, [], []
if u is None:
u = np.random.random(n_sample)
else:
u = np.array([u] * n_sample)
inps_enc = [data['inp_enc']['ctxt'], data['inp_enc']['resp']]
inps_dec = [data['inp_dec']['resp']]
outs_dec = [data['out_dec']['resp'], data['out_dec']['resp']]
inps_enc.append(data['inp_enc']['nonc'])
inps_dec.append(data['inp_dec']['nonc'])
inputs = inps_enc + inps_dec + [u]
if self.v1:
outs_dec.append(data['out_dec']['nonc'])
else:
_, l, v = data['out_dec']['resp'].shape
out_interp_nonc = np.zeros([n_sample, l, v*2+1])
out_interp_nonc[:,:,:v] = data['out_dec']['resp']
out_interp_nonc[:,:,v:v*2] = data['out_dec']['nonc']
for t in range(l):
out_interp_nonc[:,t,-1] = u
outs_dec.append(out_interp_nonc)
outputs = outs_dec + [np.zeros((n_sample, 1))]
return n_sample, inputs, outputs
class LossHistory(Callback):
def reset(self):
self.losses = []
def on_train_begin(self, logs={}):
self.reset()
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
def _params(name, weights, extra=dict()):
params = {'name':name}
if name in weights:
params['weights'] = weights[name]
for k in extra:
params[k] = extra[k]
return params
def write_log(path, s, PRINT=True, mode='a'):
if PRINT:
print(s)
sys.stdout.flush()
if not s.endswith('\n'):
s += '\n'
if PHILLY:
n_try = 0
while n_try < 3:
try:
with open(path, mode) as f:
f.write(s)
break
except:# PermissionError as e:
#print(e)
print('cannot write_log, sleeping...')
time.sleep(2)
n_try += 1
else:
with open(path, mode) as f:
f.write(s)
# ------------------- customized loss --------------------
def _dist_1nn(a, b=None):
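# Soft 1-nearest-neighbour distance within a batch: build the pairwise
# Euclidean distance matrix, turn it into inverse-distance weights (the
# diagonal is pushed to ~0 by the tf.eye(n) * 1000 term so a point never
# matches itself), normalise the weights per row, and return the batch mean
# of the weighted distances.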
n = BATCH_SIZE
expanded_a = tf.expand_dims(a, 1)
if b is None:
b = a
expanded_b = tf.expand_dims(b, 0)
d_squared = tf.reduce_mean(tf.squared_difference(expanded_a, expanded_b), 2)
mat = tf.sqrt(tf.maximum(0., d_squared))
wt = 1./(mat + tf.eye(n) * 1000 + 1e-6)
sum_wt = tf.reshape(tf.reduce_sum(wt, axis=1), [n, 1])
sum_wt = tf.tile(sum_wt, [1,n])
wt = wt/sum_wt
d1nn = tf.reduce_sum(mat * wt, axis=1)
d1nn = tf.reduce_mean(d1nn)
return d1nn
def _cross_inner(vecs, v1=False):
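# vecs is the concatenation [vec_s2s, vec_ae_resp, vec_ae_nonc]. "cross"
# measures the distance between points that should be fused (S2S vs AE
# spaces), while "inner" is the soft-1NN spread within each space; the
# distance losses below push cross down relative to inner (as a difference
# or a ratio).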
def sqrt_mse(a, b=None, shuffle=True, cap=None):
if b is None:
b = a
if shuffle:
#diff = a - tf.random_shuffle(b)
_, d = a.shape
n = BATCH_SIZE - 1
diff = tf.slice(a, [1,0], [n,d]) - tf.slice(b, [0,0], [n,d])
else:
diff = a - b
squared = tf.pow(diff, 2)
if cap is not None:
squared = tf.minimum(cap**2, squared)
return tf.sqrt(tf.reduce_mean(squared))
vec_s2s, vec_ae_resp, vec_ae_nonc = tf.split(vecs, 3, axis=-1)
cross_resp = sqrt_mse(vec_s2s, vec_ae_resp, shuffle=False)
inner_s2s_resp = _dist_1nn(vec_s2s)
inner_ae_nonc = _dist_1nn(vec_ae_nonc)
if v1:
print('*'*10 + ' [WARNING] Using v1 cross_inner ' + '*'*10)
return cross_resp, inner_s2s_resp + inner_ae_nonc
else:
cross_s2s_nonc = _dist_1nn(vec_s2s, vec_ae_nonc)
inner_ae_resp = _dist_1nn(vec_ae_resp)
cross = 0.5 * (cross_resp + cross_s2s_nonc)
inner = tf.minimum(tf.minimum(inner_s2s_resp, inner_ae_resp), inner_ae_nonc)
return cross, inner
def _relative_dist(_, y_pred):
cross, inner = _cross_inner(y_pred)
return cross / inner
def _absdiff_dist(_, y_pred):
cross, inner = _cross_inner(y_pred)
return cross - inner
def _absdiff_dist_v1(_, y_pred):
cross, inner = _cross_inner(y_pred, v1=True)
return cross - inner
def _dec_loss(y_true, y_pred):
# to compute - logP(resp|vec_interp_resp)
return tf.reduce_mean(keras.losses.categorical_crossentropy(y_true, y_pred))
def _dec_loss_u(y_true, y_pred):
# to compute u * logP(resp|vec_interp_ae) + (1-u) * logP(nonc|vec_interp_ae)
# where vec_interp_ae = u * vec_resp_ae + (1-u) * vec_nonc_ae
# y_true = concat([y_resp, y_nonc, u]), shape = [BATCH_SIZE, seq_len, 2 * vocab_size + 1], see out_interp_nonc in _in_out_data
# y_pred = concat([y_resp_pred, y_nonc_pred])
y_resp_pred, y_nonc_pred = tf.split(y_pred, 2, axis=-1)
vocab_size = tf.cast(y_resp_pred.shape[2], tf.int32)
y_resp, y_nonc, u = tf.split(y_true, [vocab_size, vocab_size, 1], axis=-1)
u = u[:,:,0] # like tf.squeeze, so [BATCH_SIZE, seq_len]
loss_resp = keras.losses.categorical_crossentropy(y_resp, y_resp_pred) # [BATCH_SIZE, seq_len]
loss_nonc = keras.losses.categorical_crossentropy(y_nonc, y_nonc_pred)
loss = u * loss_resp + (1. - u) * loss_nonc # [BATCH_SIZE, seq_len]
return tf.reduce_mean(loss)
# ------------------- customized layers --------------------
def _add_noise(mu, stddev):
eps = K.random_normal(shape=K.shape(mu))
return mu + tf.multiply(eps, stddev)
def _interp(inp):
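# Linear interpolation u * a + (1 - u) * b with a per-sample u: drawn from
# U(0, 1) if not provided, otherwise tiled across the feature dimension.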
if len(inp) == 2:
a, b = inp
u = K.random_uniform(shape=(K.shape(a)[0], 1))
else:
a, b, u = inp
u = K.tile(K.reshape(u, [-1,1]), [1, K.shape(a)[1]]) # repeat along axis=1
#return a + tf.multiply(b - a, u)
return tf.multiply(a, u) + tf.multiply(b, 1 - u)
def convert_model_vocab(path_npz_old, path_npz_new, path_vocab_old, path_vocab_new):
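# Remap the token embedding and decoder softmax weights from an old
# vocabulary to a new one: each new index copies the row/column of the
# matching old token, and tokens absent from the old vocabulary fall back
# to the old UNK entry.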
if os.path.exists(path_npz_new):
print('already exists: '+path_npz_new)
return
_, token2index_old = load_vocab(path_vocab_old)
index2token_new, _ = load_vocab(path_vocab_new)
n_old = max(token2index_old.values()) + 1
n_new = max(index2token_new.keys()) + 1
print('vocab: %i => %i'%(n_old, n_new))
new2old = dict()
ix_unk_old = token2index_old[UNK_token]
for ix in index2token_new:
token = index2token_new[ix]
new2old[ix] = token2index_old.get(token, ix_unk_old)
print('loading from: '+str(path_npz_old))
npz = np.load(path_npz_old, encoding='latin1')
weights = npz['layers'].item()
embedding_old = weights['embedding'][0]
softmax_wt_old = weights['decoder_softmax'][0]
softmax_bias_old = weights['decoder_softmax'][1]
n_old_loaded, dim = embedding_old.shape
assert(n_old_loaded == n_old)
embedding_new = np.zeros((n_new, dim))
softmax_wt_new = np.zeros((dim, n_new))
softmax_bias_new = np.zeros((n_new,))
print(' embedding: ' + str(embedding_old.shape) + ' => ' + str(embedding_new.shape))
print(' softmax_wt: ' + str(softmax_wt_old.shape) + ' => ' + str(softmax_wt_new.shape))
print('softmax_bias: ' + str(softmax_bias_old.shape) + ' => ' + str(softmax_bias_new.shape))
# PAD
embedding_new[0,:] = embedding_old[0, :]
softmax_wt_new[:, 0] = softmax_wt_old[:, 0]
softmax_bias_new[0] = softmax_bias_old[0]
for ix in index2token_new:
embedding_new[ix, :] = embedding_old[new2old[ix], :]
softmax_wt_new[:, ix] = softmax_wt_old[:, new2old[ix]]
softmax_bias_new[ix] = softmax_bias_old[new2old[ix]]
weights['embedding'] = [embedding_new]
weights['decoder_softmax'] = [softmax_wt_new, softmax_bias_new]
print('saving to: '+str(path_npz_new))
to_save = {'layers':weights}
for k in npz.files:
if k != 'layers' and 'mix' not in k:
to_save[k] = npz[k]
np.savez(path_npz_new, **to_save)
| 24,831 | 28.632458 | 127 | py |
StyleFusion | StyleFusion-master/src/tf_lib.py |
from keras.models import Model, load_model, model_from_yaml
from keras.layers import Input, GRU, Dense, Embedding, Dropout, Concatenate, Lambda, Add, Subtract, Multiply, GaussianNoise
from keras.utils import plot_model
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam, RMSprop
from keras.callbacks import Callback
from keras import backend as K
import tensorflow as tf
from keras.activations import hard_sigmoid
import keras
| 455 | 37 | 123 | py |
NBFNet | NBFNet-master/script/run.py | import os
import sys
import math
import pprint
import torch
from torchdrug import core
from torchdrug.utils import comm
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from nbfnet import dataset, layer, model, task, util
def train_and_validate(cfg, solver):
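# Train in roughly 10 chunks of cfg.train.num_epoch, checkpointing after
# each chunk and evaluating on the validation split; the checkpoint with
# the best validation metric is reloaded at the end.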
if cfg.train.num_epoch == 0:
return
step = math.ceil(cfg.train.num_epoch / 10)
best_result = float("-inf")
best_epoch = -1
for i in range(0, cfg.train.num_epoch, step):
kwargs = cfg.train.copy()
kwargs["num_epoch"] = min(step, cfg.train.num_epoch - i)
solver.model.split = "train"
solver.train(**kwargs)
solver.save("model_epoch_%d.pth" % solver.epoch)
solver.model.split = "valid"
metric = solver.evaluate("valid")
result = metric[cfg.metric]
if result > best_result:
best_result = result
best_epoch = solver.epoch
solver.load("model_epoch_%d.pth" % best_epoch)
return solver
def test(cfg, solver):
solver.model.split = "valid"
solver.evaluate("valid")
solver.model.split = "test"
solver.evaluate("test")
if __name__ == "__main__":
args, vars = util.parse_args()
cfg = util.load_config(args.config, context=vars)
working_dir = util.create_working_directory(cfg)
torch.manual_seed(args.seed + comm.get_rank())
logger = util.get_root_logger()
if comm.get_rank() == 0:
logger.warning("Config file: %s" % args.config)
logger.warning(pprint.pformat(cfg))
dataset = core.Configurable.load_config_dict(cfg.dataset)
solver = util.build_solver(cfg, dataset)
train_and_validate(cfg, solver)
test(cfg, solver)
| 1,690 | 25.421875 | 64 | py |
NBFNet | NBFNet-master/script/visualize.py | import os
import sys
import pprint
import torch
from torchdrug import core
from torchdrug.utils import comm
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from nbfnet import dataset, layer, model, task, util
vocab_file = os.path.join(os.path.dirname(__file__), "../data/fb15k237_entity.txt")
vocab_file = os.path.abspath(vocab_file)
def load_vocab(dataset):
entity_mapping = {}
with open(vocab_file, "r") as fin:
for line in fin:
k, v = line.strip().split("\t")
entity_mapping[k] = v
entity_vocab = [entity_mapping[t] for t in dataset.entity_vocab]
relation_vocab = ["%s (%d)" % (t[t.rfind("/") + 1:].replace("_", " "), i)
for i, t in enumerate(dataset.relation_vocab)]
return entity_vocab, relation_vocab
def visualize_path(solver, triplet, entity_vocab, relation_vocab):
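# Rank the tail given (head, relation) and the head given (tail, inverse
# relation), then decompose each prediction into its most important
# relational paths via solver.model.visualize and log them with their weights.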
num_relation = len(relation_vocab)
h, t, r = triplet.tolist()
triplet = torch.as_tensor([[h, t, r]], device=solver.device)
inverse = torch.as_tensor([[t, h, r + num_relation]], device=solver.device)
solver.model.eval()
pred, (mask, target) = solver.model.predict_and_target(triplet)
pos_pred = pred.gather(-1, target.unsqueeze(-1))
rankings = torch.sum((pos_pred <= pred) & mask, dim=-1) + 1
rankings = rankings.squeeze(0)
logger.warning("")
samples = (triplet, inverse)
for sample, ranking in zip(samples, rankings):
h, t, r = sample.squeeze(0).tolist()
h_name = entity_vocab[h]
t_name = entity_vocab[t]
r_name = relation_vocab[r % num_relation]
if r >= num_relation:
r_name += "^(-1)"
logger.warning(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
logger.warning("rank(%s | %s, %s) = %g" % (t_name, h_name, r_name, ranking))
paths, weights = solver.model.visualize(sample)
for path, weight in zip(paths, weights):
triplets = []
for h, t, r in path:
h_name = entity_vocab[h]
t_name = entity_vocab[t]
r_name = relation_vocab[r % num_relation]
if r >= num_relation:
r_name += "^(-1)"
triplets.append("<%s, %s, %s>" % (h_name, r_name, t_name))
logger.warning("weight: %g\n\t%s" % (weight, " ->\n\t".join(triplets)))
if __name__ == "__main__":
args, vars = util.parse_args()
cfg = util.load_config(args.config, context=vars)
working_dir = util.create_working_directory(cfg)
torch.manual_seed(args.seed + comm.get_rank())
logger = util.get_root_logger()
logger.warning("Config file: %s" % args.config)
logger.warning(pprint.pformat(cfg))
if cfg.dataset["class"] != "FB15k237":
raise ValueError("Visualization is only implemented for FB15k237")
dataset = core.Configurable.load_config_dict(cfg.dataset)
solver = util.build_solver(cfg, dataset)
entity_vocab, relation_vocab = load_vocab(dataset)
for i in range(500):
visualize_path(solver, solver.test_set[i], entity_vocab, relation_vocab)
| 3,106 | 34.306818 | 84 | py |
NBFNet | NBFNet-master/nbfnet/layer.py | import torch
from torch import nn
from torch.nn import functional as F
from torch_scatter import scatter_add, scatter_mean, scatter_max, scatter_min
from torchdrug import layers
from torchdrug.layers import functional
class GeneralizedRelationalConv(layers.MessagePassingBase):
eps = 1e-6
message2mul = {
"transe": "add",
"distmult": "mul",
}
def __init__(self, input_dim, output_dim, num_relation, query_input_dim, message_func="distmult",
aggregate_func="pna", layer_norm=False, activation="relu", dependent=True):
super(GeneralizedRelationalConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.num_relation = num_relation
self.query_input_dim = query_input_dim
self.message_func = message_func
self.aggregate_func = aggregate_func
self.dependent = dependent
if layer_norm:
self.layer_norm = nn.LayerNorm(output_dim)
else:
self.layer_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
if self.aggregate_func == "pna":
self.linear = nn.Linear(input_dim * 13, output_dim)
else:
self.linear = nn.Linear(input_dim * 2, output_dim)
if dependent:
self.relation_linear = nn.Linear(query_input_dim, num_relation * input_dim)
else:
self.relation = nn.Embedding(num_relation, input_dim)
def message(self, graph, input):
assert graph.num_relation == self.num_relation
batch_size = len(graph.query)
node_in, node_out, relation = graph.edge_list.t()
if self.dependent:
relation_input = self.relation_linear(graph.query).view(batch_size, self.num_relation, self.input_dim)
else:
relation_input = self.relation.weight.expand(batch_size, -1, -1)
relation_input = relation_input.transpose(0, 1)
node_input = input[node_in]
edge_input = relation_input[relation]
if self.message_func == "transe":
message = edge_input + node_input
elif self.message_func == "distmult":
message = edge_input * node_input
elif self.message_func == "rotate":
node_re, node_im = node_input.chunk(2, dim=-1)
edge_re, edge_im = edge_input.chunk(2, dim=-1)
message_re = node_re * edge_re - node_im * edge_im
message_im = node_re * edge_im + node_im * edge_re
message = torch.cat([message_re, message_im], dim=-1)
else:
raise ValueError("Unknown message function `%s`" % self.message_func)
message = torch.cat([message, graph.boundary])
return message
def aggregate(self, graph, message):
node_out = graph.edge_list[:, 1]
node_out = torch.cat([node_out, torch.arange(graph.num_node, device=graph.device)])
edge_weight = torch.cat([graph.edge_weight, torch.ones(graph.num_node, device=graph.device)])
edge_weight = edge_weight.unsqueeze(-1).unsqueeze(-1)
degree_out = graph.degree_out.unsqueeze(-1).unsqueeze(-1) + 1
if self.aggregate_func == "sum":
update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
elif self.aggregate_func == "mean":
update = scatter_mean(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
elif self.aggregate_func == "max":
update = scatter_max(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)[0]
elif self.aggregate_func == "pna":
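# Principal neighborhood aggregation: four statistics (mean, max, min, std)
# are each combined with three degree scalers (identity, log-degree
# amplification, and its inverse attenuation), giving 12 aggregated feature
# maps; concatenated with the node input in combine(), this matches the
# input_dim * 13 linear layer declared in __init__.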
mean = scatter_mean(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
sq_mean = scatter_mean(message ** 2 * edge_weight, node_out, dim=0, dim_size=graph.num_node)
max = scatter_max(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)[0]
min = scatter_min(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)[0]
std = (sq_mean - mean ** 2).clamp(min=self.eps).sqrt()
features = torch.cat([mean.unsqueeze(-1), max.unsqueeze(-1), min.unsqueeze(-1), std.unsqueeze(-1)], dim=-1)
features = features.flatten(-2)
scale = degree_out.log()
scale = scale / scale.mean()
scales = torch.cat([torch.ones_like(scale), scale, 1 / scale.clamp(min=1e-2)], dim=-1)
update = (features.unsqueeze(-1) * scales.unsqueeze(-2)).flatten(-2)
else:
raise ValueError("Unknown aggregation function `%s`" % self.aggregate_func)
return update
def message_and_aggregate(self, graph, input):
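# Fused message passing via generalized relational sparse-matrix
# multiplication (rspmm); falls back to the separate message/aggregate path
# when gradients through the graph are required or for the rotate message,
# which the fused kernel does not support.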
if graph.requires_grad or self.message_func == "rotate":
return super(GeneralizedRelationalConv, self).message_and_aggregate(graph, input)
assert graph.num_relation == self.num_relation
batch_size = len(graph.query)
input = input.flatten(1)
boundary = graph.boundary.flatten(1)
degree_out = graph.degree_out.unsqueeze(-1) + 1
if self.dependent:
relation_input = self.relation_linear(graph.query).view(batch_size, self.num_relation, self.input_dim)
relation_input = relation_input.transpose(0, 1).flatten(1)
else:
relation_input = self.relation.weight.repeat(1, batch_size)
adjacency = graph.adjacency.transpose(0, 1)
if self.message_func in self.message2mul:
mul = self.message2mul[self.message_func]
else:
raise ValueError("Unknown message function `%s`" % self.message_func)
if self.aggregate_func == "sum":
update = functional.generalized_rspmm(adjacency, relation_input, input, sum="add", mul=mul)
update = update + boundary
elif self.aggregate_func == "mean":
update = functional.generalized_rspmm(adjacency, relation_input, input, sum="add", mul=mul)
update = (update + boundary) / degree_out
elif self.aggregate_func == "max":
update = functional.generalized_rspmm(adjacency, relation_input, input, sum="max", mul=mul)
update = torch.max(update, boundary)
elif self.aggregate_func == "pna":
sum = functional.generalized_rspmm(adjacency, relation_input, input, sum="add", mul=mul)
sq_sum = functional.generalized_rspmm(adjacency, relation_input ** 2, input ** 2, sum="add", mul=mul)
max = functional.generalized_rspmm(adjacency, relation_input, input, sum="max", mul=mul)
min = functional.generalized_rspmm(adjacency, relation_input, input, sum="min", mul=mul)
mean = (sum + boundary) / degree_out
sq_mean = (sq_sum + boundary ** 2) / degree_out
max = torch.max(max, boundary)
min = torch.min(min, boundary)
std = (sq_mean - mean ** 2).clamp(min=self.eps).sqrt()
features = torch.cat([mean.unsqueeze(-1), max.unsqueeze(-1), min.unsqueeze(-1), std.unsqueeze(-1)], dim=-1)
features = features.flatten(-2)
scale = degree_out.log()
scale = scale / scale.mean()
scales = torch.cat([torch.ones_like(scale), scale, 1 / scale.clamp(min=1e-2)], dim=-1)
update = (features.unsqueeze(-1) * scales.unsqueeze(-2)).flatten(-2)
else:
raise ValueError("Unknown aggregation function `%s`" % self.aggregate_func)
return update.view(len(update), batch_size, -1)
def combine(self, input, update):
output = self.linear(torch.cat([input, update], dim=-1))
if self.layer_norm:
output = self.layer_norm(output)
if self.activation:
output = self.activation(output)
return output
| 7,872 | 46.427711 | 119 | py |
NBFNet | NBFNet-master/nbfnet/task.py | import math
import torch
from torch.nn import functional as F
from torch.utils import data as torch_data
from ogb import linkproppred
from torchdrug import core, tasks, metrics
from torchdrug.layers import functional
from torchdrug.core import Registry as R
Evaluator = core.make_configurable(linkproppred.Evaluator)
Evaluator = R.register("ogb.linkproppred.Evaluator")(Evaluator)
setattr(linkproppred, "Evaluator", Evaluator)
@R.register("tasks.KnowledgeGraphCompletionExt")
class KnowledgeGraphCompletionExt(tasks.KnowledgeGraphCompletion, core.Configurable):
def __init__(self, model, criterion="bce",
metric=("mr", "mrr", "hits@1", "hits@3", "hits@10", "1-to-1", "1-to-n", "n-to-1", "n-to-n"),
num_negative=128, margin=6, adversarial_temperature=0, strict_negative=True, filtered_ranking=True,
fact_ratio=None, sample_weight=True):
super(KnowledgeGraphCompletionExt, self).__init__(
model, criterion, metric, num_negative, margin, adversarial_temperature, strict_negative, filtered_ranking,
fact_ratio, sample_weight)
def preprocess(self, train_set, valid_set, test_set):
super(KnowledgeGraphCompletionExt, self).preprocess(train_set, valid_set, test_set)
degree_hr = torch.zeros(self.num_entity, self.num_relation, dtype=torch.long)
degree_tr = torch.zeros(self.num_entity, self.num_relation, dtype=torch.long)
for h, t, r in train_set:
degree_hr[h, r] += 1
degree_tr[t, r] += 1
has_category = False
for _metric in self.metric:
if _metric in ["1-to-1", "1-to-n", "n-to-1", "n-to-n"]:
has_category = True
if has_category:
is_to_one = degree_hr.sum(dim=0).float() / (degree_hr > 0).sum(dim=0) < 1.5
is_one_to = degree_tr.sum(dim=0).float() / (degree_tr > 0).sum(dim=0) < 1.5
self.register_buffer("is_one_to_one", is_one_to & is_to_one)
self.register_buffer("is_one_to_many", is_one_to & ~is_to_one)
self.register_buffer("is_many_to_one", ~is_one_to & is_to_one)
self.register_buffer("is_many_to_many", ~is_one_to & ~is_to_one)
assert self.is_one_to_one.sum() + self.is_one_to_many.sum() + \
self.is_many_to_one.sum() + self.is_many_to_many.sum() == self.num_relation
assert (self.is_one_to_one | self.is_one_to_many | self.is_many_to_one | self.is_many_to_many).all()
def target(self, batch):
mask, target = super(KnowledgeGraphCompletionExt, self).target(batch)
relation = batch[:, 2]
# in case of GPU OOM
return mask, target, relation.cpu()
def evaluate(self, pred, target):
mask, target, relation = target
pos_pred = pred.gather(-1, target.unsqueeze(-1))
if self.filtered_ranking:
ranking = torch.sum((pos_pred <= pred) & mask, dim=-1) + 1
else:
ranking = torch.sum(pos_pred <= pred, dim=-1) + 1
metric = {}
for _metric in self.metric:
if _metric == "mr":
score = ranking.float().mean()
elif _metric == "mrr":
score = (1 / ranking.float()).mean()
elif _metric.startswith("hits@"):
threshold = int(_metric[5:])
score = (ranking <= threshold).float().mean()
elif _metric == "1-to-1":
score = (1 / ranking[self.is_one_to_one[relation]].float()).mean()
metric["1-to-1 tail"] = (1 / ranking[self.is_one_to_one[relation]].float()).mean(dim=0)[0]
metric["1-to-1 head"] = (1 / ranking[self.is_one_to_one[relation]].float()).mean(dim=0)[1]
elif _metric == "1-to-n":
score = (1 / ranking[self.is_one_to_many[relation]].float()).mean()
metric["1-to-n tail"] = (1 / ranking[self.is_one_to_many[relation]].float()).mean(dim=0)[0]
metric["1-to-n head"] = (1 / ranking[self.is_one_to_many[relation]].float()).mean(dim=0)[1]
elif _metric == "n-to-1":
score = (1 / ranking[self.is_many_to_one[relation]].float()).mean()
metric["n-to-1 tail"] = (1 / ranking[self.is_many_to_one[relation]].float()).mean(dim=0)[0]
metric["n-to-1 head"] = (1 / ranking[self.is_many_to_one[relation]].float()).mean(dim=0)[1]
elif _metric == "n-to-n":
score = (1 / ranking[self.is_many_to_many[relation]].float()).mean()
metric["n-to-n tail"] = (1 / ranking[self.is_many_to_many[relation]].float()).mean(dim=0)[0]
metric["n-to-n head"] = (1 / ranking[self.is_many_to_many[relation]].float()).mean(dim=0)[1]
else:
raise ValueError("Unknown metric `%s`" % _metric)
name = tasks._get_metric_name(_metric)
metric[name] = score
return metric
@R.register("tasks.LinkPrediction")
class LinkPrediction(tasks.Task, core.Configurable):
_option_members = ["criterion", "metric"]
def __init__(self, model, criterion="bce", metric=("auroc", "ap"), num_negative=128, strict_negative=True):
super(LinkPrediction, self).__init__()
self.model = model
self.criterion = criterion
self.metric = metric
self.num_negative = num_negative
self.strict_negative = strict_negative
def preprocess(self, train_set, valid_set, test_set):
if isinstance(train_set, torch_data.Subset):
dataset = train_set.dataset
else:
dataset = train_set
self.num_node = dataset.num_node
train_mask = train_set.indices
valid_mask = train_set.indices + valid_set.indices
train_graph = dataset.graph.edge_mask(train_mask)
valid_graph = dataset.graph.edge_mask(valid_mask)
self.register_buffer("train_graph", train_graph.undirected())
self.register_buffer("valid_graph", valid_graph.undirected())
self.register_buffer("test_graph", dataset.graph.undirected())
def forward(self, batch):
all_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
metric = {}
pred, target = self.predict_and_target(batch, all_loss, metric)
metric.update(self.evaluate(pred, target))
for criterion, weight in self.criterion.items():
if criterion == "bce":
loss = F.binary_cross_entropy_with_logits(pred, target, reduction="none")
neg_weight = torch.ones_like(pred)
neg_weight[:, 1:] = 1 / self.num_negative
loss = (loss * neg_weight).sum(dim=-1) / neg_weight.sum(dim=-1)
else:
raise ValueError("Unknown criterion `%s`" % criterion)
loss = loss.mean()
name = tasks._get_criterion_name(criterion)
metric[name] = loss
all_loss += loss * weight
return all_loss, metric
@torch.no_grad()
def _strict_negative(self, count, split="train"):
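# Sample a negative head with probability proportional to its number of
# valid tails (num_node - out_degree - 1), then sample one tail uniformly
# among the nodes that are neither a true neighbor of that head nor the
# head itself.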
graph = getattr(self, "%s_graph" % split)
node_in = graph.edge_list[:, 0]
degree_in = torch.bincount(node_in, minlength=self.num_node)
prob = (graph.num_node - degree_in - 1).float()
neg_h_index = functional.multinomial(prob, count, replacement=True)
any = -torch.ones_like(neg_h_index)
pattern = torch.stack([neg_h_index, any], dim=-1)
edge_index, num_t_truth = graph.match(pattern)
t_truth_index = graph.edge_list[edge_index, 1]
pos_index = functional._size_to_index(num_t_truth)
t_mask = torch.ones(count, self.num_node, dtype=torch.bool, device=self.device)
t_mask[pos_index, t_truth_index] = 0
t_mask.scatter_(1, neg_h_index.unsqueeze(-1), 0)
neg_t_candidate = t_mask.nonzero()[:, 1]
num_t_candidate = t_mask.sum(dim=-1)
neg_t_index = functional.variadic_sample(neg_t_candidate, num_t_candidate, 1).squeeze(-1)
return neg_h_index, neg_t_index
def predict_and_target(self, batch, all_loss=None, metric=None):
batch_size = len(batch)
pos_h_index, pos_t_index = batch.t()
if self.split == "train":
num_negative = self.num_negative
else:
num_negative = 1
if self.strict_negative or self.split != "train":
neg_h_index, neg_t_index = self._strict_negative(batch_size * num_negative, self.split)
else:
neg_h_index, neg_t_index = torch.randint(self.num_node, (2, batch_size * num_negative), device=self.device)
neg_h_index = neg_h_index.view(batch_size, num_negative)
neg_t_index = neg_t_index.view(batch_size, num_negative)
h_index = pos_h_index.unsqueeze(-1).repeat(1, num_negative + 1)
t_index = pos_t_index.unsqueeze(-1).repeat(1, num_negative + 1)
h_index[:, 1:] = neg_h_index
t_index[:, 1:] = neg_t_index
pred = self.model(self.train_graph, h_index, t_index, all_loss=all_loss, metric=metric)
target = torch.zeros_like(pred)
target[:, 0] = 1
return pred, target
def evaluate(self, pred, target):
pred = pred.flatten()
target = target.flatten()
metric = {}
for _metric in self.metric:
if _metric == "auroc":
score = metrics.area_under_roc(pred, target)
elif _metric == "ap":
score = metrics.area_under_prc(pred, target)
else:
raise ValueError("Unknown metric `%s`" % _metric)
name = tasks._get_metric_name(_metric)
metric[name] = score
return metric
@R.register("tasks.InductiveKnowledgeGraphCompletion")
class InductiveKnowledgeGraphCompletion(tasks.KnowledgeGraphCompletion, core.Configurable):
def __init__(self, model, criterion="bce", metric=("mr", "mrr", "hits@1", "hits@3", "hits@10", "hits@10_50"),
num_negative=128, margin=6, adversarial_temperature=0, strict_negative=True, sample_weight=True):
super(InductiveKnowledgeGraphCompletion, self).__init__(
model, criterion, metric, num_negative, margin, adversarial_temperature, strict_negative,
sample_weight=sample_weight)
def preprocess(self, train_set, valid_set, test_set):
if isinstance(train_set, torch_data.Subset):
dataset = train_set.dataset
else:
dataset = train_set
self.num_entity = dataset.num_entity
self.num_relation = dataset.num_relation
self.register_buffer("fact_graph", dataset.graph)
if self.sample_weight:
degree_hr = torch.zeros(self.num_entity, self.num_relation, dtype=torch.long)
degree_tr = torch.zeros(self.num_entity, self.num_relation, dtype=torch.long)
for h, t, r in train_set:
degree_hr[h, r] += 1
degree_tr[t, r] += 1
self.register_buffer("degree_hr", degree_hr)
self.register_buffer("degree_tr", degree_tr)
self.register_buffer("train_graph", dataset.train_graph)
self.register_buffer("valid_graph", dataset.valid_graph)
self.register_buffer("test_graph", dataset.test_graph)
return train_set, valid_set, test_set
def predict(self, batch, all_loss=None, metric=None):
pos_h_index, pos_t_index, pos_r_index = batch.t()
batch_size = len(batch)
graph = getattr(self, "%s_graph" % self.split)
if all_loss is None:
# test
all_index = torch.arange(graph.num_node, device=self.device)
t_preds = []
h_preds = []
for neg_index in all_index.split(self.num_negative):
r_index = pos_r_index.unsqueeze(-1).expand(-1, len(neg_index))
h_index, t_index = torch.meshgrid(pos_h_index, neg_index)
t_pred = self.model(graph, h_index, t_index, r_index, all_loss=all_loss, metric=metric)
t_preds.append(t_pred)
t_pred = torch.cat(t_preds, dim=-1)
for neg_index in all_index.split(self.num_negative):
r_index = pos_r_index.unsqueeze(-1).expand(-1, len(neg_index))
t_index, h_index = torch.meshgrid(pos_t_index, neg_index)
h_pred = self.model(graph, h_index, t_index, r_index, all_loss=all_loss, metric=metric)
h_preds.append(h_pred)
h_pred = torch.cat(h_preds, dim=-1)
pred = torch.stack([t_pred, h_pred], dim=1)
# in case of GPU OOM
pred = pred.cpu()
else:
# train
if self.strict_negative:
neg_index = self._strict_negative(pos_h_index, pos_t_index, pos_r_index)
else:
neg_index = torch.randint(self.num_entity, (batch_size, self.num_negative), device=self.device)
h_index = pos_h_index.unsqueeze(-1).repeat(1, self.num_negative + 1)
t_index = pos_t_index.unsqueeze(-1).repeat(1, self.num_negative + 1)
r_index = pos_r_index.unsqueeze(-1).repeat(1, self.num_negative + 1)
t_index[:batch_size // 2, 1:] = neg_index[:batch_size // 2]
h_index[batch_size // 2:, 1:] = neg_index[batch_size // 2:]
pred = self.model(graph, h_index, t_index, r_index, all_loss=all_loss, metric=metric)
return pred
def target(self, batch):
# test target
batch_size = len(batch)
graph = getattr(self, "%s_graph" % self.split)
pos_h_index, pos_t_index, pos_r_index = batch.t()
any = -torch.ones_like(pos_h_index)
pattern = torch.stack([pos_h_index, any, pos_r_index], dim=-1)
edge_index, num_t_truth = graph.match(pattern)
t_truth_index = graph.edge_list[edge_index, 1]
pos_index = functional._size_to_index(num_t_truth)
t_mask = torch.ones(batch_size, graph.num_node, dtype=torch.bool, device=self.device)
t_mask[pos_index, t_truth_index] = 0
pattern = torch.stack([any, pos_t_index, pos_r_index], dim=-1)
edge_index, num_h_truth = graph.match(pattern)
h_truth_index = graph.edge_list[edge_index, 0]
pos_index = functional._size_to_index(num_h_truth)
h_mask = torch.ones(batch_size, graph.num_node, dtype=torch.bool, device=self.device)
h_mask[pos_index, h_truth_index] = 0
mask = torch.stack([t_mask, h_mask], dim=1)
target = torch.stack([pos_t_index, pos_h_index], dim=1)
# in case of GPU OOM
return mask.cpu(), target.cpu()
def evaluate(self, pred, target):
mask, target = target
pos_pred = pred.gather(-1, target.unsqueeze(-1))
ranking = torch.sum((pos_pred <= pred) & mask, dim=-1) + 1
metric = {}
for _metric in self.metric:
if _metric == "mr":
score = ranking.float().mean()
elif _metric == "mrr":
score = (1 / ranking.float()).mean()
elif _metric.startswith("hits@"):
values = _metric[5:].split("_")
threshold = int(values[0])
if len(values) > 1:
num_sample = int(values[1])
# unbiased estimation
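# with num_sample negatives per positive, each sampled negative
# independently outranks the positive with probability fp_rate, so the
# sampled hits@K equals P(#outranking negatives < K) =
# sum_{i<K} C(num_sample, i) * fp_rate^i * (1 - fp_rate)^(num_sample - i),
# evaluated below per query and then averaged.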
fp_rate = (ranking - 1).float() / mask.sum(dim=-1)
score = 0
for i in range(threshold):
# choose i false positive from num_sample negatives
num_comb = math.factorial(num_sample) / math.factorial(i) / math.factorial(num_sample - i)
score += num_comb * (fp_rate ** i) * ((1 - fp_rate) ** (num_sample - i))
score = score.mean()
else:
score = (ranking <= threshold).float().mean()
else:
raise ValueError("Unknown metric `%s`" % _metric)
name = tasks._get_metric_name(_metric)
metric[name] = score
return metric
@R.register("tasks.KnowledgeGraphCompletionOGB")
class KnowledgeGraphCompletionOGB(tasks.KnowledgeGraphCompletion, core.Configurable):
def __init__(self, model, criterion="bce", evaluator=None, num_negative=128, margin=6, adversarial_temperature=0,
strict_negative=True, heterogeneous_negative=False, fact_ratio=None, sample_weight=True):
super(KnowledgeGraphCompletionOGB, self).__init__(
model, criterion, None, num_negative, margin, adversarial_temperature, strict_negative, True,
fact_ratio, sample_weight)
self.evaluator = evaluator
self.heterogeneous_negative = heterogeneous_negative
def preprocess(self, train_set, valid_set, test_set):
if isinstance(train_set, torch_data.Subset):
dataset = train_set.dataset
else:
dataset = train_set
self.num_entity = dataset.num_entity
self.num_relation = dataset.num_relation
self.register_buffer("graph", dataset.graph)
fact_mask = torch.zeros(len(dataset), dtype=torch.bool)
fact_mask[train_set.indices] = 1
if self.fact_ratio:
length = int(len(train_set) * self.fact_ratio)
index = torch.randperm(len(train_set))[length:]
train_indices = torch.tensor(train_set.indices)
fact_mask[train_indices[index]] = 0
train_set = torch_data.Subset(train_set, index)
self.register_buffer("fact_graph", dataset.graph.edge_mask(fact_mask))
if self.sample_weight:
degree_hr = torch.zeros(self.num_entity, self.num_relation, dtype=torch.long)
degree_tr = torch.zeros(self.num_entity, self.num_relation, dtype=torch.long)
for h, t, r in train_set:
degree_hr[h, r] += 1
degree_tr[t, r] += 1
self.register_buffer("degree_hr", degree_hr)
self.register_buffer("degree_tr", degree_tr)
return train_set, valid_set, test_set
@torch.no_grad()
def _strict_negative(self, pos_h_index, pos_t_index, pos_r_index):
batch_size = len(pos_h_index)
any = -torch.ones_like(pos_h_index)
node_type = self.fact_graph.node_type
pattern = torch.stack([pos_h_index, any, pos_r_index], dim=-1)
pattern = pattern[:batch_size // 2]
edge_index, num_t_truth = self.fact_graph.match(pattern)
t_truth_index = self.fact_graph.edge_list[edge_index, 1]
pos_index = functional._size_to_index(num_t_truth)
if self.heterogeneous_negative:
pos_t_type = node_type[pos_t_index[:batch_size // 2]]
t_mask = pos_t_type.unsqueeze(-1) == node_type.unsqueeze(0)
else:
t_mask = torch.ones(len(pattern), self.num_entity, dtype=torch.bool, device=self.device)
t_mask[pos_index, t_truth_index] = 0
neg_t_candidate = t_mask.nonzero()[:, 1]
num_t_candidate = t_mask.sum(dim=-1)
neg_t_index = functional.variadic_sample(neg_t_candidate, num_t_candidate, self.num_negative)
pattern = torch.stack([any, pos_t_index, pos_r_index], dim=-1)
pattern = pattern[batch_size // 2:]
edge_index, num_h_truth = self.fact_graph.match(pattern)
h_truth_index = self.fact_graph.edge_list[edge_index, 0]
pos_index = functional._size_to_index(num_h_truth)
if self.heterogeneous_negative:
pos_h_type = node_type[pos_h_index[batch_size // 2:]]
h_mask = pos_h_type.unsqueeze(-1) == node_type.unsqueeze(0)
else:
h_mask = torch.ones(len(pattern), self.num_entity, dtype=torch.bool, device=self.device)
h_mask[pos_index, h_truth_index] = 0
neg_h_candidate = h_mask.nonzero()[:, 1]
num_h_candidate = h_mask.sum(dim=-1)
neg_h_index = functional.variadic_sample(neg_h_candidate, num_h_candidate, self.num_negative)
neg_index = torch.cat([neg_t_index, neg_h_index])
return neg_index
def predict(self, batch, all_loss=None, metric=None):
batch_size = len(batch)
if all_loss is None:
# test
h_index, t_index, r_index = batch.unbind(-1)
pattern = batch[:, 0, :]
num_match = self.fact_graph.match(pattern)[1]
assert (num_match == 0).all()
pred = self.model(self.fact_graph, h_index, t_index, r_index, all_loss=all_loss, metric=metric)
# in case of GPU OOM
pred = pred.cpu()
else:
# train
pos_h_index, pos_t_index, pos_r_index = batch.t()
if self.strict_negative:
neg_index = self._strict_negative(pos_h_index, pos_t_index, pos_r_index)
else:
neg_index = torch.randint(self.num_entity, (batch_size, self.num_negative), device=self.device)
h_index = pos_h_index.unsqueeze(-1).repeat(1, self.num_negative + 1)
t_index = pos_t_index.unsqueeze(-1).repeat(1, self.num_negative + 1)
r_index = pos_r_index.unsqueeze(-1).repeat(1, self.num_negative + 1)
t_index[:batch_size // 2, 1:] = neg_index[:batch_size // 2]
h_index[batch_size // 2:, 1:] = neg_index[batch_size // 2:]
pred = self.model(self.fact_graph, h_index, t_index, r_index, all_loss=all_loss, metric=metric)
return pred
def target(self, batch):
# test target
batch_size = len(batch)
target = torch.zeros(batch_size, dtype=torch.long, device=self.device)
# in case of GPU OOM
return target.cpu()
def evaluate(self, pred, target):
is_positive = torch.zeros(pred.shape, dtype=torch.bool)
is_positive.scatter_(-1, target.unsqueeze(-1), 1)
pos_pred = pred[is_positive]
neg_pred = pred[~is_positive].view(len(pos_pred), -1)
metric = self.evaluator.eval({"y_pred_pos": pos_pred, "y_pred_neg": neg_pred})
new_metric = {}
for key in metric:
new_key = key.split("_")[0]
new_metric[new_key] = metric[key].mean()
return new_metric
| 22,136 | 44.832298 | 119 | py |