repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
pyslam | pyslam-master/utils_files.py | # From torchvision.utils
import smtplib, socket, os, os.path, hashlib, errno
import __main__ as main
from email.mime.text import MIMEText
from os import path, mkdir
def check_integrity(fpath, md5):
    """Return True if the file at fpath exists and its MD5 digest equals md5."""
    if not os.path.isfile(fpath):
        return False
    hasher = hashlib.md5()
    chunk_size = 1024 * 1024  # hash in 1MB chunks to bound memory usage
    with open(fpath, "rb") as f:
        while True:
            block = f.read(chunk_size)
            if not block:
                break
            hasher.update(block)
    return hasher.hexdigest() == md5
def download_url(url, root, filename, md5):
    """Download a file from url into root/filename and verify it with md5.

    The download is skipped when the destination file already exists and its
    checksum matches.  On a failed https download, one retry is attempted over
    plain http (mirrors torchvision's behaviour); failures on non-https URLs
    are re-raised instead of being silently swallowed.
    """
    from six.moves import urllib
    root = os.path.expanduser(root)
    fpath = os.path.join(root, filename)
    try:
        os.makedirs(root)
    except OSError as e:
        # the directory may already exist; any other error is fatal
        if e.errno == errno.EEXIST:
            pass
        else:
            raise
    # downloads file
    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print("Using downloaded and verified file: " + fpath)
    else:
        try:
            print("Downloading " + url + " to " + fpath)
            urllib.request.urlretrieve(url, fpath)
        except Exception:
            # some servers reject https; retry once over plain http
            if url.startswith("https"):
                url = url.replace("https:", "http:")
                print("Failed download. Trying https -> http instead." " Downloading " + url + " to " + fpath)
                urllib.request.urlretrieve(url, fpath)
            else:
                # was silently swallowed before; surface the real error
                raise
def check_dir(dir):
    """Create directory dir if it does not already exist (non-recursive)."""
    if path.isdir(dir):
        return
    mkdir(dir)
def list_dir(root, prefix=False):
    """List all directories directly under root.

    root (str): path whose sub-directories are listed.
    prefix (bool, optional): if True each entry is joined with root,
        otherwise only the bare directory names are returned.
    """
    root = os.path.expanduser(root)
    directories = [entry for entry in os.listdir(root)
                   if os.path.isdir(os.path.join(root, entry))]
    if prefix is True:
        directories = [os.path.join(root, entry) for entry in directories]
    return directories
def list_files(root, suffix, prefix=False):
    """List all files under root whose names end with suffix.

    root (str): path whose files are listed.
    suffix (str or tuple): forwarded directly to str.endswith,
        e.g. '.png' or ('.jpg', '.png').
    prefix (bool, optional): if True each entry is joined with root,
        otherwise only the bare file names are returned.
    """
    root = os.path.expanduser(root)
    files = [entry for entry in os.listdir(root)
             if os.path.isfile(os.path.join(root, entry)) and entry.endswith(suffix)]
    if prefix is True:
        files = [os.path.join(root, entry) for entry in files]
    return files
def make_dirs(dir, exist_ok=True):
    """Create dir (and any missing parents), optionally tolerating existing dirs.

    Python-2 compatible replacement for os.makedirs(dir, exist_ok=...).
    """
    try:
        os.makedirs(dir)
    except OSError as e:
        # only swallow "already exists" when exist_ok is requested; any other
        # failure (permissions, invalid path, ...) must surface — the previous
        # bare 'except: pass' hid every error
        if not (exist_ok and e.errno == errno.EEXIST):
            raise
import smtplib, socket, os
from email.mime.text import MIMEText
# Open a plain text file for reading. For this example, assume that
# the text file contains only ASCII characters.
# with open(textfile) as fp:
# Create a text/plain message
# msg = MIMEText(fp.read())
def send_email(recipient, ignore_host="", login_gmail="", login_password=""):
    """Send a job-finished notification email through Gmail's SSL SMTP server.

    Does nothing when the current hostname equals ignore_host (useful to
    silence notifications from a particular machine).
    """
    if socket.gethostname() == ignore_host:
        return
    msg = MIMEText("")
    msg["Subject"] = socket.gethostname() + " just finished running a job " + os.path.basename(main.__file__)
    msg["From"] = "clustersgpu@gmail.com"
    msg["To"] = recipient
    server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
    server.ehlo()
    server.login(login_gmail, login_password)
    server.sendmail(login_gmail, recipient, msg.as_string())
    server.quit()
| 3,694 | 31.412281 | 155 | py |
pyslam | pyslam-master/feature_hardnet.py | """
* This file is part of PYSLAM
* adapted from https://github.com/DagnyT/hardnet/blob/master/examples/extract_hardnet_desc_from_hpatches_file.py, see licence therein.
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
# adapted from https://github.com/DagnyT/hardnet/blob/master/examples/extract_hardnet_desc_from_hpatches_file.py
import config
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import time
import os
import cv2
import math
import numpy as np
from utils_features import extract_patches_tensor, extract_patches_array, extract_patches_array_cpp
kVerbose = True
class L2Norm(nn.Module):
    """Normalize each row of a 2D input tensor to unit Euclidean (L2) length."""
    def __init__(self):
        super(L2Norm, self).__init__()
        self.eps = 1e-10  # guards against division by zero for all-zero rows
    def forward(self, x):
        row_norm = (x * x).sum(dim=1).add(self.eps).sqrt()
        return x / row_norm.unsqueeze(-1).expand_as(x)
class L1Norm(nn.Module):
    """Normalize each row of a 2D input tensor to unit L1 norm."""
    def __init__(self):
        super(L1Norm,self).__init__()
        self.eps = 1e-10  # guards against division by zero for all-zero rows
    def forward(self, x):
        norm = torch.sum(torch.abs(x), dim = 1) + self.eps
        # BUGFIX: norm has shape (N,) and must be unsqueezed to (N, 1) before
        # broadcasting over the feature dimension; the original
        # norm.expand_as(x) raises a RuntimeError for (N,) -> (N, D), D != N
        # (compare with L2Norm, which already does unsqueeze(-1))
        x = x / norm.unsqueeze(-1).expand_as(x)
        return x
class HardNet(nn.Module):
    """HardNet model definition.

    Maps a batch of normalized 1-channel 32x32 patches to L2-normalized
    128-dimensional descriptors.

    NOTE(review): do not reorder or restructure the nn.Sequential below —
    the pretrained checkpoint's state_dict keys ('features.0.weight', ...)
    depend on the exact module registration order.
    """
    def __init__(self):
        super(HardNet, self).__init__()
        # 32x32x1 -> 16x16x64 -> 8x8x128 -> (8x8 conv) -> 1x1x128
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias = False),
            nn.BatchNorm2d(32, affine=False),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, padding=1, bias = False),
            nn.BatchNorm2d(32, affine=False),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
            nn.BatchNorm2d(64, affine=False),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1, bias = False),
            nn.BatchNorm2d(64, affine=False),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2,padding=1, bias = False),
            nn.BatchNorm2d(128, affine=False),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias = False),
            nn.BatchNorm2d(128, affine=False),
            nn.ReLU(),
            nn.Dropout(0.1),
            # final 8x8 convolution collapses the spatial dims to 1x1
            nn.Conv2d(128, 128, kernel_size=8, bias = False),
            nn.BatchNorm2d(128, affine=False),
        )
        #self.features.apply(weights_init)
    def input_norm(self,x):
        # per-patch whitening: subtract each patch's mean and divide by its
        # std (computed over all pixels of the patch); detach() keeps the
        # statistics out of the autograd graph
        flat = x.view(x.size(0), -1)
        mp = torch.mean(flat, dim=1)
        sp = torch.std(flat, dim=1) + 1e-7
        return (x - mp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
    def forward(self, input):
        # (N, 1, 32, 32) -> (N, 128, 1, 1) -> flatten -> unit-length rows
        x_features = self.features(self.input_norm(input))
        x = x_features.view(x_features.size(0), -1)
        return L2Norm()(x)
# interface for pySLAM
class HardnetFeature2D:
    """pySLAM descriptor interface around the pre-trained HardNet network.

    Extracts a 32x32 patch around each keypoint, normalizes it with the
    HardNet training-set statistics and runs it through the network,
    producing one 128-float descriptor per keypoint.
    """
    def __init__(self, do_cuda=True):
        print('Using HardnetFeature2D')
        self.model_base_path = config.cfg.root_folder + '/thirdparty/hardnet/'
        self.model_weights_path = self.model_base_path + 'pretrained/train_liberty_with_aug/checkpoint_liberty_with_aug.pth'
        #print('model_weights_path:',self.model_weights_path)
        # use CUDA only when it is both requested and actually available
        # ('and' instead of the original bitwise '&')
        self.do_cuda = do_cuda and torch.cuda.is_available()
        print('cuda:',self.do_cuda)
        device = torch.device("cuda:0" if self.do_cuda else "cpu")
        torch.set_grad_enabled(False)
        # mag_factor is how many times the original keypoint scale
        # is enlarged to generate a patch from a keypoint
        self.mag_factor = 1.0
        # inference batch size (used only when process_all is False)
        self.batch_size = 512
        self.process_all = True # process all the patches at once
        print('==> Loading pre-trained network.')
        self.model = HardNet()
        self.checkpoint = torch.load(self.model_weights_path)
        self.model.load_state_dict(self.checkpoint['state_dict'])
        if self.do_cuda:
            self.model.cuda()
            print('Extracting on GPU')
        else:
            print('Extracting on CPU')
            # BUGFIX: was 'model.cpu()' — a NameError ('model' undefined)
            # on every CPU-only machine
            self.model = self.model.cpu()
        self.model.eval()
        print('==> Successfully loaded pre-trained network.')
    def compute_des_batches(self, patches):
        """Run the network over patches in chunks of self.batch_size.

        patches: numpy array of shape (N, 1, 32, 32).
        Returns a (N, 128) numpy array of descriptors.
        """
        descriptors_for_net = np.zeros((len(patches), 128))
        for i in range(0, len(patches), self.batch_size):
            data_a = patches[i: i + self.batch_size, :, :, :].astype(np.float32)
            data_a = torch.from_numpy(data_a)
            if self.do_cuda:
                data_a = data_a.cuda()
            # compute output (the deprecated Variable wrapper was dropped:
            # it has been a no-op since torch 0.4)
            with torch.no_grad():
                out_a = self.model(data_a)
            descriptors_for_net[i: i + self.batch_size,:] = out_a.data.cpu().numpy().reshape(-1, 128)
        return descriptors_for_net
    def compute_des(self, patches):
        """Run the network over all patches in a single forward pass.

        patches: numpy array of shape (N, 32, 32); the channel dim is added here.
        Returns a (N, 128) numpy array of descriptors.
        """
        patches = torch.from_numpy(patches).float()
        patches = torch.unsqueeze(patches,1)  # (N, 32, 32) -> (N, 1, 32, 32)
        if self.do_cuda:
            patches = patches.cuda()
        with torch.no_grad():
            descrs = self.model(patches)
        return descrs.detach().cpu().numpy().reshape(-1, 128)
    def compute(self, img, kps, mask=None): #mask is a fake input
        """Compute HardNet descriptors for keypoints kps detected on img.

        Returns (kps, des): the input keypoints unchanged and a (N, 128)
        descriptor array (an empty list when no keypoints are given).
        """
        num_kps = len(kps)
        des = []
        if num_kps>0:
            if not self.process_all:
                # compute descriptors patch-batch by patch-batch
                patches = extract_patches_tensor(img, kps, patch_size=32, mag_factor=self.mag_factor)
                # normalize with the HardNet training-set mean/std
                patches = (patches/255. - 0.443728476019)/0.20197947209
                des = self.compute_des_batches(patches).astype(np.float32)
            else:
                # compute descriptors by feeding the full patch tensor to the network
                t = time.time()
                if False:
                    # use python code
                    patches = extract_patches_array(img, kps, patch_size=32, mag_factor=self.mag_factor)
                else:
                    # use faster cpp code
                    patches = extract_patches_array_cpp(img, kps, patch_size=32, mag_factor=self.mag_factor)
                patches = np.asarray(patches)
                if kVerbose:
                    print('patches.shape:',patches.shape)
                # normalize with the HardNet training-set mean/std
                patches = (patches/255. - 0.443728476019)/0.20197947209
                if kVerbose:
                    print('patch elapsed: ', time.time()-t)
                des = self.compute_des(patches)
        if kVerbose:
            print('descriptor: HARDNET, #features: ', len(kps), ', frame res: ', img.shape[0:2])
        return kps, des
| 8,043 | 38.821782 | 155 | py |
pyslam | pyslam-master/feature_manager.py | """
* This file is part of PYSLAM
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import math
from enum import Enum
import numpy as np
import cv2
from collections import Counter
from parameters import Parameters
from feature_types import FeatureDetectorTypes, FeatureDescriptorTypes, FeatureInfo
from utils_sys import Printer, import_from
from utils_features import unpackSiftOctaveKps, UnpackOctaveMethod, sat_num_features, kdt_nms, ssc_nms, octree_nms, grid_nms
from utils_geom import hamming_distance, hamming_distances, l2_distance, l2_distances
from feature_manager_adaptors import BlockAdaptor, PyramidAdaptor
from pyramid import Pyramid, PyramidType
from feature_root_sift import RootSIFTFeature2D
from feature_shitomasi import ShiTomasiDetector
# import and check
SuperPointFeature2D = import_from('feature_superpoint', 'SuperPointFeature2D')
TfeatFeature2D = import_from('feature_tfeat', 'TfeatFeature2D')
Orbslam2Feature2D = import_from('feature_orbslam2', 'Orbslam2Feature2D')
HardnetFeature2D = import_from('feature_hardnet', 'HardnetFeature2D')
GeodescFeature2D = import_from('feature_geodesc', 'GeodescFeature2D')
SosnetFeature2D = import_from('feature_sosnet', 'SosnetFeature2D')
if False:
L2NetKerasFeature2D = import_from('feature_l2net_keras', 'L2NetKerasFeature2D') # not used at present time
L2NetFeature2D = import_from('feature_l2net', 'L2NetFeature2D')
LogpolarFeature2D = import_from('feature_logpolar', 'LogpolarFeature2D')
D2NetFeature2D = import_from('feature_d2net', 'D2NetFeature2D')
DelfFeature2D = import_from('feature_delf', 'DelfFeature2D')
ContextDescFeature2D = import_from('feature_contextdesc', 'ContextDescFeature2D')
LfNetFeature2D = import_from('feature_lfnet', 'LfNetFeature2D')
R2d2Feature2D = import_from('feature_r2d2', 'R2d2Feature2D')
KeyNetDescFeature2D = import_from('feature_keynet', 'KeyNetDescFeature2D')
DiskFeature2D = import_from('feature_disk', 'DiskFeature2D')
kVerbose = True
kNumFeatureDefault = Parameters.kNumFeatures
kNumLevelsDefault = 4
kScaleFactorDefault = 1.2
kNumLevelsInitSigma = 40
kSigmaLevel0 = Parameters.kSigmaLevel0
kDrawOriginalExtractedFeatures = False # for debugging
kFASTKeyPointSizeRescaleFactor = 4 # 7 is the standard keypoint size on layer 0 => actual size = 7*kFASTKeyPointSizeRescaleFactor
kAGASTKeyPointSizeRescaleFactor = 4 # 7 is the standard keypoint size on layer 0 => actual size = 7*kAGASTKeyPointSizeRescaleFactor
kShiTomasiKeyPointSizeRescaleFactor = 5 # 5 is the selected keypoint size on layer 0 (see below) => actual size = 5*kShiTomasiKeyPointSizeRescaleFactor
if not kVerbose:
    # shadow the builtin print so all module-level logging becomes a no-op
    # when verbose output is disabled
    def print(*args, **kwargs):
        pass
class KeyPointFilterTypes(Enum):
    """Strategies for limiting/redistributing the set of detected keypoints."""
    NONE = 0
    SAT = 1 # sat the number of features (keep the best N features: 'best' on the basis of the keypoint.response)
    KDT_NMS = 2 # Non-Maxima Suppression based on kd-tree
    SSC_NMS = 3 # Non-Maxima Suppression based on https://github.com/BAILOOL/ANMS-Codes
    OCTREE_NMS = 4 # Distribute keypoints by using a octree (as a matter of fact, a quadtree): from ORBSLAM2
    GRID_NMS = 5 # NMS by using a grid
def feature_manager_factory(num_features=kNumFeatureDefault,
                            num_levels = kNumLevelsDefault, # number of pyramid levels or octaves for detector and descriptor
                            scale_factor = kScaleFactorDefault, # detection scale factor (if it can be set, otherwise it is automatically computed)
                            detector_type = FeatureDetectorTypes.FAST,
                            descriptor_type = FeatureDescriptorTypes.ORB):
    """Build a FeatureManager configured with the given detector/descriptor options."""
    return FeatureManager(num_features=num_features,
                          num_levels=num_levels,
                          scale_factor=scale_factor,
                          detector_type=detector_type,
                          descriptor_type=descriptor_type)
# Manager of both detector and descriptor
# This exposes an interface that is similar to OpenCV::Feature2D, i.e. detect(), compute() and detectAndCompute()
class FeatureManager(object):
def __init__(self, num_features=kNumFeatureDefault,
num_levels = kNumLevelsDefault, # number of pyramid levels or octaves for detector and descriptor
scale_factor = kScaleFactorDefault, # detection scale factor (if it can be set, otherwise it is automatically computed)
detector_type = FeatureDetectorTypes.FAST,
descriptor_type = FeatureDescriptorTypes.ORB):
self.detector_type = detector_type
self._feature_detector = None
self.descriptor_type = descriptor_type
self._feature_descriptor = None
# main feature manager properties
self.num_features = num_features
self.num_levels = num_levels
self.first_level = 0 # not always applicable = > 0: start pyramid from input image;
# -1: start pyramid from up-scaled image*scale_factor (as in SIFT)
self.scale_factor = scale_factor # scale factor bewteen two octaves
self.sigma_level0 = kSigmaLevel0 # sigma on first octave
self.layers_per_octave = 3 # for methods that uses octaves (SIFT, SURF, etc)
# feature norm options
self.norm_type = None # descriptor norm type
self.descriptor_distance = None # pointer to a function for computing the distance between two points
self.descriptor_distances = None # pointer to a function for computing the distances between two array of corresponding points
# block adaptor options
self.use_bock_adaptor = False
self.block_adaptor = None
# pyramid adaptor options: at present time pyramid adaptor has the priority and can combine a block adaptor withint itself
self.use_pyramid_adaptor = False
self.pyramid_adaptor = None
self.pyramid_type = PyramidType.RESIZE
self.pyramid_do_parallel = True
self.do_sat_features_per_level = False # if pyramid adaptor is active, one can require to compute a certain number of features per level (see PyramidAdaptor)
self.force_multiscale_detect_and_compute = False # automatically managed below depending on features
self.oriented_features = True # automatically managed below depending on selected features
self.do_keypoints_size_rescaling = False # automatically managed below depending on selected features
self.need_color_image = False # automatically managed below depending on selected features
self.keypoint_filter_type = KeyPointFilterTypes.SAT # default keypoint-filter type
self.need_nms = False # need or not non-maximum suppression of keypoints
self.keypoint_nms_filter_type = KeyPointFilterTypes.KDT_NMS # default keypoint-filter type if NMS is needed
# initialize sigmas for keypoint levels (used for SLAM)
self.init_sigma_levels()
# --------------------------------------------- #
# manage different opencv versions
# --------------------------------------------- #
print("using opencv ", cv2.__version__)
# check opencv version in order to use the right modules
opencv_major = int(cv2.__version__.split('.')[0])
opencv_minor = int(cv2.__version__.split('.')[1])
if opencv_major == 3:
SIFT_create = import_from('cv2.xfeatures2d','SIFT_create')
SURF_create = import_from('cv2.xfeatures2d','SURF_create')
FREAK_create = import_from('cv2.xfeatures2d','FREAK_create')
ORB_create = import_from('cv2','ORB_create')
BRISK_create = import_from('cv2','BRISK_create')
KAZE_create = import_from('cv2','KAZE_create')
AKAZE_create = import_from('cv2','AKAZE_create')
BoostDesc_create = import_from('cv2','xfeatures2d_BoostDesc','create')
MSD_create = import_from('cv2','xfeatures2d_MSDDetector') # found but it does not work! (it does not find the .create() method)
#Affine_create = import_from('cv2','xfeatures2d_AffineFeature2D') # not found
DAISY_create = import_from('cv2','xfeatures2d_DAISY','create')
STAR_create = import_from('cv2','xfeatures2d_StarDetector','create')
HL_create = import_from('cv2','xfeatures2d_HarrisLaplaceFeatureDetector','create')
LATCH_create = import_from('cv2','xfeatures2d_LATCH','create')
LUCID_create = import_from('cv2','xfeatures2d_LUCID','create')
VGG_create = import_from('cv2','xfeatures2d_VGG','create')
BEBLID_create = import_from('cv2','xfeatures2d','BEBLID_create')
elif opencv_major == 4 and opencv_minor >= 5:
SIFT_create = import_from('cv2','SIFT_create')
SURF_create = import_from('cv2.xfeatures2d','SURF_create')
FREAK_create = import_from('cv2.xfeatures2d','FREAK_create')
ORB_create = import_from('cv2','ORB_create')
BRISK_create = import_from('cv2','BRISK_create')
KAZE_create = import_from('cv2','KAZE_create')
AKAZE_create = import_from('cv2','AKAZE_create')
BoostDesc_create = import_from('cv2','xfeatures2d_BoostDesc','create')
MSD_create = import_from('cv2','xfeatures2d_MSDDetector')
DAISY_create = import_from('cv2','xfeatures2d_DAISY','create')
STAR_create = import_from('cv2','xfeatures2d_StarDetector','create')
HL_create = import_from('cv2','xfeatures2d_HarrisLaplaceFeatureDetector','create')
LATCH_create = import_from('cv2','xfeatures2d_LATCH','create')
LUCID_create = import_from('cv2','xfeatures2d_LUCID','create')
VGG_create = import_from('cv2','xfeatures2d_VGG','create')
BEBLID_create = import_from('cv2','xfeatures2d','BEBLID_create')
else:
SIFT_create = import_from('cv2.xfeatures2d','SIFT_create')
SURF_create = import_from('cv2.xfeatures2d','SURF_create')
FREAK_create = import_from('cv2.xfeatures2d','FREAK_create')
ORB_create = import_from('cv2','ORB')
BRISK_create = import_from('cv2','BRISK')
KAZE_create = import_from('cv2','KAZE')
AKAZE_create = import_from('cv2','AKAZE')
BoostDesc_create = import_from('cv2','xfeatures2d_BoostDesc','create')
MSD_create = import_from('cv2','xfeatures2d_MSDDetector')
DAISY_create = import_from('cv2','xfeatures2d_DAISY','create')
STAR_create = import_from('cv2','xfeatures2d_StarDetector','create')
HL_create = import_from('cv2','xfeatures2d_HarrisLaplaceFeatureDetector','create')
LATCH_create = import_from('cv2','xfeatures2d_LATCH','create')
LUCID_create = import_from('cv2','xfeatures2d_LUCID','create')
VGG_create = import_from('cv2','xfeatures2d_VGG','create')
BEBLID_create = import_from('cv2','xfeatures2d','BEBLID_create')
# pure detectors
self.FAST_create = import_from('cv2','FastFeatureDetector_create')
self.AGAST_create = import_from('cv2','AgastFeatureDetector_create')
self.GFTT_create = import_from('cv2','GFTTDetector_create')
self.MSER_create = import_from('cv2','MSER_create')
self.MSD_create = MSD_create
self.STAR_create = STAR_create
self.HL_create = HL_create
# detectors and descriptors
self.SIFT_create = SIFT_create
self.SURF_create = SURF_create
self.ORB_create = ORB_create
self.BRISK_create = BRISK_create
self.AKAZE_create = AKAZE_create
self.KAZE_create = KAZE_create
# pure descriptors
self.FREAK_create = FREAK_create # only descriptor
self.BoostDesc_create = BoostDesc_create
self.DAISY_create = DAISY_create
self.LATCH_create = LATCH_create
self.LUCID_create = LUCID_create
self.VGG_create = VGG_create
self.BEBLID_create = BEBLID_create
# --------------------------------------------- #
# check if we want descriptor == detector
# --------------------------------------------- #
self.is_detector_equal_to_descriptor = (self.detector_type.name == self.descriptor_type.name)
# N.B.: the following descriptors assume keypoint.octave extacly represents an octave with a scale_factor=2
# and not a generic level with scale_factor < 2
if self.descriptor_type in [
FeatureDescriptorTypes.SIFT, # [NOK] SIFT seems to assume the use of octaves (https://github.com/opencv/opencv_contrib/blob/master/modules/xfeatures2d/src/sift.cpp#L1128)
FeatureDescriptorTypes.ROOT_SIFT, # [NOK] same as SIFT
#FeatureDescriptorTypes.SURF, # [OK] SURF computes the descriptor by considering the keypoint.size (https://github.com/opencv/opencv_contrib/blob/master/modules/xfeatures2d/src/surf.cpp#L600)
FeatureDescriptorTypes.AKAZE, # [NOK] AKAZE does NOT seem to compute the right scale index for each keypoint.size (https://github.com/opencv/opencv/blob/master/modules/features2d/src/kaze/AKAZEFeatures.cpp#L1508)
FeatureDescriptorTypes.KAZE, # [NOK] similar to KAZE
#FeatureDescriptorTypes.FREAK, # [OK] FREAK computes the right scale index for each keypoint.size (https://github.com/opencv/opencv_contrib/blob/master/modules/xfeatures2d/src/freak.cpp#L468)
#FeatureDescriptorTypes.BRISK # [OK] BRISK computes the right scale index for each keypoint.size (https://github.com/opencv/opencv/blob/master/modules/features2d/src/brisk.cpp#L697)
#FeatureDescriptorTypes.BOOST_DESC # [OK] BOOST_DESC seems to properly rectify each keypoint patch size (https://github.com/opencv/opencv_contrib/blob/master/modules/xfeatures2d/src/boostdesc.cpp#L346)
]:
self.scale_factor = 2 # the above descriptors work on octave layers with a scale_factor=2!
Printer.orange('forcing scale factor=2 for detector', self.descriptor_type.name)
self.orb_params = dict(nfeatures=num_features,
scaleFactor=self.scale_factor,
nlevels=self.num_levels,
patchSize=31,
edgeThreshold = 10, #31, #19, #10, # margin from the frame border
fastThreshold = 20,
firstLevel = self.first_level,
WTA_K = 2,
scoreType=cv2.ORB_FAST_SCORE) #scoreType=cv2.ORB_HARRIS_SCORE, scoreType=cv2.ORB_FAST_SCORE
# --------------------------------------------- #
# init detector
# --------------------------------------------- #
if self.detector_type == FeatureDetectorTypes.SIFT or self.detector_type == FeatureDetectorTypes.ROOT_SIFT:
sift = self.SIFT_create(nOctaveLayers=self.layers_per_octave)
self.set_sift_parameters()
if self.detector_type == FeatureDetectorTypes.ROOT_SIFT:
self._feature_detector = RootSIFTFeature2D(sift)
else:
self._feature_detector = sift
#
#
elif self.detector_type == FeatureDetectorTypes.SURF:
self._feature_detector = self.SURF_create(nOctaves = self.num_levels, nOctaveLayers=self.layers_per_octave)
#self.intra_layer_factor = 1.2599 # num layers = nOctaves*nOctaveLayers scale=2^(1/nOctaveLayers) = 1.2599
self.scale_factor = 2 # force scale factor = 2 between octaves
#
#
elif self.detector_type == FeatureDetectorTypes.ORB:
self._feature_detector = self.ORB_create(**self.orb_params)
self.use_bock_adaptor = True # add a block adaptor?
self.need_nms = self.num_levels > 1 # ORB tends to generate overlapping keypoint on different levels <= KDT NMS seems to be very useful here!
#
#
elif self.detector_type == FeatureDetectorTypes.ORB2:
orb2_num_levels = self.num_levels
self._feature_detector = Orbslam2Feature2D(self.num_features, self.scale_factor, orb2_num_levels)
self.keypoint_filter_type = KeyPointFilterTypes.NONE # ORB2 cpp implementation already includes the algorithm OCTREE_NMS
#
#
elif self.detector_type == FeatureDetectorTypes.BRISK:
self._feature_detector = self.BRISK_create(octaves=self.num_levels)
#self.intra_layer_factor = 1.3 # from the BRISK opencv code this seems to be the used scale factor between intra-octave frames
#self.intra_layer_factor = math.sqrt(2) # approx, num layers = nOctaves*nOctaveLayers, from the BRISK paper there are octave ci and intra-octave di layers, t(ci)=2^i, t(di)=2^i * 1.5
self.scale_factor = 2 # force scale factor = 2 between octaves
#self.keypoint_filter_type = KeyPointFilterTypes.NONE
#
#
elif self.detector_type == FeatureDetectorTypes.KAZE:
self._feature_detector = self.KAZE_create(nOctaves=self.num_levels, threshold=0.0005) # default: threshold = 0.001f
self.scale_factor = 2 # force scale factor = 2 between octaves
#
#
elif self.detector_type == FeatureDetectorTypes.AKAZE:
self._feature_detector = self.AKAZE_create(nOctaves=self.num_levels, threshold=0.0005) # default: threshold = 0.001f
self.scale_factor = 2 # force scale factor = 2 between octaves
#
#
elif self.detector_type == FeatureDetectorTypes.SUPERPOINT:
self.oriented_features = False
self._feature_detector = SuperPointFeature2D()
if self.descriptor_type != FeatureDescriptorTypes.NONE:
self.use_pyramid_adaptor = self.num_levels > 1
self.need_nms = self.num_levels > 1
self.pyramid_type = PyramidType.GAUSS_PYRAMID
self.pyramid_do_parallel = False # N.B.: SUPERPOINT interface class is not thread-safe!
self.force_multiscale_detect_and_compute = True # force it since SUPERPOINT cannot compute descriptors separately from keypoints
#
#
elif self.detector_type == FeatureDetectorTypes.FAST:
self.oriented_features = False
self._feature_detector = self.FAST_create(threshold=20, nonmaxSuppression=True)
if self.descriptor_type != FeatureDescriptorTypes.NONE:
#self.use_bock_adaptor = True # override a block adaptor?
self.use_pyramid_adaptor = self.num_levels > 1 # override a pyramid adaptor?
#self.pyramid_type = PyramidType.GAUSS_PYRAMID
#self.first_level = 0
#self.do_sat_features_per_level = True
self.need_nms = self.num_levels > 1
self.keypoint_nms_filter_type = KeyPointFilterTypes.OCTREE_NMS
self.do_keypoints_size_rescaling = True
#
#
elif self.detector_type == FeatureDetectorTypes.SHI_TOMASI:
self.oriented_features = False
self._feature_detector = ShiTomasiDetector(self.num_features)
if self.descriptor_type != FeatureDescriptorTypes.NONE:
#self.use_bock_adaptor = False # override a block adaptor?
self.use_pyramid_adaptor = self.num_levels > 1
#self.pyramid_type = PyramidType.GAUSS_PYRAMID
#self.first_level = 0
self.need_nms = self.num_levels > 1
self.keypoint_nms_filter_type = KeyPointFilterTypes.OCTREE_NMS
self.do_keypoints_size_rescaling = True
#
#
elif self.detector_type == FeatureDetectorTypes.AGAST:
self.oriented_features = False
self._feature_detector = self.AGAST_create(threshold=10, nonmaxSuppression=True)
if self.descriptor_type != FeatureDescriptorTypes.NONE:
#self.use_bock_adaptor = True # override a block adaptor?
self.use_pyramid_adaptor = self.num_levels > 1 # override a pyramid adaptor?
#self.pyramid_type = PyramidType.GAUSS_PYRAMID
#self.first_level = 0
self.need_nms = self.num_levels > 1
self.keypoint_nms_filter_type = KeyPointFilterTypes.OCTREE_NMS
self.do_keypoints_size_rescaling = True
#
#
elif self.detector_type == FeatureDetectorTypes.GFTT:
self.oriented_features = False
self._feature_detector = self.GFTT_create(self.num_features, qualityLevel=0.01, minDistance=3, blockSize=5, useHarrisDetector=False, k=0.04)
if self.descriptor_type != FeatureDescriptorTypes.NONE:
#self.use_bock_adaptor = True # override a block adaptor?
self.use_pyramid_adaptor = self.num_levels > 1 # override a pyramid adaptor?
#self.pyramid_type = PyramidType.GAUSS_PYRAMID
#self.first_level = 0
self.need_nms = self.num_levels > 1
self.keypoint_nms_filter_type = KeyPointFilterTypes.OCTREE_NMS
self.do_keypoints_size_rescaling = True
#
#
elif self.detector_type == FeatureDetectorTypes.MSER:
self._feature_detector = self.MSER_create()
#self.use_bock_adaptor = True # override a block adaptor?
self.use_pyramid_adaptor = self.num_levels > 1 # override a pyramid adaptor?
self.pyramid_do_parallel = False # parallel computations generate segmentation fault (is MSER thread-safe?)
#self.pyramid_type = PyramidType.GAUSS_PYRAMID
#self.first_level = 0
self.need_nms = self.num_levels > 1
#self.keypoint_nms_filter_type = KeyPointFilterTypes.OCTREE_NMS
#
#
elif self.detector_type == FeatureDetectorTypes.MSD:
#detector = ShiTomasiDetector(self.num_features)
#self._feature_detector = self.MSD_create(detector)
self._feature_detector = self.MSD_create()
print('MSD detector info:',dir(self._feature_detector))
#self.use_bock_adaptor = True # override a block adaptor?
#self.use_pyramid_adaptor = self.num_levels > 1 # override a pyramid adaptor?
#self.pyramid_type = PyramidType.GAUSS_PYRAMID
#self.first_level = 0
#self.need_nms = self.num_levels > 1
#self.keypoint_nms_filter_type = KeyPointFilterTypes.OCTREE_NMS
#
#
elif self.detector_type == FeatureDetectorTypes.STAR:
self.oriented_features = False
self._feature_detector = self.STAR_create(maxSize=45,
responseThreshold=10, # =30
lineThresholdProjected=10,
lineThresholdBinarized=8,
suppressNonmaxSize=5)
if self.descriptor_type != FeatureDescriptorTypes.NONE:
#self.use_bock_adaptor = True # override a block adaptor?
self.use_pyramid_adaptor = self.num_levels > 1 # override a pyramid adaptor?
#self.pyramid_type = PyramidType.GAUSS_PYRAMID
#self.first_level = 0
#self.need_nms = self.num_levels > 1
#self.keypoint_nms_filter_type = KeyPointFilterTypes.OCTREE_NMS
#
#
elif self.detector_type == FeatureDetectorTypes.HL:
self.oriented_features = False
self._feature_detector = self.HL_create(numOctaves=self.num_levels,
corn_thresh=0.005, # = 0.01
DOG_thresh=0.01, # = 0.01
maxCorners=self.num_features,
num_layers=4) #
self.scale_factor = 2 # force scale factor = 2 between octaves
#
#
elif self.detector_type == FeatureDetectorTypes.D2NET:
self.need_color_image = True
self.num_levels = 1 # force unless you have 12GB of VRAM
multiscale=self.num_levels>1
self._feature_detector = D2NetFeature2D(multiscale=multiscale)
#self.keypoint_filter_type = KeyPointFilterTypes.NONE
#
#
elif self.detector_type == FeatureDetectorTypes.DELF:
self.need_color_image = True
#self.num_levels = 1 # force #scales are computed internally
self._feature_detector = DelfFeature2D(num_features=self.num_features,score_threshold=20)
self.scale_factor = self._feature_detector.scale_factor
#self.keypoint_filter_type = KeyPointFilterTypes.NONE
#
#
elif self.detector_type == FeatureDetectorTypes.CONTEXTDESC:
self.set_sift_parameters()
self.need_color_image = True
#self.num_levels = 1 # force # computed internally by SIFT method
self._feature_detector = ContextDescFeature2D(num_features=self.num_features)
#self.keypoint_filter_type = KeyPointFilterTypes.NONE
#
#
elif self.detector_type == FeatureDetectorTypes.LFNET:
self.need_color_image = True
#self.num_levels = 1 # force
self._feature_detector = LfNetFeature2D(num_features=self.num_features)
#self.keypoint_filter_type = KeyPointFilterTypes.NONE
#
#
elif self.detector_type == FeatureDetectorTypes.R2D2:
self.need_color_image = True
#self.num_levels = - # internally recomputed
self._feature_detector = R2d2Feature2D(num_features=self.num_features)
self.scale_factor = self._feature_detector.scale_f
self.keypoint_filter_type = KeyPointFilterTypes.NONE
#
#
elif self.detector_type == FeatureDetectorTypes.KEYNET:
#self.num_levels = - # internally recomputed
self._feature_detector = KeyNetDescFeature2D(num_features=self.num_features)
self.num_features = self._feature_detector.num_features
self.num_levels = self._feature_detector.num_levels
self.scale_factor = self._feature_detector.scale_factor
self.keypoint_filter_type = KeyPointFilterTypes.NONE
#
#
elif self.detector_type == FeatureDetectorTypes.DISK:
self.num_levels = 1 # force
self.need_color_image = True
self._feature_detector = DiskFeature2D(num_features=self.num_features)
#
#
else:
raise ValueError("Unknown feature detector %s" % self.detector_type)
if self.need_nms:
self.keypoint_filter_type = self.keypoint_nms_filter_type
if self.use_bock_adaptor:
self.orb_params['edgeThreshold'] = 0
# --------------------------------------------- #
# init descriptor
# --------------------------------------------- #
if self.is_detector_equal_to_descriptor:
Printer.green('using same detector and descriptor object: ', self.detector_type.name)
self._feature_descriptor = self._feature_detector
else:
# detector and descriptors are different
self.num_levels_descriptor = self.num_levels
if self.use_pyramid_adaptor:
# NOT VALID ANYMORE -> if there is a pyramid adaptor, the descriptor does not need to rescale the images which are rescaled by the pyramid adaptor itself
#self.orb_params['nlevels'] = 1
#self.num_levels_descriptor = 1 #self.num_levels
pass
# actual descriptor initialization
if self.descriptor_type == FeatureDescriptorTypes.SIFT or self.descriptor_type == FeatureDescriptorTypes.ROOT_SIFT:
sift = self.SIFT_create(nOctaveLayers=3)
if self.descriptor_type == FeatureDescriptorTypes.ROOT_SIFT:
self._feature_descriptor = RootSIFTFeature2D(sift)
else:
self._feature_descriptor = sift
#
#
elif self.descriptor_type == FeatureDescriptorTypes.SURF:
self.oriented_features = True # SURF computes the keypoint orientation
self._feature_descriptor = self.SURF_create(nOctaves = self.num_levels_descriptor, nOctaveLayers=3)
#
#
elif self.descriptor_type == FeatureDescriptorTypes.ORB:
self._feature_descriptor = self.ORB_create(**self.orb_params)
#self.oriented_features = False # N.B: ORB descriptor does not compute orientation on its own
#
#
elif self.descriptor_type == FeatureDescriptorTypes.ORB2:
self._feature_descriptor = self.ORB_create(**self.orb_params)
#
#
elif self.descriptor_type == FeatureDescriptorTypes.BRISK:
self.oriented_features = True # BRISK computes the keypoint orientation
self._feature_descriptor = self.BRISK_create(octaves=self.num_levels_descriptor)
#
#
elif self.descriptor_type == FeatureDescriptorTypes.KAZE:
if not self.is_detector_equal_to_descriptor:
Printer.red('WARNING: KAZE descriptors can only be used with KAZE or AKAZE keypoints.') # https://kyamagu.github.io/mexopencv/matlab/AKAZE.html
self._feature_descriptor = self.KAZE_create(nOctaves=self.num_levels_descriptor)
#
#
elif self.descriptor_type == FeatureDescriptorTypes.AKAZE:
if not self.is_detector_equal_to_descriptor:
Printer.red('WARNING: AKAZE descriptors can only be used with KAZE or AKAZE keypoints.') # https://kyamagu.github.io/mexopencv/matlab/AKAZE.html
self._feature_descriptor = self.AKAZE_create(nOctaves=self.num_levels_descriptor)
#
#
elif self.descriptor_type == FeatureDescriptorTypes.FREAK:
self.oriented_features = True # FREAK computes the keypoint orientation
self._feature_descriptor = self.FREAK_create(nOctaves=self.num_levels_descriptor)
#
#
elif self.descriptor_type == FeatureDescriptorTypes.SUPERPOINT:
if self.detector_type != FeatureDetectorTypes.SUPERPOINT:
raise ValueError("You cannot use SUPERPOINT descriptor without SUPERPOINT detector!\nPlease, select SUPERPOINT as both descriptor and detector!")
self._feature_descriptor = self._feature_detector # reuse the same SuperPointDector object
#
#
elif self.descriptor_type == FeatureDescriptorTypes.TFEAT:
self._feature_descriptor = TfeatFeature2D()
#
#
elif self.descriptor_type == FeatureDescriptorTypes.BOOST_DESC:
self.do_keypoints_size_rescaling = False # below a proper keypoint size scale factor is set depending on the used detector
boost_des_keypoint_size_scale_factor = 1.5
# from https://docs.opencv.org/3.4.2/d1/dfd/classcv_1_1xfeatures2d_1_1BoostDesc.html#details
#scale_factor: adjust the sampling window of detected keypoints 6.25f is default and fits for KAZE, SURF
# detected keypoints window ratio 6.75f should be the scale for SIFT
# detected keypoints window ratio 5.00f should be the scale for AKAZE, MSD, AGAST, FAST, BRISK
# keypoints window ratio 0.75f should be the scale for ORB
# keypoints ratio 1.50f was the default in original implementation
if self.detector_type in [FeatureDetectorTypes.KAZE, FeatureDetectorTypes.SURF]:
boost_des_keypoint_size_scale_factor = 6.25
elif self.detector_type == FeatureDetectorTypes.SIFT:
boost_des_keypoint_size_scale_factor = 6.75
elif self.detector_type in [FeatureDetectorTypes.AKAZE, FeatureDetectorTypes.AGAST, FeatureDetectorTypes.FAST, FeatureDetectorTypes.BRISK]:
boost_des_keypoint_size_scale_factor = 5.0
elif self.detector_type == FeatureDetectorTypes.ORB:
boost_des_keypoint_size_scale_factor = 0.75
self._feature_descriptor = self.BoostDesc_create(scale_factor=boost_des_keypoint_size_scale_factor)
#
#
elif self.descriptor_type == FeatureDescriptorTypes.DAISY:
self._feature_descriptor = self.DAISY_create()
#
#
elif self.descriptor_type == FeatureDescriptorTypes.LATCH:
self._feature_descriptor = self.LATCH_create()
#
#
elif self.descriptor_type == FeatureDescriptorTypes.LUCID:
self._feature_descriptor = self.LUCID_create(lucid_kernel=1, # =1
blur_kernel=3 ) # =2
self.need_color_image = True
#
#
elif self.descriptor_type == FeatureDescriptorTypes.VGG:
self._feature_descriptor = self.VGG_create()
#
#
elif self.descriptor_type == FeatureDescriptorTypes.HARDNET:
self._feature_descriptor = HardnetFeature2D(do_cuda=True)
#
#
elif self.descriptor_type == FeatureDescriptorTypes.GEODESC:
self._feature_descriptor = GeodescFeature2D()
#
#
elif self.descriptor_type == FeatureDescriptorTypes.SOSNET:
self._feature_descriptor = SosnetFeature2D()
#
#
elif self.descriptor_type == FeatureDescriptorTypes.L2NET:
#self._feature_descriptor = L2NetKerasFeature2D() # keras-tf version
self._feature_descriptor = L2NetFeature2D()
#
#
elif self.descriptor_type == FeatureDescriptorTypes.LOGPOLAR:
self._feature_descriptor = LogpolarFeature2D()
#
#
elif self.descriptor_type == FeatureDescriptorTypes.D2NET:
self.need_color_image = True
if self.detector_type != FeatureDetectorTypes.D2NET:
raise ValueError("You cannot use D2NET descriptor without D2NET detector!\nPlease, select D2NET as both descriptor and detector!")
self._feature_descriptor = self._feature_detector # reuse detector object
#
#
elif self.descriptor_type == FeatureDescriptorTypes.DELF:
self.need_color_image = True
if self.detector_type != FeatureDetectorTypes.DELF:
raise ValueError("You cannot use DELF descriptor without DELF detector!\nPlease, select DELF as both descriptor and detector!")
self._feature_descriptor = self._feature_detector # reuse detector object
#
#
elif self.descriptor_type == FeatureDescriptorTypes.CONTEXTDESC:
self.need_color_image = True
if self.detector_type != FeatureDetectorTypes.CONTEXTDESC:
raise ValueError("You cannot use CONTEXTDESC descriptor without CONTEXTDESC detector!\nPlease, select CONTEXTDESC as both descriptor and detector!")
self._feature_descriptor = self._feature_detector # reuse detector object
#
#
elif self.descriptor_type == FeatureDescriptorTypes.LFNET:
self.need_color_image = True
if self.detector_type != FeatureDetectorTypes.LFNET:
raise ValueError("You cannot use LFNET descriptor without LFNET detector!\nPlease, select LFNET as both descriptor and detector!")
self._feature_descriptor = self._feature_detector # reuse detector object
#
#
elif self.descriptor_type == FeatureDescriptorTypes.R2D2:
self.oriented_features = False
self.need_color_image = True
if self.detector_type != FeatureDetectorTypes.R2D2:
raise ValueError("You cannot use R2D2 descriptor without R2D2 detector!\nPlease, select R2D2 as both descriptor and detector!")
self._feature_descriptor = self._feature_detector # reuse detector object
#
#
elif self.descriptor_type == FeatureDescriptorTypes.KEYNET:
self.oriented_features = False
if self.detector_type != FeatureDetectorTypes.KEYNET:
raise ValueError("You cannot use KEYNET internal descriptor without KEYNET detector!\nPlease, select KEYNET as both descriptor and detector!")
self._feature_descriptor = self._feature_detector # reuse detector object
#
#
elif self.descriptor_type == FeatureDescriptorTypes.BEBLID:
BEBLID_SIZE_256_BITS = 101 # https://docs.opencv.org/master/d7/d99/classcv_1_1xfeatures2d_1_1BEBLID.html
BEBLID_scale_factor = 1.0 # it depends on the used detector https://docs.opencv.org/master/d7/d99/classcv_1_1xfeatures2d_1_1BEBLID.html#a38997aa059977abf6a2d6bf462d50de0a7b2a1e106c93d76cdfe5cef053277a04
# TODO: adapt BEBLID scale factor to actual used detector
# 1.0 is OK for ORB2 detector
self._feature_descriptor = self.BEBLID_create(BEBLID_scale_factor, BEBLID_SIZE_256_BITS)
#
#
elif self.descriptor_type == FeatureDescriptorTypes.DISK:
self.oriented_features = False
if self.detector_type != FeatureDetectorTypes.DISK:
raise ValueError("You cannot use DISK internal descriptor without DISK detector!\nPlease, select DISK as both descriptor and detector!")
self._feature_descriptor = self._feature_detector # reuse detector object
#
#
elif self.descriptor_type == FeatureDescriptorTypes.NONE:
self._feature_descriptor = None
else:
raise ValueError("Unknown feature descriptor %s" % self.descriptor_type)
# --------------------------------------------- #
# init from FeatureInfo
# --------------------------------------------- #
# get and set norm type
try:
self.norm_type = FeatureInfo.norm_type[self.descriptor_type]
except:
Printer.red('You did not set the norm type for: ', self.descriptor_type.name)
raise ValueError("Unmanaged norm type for feature descriptor %s" % self.descriptor_type.name)
# set descriptor distance functions
if self.norm_type == cv2.NORM_HAMMING:
self.descriptor_distance = hamming_distance
self.descriptor_distances = hamming_distances
if self.norm_type == cv2.NORM_L2:
self.descriptor_distance = l2_distance
self.descriptor_distances = l2_distances
# get and set reference max descriptor distance
try:
Parameters.kMaxDescriptorDistance = FeatureInfo.max_descriptor_distance[self.descriptor_type]
except:
Printer.red('You did not set the reference max descriptor distance for: ', self.descriptor_type.name)
raise ValueError("Unmanaged max descriptor distance for feature descriptor %s" % self.descriptor_type.name)
Parameters.kMaxDescriptorDistanceSearchEpipolar = Parameters.kMaxDescriptorDistance
# --------------------------------------------- #
# other required initializations
# --------------------------------------------- #
if not self.oriented_features:
Printer.orange('WARNING: using NON-ORIENTED features: ', self.detector_type.name,'-',self.descriptor_type.name, ' (i.e. kp.angle=0)')
if self.is_detector_equal_to_descriptor and \
( self.detector_type == FeatureDetectorTypes.SIFT or
self.detector_type == FeatureDetectorTypes.ROOT_SIFT or
self.detector_type == FeatureDetectorTypes.CONTEXTDESC ):
self.init_sigma_levels_sift()
else:
self.init_sigma_levels()
if self.use_bock_adaptor:
self.block_adaptor = BlockAdaptor(self._feature_detector, self._feature_descriptor)
if self.use_pyramid_adaptor:
self.pyramid_params = dict(detector=self._feature_detector,
descriptor=self._feature_descriptor,
num_features = self.num_features,
num_levels=self.num_levels,
scale_factor=self.scale_factor,
sigma0=self.sigma_level0,
first_level=self.first_level,
pyramid_type=self.pyramid_type,
use_block_adaptor=self.use_bock_adaptor,
do_parallel = self.pyramid_do_parallel,
do_sat_features_per_level = self.do_sat_features_per_level)
self.pyramid_adaptor = PyramidAdaptor(**self.pyramid_params)
def set_sift_parameters(self):
# N.B.: The number of SIFT octaves is automatically computed from the image resolution,
# here we can set the number of layers in each octave.
# from https://docs.opencv.org/3.4/d5/d3c/classcv_1_1xfeatures2d_1_1SIFT.html
#self.intra_layer_factor = 1.2599 # num layers = nOctaves*nOctaveLayers scale=2^(1/nOctaveLayers) = 1.2599
self.scale_factor = 2 # force scale factor = 2 between octaves
self.sigma_level0 = 1.6 # https://github.com/opencv/opencv/blob/173442bb2ecd527f1884d96d7327bff293f0c65a/modules/nonfree/src/sift.cpp#L118
# from https://docs.opencv.org/3.1.0/da/df5/tutorial_py_sift_intro.html
self.first_level = -1 # https://github.com/opencv/opencv/blob/173442bb2ecd527f1884d96d7327bff293f0c65a/modules/nonfree/src/sift.cpp#L731
# initialize scale factors, sigmas for each octave level;
# these are used for managing image pyramids and weighting (information matrix) reprojection error terms in the optimization
def init_sigma_levels(self):
print('num_levels: ', self.num_levels)
num_levels = max(kNumLevelsInitSigma, self.num_levels)
self.inv_scale_factor = 1./self.scale_factor
self.scale_factors = np.zeros(num_levels)
self.level_sigmas2 = np.zeros(num_levels)
self.level_sigmas = np.zeros(num_levels)
self.inv_scale_factors = np.zeros(num_levels)
self.inv_level_sigmas2 = np.zeros(num_levels)
self.log_scale_factor = math.log(self.scale_factor)
self.scale_factors[0] = 1.0
self.level_sigmas2[0] = self.sigma_level0*self.sigma_level0
self.level_sigmas[0] = math.sqrt(self.level_sigmas2[0])
for i in range(1,num_levels):
self.scale_factors[i] = self.scale_factors[i-1]*self.scale_factor
self.level_sigmas2[i] = self.scale_factors[i]*self.scale_factors[i]*self.level_sigmas2[0]
self.level_sigmas[i] = math.sqrt(self.level_sigmas2[i])
for i in range(num_levels):
self.inv_scale_factors[i] = 1.0/self.scale_factors[i]
self.inv_level_sigmas2[i] = 1.0/self.level_sigmas2[i]
#print('self.scale_factor: ', self.scale_factor)
#print('self.scale_factors: ', self.scale_factors)
#print('self.level_sigmas: ', self.level_sigmas)
#print('self.inv_scale_factors: ', self.inv_scale_factors)
# initialize scale factors, sigmas for each octave level;
# these are used for managing image pyramids and weighting (information matrix) reprojection error terms in the optimization;
# this method can be used only when the following mapping is adopted for SIFT:
# keypoint.octave = (unpacked_octave+1)*3+unpacked_layer where S=3 is the number of levels per octave
def init_sigma_levels_sift(self):
print('initializing SIFT sigma levels')
print('num_levels: ', self.num_levels)
self.num_levels = 3*self.num_levels + 3 # we map: level=keypoint.octave = (unpacked_octave+1)*3+unpacked_layer where S=3 is the number of scales per octave
num_levels = max(kNumLevelsInitSigma, self.num_levels)
#print('num_levels: ', num_levels)
# N.B: if we adopt the mapping: keypoint.octave = (unpacked_octave+1)*3+unpacked_layer
# then we can consider a new virtual scale_factor = 2^(1/3) (used between two contiguous layers of the same octave)
print('original scale factor: ', self.scale_factor)
self.scale_factor = math.pow(2,1./3)
self.inv_scale_factor = 1./self.scale_factor
self.scale_factors = np.zeros(num_levels)
self.level_sigmas2 = np.zeros(num_levels)
self.level_sigmas = np.zeros(num_levels)
self.inv_scale_factors = np.zeros(num_levels)
self.inv_level_sigmas2 = np.zeros(num_levels)
self.log_scale_factor = math.log(self.scale_factor)
self.sigma_level0 = 1.6 # https://github.com/opencv/opencv/blob/173442bb2ecd527f1884d96d7327bff293f0c65a/modules/nonfree/src/sift.cpp#L118
# from https://docs.opencv.org/3.1.0/da/df5/tutorial_py_sift_intro.html
sigma_level02 = self.sigma_level0*self.sigma_level0
# N.B.: these are used only when recursive filtering is applied: see https://www.vlfeat.org/api/sift.html#sift-tech-ss
#sift_init_sigma = 0.5
#sift_init_sigma2 = 0.25
# see also https://www.vlfeat.org/api/sift.html
self.scale_factors[0] = 1.0
self.level_sigmas2[0] = sigma_level02 # -4*sift_init_sigma2 N.B.: this is an absolute sigma,
# not a delta_sigma used for incrementally filtering contiguos layers => we must not subtract (4*sift_init_sigma2)
# https://github.com/opencv/opencv/blob/173442bb2ecd527f1884d96d7327bff293f0c65a/modules/nonfree/src/sift.cpp#L197
self.level_sigmas[0] = math.sqrt(self.level_sigmas2[0])
for i in range(1,num_levels):
self.scale_factors[i] = self.scale_factors[i-1]*self.scale_factor
self.level_sigmas2[i] = self.scale_factors[i]*self.scale_factors[i]*sigma_level02 # https://github.com/opencv/opencv/blob/173442bb2ecd527f1884d96d7327bff293f0c65a/modules/nonfree/src/sift.cpp#L224
self.level_sigmas[i] = math.sqrt(self.level_sigmas2[i])
for i in range(num_levels):
self.inv_scale_factors[i] = 1.0/self.scale_factors[i]
self.inv_level_sigmas2[i] = 1.0/self.level_sigmas2[i]
#print('self.scale_factor: ', self.scale_factor)
#print('self.scale_factors: ', self.scale_factors)
#print('self.level_sigmas: ', self.level_sigmas)
#print('self.inv_scale_factors: ', self.inv_scale_factors)
# filter matches by using
# Non-Maxima Suppression (NMS) based on kd-trees
# or SSC NMS (https://github.com/BAILOOL/ANMS-Codes)
# or SAT (get features with best responses)
# or OCTREE_NMS (implemented in ORBSLAM2, distribution of features in a quad-tree)
def filter_keypoints(self, type, frame, kps, des=None):
filter_name = type.name
if type == KeyPointFilterTypes.NONE:
pass
elif type == KeyPointFilterTypes.KDT_NMS:
kps, des = kdt_nms(kps, des, self.num_features)
elif type == KeyPointFilterTypes.SSC_NMS:
kps, des = ssc_nms(kps, des, frame.shape[1], frame.shape[0], self.num_features)
elif type == KeyPointFilterTypes.OCTREE_NMS:
if des is not None:
raise ValueError('at the present time, you cannot use OCTREE_NMS with descriptors')
kps = octree_nms(frame, kps, self.num_features)
elif type == KeyPointFilterTypes.GRID_NMS:
kps, des, _ = grid_nms(kps, des, frame.shape[0], frame.shape[1], self.num_features, dist_thresh=4)
elif type == KeyPointFilterTypes.SAT:
if len(kps) > self.num_features:
kps, des = sat_num_features(kps, des, self.num_features)
else:
raise ValueError("Unknown match-filter type")
return kps, des, filter_name
def rescale_keypoint_size(self, kps):
# if keypoints are FAST, etc. then rescale their small sizes
# in order to let descriptors compute an encoded representation with a decent patch size
scale = 1
doit = False
if self.detector_type == FeatureDetectorTypes.FAST:
scale = kFASTKeyPointSizeRescaleFactor
doit = True
elif self.detector_type == FeatureDetectorTypes.AGAST:
scale = kAGASTKeyPointSizeRescaleFactor
doit = True
elif self.detector_type == FeatureDetectorTypes.SHI_TOMASI or self.detector_type == FeatureDetectorTypes.GFTT:
scale = kShiTomasiKeyPointSizeRescaleFactor
doit = True
if doit:
for kp in kps:
kp.size *= scale
# detect keypoints without computing their descriptors
# out: kps (array of cv2.KeyPoint)
def detect(self, frame, mask=None, filter=True):
if not self.need_color_image and frame.ndim>2: # check if we have to convert to gray image
frame = cv2.cvtColor(frame,cv2.COLOR_RGB2GRAY)
if self.use_pyramid_adaptor:
# detection with pyramid adaptor (it can optionally include a block adaptor per level)
kps = self.pyramid_adaptor.detect(frame, mask)
elif self.use_bock_adaptor:
# detection with block adaptor
kps = self.block_adaptor.detect(frame, mask)
else:
# standard detection
kps = self._feature_detector.detect(frame, mask)
# filter keypoints
filter_name = 'NONE'
if filter:
kps, _, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps)
# if keypoints are FAST, etc. give them a decent size in order to properly compute the descriptors
if self.do_keypoints_size_rescaling:
self.rescale_keypoint_size(kps)
if kDrawOriginalExtractedFeatures: # draw the original features
imgDraw = cv2.drawKeypoints(frame, kps, None, color=(0,255,0), flags=0)
cv2.imshow('detected keypoints',imgDraw)
if kVerbose:
print('detector:',self.detector_type.name,', #features:', len(kps),', [kp-filter:',filter_name,']')
return kps
# compute the descriptors once given the keypoints
def compute(self, frame, kps, filter = True):
if not self.need_color_image and frame.ndim>2: # check if we have to convert to gray image
frame = cv2.cvtColor(frame,cv2.COLOR_RGB2GRAY)
kps, des = self._feature_descriptor.compute(frame, kps) # then, compute descriptors
# filter keypoints
filter_name = 'NONE'
if filter:
kps, des, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps, des)
if kVerbose:
print('descriptor:',self.descriptor_type.name,', #features:', len(kps),', [kp-filter:',filter_name,']')
return kps, des
# detect keypoints and their descriptors
# out: kps, des
    def detectAndCompute(self, frame, mask=None, filter = True):
        """Detect keypoints and compute their descriptors.

        Depending on the configuration, the work is delegated to the pyramid
        adaptor, the block adaptor or the plain detector/descriptor objects;
        when an adaptor already filters during detection, the local `filter`
        flag is cleared so keypoints are not filtered twice.
        Returns the tuple (kps, des).
        """
        if not self.need_color_image and frame.ndim>2: # check if we have to convert to gray image
            frame = cv2.cvtColor(frame,cv2.COLOR_RGB2GRAY)
        if self.use_pyramid_adaptor:
            # detectAndCompute with pyramid adaptor (it can optionally include a block adaptor per level)
            if self.force_multiscale_detect_and_compute:
                # force detectAndCompute on each level instead of first {detect() on each level} and then {compute() on resulting detected keypoints one time}
                kps, des = self.pyramid_adaptor.detectAndCompute(frame, mask)
                #
            else:
                kps = self.detect(frame, mask, filter=True) # first, detect by using adaptor on the different pyramid levels
                kps, des = self.compute(frame, kps, filter=False) # then, separately compute the descriptors on detected keypoints (one time)
                filter = False # disable keypoint filtering since we already applied it for detection
        elif self.use_bock_adaptor:
            # detectAndCompute with block adaptor (force detect/compute on each block)
            #
            #kps, des = self.block_adaptor.detectAndCompute(frame, mask)
            #
            kps = self.detect(frame, mask, filter=True) # first, detect by using adaptor
            kps, des = self.compute(frame, kps, filter=False) # then, separately compute the descriptors
            filter = False # disable keypoint filtering since we already applied it for detection
        else:
            # standard detectAndCompute
            if self.is_detector_equal_to_descriptor:
                # detector = descriptor => call them together with detectAndCompute() method
                kps, des = self._feature_detector.detectAndCompute(frame, mask)
                if kVerbose:
                    print('detector:', self.detector_type.name,', #features:',len(kps))
                    print('descriptor:', self.descriptor_type.name,', #features:',len(kps))
            else:
                # detector and descriptor are different => call them separately
                # 1. first, detect keypoint locations
                kps = self.detect(frame, mask, filter=False)
                # 2. then, compute descriptors
                kps, des = self._feature_descriptor.compute(frame, kps)
                if kVerbose:
                    #print('detector: ', self.detector_type.name, ', #features: ', len(kps))
                    print('descriptor: ', self.descriptor_type.name, ', #features: ', len(kps))
        # filter keypoints (unless an adaptor branch above already did it)
        filter_name = 'NONE'
        if filter:
            kps, des, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps, des)
        # SIFT-family octaves are unpacked into the intra-layer mapping assumed
        # by init_sigma_levels_sift()
        if self.detector_type == FeatureDetectorTypes.SIFT or \
           self.detector_type == FeatureDetectorTypes.ROOT_SIFT or \
           self.detector_type == FeatureDetectorTypes.CONTEXTDESC :
            unpackSiftOctaveKps(kps, method=UnpackOctaveMethod.INTRAL_LAYERS)
        if kVerbose:
            print('detector:',self.detector_type.name,', descriptor:', self.descriptor_type.name,', #features:', len(kps),' (#ref:', self.num_features, '), [kp-filter:',filter_name,']')
        self.debug_print(kps)
        return kps, des
def debug_print(self, kps):
if False:
# raw print of all keypoints
for k in kps:
print("response: ", k.response, "\t, size: ", k.size, "\t, octave: ", k.octave, "\t, angle: ", k.angle)
if False:
# generate a rough histogram for keypoint sizes
kps_sizes = [kp.size for kp in kps]
kps_sizes_histogram = np.histogram(kps_sizes, bins=10)
print('size-histogram: \n', list(zip(kps_sizes_histogram[1],kps_sizes_histogram[0])))
# generate histogram at level 0
kps_sizes = [kp.size for kp in kps if kp.octave==1]
kps_sizes_histogram = np.histogram(kps_sizes, bins=10)
print('size-histogram at level 0: \n', list(zip(kps_sizes_histogram[1],kps_sizes_histogram[0])))
if False:
# count points for each octave => generate an octave histogram
kps_octaves = [k.octave for k in kps]
kps_octaves = Counter(kps_octaves)
print('levels-histogram: ', kps_octaves.most_common(12))
| 63,509 | 62.957704 | 272 | py |
pyslam | pyslam-master/feature_delf.py | """
* This file is part of PYSLAM
* Adapted from https://github.com/tensorflow/models/blob/master/research/delf/delf/python/examples/extract_features.py, see the license therein.
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
import config
config.cfg.set_lib('delf')
import cv2
from threading import RLock
from utils_sys import Printer
import warnings # to disable tensorflow-numpy warnings: from https://github.com/tensorflow/tensorflow/issues/30427
warnings.filterwarnings('ignore', category=FutureWarning)
import argparse
import os
import sys
import time
import json
import numpy as np
import h5py
if False:
import tensorflow as tf
else:
# from https://stackoverflow.com/questions/56820327/the-name-tf-session-is-deprecated-please-use-tf-compat-v1-session-instead
import tensorflow.compat.v1 as tf
# from https://kobkrit.com/using-allow-growth-memory-option-in-tensorflow-and-keras-dc8c8081bc96 to cope with the following error:
# "[...tensorflow/stream_executor/cuda/cuda_dnn.cc:329] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR"
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_config.gpu_options.per_process_gpu_memory_fraction=0.333 # from https://stackoverflow.com/questions/34199233/how-to-prevent-tensorflow-from-allocating-the-totality-of-a-gpu-memory
#session = tf.Session(config=tf_config, ...)
from google.protobuf import text_format
from tensorflow.python.platform import app
# from delf import delf_config_pb2
# from delf import feature_extractor
# from delf import feature_io
from delf.protos import aggregation_config_pb2
from delf.protos import box_pb2
from delf.protos import datum_pb2
from delf.protos import delf_config_pb2
from delf.protos import feature_pb2
from delf.python import box_io
from delf.python import datum_io
from delf.python import delf_v1
from delf.python import feature_aggregation_extractor
from delf.python import feature_aggregation_similarity
from delf.python import feature_extractor
from delf.python import feature_io
from delf.python.examples import detector
from delf.python.examples import extractor
from delf.python import detect_to_retrieve
from delf.python import google_landmarks_dataset
from utils_tf import set_tf_logging
#from utils import print_options
delf_base_path = config.cfg.root_folder + '/thirdparty/tensorflow_models/research/delf/delf/python/'
delf_config_file = delf_base_path + 'examples/delf_config_example.pbtxt'
delf_model_path = delf_base_path + 'examples/parameters/delf_gld_20190411/model/'
delf_mean_path = delf_base_path + 'examples/parameters/delf_gld_20190411/pca/mean.datum'
delf_projection_matrix_path = delf_base_path + 'examples/parameters/delf_gld_20190411/pca/pca_proj_mat.datum'
kVerbose = True
def MakeExtractor(sess, config, import_scope=None):
    """Creates a function to extract DELF features from an image.

    Args:
      sess: TensorFlow session to use.
      config: DelfConfig proto containing the model configuration.
      import_scope: Optional scope to use for model.

    Returns:
      Function that receives an image and returns features.
    """
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               config.model_path, import_scope=import_scope)
    prefix = import_scope + '/' if import_scope is not None else ''
    graph = sess.graph
    # input placeholders of the saved model
    in_image = graph.get_tensor_by_name('%sinput_image:0' % prefix)
    in_score_threshold = graph.get_tensor_by_name('%sinput_abs_thres:0' % prefix)
    in_image_scales = graph.get_tensor_by_name('%sinput_scales:0' % prefix)
    in_max_feature_num = graph.get_tensor_by_name('%sinput_max_feature_num:0' % prefix)
    # output tensors
    boxes = graph.get_tensor_by_name('%sboxes:0' % prefix)
    raw_descriptors = graph.get_tensor_by_name('%sfeatures:0' % prefix)
    feature_scales = graph.get_tensor_by_name('%sscales:0' % prefix)
    scores_2d = graph.get_tensor_by_name('%sscores:0' % prefix)
    # drop the trailing singleton dimension of the attention scores
    attention = tf.reshape(scores_2d, [tf.shape(scores_2d)[0]])
    locations, descriptors = feature_extractor.DelfFeaturePostProcessing(boxes, raw_descriptors, config)

    def ExtractorFn(image):
        """Receives a uint8 RGB image of shape (height, width, 3) and returns
        the tuple (locations, descriptors, feature_scales, attention)."""
        feeds = {
            in_image: image,
            in_score_threshold: config.delf_local_config.score_threshold,
            in_image_scales: list(config.image_scales),
            in_max_feature_num: config.delf_local_config.max_feature_num,
        }
        return sess.run([locations, descriptors, feature_scales, attention],
                        feed_dict=feeds)

    return ExtractorFn
# convert matrix of pts into list of keypoints
def convert_pts_to_keypoints(pts, scores, sizes):
    """Convert a matrix [Nx2] of points into a list of cv2.KeyPoint.

    Args:
      pts: array-like of (x, y) coordinates, or None.
      scores: per-point response scores (same length as pts).
      sizes: per-point keypoint sizes (same length as pts).

    Returns:
      List of cv2.KeyPoint (empty when pts is None).
    """
    kps = []
    if pts is not None:
        # only check lengths when there is actually something to convert;
        # previously the assert ran before the None check and raised TypeError
        assert(len(pts)==len(scores))
        # use positional arguments: the `_size`/`_response` keyword names were
        # removed from the cv2.KeyPoint constructor in OpenCV 4.5.3+
        kps = [ cv2.KeyPoint(float(p[0]), float(p[1]), float(sizes[i]), -1, float(scores[i])) for i,p in enumerate(pts) ]
    return kps
# interface for pySLAM
class DelfFeature2D:
    def __init__(self,
                 num_features=1000,
                 score_threshold=100,
                 do_tf_logging=False):
        """Load the DELF config and pre-trained model.

        Args:
          num_features: max number of local features to extract.
          score_threshold: attention score threshold for keeping features.
          do_tf_logging: enable/disable tensorflow logging output.
        """
        print('Using DelfFeature2D')
        self.lock = RLock()
        set_tf_logging(do_tf_logging)
        # Parse DelfConfig proto.
        self.delf_config = delf_config_pb2.DelfConfig()
        with tf.gfile.FastGFile(delf_config_file, 'r') as f:
            text_format.Merge(f.read(), self.delf_config)
        # override the example config with the local model/PCA paths and the
        # requested extraction parameters
        self.delf_config.model_path = delf_model_path
        self.delf_config.delf_local_config.pca_parameters.mean_path = delf_mean_path
        self.delf_config.delf_local_config.pca_parameters.projection_matrix_path = delf_projection_matrix_path
        self.delf_config.delf_local_config.max_feature_num = num_features
        self.delf_config.delf_local_config.score_threshold = score_threshold
        print('DELF CONFIG\n:', self.delf_config)
        # just a representative size for visualization and in order to convert extracted points to cv2.KeyPoint
        self.keypoint_size = 30
        self.image_scales = list(self.delf_config.image_scales)
        #print('image scales: ',self.image_scales)
        try:
            # ratio between two consecutive image scales of the config
            self.scale_factor = self.image_scales[1]/self.image_scales[0]
        except:
            self.scale_factor = np.sqrt(2) # according to default config and the paper
        #print('scale_factor: ',self.scale_factor)
        #self.image_levels = np.round(-np.log(self.image_scales)/np.log(self.scale_factor)).astype(np.int32)
        #print('image levels: ',self.image_levels)
        # last-extraction caches (filled by compute_kps_des)
        self.session = None
        self.pts = []
        self.kps = []
        self.des = []
        self.scales = []
        self.scores = []
        self.frame = None
        print('==> Loading pre-trained network.')
        self.load_model()
        print('==> Successfully loaded pre-trained network.')
@property
def num_features(self):
return self.delf_config.delf_local_config.max_feature_num
@property
def score_threshold(self):
return self.delf_config.delf_local_config.score_threshold
    def __del__(self):
        # release the tf session when the object is garbage-collected
        self.close()
    def load_model(self):
        """Create the tf session, initialize variables and build the extractor."""
        # Create graph before session :)
        # NOTE(review): tf.Graph().as_default() returns a context manager that is
        # never entered here, so the new graph is NOT installed as default; the
        # session below operates on the process default graph. Confirm intent.
        self.graph = tf.Graph().as_default()
        # NOTE(review): the module-level tf_config (GPU memory growth/fraction)
        # is not passed to tf.Session here — confirm whether it should be.
        self.session = tf.Session()
        init_op = tf.global_variables_initializer()
        self.session.run(init_op)
        self.extractor_fn = MakeExtractor(self.session, self.delf_config)
def close(self):
if self.session is not None:
print('DELF: closing tf session')
self.session.close()
tf.reset_default_graph()
def compute_kps_des(self, frame):
with self.lock:
image_tf = tf.convert_to_tensor(frame, np.float32)
im = self.session.run(image_tf)
# Extract and save features.
(locations_out, descriptors_out, feature_scales_out, attention_out) = self.extractor_fn(im)
self.pts = locations_out[:, ::-1]
self.des = descriptors_out
self.scales = feature_scales_out
self.scores = attention_out
# N.B.: according to the paper "Large-Scale Image Retrieval with Attentive Deep Local Features":
# We construct image pyramids by using scales that are a 2 factor apart. For the set of scales
# with range from 0.25 to 2.0, 7 different scales are used.
# The size of receptive field is inversely proportional to the scale; for example, for the 2.0 scale, the
# receptive field of the network covers 146 × 146 pixels.
# The receptive field size for the image at the original scale is 291 × 291.
#sizes = self.keypoint_size * 1./self.scales
sizes = self.keypoint_size * self.scales
if False:
# print('kps.shape', self.pts.shape)
# print('des.shape', self.des.shape)
# print('scales.shape', self.scales.shape)
# print('scores.shape', self.scores.shape)
print('scales:',self.scales)
print('sizes:',sizes)
self.kps = convert_pts_to_keypoints(self.pts, self.scores, sizes)
return self.kps, self.des
def detectAndCompute(self, frame, mask=None): #mask is a fake input
with self.lock:
self.frame = frame
self.kps, self.des = self.compute_kps_des(frame)
if kVerbose:
print('detector: DELF, descriptor: DELF, #features: ', len(self.kps), ', frame res: ', frame.shape[0:2])
return self.kps, self.des
# return keypoints if available otherwise call detectAndCompute()
def detect(self, frame, mask=None): # mask is a fake input
with self.lock:
#if self.frame is not frame:
self.detectAndCompute(frame)
return self.kps
# return descriptors if available otherwise call detectAndCompute()
def compute(self, frame, kps=None, mask=None): # kps is a fake input, mask is a fake input
with self.lock:
if self.frame is not frame:
Printer.orange('WARNING: DELF is recomputing both kps and des on last input frame', frame.shape)
self.detectAndCompute(frame)
return self.kps, self.des
| 12,100 | 40.871972 | 183 | py |
pyslam | pyslam-master/feature_keynet.py | """
* This file is part of PYSLAM
*
* Adpated from https://raw.githubusercontent.com/axelBarroso/Key.Net/master/extract_multiscale_features.py, see the license therein.
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
import config
config.cfg.set_lib('keynet')
import warnings # to disable tensorflow-numpy warnings: from https://github.com/tensorflow/tensorflow/issues/30427
warnings.filterwarnings('ignore', category=FutureWarning)
import os, sys, cv2
#sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from os import path, mkdir
import argparse
import keyNet.aux.tools as aux
from skimage.transform import pyramid_gaussian
import HSequences_bench.tools.geometry_tools as geo_tools
import HSequences_bench.tools.repeatability_tools as rep_tools
from keyNet.model.keynet_architecture import *
import keyNet.aux.desc_aux_function as loss_desc
from keyNet.model.hardnet_pytorch import *
from keyNet.datasets.dataset_utils import read_bw_image
import torch
from threading import RLock
from utils_tf import set_tf_logging
from utils_sys import Printer, print_options
kVerbose = True
def build_keynet_config(keynet_base_path):
    """Build the Key.Net configuration (an argparse.Namespace) rooted at `keynet_base_path`.

    Side effect: sets CUDA_VISIBLE_DEVICES from the parsed options. When
    multi-scale extraction is disabled, the pyramid level counts are zeroed.
    """
    ap = argparse.ArgumentParser(description='HSequences Extract Features')
    add = ap.add_argument
    add('--network-version', type=str, default='KeyNet_default',
        help='The Key.Net network version name')
    add('--checkpoint-det-dir', type=str, default=keynet_base_path + 'keyNet/pretrained_nets/KeyNet_default',
        help='The path to the checkpoint file to load the detector weights.')
    add('--pytorch-hardnet-dir', type=str, default=keynet_base_path + 'keyNet/pretrained_nets/HardNet++.pth',
        help='The path to the checkpoint file to load the HardNet descriptor weights.')
    # -- detector settings
    add('--num-filters', type=int, default=8,
        help='The number of filters in each learnable block.')
    add('--num-learnable-blocks', type=int, default=3,
        help='The number of learnable blocks after handcrafted block.')
    add('--num-levels-within-net', type=int, default=3,
        help='The number of pyramid levels inside the architecture.')
    add('--factor-scaling-pyramid', type=float, default=1.2,
        help='The scale factor between the multi-scale pyramid levels in the architecture.')
    add('--conv-kernel-size', type=int, default=5,
        help='The size of the convolutional filters in each of the learnable blocks.')
    # -- multi-scale extractor settings
    add('--extract-MS', type=bool, default=True,
        help='Set to True if you want to extract multi-scale features.')
    add('--num-points', type=int, default=2000,
        help='The number of desired features to extract.')
    add('--nms-size', type=int, default=15,
        help='The NMS size for computing the validation repeatability.')
    add('--border-size', type=int, default=15,
        help='The number of pixels to remove from the borders to compute the repeatability.')
    add('--order-coord', type=str, default='xysr',
        help='The coordinate order that follows the extracted points. Use yxsr or xysr.')
    add('--random-seed', type=int, default=12345,
        help='The random seed value for TensorFlow and Numpy.')
    add('--pyramid_levels', type=int, default=5,
        help='The number of downsample levels in the pyramid.')
    add('--upsampled-levels', type=int, default=1,
        help='The number of upsample levels in the pyramid.')
    add('--scale-factor-levels', type=float, default=np.sqrt(2),
        help='The scale factor between the pyramid levels.')
    add('--scale-factor', type=float, default=2.,
        help='The scale factor to extract patches before descriptor.')
    # -- GPU settings
    add('--gpu-memory-fraction', type=float, default=0.3,
        help='The fraction of GPU used by the script.')
    add('--gpu-visible-devices', type=str, default="0",
        help='Set CUDA_VISIBLE_DEVICES variable.')
    # parse_known_args tolerates foreign command-line options (e.g. pySLAM's own)
    args = ap.parse_known_args()[0]
    # Set CUDA GPU environment
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_visible_devices
    print('Using KeyNet version:' + args.network_version)
    if not args.extract_MS:
        args.pyramid_levels = 0
        args.upsampled_levels = 0
    return args
# convert matrix of pts into list of keypoints
def convert_pts_to_keypoints(pts, scores, sizes, levels):
    """Convert an [Nx2] matrix of points into a list of cv2.KeyPoint.

    Returns an empty list when pts is None. The length check is performed
    inside the None guard: the original asserted len(pts) before checking for
    None, which raised TypeError instead of returning [].
    """
    kps = []
    if pts is not None:
        assert(len(pts)==len(scores))
        # NOTE(review): the _size/_response/_octave keyword form was removed in
        # OpenCV >= 4.5.3 (use positional args or size=/response=/octave=);
        # confirm the cv2 version pinned by this project.
        kps = [ cv2.KeyPoint(p[0], p[1], _size=sizes[i], _response=scores[i], _octave=levels[i]) for i,p in enumerate(pts) ]
    return kps
# interface for pySLAM
class KeyNetDescFeature2D:
    """Key.Net keypoint detector + HardNet descriptor, wrapped for pySLAM.

    Detection runs in a TensorFlow (v1) session over a gaussian image pyramid;
    description runs in PyTorch (HardNet) on patches cropped by the TF graph.

    Fixes vs. the original version:
      - `self.model = model.cpu()` raised NameError on CPU-only machines;
      - patch batches were moved to CUDA unconditionally, crashing without a GPU;
      - `compute()` was defined twice (the duplicate silently shadowed the first).
    """
    def __init__(self,
                 num_features=2000,
                 num_levels=5,                      # The number of downsample levels in the pyramid.
                 scale_factor=2,                    # The scale factor to extract patches before descriptor.
                 scale_factor_levels=np.sqrt(2),    # The scale factor between the pyramid levels.
                 do_cuda=True,
                 do_tf_logging=False):
        print('Using KeyNetDescFeature2D')
        self.lock = RLock()
        self.model_base_path = config.cfg.root_folder + '/thirdparty/keynet/'
        set_tf_logging(do_tf_logging)
        self.do_cuda = do_cuda & torch.cuda.is_available()
        print('cuda:',self.do_cuda)
        device = torch.device("cuda:0" if self.do_cuda else "cpu")
        self.session = None
        self.keypoint_size = 8  # just a representative size for visualization and in order to convert extracted points to cv2.KeyPoint
        self.pts = []
        self.kps = []
        self.des = []
        self.scales = []
        self.scores = []
        self.frame = None
        # build the argparse-based config and override it with the constructor args
        keynet_config = build_keynet_config(self.model_base_path)
        self.keynet_config = keynet_config
        keynet_config.num_points = num_features
        keynet_config.pyramid_levels = num_levels
        keynet_config.scale_factor = scale_factor
        keynet_config.scale_factor_levels = scale_factor_levels
        print_options(self.keynet_config,'KEYNET CONFIG')
        print('==> Loading pre-trained network.')
        self.load_model()
        print('==> Successfully loaded pre-trained network.')
    @property
    def num_features(self):
        return self.keynet_config.num_points
    @property
    def num_levels(self):
        return self.keynet_config.pyramid_levels
    @property
    def scale_factor(self):
        return self.keynet_config.scale_factor
    def __del__(self):
        self.close()
    def close(self):
        """Release the TF session and reset the default graph."""
        if self.session is not None:
            print('KEYNET: closing tf session')
            self.session.close()
            tf.reset_default_graph()
    def load_model(self):
        """Build the TF detection graph, the TF patch extractor and the torch HardNet."""
        # Create graph before session :)
        # NOTE(review): tf.Graph().as_default() returns a context manager; stored
        # without `with`, it does not switch the default graph — everything below
        # is built in the current default graph. Confirm before changing.
        self.graph = tf.Graph().as_default()
        # GPU Usage
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.per_process_gpu_memory_fraction = self.keynet_config.gpu_memory_fraction
        tf_config.gpu_options.allow_growth = True
        self.session = tf.Session(config=tf_config)
        tf.set_random_seed(self.keynet_config.random_seed)
        with tf.name_scope('inputs'):
            # Define the input tensor shape: [batch, height, width, 1 channel]
            tensor_input_shape = (None, None, None, 1)
            self.input_network = tf.placeholder(dtype=tf.float32, shape=tensor_input_shape, name='input_network')
            self.dimension_image = tf.placeholder(dtype=tf.int32, shape=(3,), name='dimension_image')
            self.kpts_coord = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='kpts_coord')
            self.kpts_batch = tf.placeholder(dtype=tf.int32, shape=(None,), name='kpts_batch')
            self.kpts_scale = tf.placeholder(dtype=tf.float32, name='kpts_scale')
            self.phase_train = tf.placeholder(tf.bool, name='phase_train')
        with tf.name_scope('model_deep_detector'):
            deep_architecture = keynet(self.keynet_config)
            output_network = deep_architecture.model(self.input_network, self.phase_train, self.dimension_image, reuse=False)
            self.maps = tf.nn.relu(output_network['output'])
        # Extract Patches from inputs:
        self.input_patches = loss_desc.build_patch_extraction(self.kpts_coord, self.kpts_batch, self.input_network, kpts_scale=self.kpts_scale)
        # Define Pytorch HardNet
        self.model = HardNet()
        checkpoint = torch.load(self.keynet_config.pytorch_hardnet_dir)
        self.model.load_state_dict(checkpoint['state_dict'])
        if self.do_cuda:
            self.model.cuda()
            print('Extracting torch model on GPU')
        else:
            print('Extracting torch model on CPU')
            self.model = self.model.cpu()  # fix: was `model.cpu()` -> NameError on CPU-only machines
        self.model.eval()
        # Define variables
        detect_var = [v for v in tf.trainable_variables(scope='model_deep_detector')]
        if os.listdir(self.keynet_config.checkpoint_det_dir):
            init_assign_op_det, init_feed_dict_det = tf_contrib.framework.assign_from_checkpoint(
                tf.train.latest_checkpoint(self.keynet_config.checkpoint_det_dir), detect_var)
        # Distribute the requested number of points over the pyramid levels so
        # that each level's quota is proportional to its area and they sum to num_points.
        point_level = []
        tmp = 0.0
        factor_points = (self.keynet_config.scale_factor_levels ** 2)
        self.levels = self.keynet_config.pyramid_levels + self.keynet_config.upsampled_levels + 1
        for idx_level in range(self.levels):
            tmp += factor_points ** (-1 * (idx_level - self.keynet_config.upsampled_levels))
            point_level.append(self.keynet_config.num_points * factor_points ** (-1 * (idx_level - self.keynet_config.upsampled_levels)))
        self.point_level = np.asarray(list(map(lambda x: int(x / tmp), point_level)))
        self.session.run(tf.global_variables_initializer())
        if os.listdir(self.keynet_config.checkpoint_det_dir):
            self.session.run(init_assign_op_det, init_feed_dict_det)
    def extract_keynet_features(self, image):
        """Detect multi-scale keypoints on `image` and describe them with HardNet.

        Returns (im_pts, descriptors, im_pts_levels) where im_pts is [N,4]
        (x, y, scale, score) sorted by descending score, descriptors is [N,128]
        and im_pts_levels holds the pyramid level index of each point.
        """
        # score maps for the downsampled pyramid levels
        pyramid = pyramid_gaussian(image, max_layer=self.keynet_config.pyramid_levels, downscale=self.keynet_config.scale_factor_levels)
        score_maps = {}
        for (j, resized) in enumerate(pyramid):
            im = resized.reshape(1, resized.shape[0], resized.shape[1], 1)
            feed_dict = {
                self.input_network: im,
                self.phase_train: False,
                self.dimension_image: np.array([1, im.shape[1], im.shape[2]], dtype=np.int32),
            }
            im_scores = self.session.run(self.maps, feed_dict=feed_dict)
            im_scores = geo_tools.remove_borders(im_scores, borders=self.keynet_config.border_size)
            score_maps['map_' + str(j + 1 + self.keynet_config.upsampled_levels)] = im_scores[0, :, :, 0]
        # score maps for the upsampled levels (if any)
        if self.keynet_config.upsampled_levels:
            for j in range(self.keynet_config.upsampled_levels):
                factor = self.keynet_config.scale_factor_levels ** (self.keynet_config.upsampled_levels - j)
                up_image = cv2.resize(image, (0, 0), fx=factor, fy=factor)
                im = np.reshape(up_image, (1, up_image.shape[0], up_image.shape[1], 1))
                feed_dict = {
                    self.input_network: im,
                    self.phase_train: False,
                    self.dimension_image: np.array([1, im.shape[1], im.shape[2]], dtype=np.int32),
                }
                im_scores = self.session.run(self.maps, feed_dict=feed_dict)
                im_scores = geo_tools.remove_borders(im_scores, borders=self.keynet_config.border_size)
                score_maps['map_' + str(j + 1)] = im_scores[0, :, :, 0]
        # NMS + point selection per level, mapped back to the original image frame
        im_pts = []
        im_pts_levels = []
        for idx_level in range(self.levels):
            scale_value = (self.keynet_config.scale_factor_levels ** (idx_level - self.keynet_config.upsampled_levels))
            scale_factor = 1. / scale_value
            h_scale = np.asarray([[scale_factor, 0., 0.], [0., scale_factor, 0.], [0., 0., 1.]])
            h_scale_inv = np.linalg.inv(h_scale)
            h_scale_inv = h_scale_inv / h_scale_inv[2, 2]
            num_points_level = self.point_level[idx_level]
            if idx_level > 0:
                # give this level the residual quota not consumed by previous levels
                res_points = int(np.asarray([self.point_level[a] for a in range(0, idx_level + 1)]).sum() - len(im_pts))
                num_points_level = res_points
            im_scores = rep_tools.apply_nms(score_maps['map_' + str(idx_level + 1)], self.keynet_config.nms_size)
            im_pts_tmp = geo_tools.get_point_coordinates(im_scores, num_points=num_points_level, order_coord='xysr')
            im_pts_tmp = geo_tools.apply_homography_to_points(im_pts_tmp, h_scale_inv)
            if not idx_level:
                im_pts = im_pts_tmp
            else:
                im_pts = np.concatenate((im_pts, im_pts_tmp), axis=0)
            im_pts_levels_tmp = np.ones(len(im_pts),dtype=np.int32)*idx_level
            im_pts_levels = np.concatenate((im_pts_levels, im_pts_levels_tmp), axis=0).astype(np.int32)
        if self.keynet_config.order_coord == 'yxsr':
            im_pts = np.asarray(list(map(lambda x: [x[1], x[0], x[2], x[3]], im_pts)))
        sorted_idxs = (-1 * im_pts[:, 3]).argsort() # sort points with their scores
        im_pts = im_pts[sorted_idxs]
        im_pts_levels = im_pts_levels[sorted_idxs]
        im_pts = im_pts[:self.keynet_config.num_points]
        im_pts_levels = im_pts_levels[:self.keynet_config.num_points]
        # Extract descriptors from features, in batches of 250 keypoints
        descriptors = []
        im = image.reshape(1, image.shape[0], image.shape[1], 1)
        for idx_desc_batch in range(int(len(im_pts) / 250 + 1)):
            points_batch = im_pts[idx_desc_batch * 250: (idx_desc_batch + 1) * 250]
            if not len(points_batch):
                break
            feed_dict = {
                self.input_network: im,
                self.phase_train: False,
                self.kpts_coord: points_batch[:, :2],
                self.kpts_scale: self.keynet_config.scale_factor * points_batch[:, 2],
                self.kpts_batch: np.zeros(len(points_batch)),
                self.dimension_image: np.array([1, im.shape[1], im.shape[2]], dtype=np.int32),
            }
            patch_batch = self.session.run(self.input_patches, feed_dict=feed_dict)
            patch_batch = np.reshape(patch_batch, (patch_batch.shape[0], 1, 32, 32))
            data_a = torch.from_numpy(patch_batch)
            if self.do_cuda:
                data_a = data_a.cuda()  # fix: was unconditional -> crash on CPU-only machines
            data_a = Variable(data_a)
            with torch.no_grad():
                out_a = self.model(data_a)
            desc_batch = out_a.data.cpu().numpy().reshape(-1, 128)
            if idx_desc_batch == 0:
                descriptors = desc_batch
            else:
                descriptors = np.concatenate([descriptors, desc_batch], axis=0)
        return im_pts, descriptors, im_pts_levels
    def compute_kps_des(self, im):
        """Normalize `im` to [0,1], run Key.Net+HardNet and return (keypoints, descriptors)."""
        with self.lock:
            im = im.astype(float) / im.max()
            im_pts, descriptors, im_pts_levels = self.extract_keynet_features(im)
            self.pts = im_pts[:,:2]
            scales = im_pts[:,2]
            scores = im_pts[:,3]
            pts_levels = im_pts_levels
            self.kps = convert_pts_to_keypoints(self.pts, scores, scales*self.keypoint_size, pts_levels)
            return self.kps, descriptors
    def detectAndCompute(self, frame, mask=None): #mask is a fake input
        """Detect keypoints and compute descriptors on `frame` (mask is ignored)."""
        with self.lock:
            self.frame = frame
            self.kps, self.des = self.compute_kps_des(frame)
            if kVerbose:
                print('detector: KEYNET, descriptor: KEYNET, #features: ', len(self.kps), ', frame res: ', frame.shape[0:2])
            return self.kps, self.des
    # return keypoints if available otherwise call detectAndCompute()
    def detect(self, frame, mask=None): # mask is a fake input
        with self.lock:
            self.detectAndCompute(frame)
            return self.kps
    # return descriptors if available otherwise call detectAndCompute()
    # (the original file defined this method twice; the duplicate is removed and
    # the warning is kept active, consistent with the sibling feature classes)
    def compute(self, frame, kps=None, mask=None): # kps is a fake input, mask is a fake input
        with self.lock:
            if self.frame is not frame:
                Printer.orange('WARNING: KEYNET is recomputing both kps and des on last input frame', frame.shape)
                self.detectAndCompute(frame)
            return self.kps, self.des
| 19,688 | 42.463576 | 146 | py |
pyslam | pyslam-master/feature_l2net_keras.py | """
* This file is part of PYSLAM
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
import config
config.cfg.set_lib('l2net_keras')
import os
import sys
import time
import cv2
import numpy as np
from L2_Net import L2Net
from utils_sys import Printer
from utils_features import extract_patches_tensor, extract_patches_array, extract_patches_array_cpp
kVerbose = True
# interface for pySLAM
class L2NetKerasFeature2D:
    """L2-Net (Keras port) patch descriptor wrapped for pySLAM (descriptor-only)."""
    def __init__(self, do_tf_logging=False):
        print('Using L2NetKerasFeature2D')
        # One of "L2Net-HP", "L2Net-HP+", "L2Net-LIB", "L2Net-LIB+", "L2Net-ND",
        # "L2Net-ND+", "L2Net-YOS", "L2Net-YOS+"
        self.net_name = 'L2Net-HP+'
        # mag_factor: how many times the original keypoint scale is enlarged to
        # generate a patch from a keypoint
        self.mag_factor = 3
        self.batch_size = 512    # inference batch size
        self.process_all = True  # process all the patches at once
        print('==> Loading pre-trained network.')
        self.l2net = L2Net(self.net_name, do_tf_logging=do_tf_logging)
        print('==> Successfully loaded pre-trained network.')
    def compute(self, frame, kps, mask=None): #mask is a fake input
        """Compute L2-Net descriptors for the given keypoints of `frame`."""
        if not len(kps):
            self.des = []
        else:
            # fast C++ patch extraction (the pure-python extract_patches_array is equivalent)
            patch_list = extract_patches_array_cpp(frame, kps, patch_size=32, mag_factor=self.mag_factor)
            patch_tensor = np.expand_dims(np.asarray(patch_list), -1)
            self.des = self.l2net.calc_descriptors(patch_tensor)
        if kVerbose:
            print('descriptor: L2NET, #features: ', len(kps), ', frame res: ', frame.shape[0:2])
        return kps, self.des
| 2,773 | 32.829268 | 123 | py |
pyslam | pyslam-master/feature_sosnet.py | """
* This file is part of PYSLAM
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
import config
config.cfg.set_lib('sosnet')
import os
import numpy as np
import math
import cv2
import torch
import sosnet_model
from utils_features import extract_patches_tensor, extract_patches_array, extract_patches_array_cpp
kVerbose = True
# interface for pySLAM
class SosnetFeature2D:
    """SOSNet 32x32 patch descriptor wrapped for pySLAM (descriptor-only).

    Fix vs. the original version: the CPU fallback assigned `model.cpu()`
    (an undefined name, NameError) instead of `self.model.cpu()`.
    """
    def __init__(self, do_cuda=True):
        print('Using SosnetFeature2D')
        self.model_base_path = config.cfg.root_folder + '/thirdparty/SOSNet/'
        self.do_cuda = do_cuda & torch.cuda.is_available()
        print('cuda:',self.do_cuda)
        device = torch.device("cuda:0" if self.do_cuda else "cpu")
        torch.set_grad_enabled(False)
        # mag_factor is how many times the original keypoint scale
        # is enlarged to generate a patch from a keypoint
        self.mag_factor = 3
        print('==> Loading pre-trained network.')
        # init SOSNet and load the trained weights
        self.model = sosnet_model.SOSNet32x32()
        self.net_name = 'liberty' # liberty, hpatches_a, notredame, yosemite (see folder /thirdparty/SOSNet/sosnet-weights)
        self.model.load_state_dict(torch.load(os.path.join(self.model_base_path, 'sosnet-weights', "sosnet-32x32-" + self.net_name + ".pth")))
        if self.do_cuda:
            self.model.cuda()
            print('Extracting on GPU')
        else:
            print('Extracting on CPU')
            self.model = self.model.cpu()  # fix: was `model.cpu()` -> NameError on CPU-only machines
        self.model.eval()
        print('==> Successfully loaded pre-trained network.')
    def compute_des(self, patches):
        """Run SOSNet on an [N,32,32] patch array; returns an [N,128] numpy array."""
        patches = torch.from_numpy(patches).float()
        patches = torch.unsqueeze(patches,1)  # [N,32,32] -> [N,1,32,32]
        if self.do_cuda:
            patches = patches.cuda()
        with torch.no_grad():
            descrs = self.model(patches)
        return descrs.detach().cpu().numpy().reshape(-1, 128)
    def compute(self, frame, kps, mask=None): #mask is a fake input
        """Compute SOSNet descriptors for the given keypoints of `frame` (mask ignored)."""
        if len(kps)>0:
            # extract the keypoint patches (fast C++ implementation; the
            # pure-python extract_patches_array is equivalent)
            patches = extract_patches_array_cpp(frame, kps, patch_size=32, mag_factor=self.mag_factor)
            patches = np.asarray(patches)
            # compute descriptors by feeding the full patch tensor to the network
            des = self.compute_des(patches)
        else:
            des = []
        if kVerbose:
            print('descriptor: SOSNET, #features: ', len(kps), ', frame res: ', frame.shape[0:2])
        return kps, des
| 3,919 | 36.692308 | 142 | py |
pyslam | pyslam-master/feature_l2net.py | """
* This file is part of PYSLAM
* Adapted from https://github.com/vcg-uvic/image-matching-benchmark-baselines/blob/master/third_party/l2net_config/l2net_model.py, see licence therein.
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
# adapted from https://github.com/vcg-uvic/image-matching-benchmark-baselines/blob/master/third_party/l2net_config/l2net_model.py
import config
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import time
import os
import cv2
import math
import numpy as np
from utils_features import extract_patches_tensor, extract_patches_array, extract_patches_array_cpp
kVerbose = True
class L2Norm(nn.Module):
    """L2-normalize each row of the input along dim 1 (eps-stabilized)."""
    def __init__(self):
        super(L2Norm, self).__init__()
        self.eps = 1e-10  # keeps the division finite for all-zero rows
    def forward(self, x):
        sq_norm = torch.sum(x * x, dim=1) + self.eps
        scale = torch.sqrt(sq_norm).unsqueeze(-1).expand_as(x)
        return x / scale
class L1Norm(nn.Module):
    """L1-normalize each row of the input along dim 1 (eps-stabilized)."""
    def __init__(self):
        super(L1Norm,self).__init__()
        self.eps = 1e-10  # keeps the division finite for all-zero rows
    def forward(self, x):
        norm = torch.sum(torch.abs(x), dim = 1) + self.eps
        # unsqueeze before expand_as: `norm` has shape [N] while x is [N, D];
        # the original expanded [N] directly, which raises whenever N != D.
        x = x / norm.unsqueeze(-1).expand_as(x)
        return x
class L2Net(nn.Module):
    """L2-Net descriptor network (PyTorch port).

    Maps a batch of [N, 1, 32, 32] patches to [N, 128] descriptors.
    NOTE: the Sequential layout must not change — pretrained checkpoints are
    loaded by state_dict key, which depends on this exact layer ordering.
    """
    def __init__(self):
        super(L2Net, self).__init__()
        self.eps = 1e-10
        # Conv tower: two stride-2 convs reduce 32x32 -> 8x8; the final
        # kernel-8 conv collapses the spatial dims to 1x1 with 128 channels.
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(32, affine=True, eps=self.eps),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(32, affine=True, eps=self.eps),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias=True),
            nn.BatchNorm2d(64, affine=True, eps=self.eps),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(64, affine=True, eps=self.eps),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=True),
            nn.BatchNorm2d(128, affine=True, eps=self.eps),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(128, affine=True, eps=self.eps),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=8, bias=True),
            nn.BatchNorm2d(128, affine=True, eps=self.eps),
        )
        return
    def input_norm(self, x):
        # matlab norm
        # Per-sample standardization computed on the (2,3)-transposed, flattened
        # patch so the statistics match the original Matlab implementation's
        # column-major ordering; std is detached from the autograd graph.
        z = x.contiguous().transpose(2, 3).contiguous().view(x.size(0),-1)
        x_minus_mean = z.transpose(0,1)-z.mean(1)
        sp = torch.std(z,1).detach()
        norm_inp = x_minus_mean/(sp+1e-12)
        norm_inp = norm_inp.transpose(0, 1).view(-1, 1, x.size(2), x.size(3)).transpose(2,3)
        return norm_inp
    def forward(self, input):
        # Standardize the input, run the conv tower, then apply a local response
        # normalization over the 128 channels and flatten to [N, 128].
        norm_img = self.input_norm(input)
        x_features = self.features(norm_img)
        return nn.LocalResponseNorm(256,1*256,0.5,0.5)(x_features).view(input.size(0),-1)
# interface for pySLAM
class L2NetFeature2D:
    """L2-Net (PyTorch) patch descriptor wrapped for pySLAM (descriptor-only).

    Fix vs. the original version: the CPU fallback assigned `model.cpu()`
    (an undefined name, NameError) instead of `self.model.cpu()`.
    """
    def __init__(self, do_cuda=True):
        print('Using L2NetFeature2D')
        self.model_base_path = config.cfg.root_folder + '/thirdparty/l2net/'
        self.model_weights_path = self.model_base_path + 'l2net_ported_weights_lib+.pth'
        self.do_cuda = do_cuda & torch.cuda.is_available()
        print('cuda:',self.do_cuda)
        device = torch.device("cuda:0" if self.do_cuda else "cpu")
        torch.set_grad_enabled(False)
        # mag_factor is how many times the original keypoint scale
        # is enlarged to generate a patch from a keypoint
        self.mag_factor = 1.0
        # inference batch size (used when process_all is False)
        self.batch_size = 512
        self.process_all = True # process all the patches at once
        print('==> Loading pre-trained network.')
        self.model = L2Net()
        self.checkpoint = torch.load(self.model_weights_path)
        self.model.load_state_dict(self.checkpoint)
        if self.do_cuda:
            self.model.cuda()
            print('Extracting on GPU')
        else:
            print('Extracting on CPU')
            self.model = self.model.cpu()  # fix: was `model.cpu()` -> NameError on CPU-only machines
        self.model.eval()
        print('==> Successfully loaded pre-trained network.')
    def compute_des_batches(self, patches):
        """Describe patches in chunks of self.batch_size; returns an [N,128] array."""
        n_batches = int(len(patches) / self.batch_size) + 1
        descriptors_for_net = np.zeros((len(patches), 128))
        for i in range(0, len(patches), self.batch_size):
            data_a = patches[i: i + self.batch_size, :, :, :].astype(np.float32)
            data_a = torch.from_numpy(data_a)
            if self.do_cuda:
                data_a = data_a.cuda()
            data_a = Variable(data_a)
            # compute output
            with torch.no_grad():
                out_a = self.model(data_a)
            descriptors_for_net[i: i + self.batch_size,:] = out_a.data.cpu().numpy().reshape(-1, 128)
        return descriptors_for_net
    def compute_des(self, patches):
        """Describe all patches in one forward pass; returns an [N,128] array."""
        patches = torch.from_numpy(patches).float()
        patches = torch.unsqueeze(patches,1)  # [N,32,32] -> [N,1,32,32]
        if self.do_cuda:
            patches = patches.cuda()
        with torch.no_grad():
            descrs = self.model(patches)
        return descrs.detach().cpu().numpy().reshape(-1, 128)
    def compute(self, img, kps, mask=None): #mask is a fake input
        """Compute L2-Net descriptors for the given keypoints of `img` (mask ignored)."""
        num_kps = len(kps)
        des = []
        if num_kps>0:
            if not self.process_all:
                # compute descriptor batch by batch
                patches = extract_patches_tensor(img, kps, patch_size=32, mag_factor=self.mag_factor)
                des = self.compute_des_batches(patches).astype(np.float32)
            else:
                # compute descriptors by feeding the full patch tensor to the network
                t = time.time()
                # fast C++ patch extraction (the pure-python extract_patches_array is equivalent)
                patches = extract_patches_array_cpp(img, kps, patch_size=32, mag_factor=self.mag_factor)
                patches = np.asarray(patches)
                if kVerbose:
                    print('patches.shape:',patches.shape)
                if kVerbose:
                    print('patch elapsed: ', time.time()-t)
                des = self.compute_des(patches)
        if kVerbose:
            print('descriptor: L2NET, #features: ', len(kps), ', frame res: ', img.shape[0:2])
        return kps, des
pyslam | pyslam-master/feature_logpolar.py | """
* This file is part of PYSLAM
* adapted from https://github.com/cvlab-epfl/log-polar-descriptors/blob/aed70f882cddcfe0c27b65768b9248bf1f2c65cb/example.py, see licence therein.
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
# adapted from https://github.com/cvlab-epfl/log-polar-descriptors/blob/aed70f882cddcfe0c27b65768b9248bf1f2c65cb/example.py
import config
config.cfg.set_lib('logpolar')
import os
import sys
import torch
import torch.nn as nn
from modules.ptn.pytorch.models import Transformer
import cv2
import numpy as np
import h5py
from time import time
from configs.defaults import _C as cfg
#from modules.hardnet.models import HardNet # given some matplotlib backend changes the code is repeated below
from utils_features import extract_patches_tensor, extract_patches_array, extract_patches_array_cpp
kVerbose = True
kVerbose2 = True
class L2Norm(nn.Module):
    """L2-normalize each row of a 2D tensor along dim 1 (eps-stabilized)."""
    def __init__(self):
        super(L2Norm, self).__init__()
        # small constant keeping the division finite for zero vectors
        self.eps = 1e-10
    def forward(self, x):
        denom = torch.sqrt(torch.sum(x * x, dim=1) + self.eps)
        return x / denom.unsqueeze(-1).expand_as(x)
# from modules.hardnet.models
class HardNet(nn.Module):
    """HardNet descriptor network preceded by a patch-sampling Transformer.

    The Transformer layer samples (log-polar or cartesian) patches around the
    given keypoints; the convolutional tower then maps each patch to a 128-D
    vector, which is L2-normalized in forward().
    """

    def __init__(self,
                 transform,
                 coords,
                 patch_size,
                 scale,
                 is_desc256,
                 orientCorrect=True,
                 hard_augm=False):  # <-- added to take care of the possible nonlocal option managed in modules.hardnet.models
        # transform:     'STN' (cartesian) or 'PTN' (log-polar) patch sampler
        # coords:        coordinate convention forwarded to the Transformer
        # patch_size:    resolution of the sampled patches
        # scale:         SIFT-like scale multiplier used when sampling patches
        # is_desc256:    accepted for config compatibility; not used in this class
        # orientCorrect: stored flag; presumably orientation correction is applied upstream — TODO confirm
        # hard_augm:     enable train-time patch augmentation (transpose/flip)
        super(HardNet, self).__init__()
        self.transform = transform
        self.transform_layer = Transformer(transform=transform,
                                           coords=coords,
                                           resolution=patch_size,
                                           SIFTscale=scale)
        self.orientCorrect = orientCorrect
        self.hard_augm = hard_augm

        # model processing patches of size [32 x 32] and giving description vectors of length 2**7
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32, affine=False),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32, affine=False),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(64, affine=False),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(64, affine=False),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(128, affine=False),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(128, affine=False),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Conv2d(128, 128, kernel_size=8, bias=False),
            nn.BatchNorm2d(128, affine=False),
        )
        # initialize weights
        self.features.apply(weights_init)
        return

    def input_norm(self, x):
        """Per-patch whitening: subtract each patch's mean and divide by its std."""
        flat = x.view(x.size(0), -1)
        mp = torch.mean(flat, dim=1)
        sp = torch.std(flat, dim=1) + 1e-7
        # NOTE: the trailing unsqueeze(1) on the std is equivalent to
        # unsqueeze(-1) here (the tensor is already 3-D at that point).
        return (x - mp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / \
            sp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)

    # function to forward-propagate inputs through the network
    def forward(self, img, theta=None, imgIDs=None):
        """Compute descriptors.

        When theta is None, `img` is assumed to already be a batch of patches;
        otherwise patches are sampled from the full image(s) at the keypoints
        encoded in theta (normalized coords, scales, orientations).
        Returns (L2-normalized descriptors, sampled patches).
        """
        if theta is None:  # suppose patches are directly given (as e.g. for external test data)
            patches = img
        else:  # extract keypoints from the whole image
            patches = self.transform_layer([img, theta, imgIDs])
        batchSize = patches.shape[0]

        if self.hard_augm:  # args.hard_augm:
            # Bernoulli(0.5) coin flips decide, per patch, transpose and flip.
            bernoulli = torch.distributions.Bernoulli(torch.tensor([0.5]))

            if self.transform == "STN":
                # transpose to switch dimensions (only if STN)
                transpose = bernoulli.sample(torch.Size([batchSize]))
                patches = torch.cat([
                    torch.transpose(patch, 1, 2) if transpose[pdx] else patch
                    for pdx, patch in enumerate(patches)
                ]).unsqueeze(1)

            # flip the patches' first dimension
            mirrorDim1 = bernoulli.sample(torch.Size([batchSize]))
            patches = torch.cat([
                torch.flip(patch, [1]) if mirrorDim1[pdx] else patch
                for pdx, patch in enumerate(patches)
            ]).unsqueeze(1)

        x_features = self.features(self.input_norm(patches))
        x = x_features.view(x_features.size(0), -1)
        return L2Norm()(x), patches
def weights_init(m):
    """
    Conv2d module weight initialization method.

    Applies orthogonal initialization (gain 0.6) to convolution weights and
    sets the bias, when present, to the constant 0.01. Intended for use with
    ``nn.Module.apply``; modules other than ``nn.Conv2d`` are left untouched.

    :param m: module passed in by ``nn.Module.apply``
    """
    if isinstance(m, nn.Conv2d):
        # nn.init.orthogonal/constant are deprecated aliases of the in-place
        # variants; use the canonical underscore-suffixed names.
        nn.init.orthogonal_(m.weight.data, gain=0.6)
        # Explicit check replaces the original bare try/except that silently
        # swallowed the error raised when the conv layer has no bias.
        if m.bias is not None:
            nn.init.constant_(m.bias.data, 0.01)
    return
# interface for pySLAM
class LogpolarFeature2D:
    """pySLAM interface for the log-polar descriptor (cvlab-epfl/log-polar-descriptors).

    Computes HardNet-style descriptors on log-polar (PTN) or cartesian (STN)
    patches sampled around externally-detected keypoints.
    """
    def __init__(self, use_log_polar=True, do_cuda=True):
        # use_log_polar: select the log-polar (PTN) model, otherwise the cartesian (STN) one
        # do_cuda:       run on GPU when available
        print('Using LogpolarFeature2D')
        self.model_base_path = config.cfg.root_folder + '/thirdparty/logpolar/'
        if use_log_polar:
            config_path = os.path.join(self.model_base_path, 'configs', 'init_one_example_ptn_96.yml')
            if kVerbose:
                print('-- Using log-polar model')
        else:
            config_path = os.path.join(self.model_base_path, 'configs', 'init_one_example_stn_16.yml')
            if kVerbose:
                print('-- Using cartesian model')
        cfg.merge_from_file(config_path)
        self.model_weights_path = self.model_base_path + cfg.TEST.MODEL_WEIGHTS  # N.B.: this must stay here, after cfg.merge_from_file()
        if kVerbose2:
            print('model_weights_path:', self.model_weights_path)

        os.environ["CUDA_VISIBLE_DEVICES"] = str(0)
        torch.cuda.manual_seed_all(cfg.TRAINING.SEED)
        torch.backends.cudnn.deterministic = True

        self.do_cuda = do_cuda & torch.cuda.is_available()
        print('cuda:', self.do_cuda)
        self.device = torch.device("cuda:0" if self.do_cuda else "cpu")
        torch.set_grad_enabled(False)

        print('==> Loading pre-trained network.')
        self.model = HardNet(transform=cfg.TEST.TRANSFORMER,
                             coords=cfg.TEST.COORDS,
                             patch_size=cfg.TEST.IMAGE_SIZE,
                             scale=cfg.TEST.SCALE,
                             is_desc256=cfg.TEST.IS_DESC_256,
                             orientCorrect=cfg.TEST.ORIENT_CORRECTION)
        self.checkpoint = torch.load(self.model_weights_path)
        self.model.load_state_dict(self.checkpoint['state_dict'])
        if self.do_cuda:
            self.model.cuda()
            print('Extracting on GPU')
        else:
            print('Extracting on CPU')
            # BUGFIX: was `self.model = model.cpu()` which raised NameError
            # (`model` was never defined); move the existing model to the CPU.
            self.model = self.model.cpu()
        self.model.eval()
        print('==> Successfully loaded pre-trained network.')

    def compute_des(self, img, kps):
        """Compute descriptors for keypoints `kps` of grayscale image `img`.

        The image is mirror-padded to cfg.TEST.PAD_TO x cfg.TEST.PAD_TO and
        keypoint locations are normalized to [-1, 1] before being fed,
        together with scales and orientations, to the patch transformer.
        Returns an [N x 128] numpy array of descriptors.
        """
        h, w = img.shape
        t = time()
        pts = np.array([kp.pt for kp in kps])
        scales = np.array([kp.size for kp in kps])
        oris = np.array([kp.angle for kp in kps])

        # Mirror-pad the image to avoid boundary effects
        if any([s > cfg.TEST.PAD_TO for s in img.shape[:2]]):
            raise RuntimeError(
                "Image exceeds acceptable size ({}x{}), please downsample".format(cfg.TEST.PAD_TO, cfg.TEST.PAD_TO))

        fillHeight = cfg.TEST.PAD_TO - img.shape[0]
        fillWidth = cfg.TEST.PAD_TO - img.shape[1]
        padLeft = int(np.round(fillWidth / 2))
        padRight = int(fillWidth - padLeft)
        padUp = int(np.round(fillHeight / 2))
        padDown = int(fillHeight - padUp)
        img = np.pad(img,
                     pad_width=((padUp, padDown), (padLeft, padRight)),
                     mode='reflect')

        # Normalize keypoint locations to [-1, 1] in padded-image coordinates
        kp_norm = []
        for i, p in enumerate(pts):
            _p = 2 * np.array([(p[0] + padLeft) / (cfg.TEST.PAD_TO),
                               (p[1] + padUp) / (cfg.TEST.PAD_TO)]) - 1
            kp_norm.append(_p)

        # theta = [normalized xy, scales, orientations in radians]
        theta = [
            torch.from_numpy(np.array(kp_norm)).float().squeeze(),
            torch.from_numpy(scales).float(),
            torch.from_numpy(np.array([np.deg2rad(o) for o in oris])).float()
        ]
        if kVerbose2:
            print('-- Padded image from {}x{} to {}x{} in {} s'.format(
                h, w, img.shape[0], img.shape[1], time()-t))

        # Extract descriptors
        t = time()
        device = self.device
        imgs = torch.from_numpy(img).unsqueeze(0).to(device)
        img_keypoints = [theta[0].to(device), theta[1].to(device), theta[2].to(device)]
        descriptors, patches = self.model({'img': imgs}, img_keypoints, ['img'] * len(img_keypoints[0]))
        if kVerbose2:
            print('-- Computed {} descriptors in {:0.2f} sec.'.format(
                descriptors.shape[0],
                time() - t))
        return descriptors.cpu().detach().numpy()

    def compute(self, img, kps, mask=None):  # mask is a fake input
        """Standard pySLAM descriptor interface: return (kps, des)."""
        num_kps = len(kps)
        des = []
        if num_kps > 0:
            des = self.compute_des(img, kps)
        if kVerbose:
            print('descriptor: LOGPOLAR, #features: ', len(kps), ', frame res: ', img.shape[0:2])
        return kps, des
| 10,571 | 36.094737 | 147 | py |
pyslam | pyslam-master/feature_geodesc.py | """
* This file is part of PYSLAM
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
import config
config.cfg.set_lib('geodesc',prepend=True)
import os
import sys
import time
from threading import Thread
from multiprocessing import Queue
import cv2
import numpy as np
import warnings # to disable tensorflow-numpy warnings: from https://github.com/tensorflow/tensorflow/issues/30427
warnings.filterwarnings('ignore', category=FutureWarning)
if False:
import tensorflow as tf
else:
# from https://stackoverflow.com/questions/56820327/the-name-tf-session-is-deprecated-please-use-tf-compat-v1-session-instead
import tensorflow.compat.v1 as tf
# from https://kobkrit.com/using-allow-growth-memory-option-in-tensorflow-and-keras-dc8c8081bc96 to cope with the following error:
# "[...tensorflow/stream_executor/cuda/cuda_dnn.cc:329] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR"
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
# then you must use the config as follows:
# session = tf.Session(config=tf_config, ...)
from utils_tf import load_frozen_model, set_tf_logging
from utils_features import extract_patches_tensor, extract_patches_array, extract_patches_array_cpp
kVerbose = True
# interface for pySLAM
class GeodescFeature2D:
    """pySLAM interface for the GeoDesc patch descriptor (frozen TF1 graph)."""
    # quantize or not output features: if you set this to True then you have a binary descriptor
    quantize = False

    def __init__(self, do_tf_logging=False):
        print('Using GeodescFeature2D')
        # mag_factor is how many times the original keypoint scale
        # is enlarged to generate a patch from a keypoint
        self.mag_factor = 3

        # inference batch size
        self.batch_size = 512
        self.process_all = True  # process all the patches at once

        self.model_base_path = config.cfg.root_folder + '/thirdparty/geodesc/'
        self.model_path = self.model_base_path + 'model/geodesc.pb'

        set_tf_logging(do_tf_logging)

        print('==> Loading pre-trained network.')
        # create deep feature extractor.
        self.graph = load_frozen_model(self.model_path, print_nodes=False)
        #sess = tf.Session(graph=graph)
        print('==> Successfully loaded pre-trained network.')

    def process_patches(self, patches):
        """Run the frozen GeoDesc graph on an [N,32,32] patch array; return [N,128] descriptors.

        Patches are pushed through a queue to a worker thread that feeds the
        TF session; a None entry acts as poison pill to stop the worker.
        """
        num_patch = patches.shape[0]
        # number of full batches (used only when process_all is False)
        if num_patch % self.batch_size > 0:
            loop_num = int(np.floor(float(num_patch) / float(self.batch_size)))
        else:
            loop_num = int(num_patch / self.batch_size - 1)
        with tf.Session(graph=self.graph, config=tf_config) as sess:
            def _worker(patch_queue, sess, des):
                """The worker thread."""
                while True:
                    patch_data = patch_queue.get()
                    if patch_data is None:
                        return
                    # add the trailing channel dimension expected by the graph input
                    feat = sess.run("squeeze_1:0", feed_dict={"input:0": np.expand_dims(patch_data, -1)})
                    des.append(feat)
            des = []
            patch_queue = Queue()
            worker_thread = Thread(target=_worker, args=(patch_queue, sess, des))
            worker_thread.daemon = True
            worker_thread.start()

            start = time.time()
            # enqueue
            if not self.process_all:
                # feed the patches batch by batch
                for i in range(loop_num + 1):
                    if i < loop_num:
                        patch_queue.put(patches[i * self.batch_size: (i + 1) * self.batch_size])
                    else:
                        patch_queue.put(patches[i * self.batch_size:])
            else:
                patch_queue.put(patches)
            # poison pill
            patch_queue.put(None)
            # wait for extraction.
            worker_thread.join()
            end = time.time()
            if kVerbose:
                print('Time cost in feature extraction', end - start)
        des = np.concatenate(des, axis=0)
        # quantize output features
        des = (des * 128 + 128).astype(np.uint8) if self.quantize else des
        return des

    def compute(self, frame, kps, mask=None):  # mask is a fake input
        """Standard pySLAM descriptor interface: return (kps, des)."""
        #print('kps: ', kps)
        if len(kps) > 0:
            if False:
                # use python code
                patches = extract_patches_array(frame, kps, patch_size=32, mag_factor=self.mag_factor)
            else:
                # use faster cpp code
                patches = extract_patches_array_cpp(frame, kps, patch_size=32, mag_factor=self.mag_factor)
            patches = np.asarray(patches)
            des = self.process_patches(patches)
        else:
            des = []
        if kVerbose:
            print('descriptor: GEODESC, #features: ', len(kps), ', frame res: ', frame.shape[0:2])
        return kps, des
| 5,637 | 36.337748 | 130 | py |
pyslam | pyslam-master/feature_r2d2.py | """
* This file is part of PYSLAM.
* Adapted from https://raw.githubusercontent.com/naver/r2d2/master/extract.py, see the licence therein.
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
# adapted from from https://raw.githubusercontent.com/naver/r2d2/master/extract.py
import config
config.cfg.set_lib('r2d2')
import os, pdb
from PIL import Image
import numpy as np
import torch
import cv2
from threading import RLock
from r2d2.tools import common
from r2d2.tools.dataloader import norm_RGB
from r2d2.nets.patchnet import *
import argparse
from utils_sys import Printer
kVerbose = True
def load_network(model_fn):
    """Load a serialized R2D2 checkpoint from `model_fn` and return the net in eval mode."""
    checkpoint = torch.load(model_fn)
    print("\n>> Creating net = " + checkpoint['net'])
    # NOTE(review): the checkpoint stores the model constructor call as a
    # string which is instantiated via eval(); only load trusted checkpoints.
    net = eval(checkpoint['net'])
    nb_of_weights = common.model_size(net)
    print(f" ( Model size: {nb_of_weights/1000:.0f}K parameters )")

    # initialization
    weights = checkpoint['state_dict']
    # strip the DataParallel 'module.' prefix from parameter names, if present
    net.load_state_dict({k.replace('module.',''):v for k,v in weights.items()})
    return net.eval()
class NonMaxSuppression (torch.nn.Module):
    """Select keypoints as 3x3 local maxima of the repeatability map that also
    pass the reliability and repeatability thresholds."""

    def __init__(self, rel_thr=0.7, rep_thr=0.7):
        nn.Module.__init__(self)
        # 3x3 max filter (stride 1, padded) used to detect local maxima
        self.max_filter = torch.nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.rel_thr = rel_thr
        self.rep_thr = rep_thr

    def forward(self, reliability, repeatability, **kw):
        assert len(reliability) == len(repeatability) == 1
        rel_map = reliability[0]
        rep_map = repeatability[0]

        # a pixel survives when it equals the max of its 3x3 neighborhood
        # and both score maps clear their thresholds
        is_peak = rep_map == self.max_filter(rep_map)
        keep = is_peak & (rep_map >= self.rep_thr) & (rel_map >= self.rel_thr)

        # return the (y, x) index rows of the surviving pixels
        return keep.nonzero().t()[2:4]
def extract_multiscale( net, img, detector, scale_f=2**0.25,
                        min_scale=0.0, max_scale=1,
                        min_size=256, max_size=1024,
                        verbose=False):
    """Run the R2D2 net over an image pyramid and accumulate keypoints at all scales.

    img: [1, 3, H, W] tensor. The image is repeatedly downscaled by `scale_f`;
    at each admissible scale the detector (NMS) selects keypoints whose
    coordinates are mapped back to the original resolution.
    Returns (XYS [N,3] = x, y, size; descriptors [N,D]; scores [N];
    per-keypoint pyramid level array L [N]).
    """
    old_bm = torch.backends.cudnn.benchmark
    torch.backends.cudnn.benchmark = False # speedup

    # extract keypoints at multiple scales
    B, three, H, W = img.shape
    assert B == 1 and three == 3, "should be a batch with a single RGB image"

    assert max_scale <= 1
    s = 1.0 # current scale factor

    level = 0
    L = []  # pyramid level of each accumulated keypoint
    X,Y,S,C,Q,D = [],[],[],[],[],[]
    while s+0.001 >= max(min_scale, min_size / max(H,W)):
        if s-0.001 <= min(max_scale, max_size / max(H,W)):
            nh, nw = img.shape[2:]
            if verbose: print(f"extracting at scale x{s:.02f} = {nw:4d}x{nh:3d} - level {level}")
            # extract descriptors
            with torch.no_grad():
                res = net(imgs=[img])

            # get output and reliability map
            descriptors = res['descriptors'][0]
            reliability = res['reliability'][0]
            repeatability = res['repeatability'][0]

            # normalize the reliability for nms
            # extract maxima and descs
            y,x = detector(**res) # nms
            c = reliability[0,0,y,x]
            q = repeatability[0,0,y,x]
            d = descriptors[0,:,y,x].t()
            n = d.shape[0]

            # accumulate multiple scales (coordinates mapped back to original res)
            X.append(x.float() * W/nw)
            Y.append(y.float() * H/nh)
            S.append((32/s) * torch.ones(n, dtype=torch.float32, device=d.device))  # keypoint size grows as scale shrinks
            C.append(c)
            Q.append(q)
            D.append(d)
            L_tmp =level * np.ones(n,dtype=np.int32)
            L = np.concatenate((L, L_tmp), axis=0).astype(np.int32)
            level += 1
        s /= scale_f

        # down-scale the image for next iteration
        nh, nw = round(H*s), round(W*s)
        img = F.interpolate(img, (nh,nw), mode='bilinear', align_corners=False)

    # restore value
    torch.backends.cudnn.benchmark = old_bm

    Y = torch.cat(Y)
    X = torch.cat(X)
    S = torch.cat(S) # scale
    scores = torch.cat(C) * torch.cat(Q) # scores = reliability * repeatability
    XYS = torch.stack([X,Y,S], dim=-1)
    D = torch.cat(D)
    return XYS, D, scores, L
# convert matrix of pts into list of keypoints
def convert_pts_to_keypoints(pts, scores, sizes, levels):
    """Convert an [N,2] matrix of points into a list of cv2.KeyPoint.

    :param pts:    [N,2] array of (x, y) coordinates
    :param scores: per-point response values
    :param sizes:  per-point keypoint sizes
    :param levels: per-point pyramid levels (stored in the octave field)
    """
    assert(len(pts)==len(scores))
    kps = []
    if pts is not None:
        # The `_size`/`_response`/`_octave` keyword names were removed in
        # OpenCV 4.5.3 (renamed size/response/octave). Positional arguments
        # (x, y, size, angle, response, octave) work on every OpenCV version;
        # angle=-1 matches the constructor's default.
        kps = [cv2.KeyPoint(float(p[0]), float(p[1]), float(sizes[i]), -1,
                            float(scores[i]), int(levels[i]))
               for i, p in enumerate(pts)]
    return kps
# TODO: fix the octave field of the output keypoints
# interface for pySLAM
class R2d2Feature2D:
    """pySLAM interface for R2D2: joint multiscale keypoint detector + descriptor."""
    def __init__(self,
                 num_features = 2000,
                 scale_f = 2**0.25,
                 min_size = 256,
                 max_size = 1300, #1024,
                 min_scale = 0,
                 max_scale = 1,
                 reliability_thr = 0.7,
                 repeatability_thr = 0.7,
                 do_cuda=True):
        # num_features: keep at most this many best-scoring keypoints
        # scale_f:      downscaling factor between pyramid levels
        # min/max_size, min/max_scale: pyramid limits (see extract_multiscale)
        # reliability_thr, repeatability_thr: NMS acceptance thresholds
        print('Using R2d2Feature2D')
        self.lock = RLock()
        self.model_base_path = config.cfg.root_folder + '/thirdparty/r2d2'
        self.model_weights_path = self.model_base_path + '/models/r2d2_WASF_N16.pt'
        #print('model_weights_path:',self.model_weights_path)
        self.pts = []
        self.kps = []
        self.des = []
        self.frame = None
        self.num_features = num_features
        self.scale_f = scale_f
        self.min_size = min_size
        self.max_size = max_size
        self.min_scale = min_scale
        self.max_scale = max_scale
        self.reliability_thr = reliability_thr
        self.repeatability_thr = repeatability_thr
        self.do_cuda = do_cuda
        if do_cuda:
            gpus = [0]
        else:
            gpus = -1   # torch_set_gpu convention for "no GPU"
        self.gpus = gpus
        self.do_cuda = common.torch_set_gpu(gpus)
        print('==> Loading pre-trained network.')
        self.net = load_network(self.model_weights_path)
        if self.do_cuda: self.net = self.net.cuda()
        # create the non-maxima detector
        self.detector = NonMaxSuppression(rel_thr=reliability_thr, rep_thr=repeatability_thr)
        print('==> Successfully loaded pre-trained network.')

    def compute_kps_des(self,img):
        """Extract multiscale keypoints/descriptors and keep the best num_features."""
        with self.lock:
            H, W = img.shape[:2]
            img = norm_RGB(img)[None]
            if self.do_cuda: img = img.cuda()
            # extract keypoints/descriptors for a single image
            xys, desc, scores, levels = extract_multiscale(self.net, img, self.detector,
                                                           scale_f = self.scale_f,
                                                           min_scale = self.min_scale,
                                                           max_scale = self.max_scale,
                                                           min_size = self.min_size,
                                                           max_size = self.max_size,
                                                           verbose = kVerbose)
            xys = xys.cpu().numpy()
            desc = desc.cpu().numpy()
            scores = scores.cpu().numpy()
            # indices of the num_features highest scores; when num_features is 0,
            # `-0 or None` evaluates to None and all keypoints are kept
            idxs = scores.argsort()[-self.num_features or None:]
            selected_xys = xys[idxs]
            self.pts = selected_xys[:,:2]
            sizes = selected_xys[:,2]
            des = desc[idxs]
            scores = scores[idxs]
            levels = np.array(levels)[idxs]
            kps = convert_pts_to_keypoints(self.pts, scores, sizes, levels)
            return kps, des

    def detectAndCompute(self, frame, mask=None): #mask is a fake input
        """Detect keypoints and compute their descriptors; returns (kps, des)."""
        with self.lock:
            self.frame = frame
            self.kps, self.des = self.compute_kps_des(frame)
            if kVerbose:
                print('detector: R2D2 , descriptor: R2D2 , #features: ', len(self.kps), ', frame res: ', frame.shape[0:2])
            return self.kps, self.des

    # return keypoints if available otherwise call detectAndCompute()
    def detect(self, frame, mask=None): # mask is a fake input
        with self.lock:
            if self.frame is not frame:
                self.detectAndCompute(frame)
            return self.kps

    # return descriptors if available otherwise call detectAndCompute()
    def compute(self, frame, kps=None, mask=None): # kps is a fake input, mask is a fake input
        with self.lock:
            if self.frame is not frame:
                Printer.orange('WARNING: R2D2 is recomputing both kps and des on last input frame', frame.shape)
                self.detectAndCompute(frame)
            return self.kps, self.des
| 9,607 | 35.393939 | 146 | py |
pyslam | pyslam-master/feature_tfeat.py | """
* This file is part of PYSLAM
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
import config
config.cfg.set_lib('tfeat')
import os
import numpy as np
import math
import cv2
import time
import torchvision as tv
import torch
import tfeat_model
import tfeat_utils
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from utils_features import extract_patches_array, extract_patches_array_cpp
kVerbose = True
# interface for pySLAM
class TfeatFeature2D:
    """pySLAM interface for the TFeat patch descriptor network."""
    def __init__(self, do_cuda=True):
        # do_cuda: run on GPU when available
        print('Using TfeatFeature2D')
        self.model_base_path = config.cfg.root_folder + '/thirdparty/tfeat/'

        self.do_cuda = do_cuda & torch.cuda.is_available()
        print('cuda:', self.do_cuda)
        torch.set_grad_enabled(False)

        # mag_factor is how many times the original keypoint scale
        # is enlarged to generate a patch from a keypoint
        self.mag_factor = 3

        print('==> Loading pre-trained network.')
        # init tfeat and load the trained weights
        self.model = tfeat_model.TNet()
        self.models_path = self.model_base_path + 'pretrained-models'
        self.net_name = 'tfeat-liberty'
        self.model.load_state_dict(torch.load(os.path.join(self.models_path, self.net_name + ".params")))
        if self.do_cuda:
            self.model.cuda()
            print('Extracting on GPU')
        else:
            print('Extracting on CPU')
            # BUGFIX: was `self.model = model.cpu()` which raised NameError
            # (`model` was never defined); move the existing model to the CPU.
            self.model = self.model.cpu()
        self.model.eval()
        print('==> Successfully loaded pre-trained network.')

    def compute_des(self, patches):
        """Run the network on an [N,32,32] patch array; return [N,128] descriptors."""
        patches = torch.from_numpy(patches).float()
        patches = torch.unsqueeze(patches, 1)   # add the channel dimension
        if self.do_cuda:
            patches = patches.cuda()
        with torch.no_grad():
            descrs = self.model(patches)
        return descrs.detach().cpu().numpy().reshape(-1, 128)

    def compute(self, frame, kps, mask=None):  # mask is a fake input
        """Standard pySLAM descriptor interface: return (kps, des)."""
        #print('kps: ', kps)
        if len(kps) > 0:
            #des = tfeat_utils.describe_opencv(self.model, frame, kps, 32, self.mag_factor)
            # extract the keypoint patches
            if False:
                # use python code
                patches = extract_patches_array(frame, kps, patch_size=32, mag_factor=self.mag_factor)
            else:
                # use faster cpp code
                patches = extract_patches_array_cpp(frame, kps, patch_size=32, mag_factor=self.mag_factor)
            patches = np.asarray(patches)
            # compute descriptors by feeding the full patch tensor to the network
            des = self.compute_des(patches)
        else:
            des = []
        if kVerbose:
            print('descriptor: TFEAT, #features: ', len(kps), ', frame res: ', frame.shape[0:2])
        return kps, des
| 3,989 | 34.625 | 114 | py |
pyslam | pyslam-master/feature_disk.py | """
* This file is part of PYSLAM
* Adapted from https://github.com/cvlab-epfl/disk/blob/master/detect.py, see licence therein.
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
# adapted from https://github.com/cvlab-epfl/disk/blob/master/detect.py
import sys
import config
config.cfg.set_lib('disk')
config.cfg.set_lib('torch-dimcheck')
config.cfg.set_lib('torch-localize')
config.cfg.set_lib('unets')
import cv2
from threading import RLock
from utils_sys import Printer
import torch, h5py, imageio, os, argparse
import numpy as np
import torch.nn.functional as F
from functools import partial
from torch.utils.data import DataLoader
from tqdm import tqdm
from torch_dimcheck import dimchecked
from disk import DISK, Features
from utils_sys import Printer, is_opencv_version_greater_equal
kVerbose = True
class Image:
    """A CHW image tensor plus bookkeeping to map between resized and original coordinates."""
    def __init__(self, bitmap: ['C', 'H', 'W'], fname: str, orig_shape=None):
        self.bitmap = bitmap   # CHW tensor
        self.fname = fname     # source file name (may be empty)
        if orig_shape is None:
            self.orig_shape = self.bitmap.shape[1:]   # (H, W) before any resize
        else:
            self.orig_shape = orig_shape

    def resize_to(self, shape):
        """Return a copy aspect-preserving-resized then padded to `shape`."""
        return Image(
            self._pad(self._interpolate(self.bitmap, shape), shape),
            self.fname,
            orig_shape=self.bitmap.shape[1:],
        )

    @dimchecked
    def to_image_coord(self, xys: [2, 'N']) -> ([2, 'N'], ['N']):
        """Map keypoints from resized coordinates back to original-image coordinates.

        Returns (scaled coords, boolean mask of points inside original bounds).
        """
        f, _size = self._compute_interpolation_size(self.bitmap.shape[1:])
        scaled = xys / f

        h, w = self.orig_shape
        x, y = scaled

        mask = (0 <= x) & (x < w) & (0 <= y) & (y < h)

        return scaled, mask

    def _compute_interpolation_size(self, shape):
        """Return (scale factor f, new (h, w)) for an aspect-preserving fit into `shape`."""
        x_factor = self.orig_shape[0] / shape[0]
        y_factor = self.orig_shape[1] / shape[1]

        # downscale by the more constraining dimension
        f = 1 / max(x_factor, y_factor)

        if x_factor > y_factor:
            new_size = (shape[0], int(f * self.orig_shape[1]))
        else:
            new_size = (int(f * self.orig_shape[0]), shape[1])

        return f, new_size

    @dimchecked
    def _interpolate(self, image: ['C', 'H', 'W'], shape) -> ['C', 'h', 'w']:
        """Bilinearly resize `image` to fit within `shape` (aspect preserved)."""
        _f, size = self._compute_interpolation_size(shape)
        return F.interpolate(
            image.unsqueeze(0),
            size=size,
            mode='bilinear',
            align_corners=False,
        ).squeeze(0)

    @dimchecked
    def _pad(self, image: ['C', 'H', 'W'], shape) -> ['C', 'h', 'w']:
        """Zero-pad `image` on the bottom/right up to `shape`."""
        x_pad = shape[0] - image.shape[1]
        y_pad = shape[1] - image.shape[2]

        if x_pad < 0 or y_pad < 0:
            raise ValueError("Attempting to pad by negative value")

        return F.pad(image, (0, y_pad, 0, x_pad))
class ImageAdapter:
    """Wrap a raw numpy image so it can be fed to DISK as a batched CHW tensor."""

    def __init__(self, image, crop_size=(None, None)):
        self.image = image
        self.crop_size = crop_size

    def get(self):
        """Return the wrapped image as a normalized 3-channel `Image`."""
        array = np.ascontiguousarray(self.image)
        tensor = torch.from_numpy(array).to(torch.float32)
        if len(tensor.shape) == 2:
            # grayscale input: replicate the single channel three times
            tensor = tensor.unsqueeze(-1).expand(-1, -1, 3)
        # HWC [0, 255] -> CHW [0, 1]
        wrapped = Image(tensor.permute(2, 0, 1) / 255., '')
        if self.crop_size != (None, None):
            wrapped = wrapped.resize_to(self.crop_size)
        return wrapped

    def stack(self):
        """Return (bitmaps, images): a [1, C, H, W] batch tensor and the Image list."""
        images = [self.get()]
        bitmaps = torch.stack([im.bitmap for im in images], dim=0)
        return bitmaps, images
# convert matrix of pts into list of keypoints
def convert_pts_to_keypoints(pts, scores, size):
    """Turn an [N,2] matrix of points into a list of cv2.KeyPoint of given size."""
    assert(len(pts)==len(scores))
    keypoints = []
    if pts is not None:
        # OpenCV renamed the KeyPoint keyword args in 4.5.3 (_size -> size, ...)
        use_new_api = is_opencv_version_greater_equal(4, 5, 3)
        for p, s in zip(pts, scores):
            if use_new_api:
                kp = cv2.KeyPoint(p[0], p[1], size=size, response=s, octave=0)
            else:
                kp = cv2.KeyPoint(p[0], p[1], _size=size, _response=s, _octave=0)
            keypoints.append(kp)
    return keypoints
# convert matrix of pts into list of keypoints
def convert_pts_to_keypoints_with_translation(pts, scores, size, deltax, deltay):
    """Like convert_pts_to_keypoints, but shift each point by (deltax, deltay)."""
    assert(len(pts)==len(scores))
    keypoints = []
    if pts is not None:
        # OpenCV renamed the KeyPoint keyword args in 4.5.3 (_size -> size, ...)
        use_new_api = is_opencv_version_greater_equal(4, 5, 3)
        for p, s in zip(pts, scores):
            if use_new_api:
                kp = cv2.KeyPoint(p[0] + deltax, p[1] + deltay, size=size, response=s, octave=0)
            else:
                kp = cv2.KeyPoint(p[0] + deltax, p[1] + deltay, _size=size, _response=s, _octave=0)
            keypoints.append(kp)
    return keypoints
# interface for pySLAM
# NOTE: from Fig. 3 in the paper "DISK: Learning local features with policy gradient"
# "Our approach can match many more points and produce more accurate poses. It can deal with large changes in scale (4th and 5th columns) but not in rotation..."
class DiskFeature2D:
    """pySLAM interface for DISK: joint keypoint detector + 128-D descriptor.

    NOTE: from Fig. 3 in the paper "DISK: Learning local features with policy
    gradient": the approach copes with large scale changes but not with large
    rotations.
    """
    def __init__(self,
                 num_features=2000,
                 nms_window_size=5, # NMS windows size
                 desc_dim=128,      # descriptor dimension. Needs to match the checkpoint value
                 mode = 'nms',      # choices=['nms', 'rng'], non-maxima-suppression vs training-time grid sampling
                 do_cuda=True):
        print('Using DiskFeature2D')
        self.lock = RLock()
        self.num_features = num_features
        self.nms_window_size = nms_window_size
        self.desc_dim = desc_dim
        self.mode = mode
        self.model_base_path = config.cfg.root_folder + '/thirdparty/disk/depth-save.pth'

        self.do_cuda = do_cuda & torch.cuda.is_available()
        print('cuda:', self.do_cuda)
        self.DEV = torch.device('cuda' if self.do_cuda else 'cpu')
        self.CPU = torch.device('cpu')

        self.state_dict = torch.load(self.model_base_path, map_location='cpu')
        # compatibility with older model saves which used the 'extractor' name
        if 'extractor' in self.state_dict:
            weights = self.state_dict['extractor']
        elif 'disk' in self.state_dict:
            weights = self.state_dict['disk']
        else:
            raise KeyError('Incompatible weight file!')
        self.disk = DISK(window=8, desc_dim=desc_dim)
        print('==> Loading pre-trained network.')
        self.disk.load_state_dict(weights)
        self.model = self.disk.to(self.DEV)
        print('==> Successfully loaded pre-trained network.')

        # representative size used to build cv2.KeyPoint from extracted points
        self.keypoint_size = 8
        self.pts = []
        self.kps = []
        self.des = []
        self.scales = []
        self.scores = []
        self.frame = None
        # DISK's U-Net requires input sides that are multiples of 16; when the
        # input is not, a centered crop is used and keypoints are translated back.
        self.use_crop = False
        self.cropx = [0, 0]  # [startx, endx]
        self.cropy = [0, 0]  # [starty, endy]

    def crop_center(self, img, cropx, cropy):
        """Return the centered (cropy x cropx) crop of a 2-D image."""
        y, x = img.shape
        startx = x//2 - (cropx//2)
        starty = y//2 - (cropy//2)
        return img[starty:starty+cropy, startx:startx+cropx]

    def extract(self, image):
        """Run DISK on an HWC image.

        Returns (keypoints [N,2], descriptors [N,desc_dim], scores [N]),
        sorted by decreasing score. Keypoints are in cropped-image coordinates
        when a centered crop was applied (see self.use_crop).
        """
        if self.mode == 'nms':
            extract = partial(
                self.model.features,
                kind='nms',
                window_size=self.nms_window_size,
                cutoff=0.,
                n=self.num_features
            )
        else:
            # BUGFIX: was `partial(model.features, ...)` which raised NameError
            extract = partial(self.model.features, kind='rng')

        # crop the image to the largest centered sub-image whose sides are
        # multiples of 16 (required by the internal U-Net: 4 downsamplings x2)
        self.use_crop = False
        height, width, channels = image.shape
        cropx = width % 16
        cropy = height % 16
        if cropx != 0 or cropy != 0:
            self.use_crop = True
            half_cropx = cropx // 2
            rest_cropx = cropx % 2
            half_cropy = cropy // 2
            rest_cropy = cropy % 2
            self.cropx = [half_cropx, width - (half_cropx + rest_cropx)]
            self.cropy = [half_cropy, height - (half_cropy + rest_cropy)]
            cropped_image = image[self.cropy[0]:self.cropy[1], self.cropx[0]:self.cropx[1], :]
            image_adapter = ImageAdapter(cropped_image)
        else:
            image_adapter = ImageAdapter(image)
        bitmaps, images = image_adapter.stack()
        bitmaps = bitmaps.to(self.DEV, non_blocking=True)

        with torch.no_grad():
            try:
                batched_features = extract(bitmaps)
            except RuntimeError as e:
                if 'U-Net failed' in str(e):
                    msg = ('Please use input size which is multiple of 16 (or '
                           'adjust the --height and --width flags to let this '
                           'script rescale it automatically). This is because '
                           'we internally use a U-Net with 4 downsampling '
                           'steps, each by a factor of 2, therefore 2^4=16.')
                    raise RuntimeError(msg) from e
                else:
                    raise

        for features, image in zip(batched_features.flat, images):
            features = features.to(self.CPU)
            kps_crop_space = features.kp.T
            kps_img_space, mask = image.to_image_coord(kps_crop_space)
            keypoints = kps_img_space.numpy().T[mask]
            descriptors = features.desc.numpy()[mask]
            scores = features.kp_logp.numpy()[mask]
            # sort by decreasing score
            order = np.argsort(scores)[::-1]
            keypoints = keypoints[order]
            descriptors = descriptors[order]
            scores = scores[order]
            assert descriptors.shape[1] == self.desc_dim
            assert keypoints.shape[1] == 2
        return keypoints, descriptors, scores

    def compute_kps_des(self, im):
        """Extract keypoints/descriptors and convert points to cv2.KeyPoint."""
        with self.lock:
            keypoints, descriptors, scores = self.extract(im)
            if self.use_crop:
                # BUGFIX: the two branches were swapped — keypoints computed in
                # the cropped image must be translated back into full-image coords.
                self.kps = convert_pts_to_keypoints_with_translation(keypoints, scores, self.keypoint_size,
                                                                     self.cropx[0], self.cropy[0])
            else:
                self.kps = convert_pts_to_keypoints(keypoints, scores, self.keypoint_size)
            return self.kps, descriptors

    def detectAndCompute(self, frame, mask=None):  # mask is a fake input
        """Detect keypoints and compute their descriptors; returns (kps, des)."""
        with self.lock:
            self.frame = frame
            self.kps, self.des = self.compute_kps_des(frame)
            if kVerbose:
                print('detector: DISK, descriptor: DISK, #features: ', len(self.kps), ', frame res: ', frame.shape[0:2])
            return self.kps, self.des

    # return keypoints if available otherwise call detectAndCompute()
    def detect(self, frame, mask=None):  # mask is a fake input
        with self.lock:
            self.detectAndCompute(frame)
            return self.kps

    # return descriptors if available otherwise call detectAndCompute()
    def compute(self, frame, kps=None, mask=None):  # kps is a fake input, mask is a fake input
        # BUGFIX: this method was accidentally defined twice, with the second
        # definition silently shadowing the first; the duplicate was removed
        # and the warning of the first definition kept.
        with self.lock:
            if self.frame is not frame:
                Printer.orange('WARNING: DISK is recomputing both kps and des on last input frame', frame.shape)
                self.detectAndCompute(frame)
            return self.kps, self.des
| 12,929 | 37.254438 | 181 | py |
pyslam | pyslam-master/thirdparty/l2net_keras/src/LRN.py | from keras import backend as K
from keras.layers.core import Layer
# from https://github.com/ckoren1975/Machine-learning/blob/master/googlenet_custom_layers.py
# except channels have been moved from the 2nd position to the 4th position
# and shape of input vector is now a tensor operation
# and default args are set to L2-net params
class LRN(Layer):
    """Local Response Normalization across channels (channels-last layout).

    Each activation is divided by a power of the local sum of squares taken
    over a window of `n` neighboring channels:
        x / (k + (alpha / n) * sum_j(x_j ** 2)) ** beta
    Default arguments are the L2-Net settings.
    """
    def __init__(self, alpha=256,k=0,beta=0.5,n=256, **kwargs):
        super(LRN, self).__init__(**kwargs)
        self.alpha = alpha
        self.k = k
        self.beta = beta
        self.n = n
    def call(self, x, mask=None):
        # dynamic input shape: (batch, rows, cols, channels)
        s = K.shape(x)
        b = s[0]
        r = s[1]
        c = s[2]
        ch = s[3]
        half_n = self.n // 2 # half the local region
        input_sqr = K.square(x) # square the input
        # zero-pad the channel axis by half_n on each side so the sliding
        # window below never indexes out of bounds
        extra_channels = K.zeros((b, r, c, ch + 2 * half_n))
        input_sqr = K.concatenate([extra_channels[:, :, :, :half_n],input_sqr, extra_channels[:, :, :, half_n + ch:]], axis = 3)
        scale = self.k # offset for the scale
        norm_alpha = self.alpha / self.n # normalized alpha
        # accumulate the local sum of squares over the n-channel window
        for i in range(self.n):
            scale += norm_alpha * input_sqr[:, :, :, i:i+ch]
        scale = scale ** self.beta
        x = x / scale
        return x
    def get_config(self):
        # expose constructor args so the layer can be (de)serialized by Keras
        config = {"alpha": self.alpha,
                  "k": self.k,
                  "beta": self.beta,
                  "n": self.n}
        base_config = super(LRN, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| 1,616 | 31.34 | 132 | py |
pyslam | pyslam-master/thirdparty/l2net_keras/src/L2_Net.py | import os
import warnings # to disable tensorflow-numpy warnings: from https://github.com/tensorflow/tensorflow/issues/30427
warnings.filterwarnings('ignore', category=FutureWarning)
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Conv2D, BatchNormalization, ZeroPadding2D, Lambda
# from https://kobkrit.com/using-allow-growth-memory-option-in-tensorflow-and-keras-dc8c8081bc96
from keras.callbacks import ModelCheckpoint
from keras.models import Model, load_model, save_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D
from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape
from keras.optimizers import Adam
from keras.backend.tensorflow_backend import set_session
if False:
import tensorflow as tf
else:
# from https://stackoverflow.com/questions/56820327/the-name-tf-session-is-deprecated-please-use-tf-compat-v1-session-instead
import tensorflow.compat.v1 as tf
from utils_tf import set_tf_logging
import pickle
import numpy as np
from LRN import LRN
import cv2
# get the location of this file!
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def build_cnn(weights):
    """Build the L2-Net descriptor CNN and load the given weights.

    Architecture: six zero-padded 3x3 conv stages (32, 32, 64/s2, 64, 128/s2,
    128 filters), each followed by BatchNorm and ReLU, then a final 8x8 conv
    to 128 channels with BatchNorm and LRN. Input is a 32x32x1 patch.
    """
    model = Sequential()
    # first pad also declares the input shape
    model.add(ZeroPadding2D(1, input_shape=(32, 32, 1)))
    conv_specs = [(32, 1), (32, 1), (64, 2), (64, 1), (128, 2), (128, 1)]
    for idx, (num_filters, stride) in enumerate(conv_specs):
        if idx > 0:
            model.add(ZeroPadding2D(1))
        model.add(Conv2D(num_filters, kernel_size=(3, 3), strides=stride))
        model.add(BatchNormalization(epsilon=0.0001, scale=False, center=False))
        model.add(Lambda(K.relu))
    # final 8x8 conv collapses the spatial dims; no ReLU, LRN instead
    model.add(Conv2D(128, kernel_size=(8, 8)))
    model.add(BatchNormalization(epsilon=0.0001, scale=False, center=False))
    model.add(LRN(alpha=256, k=0, beta=0.5, n=256))
    model.set_weights(weights)
    return model
def build_L2_net(net_name):
    """Load pickled weights for `net_name` and build the two L2-Net models.

    Returns (model, model_cen, pix_mean, pix_mean_cen) where `model_cen` is
    the central-surround variant.

    Fix: the pickle file handle was previously opened inline and never
    closed; a context manager now guarantees it is released.
    """
    data_path = __location__ + "/../python_net_data/" + net_name + ".p"
    with open(data_path, "rb") as f:
        python_net_data = pickle.load(f)
    return (build_cnn(python_net_data['weights']),
            build_cnn(python_net_data['weights_cen']),
            python_net_data['pix_mean'],
            python_net_data['pix_mean_cen'])
def cal_L2Net_des(net_name, testPatchs, flagCS = False):
    """
    Compute L2-Net descriptors for one or more patches.

    Parameters
    ----------
    net_name : string
        One of "L2Net-HP", "L2Net-HP+", "L2Net-LIB", "L2Net-LIB+", "L2Net-ND",
        "L2Net-ND+", "L2Net-YOS", "L2Net-YOS+"
    testPatchs : array
        Numpy array of image data shaped (?, 32, 32, 1), or (?, 64, 64, 1)
        when using central-surround
    flagCS : boolean
        If True, use the central-surround network pair

    Returns
    -------
    descriptor
        Numpy array shaped (?, 128), or (?, 256) when using central-surround
    """
    model, model_cen, pix_mean, pix_mean_cen = build_L2_net(net_name)
    patches_cen = None
    if flagCS:
        # central 32x32 crop, mean-subtracted then patch-wise standardized
        patches_cen = testPatchs[:, 16:48, 16:48, :] - pix_mean_cen
        patches_cen = np.array([(p - np.mean(p)) / (np.std(p) + 1e-12) for p in patches_cen])
        # downscale the full 64x64 patches for the plain network
        testPatchs = np.array([cv2.resize(testPatchs[i], (32, 32), interpolation=cv2.INTER_CUBIC)
                               for i in range(0, testPatchs.shape[0])])
        testPatchs = np.expand_dims(testPatchs, axis=-1)
    patches = testPatchs - pix_mean
    patches = np.array([(p - np.mean(p)) / (np.std(p) + 1e-12) for p in patches])
    res = np.reshape(model.predict(patches), (patches.shape[0], 128))
    if not flagCS:
        return res
    res_cen = np.reshape(model_cen.predict(patches_cen), (patches.shape[0], 128))
    return np.concatenate((res, res_cen), 1)
class L2Net:
    """Convenience wrapper around the Keras L2-Net models.

    Loads the plain (and central-surround) networks once at construction and
    exposes calc_descriptors() for repeated descriptor computation.
    """
    def __init__(self, net_name, do_tf_logging=True, flagCS = False):
        set_tf_logging(do_tf_logging)
        # grow GPU memory on demand instead of grabbing it all up front
        tf_cfg = tf.ConfigProto()
        tf_cfg.gpu_options.allow_growth = True
        #tf_cfg.log_device_placement = True  # log on which device each op runs
        set_session(tf.Session(config=tf_cfg))  # make this the default Keras session
        model, model_cen, pix_mean, pix_mean_cen = build_L2_net(net_name)
        self.flagCS = flagCS
        self.model = model
        self.model_cen = model_cen
        self.pix_mean = pix_mean
        self.pix_mean_cen = pix_mean_cen

    @staticmethod
    def _standardize(patches):
        """Patch-wise zero-mean / unit-std normalization."""
        return np.array([(p - np.mean(p)) / (np.std(p) + 1e-12) for p in patches])

    def calc_descriptors(self, patches):
        """Return descriptors (N, 128), or (N, 256) with central-surround."""
        patches_cen = None
        if self.flagCS:
            # central 32x32 crop goes through the central-surround network
            patches_cen = self._standardize(patches[:, 16:48, 16:48, :] - self.pix_mean_cen)
            # downscale the full 64x64 patches for the plain network
            patches = np.array([cv2.resize(patches[i], (32, 32), interpolation=cv2.INTER_CUBIC)
                                for i in range(0, patches.shape[0])])
            patches = np.expand_dims(patches, axis=-1)
        patches = self._standardize(patches - self.pix_mean)
        res = np.reshape(self.model.predict(patches), (patches.shape[0], 128))
        if not self.flagCS:
            return res
        res_cen = np.reshape(self.model_cen.predict(patches_cen), (patches.shape[0], 128))
        return np.concatenate((res, res_cen), 1)
# data = np.full((1,64,64,1), 0.)
# result = cal_L2Net_des("L2Net-HP+", data, flagCS=True)
# print(result) | 6,657 | 34.227513 | 159 | py |
pyslam | pyslam-master/thirdparty/l2net/l2net_model.py | import torch
import torch.nn.init
import torch.nn as nn
eps = 1e-10
class L2Norm(nn.Module):
    """L2-normalize each row of a 2-D tensor (eps-stabilized)."""
    def __init__(self):
        super(L2Norm, self).__init__()
        self.eps = 1e-10
    def forward(self, x):
        squared_sum = torch.sum(x * x, dim=1)
        row_norm = torch.sqrt(squared_sum + self.eps)
        return x / row_norm.unsqueeze(-1).expand_as(x)
class L2Net(nn.Module):
    """PyTorch port of L2-Net: a 7-stage conv+BN network mapping a 32x32
    single-channel patch to a 128-D descriptor (weights converted from the
    MatConvNet release).
    """
    def __init__(self):
        super(L2Net, self).__init__()
        # conv/BN stack; `eps` is the module-level BN epsilon constant
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(32, affine=True, eps=eps),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(32, affine=True, eps=eps),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias=True),
            nn.BatchNorm2d(64, affine=True, eps=eps),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(64, affine=True, eps=eps),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=True),
            nn.BatchNorm2d(128, affine=True, eps=eps),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(128, affine=True, eps=eps),
            nn.ReLU(),
            # final 8x8 conv collapses the 8x8 spatial map to 1x1
            nn.Conv2d(128, 128, kernel_size=8, bias=True),
            nn.BatchNorm2d(128, affine=True, eps=eps),
        )
        return
    def input_norm(self, x):
        # matlab norm
        # Standardize each patch to zero mean / unit std. The transpose(2, 3)
        # pairs reproduce MATLAB's column-major element ordering so the std
        # matches the original MatConvNet preprocessing exactly.
        z = x.contiguous().transpose(2, 3).contiguous().view(x.size(0),-1)
        x_minus_mean = z.transpose(0,1)-z.mean(1)
        sp = torch.std(z,1).detach()
        norm_inp = x_minus_mean/(sp+1e-12)
        norm_inp = norm_inp.transpose(0, 1).view(-1, 1, x.size(2), x.size(3)).transpose(2,3)
        return norm_inp
    def forward(self, input):
        norm_img = self.input_norm(input)
        x_features = self.features(norm_img)
        # channel-wise response normalization of the raw features, then
        # flatten to one 128-D descriptor row per input patch
        return nn.LocalResponseNorm(256,1*256,0.5,0.5)(x_features).view(input.size(0),-1)
| 2,081 | 34.288136 | 92 | py |
pyslam | pyslam-master/thirdparty/l2net/convert_l2net_weights_matconv_pytorch.py | import numpy as np
import scipy.io as sio
import torch
import torch.nn.init
from misc.l2net.l2net_model import L2Net
eps = 1e-10
def check_ported(l2net_model, test_patch, img_mean):
    """Run a mean-subtracted test patch through the ported model, print and
    return the resulting descriptor (HxWxCxN input reordered to NxCxHxW)."""
    batch = test_patch.transpose(3, 2, 0, 1) - img_mean
    desc = l2net_model(torch.from_numpy(batch))
    print(desc)
    return desc
if __name__ == '__main__':
    # Convert MatConvNet L2-Net weights (.mat) into a PyTorch state dict.
    path_to_l2net_weights = '/cvlabsrc1/cvlab/datasets_anastasiia/descriptors/sfm-evaluation-benchmarking/third_party/l2net/matlab/L2Net-LIB+.mat'
    l2net_weights = sio.loadmat(path_to_l2net_weights)
    l2net_model = L2Net()
    l2net_model.eval()
    new_state_dict = l2net_model.state_dict().copy()
    conv_layers, bn_layers = {}, {}
    # nested indexing unwraps scipy's MATLAB struct representation
    all_layer_weights = l2net_weights['net']['layers'][0][0][0]
    img_mean = l2net_weights['pixMean']
    # indices of the Conv2d / BatchNorm2d modules inside L2Net.features
    conv_layers_to_track, bn_layers_to_track = [0,3,6,9,12,15,18], \
                                               [1,4,7,10,13,16,19]
    conv_i, bn_i = 0,0
    # first pass: collect conv and batch-norm weight blobs from the .mat file
    for layer in all_layer_weights:
        if 'weights' not in layer.dtype.names:
            continue
        layer_name = layer[0][0][0][0]
        layer_value = layer['weights'][0][0][0]
        if layer_name == 'conv':
            conv_layers[conv_layers_to_track[conv_i]] = layer_value
            conv_i+=1
        elif layer_name == 'bnormPair':
            bn_layers[bn_layers_to_track[bn_i]] = layer_value
            bn_i+=1
    # second pass: copy each blob into the matching state-dict entry
    for key, value in new_state_dict.items():
        layer_number = int(key.split('.')[1])
        if layer_number in conv_layers.keys():
            if 'weight' in key:
                # MatConvNet stores kernels as HxWxCinxCout; torch wants CoutxCinxHxW
                new_state_dict[key] = torch.from_numpy(conv_layers[layer_number][0].transpose((3,2,0,1)))
            elif 'bias' in key:
                new_state_dict[key] = torch.from_numpy(conv_layers[layer_number][1]).squeeze()
        elif layer_number in bn_layers.keys():
            if 'running_mean' in key:
                new_state_dict[key] = torch.from_numpy(np.array([x[0] for x in bn_layers[layer_number][2]])).squeeze()
            elif 'running_var' in key:
                # MatConvNet stores sigma; torch wants variance, so square and
                # remove the epsilon that BatchNorm2d will add back
                new_state_dict[key] = torch.from_numpy(np.array([x[1] for x in bn_layers[layer_number][2]] )** 2 -eps).squeeze()
            elif 'weight' in key:
                # affine scale fixed to 1 (MatConvNet bnormPair has no gamma here)
                new_state_dict[key] = torch.from_numpy(np.ones(value.size()[0])).squeeze()
        else:
            continue
    l2net_model.load_state_dict(new_state_dict)
    l2net_model.eval()
    torch.save(l2net_model.state_dict(),'l2net_ported_weights_lib+.pth')
    # compare desc on test patch with matlab implementation
    # test_patch_batch = sio.loadmat('test_batch_img.mat')['testPatch']
    # check_ported(l2net_model, test_patch_batch, img_mean)
    #
    # test_patch_one = sio.loadmat('test_one.mat')['testPatch']
    # check_ported(l2net_model, np.expand_dims(np.expand_dims(test_patch_one, axis=2),axis=2), img_mean)
# check_ported(l2net_model, np.expand_dims(np.expand_dims(test_patch_one, axis=2),axis=2), img_mean) | 2,854 | 37.066667 | 146 | py |
pyslam | pyslam-master/thirdparty/contextdesc/models/cnn_wrapper/network.py | #!/usr/bin/env python3
"""
Copyright 2017, Zixin Luo, HKUST.
CNN layer wrapper.
Please be noted about following issues:
1. The center and scale paramter are disabled by default for all BN-related layers, as they have
shown little influence on final performance. In particular, scale params is officially considered
unnecessary as oftentimes followed by ReLU.
2. By default we apply L2 regularization only on kernel or bias parameters, but not learnable BN
coefficients (i.e. center/scale) as suggested in ResNet paper. Be noted to add regularization terms
into tf.GraphKeys.REGULARIZATION_LOSSES if you are desgining custom layers.
3. Since many of the models are converted from Caffe, we are by default setting the epsilon parameter in
BN to 1e-5 as that is in Caffe, while 1e-3 in TensorFlow. It may cause slightly different behavior
if you are using models from other deep learning toolboxes.
"""
import numpy as np
import tensorflow as tf
# Zero padding in default. 'VALID' gives no padding.
DEFAULT_PADDING = 'SAME'
def caffe_like_padding(input_tensor, padding):
    """A padding method that has same behavior as Caffe's.

    Symmetrically zero-pads the spatial dimensions of a 4-D (NHWC) or 5-D
    (NDHWC) tensor by `padding` on each side; batch and channel dims are
    untouched.

    Fix: an unsupported tensor rank previously fell through and raised a
    confusing UnboundLocalError on `padded_input`; it now raises an explicit
    ValueError.
    """
    def PAD(x): return [x, x]
    rank = len(input_tensor.get_shape())
    if rank == 4:
        padded_input = tf.pad(input_tensor,
                              [PAD(0), PAD(padding), PAD(padding), PAD(0)], "CONSTANT")
    elif rank == 5:
        padded_input = tf.pad(input_tensor,
                              [PAD(0), PAD(padding), PAD(padding), PAD(padding), PAD(0)],
                              "CONSTANT")
    else:
        raise ValueError('caffe_like_padding expects a 4-D or 5-D tensor, got rank %d' % rank)
    return padded_input
def layer(op):
    """Decorator for composable network layers.

    Wraps a layer-building method so it can be chained: the wrapped call
    takes its input from self.terminals, registers its output under a
    (possibly auto-generated) name in self.layers, feeds the output forward,
    and returns self for fluent chaining.
    """
    def layer_decorated(self, *args, **kwargs):
        """Layer decoration."""
        # We allow to construct low-level layers instead of high-level networks.
        # In that mode (no tracked inputs, or an explicit tensor argument) the
        # raw op result is returned instead of self.
        if self.inputs is None or (len(args) > 0 and isinstance(args[0], tf.Tensor)):
            layer_output = op(self, *args, **kwargs)
            return layer_output
        # Automatically set a name if not provided.
        name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
        # Figure out the layer inputs.
        if not self.terminals:
            raise RuntimeError('No input variables found for layer %s.' % name)
        elif len(self.terminals) == 1:
            layer_input = self.terminals[0]
        else:
            layer_input = list(self.terminals)
        # Perform the operation and get the output.
        layer_output = op(self, layer_input, *args, **kwargs)
        # Add to layer LUT.
        self.layers[name] = layer_output
        # This output is now the input for the next layer.
        self.feed(layer_output)
        # Return self for chained calls.
        return self
    return layer_decorated
class Network(object):
    """Base class for layer-by-layer CNN construction (TF1-style).

    Subclasses implement setup() and build the graph by chaining
    @layer-decorated ops; self.terminals tracks current inputs and
    self.layers maps layer names to output tensors.
    """
    def __init__(self, inputs, is_training,
                 dropout_rate=0.5, seed=None, epsilon=1e-5, reuse=False, fcn=True, regularize=True,
                 **kwargs):
        # The input nodes for this network
        self.inputs = inputs
        # If true, the resulting variables are set as trainable
        self.trainable = is_training if isinstance(is_training, bool) else True
        # If true, variables are shared between feature towers
        self.reuse = reuse
        # If true, layers like batch normalization or dropout are working in training mode
        self.training = is_training
        # Dropout rate
        self.dropout_rate = dropout_rate
        # Seed for randomness
        self.seed = seed
        # Add regularizer for parameters.
        self.regularizer = tf.contrib.layers.l2_regularizer(1.0) if regularize else None
        # The epsilon parameter in BN layer.
        self.bn_epsilon = epsilon
        self.extra_args = kwargs
        # Endpoints.
        self.endpoints = []
        if inputs is not None:
            # The current list of terminal nodes
            self.terminals = []
            # Mapping from layer names to layers
            self.layers = dict(inputs)
            # If true, dense layers will be omitted in network construction
            self.fcn = fcn
            self.setup()
    def setup(self):
        '''Construct the network. '''
        raise NotImplementedError('Must be implemented by the subclass.')
    def load(self, data_path, session, ignore_missing=False, exclude_var=None):
        '''Load network weights.
        data_path: The path to the numpy-serialized network weights
        session: The current TensorFlow session
        ignore_missing: If true, serialized weights for missing layers are ignored.
        exclude_var: Optional comma-separated keywords; variables whose op name
        contains any keyword are skipped.
        '''
        # NOTE(review): on numpy>=1.16.3 this np.load call needs
        # allow_pickle=True to read a pickled dict — verify runtime numpy.
        data_dict = np.load(data_path, encoding='latin1').item()
        if exclude_var is not None:
            keyword = exclude_var.split(',')
        assign_op = []
        for op_name in data_dict:
            if exclude_var is not None:
                find_keyword = False
                for tmp_keyword in keyword:
                    if op_name.find(tmp_keyword) >= 0:
                        find_keyword = True
                if find_keyword:
                    continue
            with tf.variable_scope(op_name, reuse=True):
                for param_name, data in data_dict[op_name].items():
                    try:
                        var = tf.get_variable(param_name)
                        assign_op.append(var.assign(data))
                    except ValueError:
                        if not ignore_missing:
                            raise
                        else:
                            # NOTE(review): `Notify` is not imported in this
                            # module — this branch would raise NameError.
                            # TODO confirm the intended import.
                            print(Notify.WARNING, ':'.join(
                                [op_name, param_name]), "is omitted.", Notify.ENDC)
        session.run(assign_op)
    def feed(self, *args):
        '''Set the input(s) for the next operation by replacing the terminal nodes.
        The arguments can be either layer names or the actual layers.
        '''
        assert args
        self.terminals = []
        for fed_layer in args:
            if isinstance(fed_layer, str):
                try:
                    fed_layer = self.layers[fed_layer]
                except KeyError:
                    raise KeyError('Unknown layer name fed: %s' % fed_layer)
            self.terminals.append(fed_layer)
        return self
    def get_output(self):
        '''Returns the current network output.'''
        return self.terminals[-1]
    def get_output_by_name(self, layer_name):
        '''
        Get graph node by layer name
        :param layer_name: layer name string
        :return: tf node
        '''
        return self.layers[layer_name]
    def get_unique_name(self, prefix):
        '''Returns an index-suffixed unique name for the given prefix.
        This is used for auto-generating layer names based on the type-prefix.
        '''
        ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
        return '%s_%d' % (prefix, ident)
    def change_inputs(self, input_tensors):
        # Replace a single registered input tensor by name.
        assert len(input_tensors) == 1
        for key in input_tensors:
            self.layers[key] = input_tensors[key]
    @layer
    def conv(self,
             input_tensor,
             kernel_size,
             filters,
             strides,
             name,
             relu=True,
             dilation_rate=1,
             padding=DEFAULT_PADDING,
             biased=True,
             reuse=False,
             kernel_init=None,
             bias_init=tf.zeros_initializer,
             separable=False):
        """2D/3D convolution.

        `padding` may be a string ('SAME'/'VALID') or an int, in which case
        Caffe-style explicit zero padding is applied and 'VALID' is used.
        """
        kwargs = {'filters': filters,
                  'kernel_size': kernel_size,
                  'strides': strides,
                  'activation': tf.nn.relu if relu else None,
                  'use_bias': biased,
                  'dilation_rate': dilation_rate,
                  'trainable': self.trainable,
                  'reuse': self.reuse or reuse,
                  'bias_regularizer': self.regularizer if biased else None,
                  'kernel_initializer': kernel_init,
                  'bias_initializer': bias_init,
                  'name': name}
        if separable:
            kwargs['depthwise_regularizer'] = self.regularizer
            kwargs['pointwise_regularizer'] = self.regularizer
        else:
            kwargs['kernel_regularizer'] = self.regularizer
        if isinstance(padding, str):
            padded_input = input_tensor
            kwargs['padding'] = padding
        else:
            padded_input = caffe_like_padding(input_tensor, padding)
            kwargs['padding'] = 'VALID'
        if len(input_tensor.get_shape()) == 4:
            if not separable:
                return tf.compat.v1.layers.conv2d(padded_input, **kwargs)
            else:
                return tf.layers.separable_conv2d(padded_input, **kwargs)
        elif len(input_tensor.get_shape()) == 5:
            if not separable:
                return tf.layers.conv3d(padded_input, **kwargs)
            else:
                raise NotImplementedError('No official implementation for separable_conv3d')
        else:
            raise ValueError('Improper input rank for layer: ' + name)
    @layer
    def conv_bn(self,
                input_tensor,
                kernel_size,
                filters,
                strides,
                name,
                relu=True,
                center=False,
                scale=False,
                dilation_rate=1,
                padding=DEFAULT_PADDING,
                biased=False,
                separable=False,
                reuse=False):
        # Convolution (no activation) followed by batch norm, with optional ReLU after BN.
        conv = self.conv(input_tensor, kernel_size, filters, strides, name, relu=False,
                         dilation_rate=dilation_rate, padding=padding,
                         biased=biased, reuse=reuse, separable=separable)
        conv_bn = self.batch_normalization(conv, name + '/bn',
                                           center=center, scale=scale, relu=relu, reuse=reuse)
        return conv_bn
    @layer
    def deconv(self,
               input_tensor,
               kernel_size,
               filters,
               strides,
               name,
               relu=True,
               padding=DEFAULT_PADDING,
               biased=True,
               reuse=False):
        """2D/3D deconvolution."""
        kwargs = {'filters': filters,
                  'kernel_size': kernel_size,
                  'strides': strides,
                  'activation': tf.nn.relu if relu else None,
                  'use_bias': biased,
                  'trainable': self.trainable,
                  'reuse': self.reuse or reuse,
                  'kernel_regularizer': self.regularizer,
                  'bias_regularizer': self.regularizer if biased else None,
                  'name': name}
        if isinstance(padding, str):
            padded_input = input_tensor
            kwargs['padding'] = padding
        else:
            padded_input = caffe_like_padding(input_tensor, padding)
            kwargs['padding'] = 'VALID'
        if len(input_tensor.get_shape()) == 4:
            return tf.layers.conv2d_transpose(padded_input, **kwargs)
        elif len(input_tensor.get_shape()) == 5:
            return tf.layers.conv3d_transpose(padded_input, **kwargs)
        else:
            raise ValueError('Improper input rank for layer: ' + name)
    @layer
    def deconv_bn(self,
                  input_tensor,
                  kernel_size,
                  filters,
                  strides,
                  name,
                  relu=True,
                  center=False,
                  scale=False,
                  padding=DEFAULT_PADDING,
                  biased=False,
                  reuse=False):
        # Deconvolution (no activation) followed by batch norm, with optional ReLU after BN.
        deconv = self.deconv(input_tensor, kernel_size, filters, strides, name,
                             relu=False, padding=padding, biased=biased, reuse=reuse)
        deconv_bn = self.batch_normalization(deconv, name + '/bn',
                                             center=center, scale=scale, relu=relu, reuse=reuse)
        return deconv_bn
    @layer
    def relu(self, input_tensor, name=None):
        """ReLu activation."""
        return tf.nn.relu(input_tensor, name=name)
    @layer
    def max_pool(self, input_tensor, pool_size, strides, name, padding=DEFAULT_PADDING):
        """Max pooling."""
        if isinstance(padding, str):
            padded_input = input_tensor
            padding_type = padding
        else:
            padded_input = caffe_like_padding(input_tensor, padding)
            padding_type = 'VALID'
        return tf.layers.max_pooling2d(padded_input,
                                       pool_size=pool_size,
                                       strides=strides,
                                       padding=padding_type,
                                       name=name)
    @layer
    def avg_pool(self, input_tensor, pool_size, strides, name, padding=DEFAULT_PADDING):
        """"Average pooling."""
        if isinstance(padding, str):
            padded_input = input_tensor
            padding_type = padding
        else:
            padded_input = caffe_like_padding(input_tensor, padding)
            padding_type = 'VALID'
        return tf.layers.average_pooling2d(padded_input,
                                           pool_size=pool_size,
                                           strides=strides,
                                           padding=padding_type,
                                           name=name)
    @layer
    def concat(self, input_tensors, axis, name):
        # Concatenate multiple fed inputs along `axis`.
        return tf.concat(values=input_tensors, axis=axis, name=name)
    @layer
    def add(self, input_tensors, name):
        # Element-wise sum of multiple fed inputs.
        return tf.add_n(input_tensors, name=name)
    @layer
    def fc(self, input_tensor, num_out, name, biased=True, relu=True, flatten=True, reuse=False):
        # To behave same to Caffe.
        if flatten:
            flatten_tensor = tf.layers.flatten(input_tensor)
        else:
            flatten_tensor = input_tensor
        return tf.layers.dense(flatten_tensor,
                               units=num_out,
                               use_bias=biased,
                               activation=tf.nn.relu if relu else None,
                               trainable=self.trainable,
                               reuse=self.reuse or reuse,
                               kernel_regularizer=self.regularizer,
                               bias_regularizer=self.regularizer if biased else None,
                               name=name)
    @layer
    def fc_bn(self, input_tensor, num_out, name,
              biased=False, relu=True, center=False, scale=False, flatten=True, reuse=False):
        # To behave same to Caffe.
        fc = self.fc(input_tensor, num_out, name, relu=False,
                     biased=biased, flatten=flatten, reuse=reuse)
        fc_bn = self.batch_normalization(fc, name + '/bn',
                                         center=center, scale=scale, relu=relu, reuse=reuse)
        return fc_bn
    @layer
    def softmax(self, input_tensor, name, dim=-1):
        return tf.nn.softmax(input_tensor, dim=dim, name=name)
    @layer
    def batch_normalization(self, input_tensor, name,
                            center=False, scale=False, relu=False, reuse=False):
        """Batch normalization."""
        output = tf.layers.batch_normalization(input_tensor,
                                               center=center,
                                               scale=scale,
                                               fused=True,
                                               training=self.training,
                                               trainable=self.trainable,
                                               reuse=self.reuse or reuse,
                                               epsilon=self.bn_epsilon,
                                               gamma_regularizer=None,  # self.regularizer if scale else None,
                                               beta_regularizer=None,  # self.regularizer if center else None,
                                               name=name)
        if relu:
            output = self.relu(output, name + '/relu')
        return output
    @layer
    def context_normalization(self, input_tensor, name):
        """The input is a feature matrix with a shape of BxNx1xD"""
        # Normalize across the N (point) axis, per batch and per channel.
        mean, variance = tf.nn.moments(input_tensor, axes=[1], keep_dims=True)
        output = tf.nn.batch_normalization(
            input_tensor, mean, variance, None, None, self.bn_epsilon)
        return output
    @layer
    def l2norm(self, input_tensor, name, axis=-1):
        return tf.nn.l2_normalize(input_tensor, axis=axis, name=name)
    @layer
    def squeeze(self, input_tensor, axis=None, name=None):
        return tf.squeeze(input_tensor, axis=axis, name=name)
    @layer
    def reshape(self, input_tensor, shape, name=None):
        return tf.reshape(input_tensor, shape, name=name)
    @layer
    def flatten(self, input_tensor, name=None):
        return tf.layers.flatten(input_tensor, name=name)
    @layer
    def tanh(self, input_tensor, name=None):
        return tf.tanh(input_tensor, name=name)
| 17,219 | 38.586207 | 110 | py |
pyslam | pyslam-master/test/thirdparty/test_delf.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Forked from:
# https://github.com/tensorflow/models/blob/master/research/delf/delf/python/examples/extract_features.py
"""Extracts DELF features from a given image and save results to file.
The images must be in JPG format. The program checks if descriptors already
exist, and skips computation for those.
"""
import sys
sys.path.append("../../")
import config
config.cfg.set_lib('delf')
import cv2
import warnings # to disable tensorflow-numpy warnings: from https://github.com/tensorflow/tensorflow/issues/30427
warnings.filterwarnings('ignore', category=FutureWarning)
import argparse
import os
import sys
import time
import json
import numpy as np
import h5py
if False:
import tensorflow as tf
else:
# from https://stackoverflow.com/questions/56820327/the-name-tf-session-is-deprecated-please-use-tf-compat-v1-session-instead
import tensorflow.compat.v1 as tf
# from https://kobkrit.com/using-allow-growth-memory-option-in-tensorflow-and-keras-dc8c8081bc96 to cope with the following error:
# "[...tensorflow/stream_executor/cuda/cuda_dnn.cc:329] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR"
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_config.gpu_options.per_process_gpu_memory_fraction=0.333 # from https://stackoverflow.com/questions/34199233/how-to-prevent-tensorflow-from-allocating-the-totality-of-a-gpu-memory
#session = tf.Session(config=tf_config, ...)
from google.protobuf import text_format
from tensorflow.python.platform import app
# from delf import delf_config_pb2
# from delf import feature_extractor
# from delf import feature_io
from delf.protos import aggregation_config_pb2
from delf.protos import box_pb2
from delf.protos import datum_pb2
from delf.protos import delf_config_pb2
from delf.protos import feature_pb2
from delf.python import box_io
from delf.python import datum_io
from delf.python import delf_v1
from delf.python import feature_aggregation_extractor
from delf.python import feature_aggregation_similarity
from delf.python import feature_extractor
from delf.python import feature_io
from delf.python.examples import detector
from delf.python.examples import extractor
from delf.python import detect_to_retrieve
from delf.python import google_landmarks_dataset
delf_base_path='../../thirdparty/tensorflow_models/research/delf/delf/python/'
delf_config_file= delf_base_path + 'examples/delf_config_example.pbtxt'
delf_model_path=delf_base_path + 'examples/parameters/delf_gld_20190411/model/'
delf_mean_path=delf_base_path + 'examples/parameters/delf_gld_20190411/pca/mean.datum'
delf_projection_matrix_path=delf_base_path + 'examples/parameters/delf_gld_20190411/pca/pca_proj_mat.datum'
cmd_args = None
# Extension of feature files.
_DELF_EXT = '.h5'
# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
  """Helper function to read image paths.

  Args:
    list_path: Path to list of images, one image path per line.
  Returns:
    image_paths: List of image paths.
  """
  with tf.gfile.GFile(list_path, 'r') as f:
    raw_lines = f.readlines()
  return [line.rstrip() for line in raw_lines]
def MakeExtractor(sess, config, import_scope=None):
  """Creates a function to extract features from an image.
  Args:
    sess: TensorFlow session to use.
    config: DelfConfig proto containing the model configuration.
    import_scope: Optional scope to use for model.
  Returns:
    Function that receives an image and returns features.
  """
  # Load the exported SavedModel into the session's graph.
  tf.saved_model.loader.load(
      sess, [tf.saved_model.tag_constants.SERVING],
      config.model_path,
      import_scope=import_scope)
  import_scope_prefix = import_scope + '/' if import_scope is not None else ''
  # Resolve the model's input placeholders by tensor name.
  input_image = sess.graph.get_tensor_by_name('%sinput_image:0' %
                                              import_scope_prefix)
  input_score_threshold = sess.graph.get_tensor_by_name(
      '%sinput_abs_thres:0' % import_scope_prefix)
  input_image_scales = sess.graph.get_tensor_by_name('%sinput_scales:0' %
                                                     import_scope_prefix)
  input_max_feature_num = sess.graph.get_tensor_by_name(
      '%sinput_max_feature_num:0' % import_scope_prefix)
  # Output tensors: feature boxes, raw descriptors, scales and attention.
  boxes = sess.graph.get_tensor_by_name('%sboxes:0' % import_scope_prefix)
  raw_descriptors = sess.graph.get_tensor_by_name('%sfeatures:0' %
                                                  import_scope_prefix)
  feature_scales = sess.graph.get_tensor_by_name('%sscales:0' %
                                                 import_scope_prefix)
  attention_with_extra_dim = sess.graph.get_tensor_by_name(
      '%sscores:0' % import_scope_prefix)
  # Reshape attention scores to a 1-D tensor (one score per feature).
  attention = tf.reshape(attention_with_extra_dim,
                         [tf.shape(attention_with_extra_dim)[0]])
  # Post-process boxes/descriptors (keypoint locations + PCA) per config.
  locations, descriptors = feature_extractor.DelfFeaturePostProcessing(
      boxes, raw_descriptors, config)
  def ExtractorFn(image):
    """Receives an image and returns DELF features.
    Args:
      image: Uint8 array with shape (height, width 3) containing the RGB image.
    Returns:
      Tuple (locations, descriptors, feature_scales, attention)
    """
    return sess.run([locations, descriptors, feature_scales, attention],
                    feed_dict={
                        input_image: image,
                        input_score_threshold: config.delf_local_config.score_threshold,
                        input_image_scales: list(config.image_scales),
                        input_max_feature_num: config.delf_local_config.max_feature_num
                    })
  return ExtractorFn
def main(unused_argv):
  """Extract DELF features for a single hard-coded test image and store
  keypoints/descriptors/scores/scales as HDF5 files in cmd_args.output_dir.

  Relies on module-level globals: cmd_args, tf_config, delf_model_path,
  delf_mean_path, delf_projection_matrix_path and MakeExtractor.
  """
  tf.logging.set_verbosity(tf.logging.INFO)
  # Only one hard-coded test image is processed; the original DELF
  # list-file-driven loop is disabled.
  num_images = 1
  img = cv2.imread('../data/kitti06-12-color.png',cv2.IMREAD_COLOR)
  # Parse the DelfConfig proto and override the model/PCA paths with the
  # locations bundled alongside pyslam.
  delf_config = delf_config_pb2.DelfConfig()
  with tf.gfile.FastGFile(cmd_args.config_path, 'r') as f:
    text_format.Merge(f.read(), delf_config)
  delf_config.model_path = delf_model_path
  delf_config.delf_local_config.pca_parameters.mean_path = delf_mean_path
  delf_config.delf_local_config.pca_parameters.projection_matrix_path = delf_projection_matrix_path
  print('config:', delf_config)
  # Create output directory if necessary.
  if not os.path.exists(cmd_args.output_dir):
    os.makedirs(cmd_args.output_dir)
  # Build the model into the default graph and run extraction in a session.
  with tf.Graph().as_default():
    with tf.Session(config=tf_config) as sess:
      init_op = tf.global_variables_initializer()
      sess.run(init_op)
      extractor_fn = MakeExtractor(sess, delf_config)
      # BUGFIX: time.clock() was deprecated since Python 3.3 and removed in
      # Python 3.8; time.time() is the portable wall-clock replacement.
      start = time.time()
      with h5py.File(os.path.join(cmd_args.output_dir, 'keypoints.h5'), 'w') as h5_kp, \
           h5py.File(os.path.join(cmd_args.output_dir, 'descriptors.h5'), 'w') as h5_desc, \
           h5py.File(os.path.join(cmd_args.output_dir, 'scores.h5'), 'w') as h5_score, \
           h5py.File(os.path.join(cmd_args.output_dir, 'scales.h5'), 'w') as h5_scale:
        for i in range(num_images):
          key = 'img'
          print('Processing "{}"'.format(key))
          # Round-trip the numpy image through a tensor so the extractor
          # receives a float32 array, as in the original script.
          image_tf = tf.convert_to_tensor(img, np.float32)
          im = sess.run(image_tf)
          # Extract and save features.
          (locations_out, descriptors_out, feature_scales_out, attention_out) = extractor_fn(im)
          # DELF reports (row, col); store as (x, y) by reversing the columns.
          h5_kp[key] = locations_out[:, ::-1]
          h5_desc[key] = descriptors_out
          h5_scale[key] = feature_scales_out
          h5_score[key] = attention_out
          print('#extracted keypoints:',len(h5_kp[key]))
          print('des[0]:',descriptors_out[0])
          print('done!')
if __name__ == '__main__':
  # CLI: DELF config path, optional image-list path (unused by main above),
  # and the output directory for the HDF5 feature files.
  parser = argparse.ArgumentParser()
  # registers a 'bool' parser type; kept for parity with the upstream DELF
  # script even though no argument below declares type='bool'
  parser.register('type', 'bool', lambda v: v.lower() == 'true')
  parser.add_argument(
      '--config_path',
      type=str,
      default=delf_config_file, #'misc/delf/delf_config_example.pbtxt',
      help="""
      Path to DelfConfig proto text file with configuration to be used for DELF
      extraction.
      """)
  parser.add_argument(
      '--list_images_path',
      type=str,
      help="""
      Path to list of images whose DELF features will be extracted.
      """)
  parser.add_argument(
      '--output_dir',
      type=str,
      default='./delf',
      help="""
      Directory where DELF features will be written to. Each image's features
      will be written to a file with same name, and extension replaced by .delf.
      """)
  # unparsed flags are forwarded to tf's app.run so absl/tf flags still work
  cmd_args, unparsed = parser.parse_known_args()
  app.run(main=main, argv=[sys.argv[0]] + unparsed) | 12,641 | 41.280936 | 183 | py |
pyslam | pyslam-master/test/thirdparty/test_tfeat.py | import sys
# Demo script: compare matches from BRISK's binary descriptor against the
# learned TFEAT descriptor, using the same BRISK keypoints on an image pair.
sys.path.append("../../")
import config
config.cfg.set_lib('tfeat')
import torchvision as tv
import phototour
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
import math
import tfeat_model
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import os
import cv2
import tfeat_utils
import numpy as np
import cv2
from matplotlib import pyplot as plt
tfeat_base_path='../../thirdparty/tfeat/'
# open images
img1 = cv2.imread(tfeat_base_path + 'imgs/v_churchill/1.ppm',0)
img2 = cv2.imread(tfeat_base_path + 'imgs/v_churchill/6.ppm',0)
#init tfeat and load the trained weights
tfeat = tfeat_model.TNet()
models_path = tfeat_base_path + 'pretrained-models'
net_name = 'tfeat-liberty'
tfeat.load_state_dict(torch.load(os.path.join(models_path,net_name+".params")))
# NOTE(review): .cuda() assumes a CUDA-enabled PyTorch build; the script
# crashes on CPU-only machines.
tfeat.cuda()
tfeat.eval()
# use BRISK detector
brisk = cv2.BRISK_create()
kp1, des1 = brisk.detectAndCompute(img1,None)
kp2, des2 = brisk.detectAndCompute(img2,None)
# Hamming distance matcher for BRISK's binary descriptors
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = bf.knnMatch(des1,des2, k=2)
# Apply ratio test (Lowe's ratio test, threshold 0.8)
good = []
for m,n in matches:
    if m.distance < 0.8*n.distance:
        good.append([m])
img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,0, flags=2)
plt.subplot(1, 2, 1)
plt.title('BRISK detector and descriptor', fontsize=10)
plt.imshow(img3)
# mag_factor is how many times the original keypoint scale
# is enlarged to generate a patch from a keypoint
mag_factor = 3
#print('kp1: ', kp1)
# describe the SAME BRISK keypoints with TFEAT (32x32 patches)
desc_tfeat1 = tfeat_utils.describe_opencv(tfeat, img1, kp1, 32, mag_factor)
desc_tfeat2 = tfeat_utils.describe_opencv(tfeat, img2, kp2, 32, mag_factor)
# L2 matcher for TFEAT's float descriptors
bf = cv2.BFMatcher(cv2.NORM_L2)
matches = bf.knnMatch(desc_tfeat1,desc_tfeat2, k=2)
# Apply ratio test
good = []
for m,n in matches:
    if m.distance < 0.8*n.distance:
        good.append([m])
img4 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,0, flags=2)
plt.subplot(1, 2, 2)
plt.title('BRISK detector and TFEAT descriptor', fontsize=10)
plt.imshow(img4)
plt.show()
| 2,048 | 24.936709 | 79 | py |
pyslam | pyslam-master/test/thirdparty/test_hardnet_dense.py | #!/usr/bin/python3 -utt
# -*- coding: utf-8 -*-
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import time
import os
#sys.path.insert(0, '/home/ubuntu/dev/opencv-3.1/build/lib')
import cv2
import math
import numpy as np
from PIL import Image
hardnet_base_path='../../thirdparty/hardnet/'
class L2Norm(nn.Module):
    """L2-normalize the input along dim 1 (unit Euclidean length per slice)."""

    def __init__(self):
        super(L2Norm, self).__init__()
        # small constant keeps the division well-defined for all-zero slices
        self.eps = 1e-10

    def forward(self, x):
        # squared L2 norm along dim 1, kept as a broadcastable singleton dim
        squared = (x * x).sum(dim=1, keepdim=True)
        denom = torch.sqrt(squared + self.eps)
        return x / denom.expand_as(x)
class LocalNorm2d(nn.Module):
    """Local contrast normalization: subtract the sliding-window mean and
    divide by the sliding-window standard deviation (window = kernel_size),
    clamping the result to [-6, 6]."""

    def __init__(self, kernel_size=32):
        super(LocalNorm2d, self).__init__()
        self.ks = kernel_size
        # stride-1 average pooling implements the sliding-window mean
        self.pool = nn.AvgPool2d(kernel_size=self.ks, stride=1, padding=0)
        self.eps = 1e-10

    def _window_mean(self, x):
        # reflect-pad by half the kernel so the pooled map keeps (odd-kernel)
        # inputs at their original spatial size
        half = int(self.ks / 2)
        return self.pool(F.pad(x, (half, half, half, half), 'reflect'))

    def forward(self, x):
        mean = self._window_mean(x)
        # var = E[x^2] - E[x]^2; abs() guards tiny negatives from rounding
        var = torch.abs(self._window_mean(x * x) - mean * mean)
        normalized = (x - mean) / (torch.sqrt(var) + self.eps)
        return torch.clamp(normalized, min=-6.0, max=6.0)
class DenseHardNet(nn.Module):
    """Fully-convolutional HardNet: produces a dense grid of L2-normalized
    128-D descriptors for a (B, C, H, W) input.

    Multi-channel inputs are averaged to a single channel before local
    normalization.  `_stride` is the stride of the two downsampling convs
    (2 -> 1/4 of the input resolution, 1 -> full resolution).
    """
    def __init__(self, _stride = 2):
        super(DenseHardNet, self).__init__()
        # local contrast normalization over a 17x17 neighbourhood
        self.input_norm = LocalNorm2d(17)
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias = False),
            nn.BatchNorm2d(32, affine=False),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, padding=1, bias = False),
            nn.BatchNorm2d(32, affine=False),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=_stride, padding=1, bias = False),
            nn.BatchNorm2d(64, affine=False),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1, bias = False),
            nn.BatchNorm2d(64, affine=False),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=_stride,padding=1, bias = False),
            nn.BatchNorm2d(128, affine=False),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias = False),
            nn.BatchNorm2d(128, affine=False),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Conv2d(128, 128, kernel_size=8, bias = False),
            nn.BatchNorm2d(128, affine=False),
            # L2-normalize each descriptor along the channel dimension
            L2Norm()
        )
    def forward(self, input, upscale = False):
        # collapse color channels to grayscale by averaging before normalizing
        if input.size(1) > 1:
            feats = self.features(self.input_norm(input.mean(dim = 1, keepdim = True)))
        else:
            feats = self.features(self.input_norm(input))
        if upscale:
            # FIX: F.upsample is deprecated in favor of F.interpolate;
            # align_corners=False reproduces the previous default behavior.
            return F.interpolate(feats, (input.size(2), input.size(3)),
                                 mode='bilinear', align_corners=False)
        return feats
def load_grayscale_var(fname):
    """Load the image at *fname* as a float32 grayscale tensor of shape
    (1, 1, H, W).

    Grayscale conversion averages the R, G, B channels.  FIX: the original
    wrapped the tensor in torch.autograd.Variable(..., volatile=True);
    `volatile` was removed in PyTorch 0.4 — inference should instead run
    under torch.no_grad() at the call site, and a plain tensor is returned.
    """
    img = Image.open(fname).convert('RGB')
    # convert to gray scale by averaging R-G-B channels
    gray = np.mean(np.array(img), axis=2)
    tensor = torch.from_numpy(gray.astype(np.float32))
    return tensor.view(1, 1, tensor.size(0), tensor.size(1))
if __name__ == '__main__':
DO_CUDA = True
UPSCALE = False
stride = 2;
try:
if len(sys.argv) > 1:
input_img_fname = sys.argv[1]
output_fname = sys.argv[2]
if len(sys.argv) > 3:
DO_CUDA = sys.argv[3] != 'cpu'
if len(sys.argv) > 4:
UPSCALE = sys.argv[4] == 'UPSCALE'
if sys.argv[4] == 'NOSTRIDE':
stride = 1
else:
input_img_fname = '../data/kitti06-12-color.png'
output_fname = 'hardnet.out.txt'
DO_CUDA = True
except:
#print("Wrong input format. Try ./extract_DenseHardNet.py imgs/ref.png out.txt gpu")
print("Wrong input format. Try " + sys.argv[0] + " imgs/ref.png out.txt gpu")
sys.exit(1)
model_weights = hardnet_base_path + 'pretrained/pretrained_all_datasets/HardNet++.pth'
model = DenseHardNet(stride)
checkpoint = torch.load(model_weights)
model.load_state_dict(checkpoint['state_dict'])
model.eval()
img = load_grayscale_var(input_img_fname)
if DO_CUDA:
model.cuda()
img = img.cuda()
print('Extracting on GPU')
else:
print('Extracting on CPU')
model = model.cpu()
t = time.time()
with torch.no_grad():
desc = model(img, UPSCALE)
et = time.time() - t
print('processing', et)
desc_numpy = desc.cpu().detach().float().squeeze().numpy();
desc_numpy = np.clip(((desc_numpy + 0.45) * 210.0).astype(np.int32), 0, 255).astype(np.uint8)
print(desc_numpy.shape)
np.save(output_fname, desc_numpy) | 4,912 | 35.93985 | 161 | py |
pyslam | pyslam-master/test/thirdparty/test_logpolar.py | # Copyright 2019 EPFL, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# adapted from https://raw.githubusercontent.com/cvlab-epfl/log-polar-descriptors/master/example.py
import os
import sys
import argparse
import torch
import cv2
import numpy as np
import h5py
import argparse
from time import time
import sys
sys.path.append("../../")
import config
config.cfg.set_lib('logpolar')
from configs.defaults import _C as cfg
from modules.hardnet.models import HardNet
logpolar_base_path='../../thirdparty/logpolar/'
# Configuration
# Set use_log_polar=False to load the "Cartesian" models used in the paper
def extract_descriptors(input_filename, output_filename, use_log_polar,
                        num_keypoints, verbose):
    """Detect SIFT keypoints in `input_filename` and describe them with the
    log-polar (or cartesian) HardNet model, writing 'keypoints' and
    'descriptors' datasets to the HDF5 file `output_filename`.

    Keypoint rows are (x, y, scale, orientation_degrees) in the coordinates
    of the (possibly downscaled) image before padding.
    """
    # Pick the model configuration: log-polar (PTN) vs cartesian (STN).
    ROOT = logpolar_base_path # os.getcwd()
    if use_log_polar:
        config_path = os.path.join(ROOT, 'configs',
                                   'init_one_example_ptn_96.yml')
        if verbose:
            print('-- Using log-polar models')
    else:
        config_path = os.path.join(ROOT, 'configs',
                                   'init_one_example_stn_16.yml')
        if verbose:
            print('-- Using cartesian models')
    cfg.merge_from_file(config_path)
    # NOTE(review): mutates the process-wide CUDA device selection.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(0)
    torch.cuda.manual_seed_all(cfg.TRAINING.SEED)
    torch.backends.cudnn.deterministic = True
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if verbose:
        if torch.cuda.is_available():
            print('-- Using GPU')
        else:
            print('-- Using CPU')
    # Extract SIFT keypoints
    img = cv2.imread(input_filename, cv2.IMREAD_GRAYSCALE)
    # A safe image size is ~1000px on the largest dimension
    # To extract features on larger images you might want to increase the padding
    max_size = 1024
    if any([s > max_size for s in img.shape]):
        h, w = img.shape
        # FIX: cv2.resize's third positional parameter is the output array
        # `dst`, not the interpolation flag; pass interpolation= by keyword.
        if h > w:
            img = cv2.resize(img, (int(w * max_size / h), max_size),
                             interpolation=cv2.INTER_CUBIC)
        elif w > h:
            img = cv2.resize(img, (max_size, int(h * max_size / w)),
                             interpolation=cv2.INTER_CUBIC)
    h, w = img.shape
    # get keypoints, scales and locations from SIFT or another detector
    sift = cv2.xfeatures2d.SIFT_create(num_keypoints)
    keypoints = sift.detect(img, None)
    pts = np.array([kp.pt for kp in keypoints])
    scales = np.array([kp.size for kp in keypoints])
    oris = np.array([kp.angle for kp in keypoints])
    # Mirror-pad the image to PAD_TO x PAD_TO to avoid boundary effects.
    if any([s > cfg.TEST.PAD_TO for s in img.shape[:2]]):
        raise RuntimeError(
            "Image exceeds acceptable size ({}x{}), please downsample".format(
                cfg.TEST.PAD_TO, cfg.TEST.PAD_TO))
    fillHeight = cfg.TEST.PAD_TO - img.shape[0]
    fillWidth = cfg.TEST.PAD_TO - img.shape[1]
    padLeft = int(np.round(fillWidth / 2))
    padRight = int(fillWidth - padLeft)
    padUp = int(np.round(fillHeight / 2))
    padDown = int(fillHeight - padUp)
    img = np.pad(img,
                 pad_width=((padUp, padDown), (padLeft, padRight)),
                 mode='reflect')
    if verbose:
        print('-- Padding image from {}x{} to {}x{}'.format(
            h, w, img.shape[0], img.shape[1]))
    # Normalize keypoint locations to [-1, 1] in padded-image coordinates.
    kp_norm = []
    for i, p in enumerate(pts):
        _p = 2 * np.array([(p[0] + padLeft) / (cfg.TEST.PAD_TO),
                           (p[1] + padUp) / (cfg.TEST.PAD_TO)]) - 1
        kp_norm.append(_p)
    theta = [
        torch.from_numpy(np.array(kp_norm)).float().squeeze(),
        torch.from_numpy(scales).float(),
        torch.from_numpy(np.array([np.deg2rad(o) for o in oris])).float()
    ]
    # Instantiate the model
    t = time()
    model = HardNet(transform=cfg.TEST.TRANSFORMER,
                    coords=cfg.TEST.COORDS,
                    patch_size=cfg.TEST.IMAGE_SIZE,
                    scale=cfg.TEST.SCALE,
                    is_desc256=cfg.TEST.IS_DESC_256,
                    orientCorrect=cfg.TEST.ORIENT_CORRECTION)
    # Load weights
    model.load_state_dict(torch.load(logpolar_base_path + cfg.TEST.MODEL_WEIGHTS)['state_dict'])
    model.eval()
    model.to(device)
    if verbose:
        print('-- Instantiated model in {:0.2f} sec.'.format(time() - t))
    # Extract descriptors
    imgs, img_keypoints = torch.from_numpy(img).unsqueeze(0).to(device), \
                          [theta[0].to(device), theta[1].to(device), theta[2].to(device)]
    t = time()
    descriptors, patches = model({input_filename: imgs}, img_keypoints,
                                 [input_filename] * len(img_keypoints[0]))
    if verbose:
        print('-- Computed {} descriptors in {:0.2f} sec.'.format(
            descriptors.shape[0],
            time() - t))
    # Keypoints are stored in unpadded coordinates, alongside scale/angle.
    keypoints_array = np.concatenate([pts, scales[..., None], oris[..., None]],
                                     axis=1)
    t = time()
    with h5py.File(output_filename, 'w') as f:
        f['keypoints'] = keypoints_array
        f['descriptors'] = descriptors.cpu().detach().numpy()
        print('-- Saved {} descriptors in {:0.2f} sec.'.format(
            descriptors.shape[0],
            time() - t))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input',
type=str,
default=logpolar_base_path+'testImg.jpeg',
help='Input image')
parser.add_argument('--output',
type=str,
default=logpolar_base_path+'testImg.h5',
help='Output file')
parser.add_argument('--use_log_polar',
type=bool,
default=True,
help='Use log-polar models. Set to False to use '
'cartesian models instead.')
parser.add_argument('--num_keypoints',
type=int,
default=1024,
help='Number of keypoints')
parser.add_argument('--verbose',
type=bool,
default=True,
help='Set to False to suppress feedback')
config, unparsed = parser.parse_known_args()
if len(unparsed) > 0:
parser.print_usage()
else:
extract_descriptors(config.input, config.output, config.use_log_polar,
config.num_keypoints, config.verbose) | 7,015 | 34.979487 | 99 | py |
pyslam | pyslam-master/test/thirdparty/test_l2net_keras.py | import sys
# Smoke test: compute L2-Net descriptors for a batch of random 32x32 patches.
sys.path.append("../../")
import config
config.cfg.set_lib('l2net_keras')
import cv2
import numpy as np
from L2_Net import L2Net
# One of "L2Net-HP", "L2Net-HP+", "L2Net-LIB", "L2Net-LIB+", "L2Net-ND", "L2Net-ND+", "L2Net-YOS", "L2Net-YOS+",
net_name = 'L2Net-HP'
l2net = L2Net(net_name,do_tf_logging=False)
# Both branches produce random patches of shape (100, 32, 32, 1); the
# disabled branch (kept for reference) allocates the channel axis up front.
if False:
    patches = np.random.rand(100, 32, 32, 1)
else:
    patches = np.random.rand(100, 32, 32)
    patches = np.expand_dims(patches, -1)
descrs = l2net.calc_descriptors(patches)
print('done!') | 537 | 22.391304 | 113 | py |
pyslam | pyslam-master/test/thirdparty/test_hardnet_patches.py | #!/usr/bin/python3 -utt
# -*- coding: utf-8 -*-
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import time
import os
import cv2
import math
import numpy as np
hardnet_base_path='../../thirdparty/hardnet/'
class L2Norm(nn.Module):
    """L2-normalize each row of an (N, D) batch of descriptors."""

    def __init__(self):
        super(L2Norm, self).__init__()
        # guards the division against all-zero rows
        self.eps = 1e-10

    def forward(self, x):
        row_norms = torch.sqrt((x * x).sum(dim=1) + self.eps)
        # (N,) -> (N, 1) so the division broadcasts over the descriptor dim
        return x / row_norms.unsqueeze(-1).expand_as(x)
class L1Norm(nn.Module):
    """L1-normalize each row of an (N, D) batch: after forward, every row's
    absolute values sum to 1."""

    def __init__(self):
        super(L1Norm, self).__init__()
        # keeps the division finite for all-zero rows
        self.eps = 1e-10

    def forward(self, x):
        norm = torch.sum(torch.abs(x), dim=1) + self.eps
        # BUGFIX: `norm` has shape (N,) and cannot be expand_as'd to (N, D)
        # directly — the original raised a RuntimeError whenever N != D.
        # Unsqueeze to (N, 1) first, exactly as the sibling L2Norm does.
        x = x / norm.unsqueeze(-1).expand_as(x)
        return x
class HardNet(nn.Module):
    """HardNet descriptor network: maps a batch of 32x32 grayscale patches
    (N, 1, 32, 32) to L2-normalized 128-D descriptors (N, 128).
    """
    def __init__(self):
        super(HardNet, self).__init__()
        # VGG-style trunk: two stride-2 convs reduce 32x32 -> 8x8, and the
        # final kernel_size=8 conv collapses each patch to one 128-D vector.
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias = False),
            nn.BatchNorm2d(32, affine=False),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, padding=1, bias = False),
            nn.BatchNorm2d(32, affine=False),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias = False),
            nn.BatchNorm2d(64, affine=False),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1, bias = False),
            nn.BatchNorm2d(64, affine=False),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2,padding=1, bias = False),
            nn.BatchNorm2d(128, affine=False),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias = False),
            nn.BatchNorm2d(128, affine=False),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Conv2d(128, 128, kernel_size=8, bias = False),
            nn.BatchNorm2d(128, affine=False),
        )
        #self.features.apply(weights_init)
    def input_norm(self,x):
        # Per-patch whitening: subtract the patch mean and divide by the
        # patch std (computed over all pixels of each patch); detach() keeps
        # the statistics out of the gradient path.
        flat = x.view(x.size(0), -1)
        mp = torch.mean(flat, dim=1)
        sp = torch.std(flat, dim=1) + 1e-7
        return (x - mp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x)
    def forward(self, input):
        x_features = self.features(self.input_norm(input))
        x = x_features.view(x_features.size(0), -1)
        # a fresh, stateless L2Norm module normalizes the flattened output
        return L2Norm()(x)
if __name__ == '__main__':
DO_CUDA = True
try:
if len(sys.argv) > 1:
input_img_fname = sys.argv[1]
output_fname = sys.argv[2]
if len(sys.argv) > 3:
DO_CUDA = sys.argv[3] != 'cpu'
else:
input_img_fname = '../data/kitti06-12-color.png'
output_fname = 'hardnet.out.txt'
DO_CUDA = True
except:
print("Wrong input format. Try " + sys.argv[0] + " imgs/ref.png out.txt gpu")
sys.exit(1)
model_weights = hardnet_base_path + 'pretrained/train_liberty_with_aug/checkpoint_liberty_with_aug.pth'
model = HardNet()
checkpoint = torch.load(model_weights)
model.load_state_dict(checkpoint['state_dict'])
model.eval()
if DO_CUDA:
model.cuda()
print('Extracting on GPU')
else:
print('Extracting on CPU')
model = model.cpu()
image = cv2.imread(input_img_fname,0)
h,w = image.shape
print(h,w)
patch_size = 32
nh_patches = math.ceil(h/patch_size)
nw_patches = math.ceil(w/patch_size)
n_patches = nw_patches * nh_patches
print('Amount of patches: {} ({}x{})'.format(n_patches,nh_patches,nw_patches))
t = time.time()
patches = np.ndarray((n_patches, 1, 32, 32), dtype=np.float32)
for i in range(nh_patches):
for j in range(nw_patches):
patch = image[i*patch_size: (i+1)*patch_size, j*patch_size: (j+1)*patch_size]
patches[i,0,:,:] = cv2.resize(patch,(32,32)) / 255.
patches -= 0.443728476019
patches /= 0.20197947209
bs = 128
n_batches = int(n_patches / bs) + 1
t = time.time()
descriptors_for_net = np.zeros((len(patches), 128))
for i in range(0, len(patches), bs):
data_a = patches[i: i + bs, :, :, :].astype(np.float32)
data_a = torch.from_numpy(data_a)
if DO_CUDA:
data_a = data_a.cuda()
data_a = Variable(data_a)
# compute output
with torch.no_grad():
out_a = model(data_a)
descriptors_for_net[i: i + bs,:] = out_a.data.cpu().numpy().reshape(-1, 128)
print(descriptors_for_net.shape)
assert n_patches == descriptors_for_net.shape[0]
et = time.time() - t
print('processing', et, et/max(float(n_patches),1), ' per patch')
np.savetxt(output_fname, descriptors_for_net, delimiter=' ', fmt='%10.5f') | 4,935 | 33.517483 | 155 | py |
pyslam | pyslam-master/test/thirdparty/test_disk.py | # adapted from https://github.com/cvlab-epfl/disk/blob/master/detect.py
import torch, os, argparse, h5py, warnings, imageio
import numpy as np
from tqdm import tqdm
import sys
sys.path.append("../../")
import config
config.cfg.set_lib('disk')
config.cfg.set_lib('torch-dimcheck')
config.cfg.set_lib('torch-localize')
config.cfg.set_lib('unets')
import multiprocessing as mp
import torch, h5py, imageio, os, argparse
import numpy as np
import torch.nn.functional as F
from functools import partial
from torch.utils.data import DataLoader
from tqdm import tqdm
from torch_dimcheck import dimchecked
from disk import DISK, Features
class Image:
    """A (C, H, W) bitmap plus its (extensionless) filename.

    Remembers the pre-resize shape (`orig_shape`) so that keypoints detected
    on a resized/padded bitmap can be mapped back to original coordinates.
    """
    def __init__(self, bitmap: ['C', 'H', 'W'], fname: str, orig_shape=None):
        self.bitmap = bitmap
        self.fname = fname
        # orig_shape defaults to this bitmap's own spatial size (H, W)
        if orig_shape is None:
            self.orig_shape = self.bitmap.shape[1:]
        else:
            self.orig_shape = orig_shape
    def resize_to(self, shape):
        # Aspect-preserving interpolation followed by zero-padding up to
        # `shape`; the current bitmap size is recorded as the new orig_shape.
        return Image(
            self._pad(self._interpolate(self.bitmap, shape), shape),
            self.fname,
            orig_shape=self.bitmap.shape[1:],
        )
    @dimchecked
    def to_image_coord(self, xys: [2, 'N']) -> ([2, 'N'], ['N']):
        # Map keypoints from resized ("crop") space back to original-image
        # pixels; also return a mask of points that land inside the original.
        f, _size = self._compute_interpolation_size(self.bitmap.shape[1:])
        scaled = xys / f
        h, w = self.orig_shape
        x, y = scaled
        mask = (0 <= x) & (x < w) & (0 <= y) & (y < h)
        return scaled, mask
    def _compute_interpolation_size(self, shape):
        # f is the single scale factor that fits orig_shape inside `shape`
        # while preserving aspect ratio; new_size is the resulting (H, W).
        x_factor = self.orig_shape[0] / shape[0]
        y_factor = self.orig_shape[1] / shape[1]
        f = 1 / max(x_factor, y_factor)
        if x_factor > y_factor:
            new_size = (shape[0], int(f * self.orig_shape[1]))
        else:
            new_size = (int(f * self.orig_shape[0]), shape[1])
        return f, new_size
    @dimchecked
    def _interpolate(self, image: ['C', 'H', 'W'], shape) -> ['C', 'h', 'w']:
        # bilinear resize via a temporary batch dimension
        _f, size = self._compute_interpolation_size(shape)
        return F.interpolate(
            image.unsqueeze(0),
            size=size,
            mode='bilinear',
            align_corners=False,
        ).squeeze(0)
    @dimchecked
    def _pad(self, image: ['C', 'H', 'W'], shape) -> ['C', 'h', 'w']:
        # zero-pad on the bottom/right only, so coordinates keep their origin
        x_pad = shape[0] - image.shape[1]
        y_pad = shape[1] - image.shape[2]
        if x_pad < 0 or y_pad < 0:
            raise ValueError("Attempting to pad by negative value")
        return F.pad(image, (0, y_pad, 0, x_pad))
class SceneDataset:
    """Torch-style dataset over all images in a directory that match the
    extension given by the module-global `args.image_extension`.

    NOTE(review): reads the global `args` parsed in __main__, so the class is
    only usable after argument parsing.
    """
    def __init__(self, image_path, crop_size=(None, None)):
        self.image_path = image_path
        self.crop_size = crop_size
        self.names = [p for p in os.listdir(image_path) \
                      if p.endswith(args.image_extension)]
        print('image names:',self.names)
    def __len__(self):
        return len(self.names)
    def __getitem__(self, ix):
        # load -> float32 -> (C, H, W) bitmap scaled to [0, 1]
        name = self.names[ix]
        path = os.path.join(self.image_path, name)
        img = np.ascontiguousarray(imageio.imread(path))
        tensor = torch.from_numpy(img).to(torch.float32)
        if len(tensor.shape) == 2: # some images may be grayscale
            tensor = tensor.unsqueeze(-1).expand(-1, -1, 3)
        bitmap = tensor.permute(2, 0, 1) / 255.
        extensionless_fname = os.path.splitext(name)[0]
        image = Image(bitmap, extensionless_fname)
        # optionally resize/pad to a fixed (height, width)
        if self.crop_size != (None, None):
            image = image.resize_to(self.crop_size)
        return image
    @staticmethod
    def collate_fn(images):
        # batch the bitmaps but keep the Image wrappers for coordinate mapping
        bitmaps = torch.stack([im.bitmap for im in images], dim=0)
        return bitmaps, images
def extract(dataset, save_path):
    """Run DISK feature extraction over `dataset`, intended to write
    keypoints/descriptors (and optionally scores) HDF5 files in `save_path`.

    Depends on module-level globals set in __main__: args, model, DEV, CPU.
    NOTE(review): the create_dataset calls below are commented out, so the
    HDF5 files are created empty and never explicitly closed, and the
    function returns None.
    """
    dataloader = DataLoader(
        dataset,
        batch_size=1,
        pin_memory=True,
        collate_fn=dataset.collate_fn,
        num_workers=4,
    )
    # choose NMS-based keypoint selection or training-time grid sampling
    if args.mode == 'nms':
        extract = partial(
            model.features,
            kind='nms',
            window_size=args.window,
            cutoff=0.,
            n=args.n
        )
    else:
        extract = partial(model.features, kind='rng')
    # NOTE(review): the local name `extract` shadows this function itself.
    os.makedirs(os.path.join(save_path), exist_ok=True)
    keypoint_h5 = h5py.File(os.path.join(save_path, 'keypoints.h5'), 'w')
    descriptor_h5 = h5py.File(os.path.join(save_path, 'descriptors.h5'), 'w')
    if args.detection_scores:
        score_h5 = h5py.File(os.path.join(save_path, 'scores.h5'), 'w')
    print('loop:')
    pbar = tqdm(dataloader)
    for bitmaps, images in pbar:
        print('bitmaps: ', bitmaps)
        print('images: ', images)
        bitmaps = bitmaps.to(DEV, non_blocking=True)
        with torch.no_grad():
            try:
                batched_features = extract(bitmaps)
            except RuntimeError as e:
                # re-raise U-Net size failures with an actionable message
                if 'U-Net failed' in str(e):
                    msg = ('Please use input size which is multiple of 16 (or '
                           'adjust the --height and --width flags to let this '
                           'script rescale it automatically). This is because '
                           'we internally use a U-Net with 4 downsampling '
                           'steps, each by a factor of 2, therefore 2^4=16.')
                    raise RuntimeError(msg) from e
                else:
                    raise
        for features, image in zip(batched_features.flat, images):
            features = features.to(CPU)
            kps_crop_space = features.kp.T
            # map keypoints from network-input space back to image pixels and
            # drop the ones that fall outside the original image
            kps_img_space, mask = image.to_image_coord(kps_crop_space)
            keypoints = kps_img_space.numpy().T[mask]
            descriptors = features.desc.numpy()[mask]
            scores = features.kp_logp.numpy()[mask]
            # sort by detection score, best first
            order = np.argsort(scores)[::-1]
            keypoints = keypoints[order]
            descriptors = descriptors[order]
            scores = scores[order]
            assert descriptors.shape[1] == args.desc_dim
            assert keypoints.shape[1] == 2
            if args.f16:
                descriptors = descriptors.astype(np.float16)
            # keypoint_h5.create_dataset(image.fname, data=keypoints)
            # descriptor_h5.create_dataset(image.fname, data=descriptors)
            # if args.detection_scores:
            #     score_h5.create_dataset(image.fname, data=scores)
            pbar.set_postfix(n=keypoints.shape[0])
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=(
        "Script for detection and description (but not matching) of keypoints. "
        "It processes all images with extension given by `--image-extension` found "
        "in `image-path` directory. Images are resized to `--height` x `--width` "
        "for internal processing (padding them if necessary) and the output "
        "coordinates are then transformed back to original image size."),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        '--height', default=None, type=int,
        help='rescaled height (px). If unspecified, image is not resized in height dimension'
    )
    parser.add_argument(
        '--width', default=None, type=int,
        help='rescaled width (px). If unspecified, image is not resized in width dimension'
    )
    parser.add_argument(
        '--image-extension', default='ppm', type=str,
        help='This script ill process all files which match `image-path/*.{--image-extension}`'
    )
    parser.add_argument(
        '--f16', action='store_true',
        help='Store descriptors in fp16 (half precision) format'
    )
    parser.add_argument('--window', type=int, default=5, help='NMS window size')
    parser.add_argument(
        '--n', type=int, default=None,
        help='Maximum number of features to extract. If unspecified, the number is not limited'
    )
    parser.add_argument(
        '--desc-dim', type=int, default=128,
        help='descriptor dimension. Needs to match the checkpoint value.'
    )
    parser.add_argument(
        '--mode', choices=['nms', 'rng'], default='nms',
        help=('Whether to extract features using the non-maxima suppresion mode or '
              'through training-time grid sampling technique')
    )
    # default checkpoint location, resolved relative to this script
    default_model_path = os.path.split(os.path.abspath(__file__))[0] + '/../../thirdparty/disk/depth-save.pth'
    parser.add_argument(
        '--model_path', type=str, default=default_model_path,
        help="Path to the model's .pth save file"
    )
    parser.add_argument('--detection-scores', action='store_true')
    parser.add_argument(
        '--h5_path',
        default='./out_disk',
        help=("Directory where keypoints.h5 and descriptors.h5 will be stored. This"
              " will be created if it doesn't already exist.")
    )
    parser.add_argument(
        '--image_path',
        default='../data/graf',
        help="Directory with images to be processed."
    )
    args = parser.parse_args()
    DEV = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    CPU = torch.device('cpu')
    dataset = SceneDataset(args.image_path, crop_size=(args.height, args.width))
    # load the checkpoint on CPU first; the model is moved to DEV below
    state_dict = torch.load(args.model_path, map_location='cpu')
    # compatibility with older model saves which used the 'extractor' name
    if 'extractor' in state_dict:
        weights = state_dict['extractor']
    elif 'disk' in state_dict:
        weights = state_dict['disk']
    else:
        raise KeyError('Incompatible weight file!')
    model = DISK(window=8, desc_dim=args.desc_dim)
    model.load_state_dict(weights)
    model = model.to(DEV)
    described_samples = extract(dataset, args.h5_path)
| 9,680 | 33.575 | 110 | py |
pyslam | pyslam-master/test/thirdparty/test_d2net.py | # adapted from https://github.com/mihaidusmanu/d2-net/blob/master/extract_features.py
import sys
sys.path.append("../../")
import config
config.cfg.set_lib('d2net')
import os
import argparse
import numpy as np
import imageio
import cv2
import torch
from tqdm import tqdm
import scipy
import scipy.io
import scipy.misc
from lib.model_test import D2Net
from lib.utils import preprocess_image
from lib.pyramid import process_multiscale
d2net_base_path='../../thirdparty/d2net/'
# CUDA
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# Argument parsing
parser = argparse.ArgumentParser(description='Feature extraction script')
# parser.add_argument(
#     '--image_list_file', type=str, default='../data/kitti06-12-color.png', #required=True,
#     help='path to a file containing a list of images to process'
# )
parser.add_argument(
    '--image_file', type=str, default='../data/kitti06-12-color.png', #required=True,
    help='img file to process'
)
parser.add_argument(
    '--preprocessing', type=str, default='caffe',
    help='image preprocessing (caffe or torch)'
)
parser.add_argument(
    '--model_file', type=str, default=d2net_base_path + 'models/d2_tf.pth',
    help='path to the full model'
)
parser.add_argument(
    '--max_edge', type=int, default=1600,
    help='maximum image size at network input'
)
parser.add_argument(
    '--max_sum_edges', type=int, default=2800,
    help='maximum sum of image sizes at network input'
)
parser.add_argument(
    '--output_extension', type=str, default='.d2-net',
    help='extension for the output'
)
parser.add_argument(
    '--output_type', type=str, default='npz',
    help='output file type (npz or mat)'
)
parser.add_argument(
    '--multiscale', dest='multiscale', action='store_true',
    help='extract multiscale features'
)
parser.set_defaults(multiscale=False)
parser.add_argument(
    '--no-relu', dest='use_relu', action='store_false',
    help='remove ReLU after the dense feature extraction module'
)
parser.set_defaults(use_relu=True)
args = parser.parse_args()
print(args)
# Creating CNN model: D2-Net with pretrained weights from --model_file,
# optionally without the final ReLU (--no-relu).
model = D2Net(
    model_file=args.model_file,
    use_relu=args.use_relu,
    use_cuda=use_cuda
)
# Process the file
# with open(args.image_list_file, 'r') as f:
# lines = f.readlines()
# for line in tqdm(lines, total=len(lines)):
# path = line.strip()
if True:
#image = imageio.imread(path)
path = os.getcwd()
#image = imageio.imread(args.image_file)
image = cv2.imread(args.image_file)
print('image.shape',image.shape)
if len(image.shape) == 2:
image = image[:, :, np.newaxis]
image = np.repeat(image, 3, -1)
# TODO: switch to PIL.Image due to deprecation of scipy.misc.imresize.
resized_image = image
if max(resized_image.shape) > args.max_edge:
resized_image = scipy.misc.imresize(
resized_image,
args.max_edge / max(resized_image.shape)
).astype('float')
if sum(resized_image.shape[: 2]) > args.max_sum_edges:
resized_image = scipy.misc.imresize(
resized_image,
args.max_sum_edges / sum(resized_image.shape[: 2])
).astype('float')
fact_i = image.shape[0] / resized_image.shape[0]
fact_j = image.shape[1] / resized_image.shape[1]
input_image = preprocess_image(
resized_image,
preprocessing=args.preprocessing
)
with torch.no_grad():
if args.multiscale:
keypoints, scores, descriptors = process_multiscale(
torch.tensor(
input_image[np.newaxis, :, :, :].astype(np.float32),
device=device
),
model
)
else:
keypoints, scores, descriptors = process_multiscale(
torch.tensor(
input_image[np.newaxis, :, :, :].astype(np.float32),
device=device
),
model,
scales=[1]
)
# Input image coordinates
keypoints[:, 0] *= fact_i
keypoints[:, 1] *= fact_j
# i, j -> u, v
keypoints = keypoints[:, [1, 0, 2]]
print('#features:',len(keypoints))
print('des[0]',descriptors[0])
if args.output_type == 'npz':
with open(path + args.output_extension, 'wb') as output_file:
np.savez(
output_file,
keypoints=keypoints,
scores=scores,
descriptors=descriptors
)
elif args.output_type == 'mat':
with open(path + args.output_extension, 'wb') as output_file:
scipy.io.savemat(
output_file,
{
'keypoints': keypoints,
'scores': scores,
'descriptors': descriptors
}
)
else:
raise ValueError('Unknown output type.') | 4,953 | 26.370166 | 92 | py |
pyslam | pyslam-master/test/thirdparty/test_keynet.py | # from https://raw.githubusercontent.com/axelBarroso/Key.Net/master/extract_multiscale_features.py
import sys
sys.path.append("../../")
import config
config.cfg.set_lib('keynet')
import warnings # to disable tensorflow-numpy warnings: from https://github.com/tensorflow/tensorflow/issues/30427
warnings.filterwarnings('ignore', category=FutureWarning)
import os, sys, cv2
#sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from os import path, mkdir
import argparse
import keyNet.aux.tools as aux
from skimage.transform import pyramid_gaussian
import HSequences_bench.tools.geometry_tools as geo_tools
import HSequences_bench.tools.repeatability_tools as rep_tools
from keyNet.model.keynet_architecture import *
import keyNet.aux.desc_aux_function as loss_desc
from keyNet.model.hardnet_pytorch import *
from keyNet.datasets.dataset_utils import read_bw_image
import torch
keynet_base_path='../../thirdparty/keynet/'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def check_directory(dir):
    """Create the single directory *dir* unless it already exists."""
    if path.isdir(dir):
        return
    mkdir(dir)
def create_result_dir(path):
    """Create every intermediate directory of *path*.

    The last '/'-separated component is treated as a file name and is
    therefore not created as a directory.
    """
    parts = path.split('/')
    last = len(parts) - 1
    prefix = ''
    for idx, part in enumerate(parts):
        prefix += part + '/'
        if idx != last:
            check_directory(prefix)
def extract_multiscale_features():
    """Extract multi-scale Key.Net keypoints and HardNet descriptors.

    Parses its own CLI flags, builds the TF1 Key.Net detector graph plus a
    PyTorch HardNet descriptor network, runs them on a hard-coded test image
    and saves the keypoints (.kpt) and descriptors (.dsc) as numpy files
    under --results-dir/<network-version>/.
    """
    parser = argparse.ArgumentParser(description='HSequences Extract Features')
    # parser.add_argument('--list-images', type=str, help='File containing the image paths for extracting features.',
    #                     required=True)
    parser.add_argument('--results-dir', type=str, default='extracted_features/',
                        help='The output path to save the extracted keypoint.')
    parser.add_argument('--network-version', type=str, default='KeyNet_default',
                        help='The Key.Net network version name')
    parser.add_argument('--checkpoint-det-dir', type=str, default=keynet_base_path + 'keyNet/pretrained_nets/KeyNet_default',
                        help='The path to the checkpoint file to load the detector weights.')
    parser.add_argument('--pytorch-hardnet-dir', type=str, default=keynet_base_path + 'keyNet/pretrained_nets/HardNet++.pth',
                        help='The path to the checkpoint file to load the HardNet descriptor weights.')
    # Detector Settings
    parser.add_argument('--num-filters', type=int, default=8,
                        help='The number of filters in each learnable block.')
    parser.add_argument('--num-learnable-blocks', type=int, default=3,
                        help='The number of learnable blocks after handcrafted block.')
    parser.add_argument('--num-levels-within-net', type=int, default=3,
                        help='The number of pyramid levels inside the architecture.')
    parser.add_argument('--factor-scaling-pyramid', type=float, default=1.2,
                        help='The scale factor between the multi-scale pyramid levels in the architecture.')
    parser.add_argument('--conv-kernel-size', type=int, default=5,
                        help='The size of the convolutional filters in each of the learnable blocks.')
    # Multi-Scale Extractor Settings
    parser.add_argument('--extract-MS', type=bool, default=True,
                        help='Set to True if you want to extract multi-scale features.')
    parser.add_argument('--num-points', type=int, default=1500,
                        help='The number of desired features to extract.')
    parser.add_argument('--nms-size', type=int, default=15,
                        help='The NMS size for computing the validation repeatability.')
    parser.add_argument('--border-size', type=int, default=15,
                        help='The number of pixels to remove from the borders to compute the repeatability.')
    parser.add_argument('--order-coord', type=str, default='xysr',
                        help='The coordinate order that follows the extracted points. Use yxsr or xysr.')
    parser.add_argument('--random-seed', type=int, default=12345,
                        help='The random seed value for TensorFlow and Numpy.')
    parser.add_argument('--pyramid_levels', type=int, default=5,
                        help='The number of downsample levels in the pyramid.')
    parser.add_argument('--upsampled-levels', type=int, default=1,
                        help='The number of upsample levels in the pyramid.')
    parser.add_argument('--scale-factor-levels', type=float, default=np.sqrt(2),
                        help='The scale factor between the pyramid levels.')
    parser.add_argument('--scale-factor', type=float, default=2.,
                        help='The scale factor to extract patches before descriptor.')
    # GPU Settings
    parser.add_argument('--gpu-memory-fraction', type=float, default=0.3,
                        help='The fraction of GPU used by the script.')
    parser.add_argument('--gpu-visible-devices', type=str, default="0",
                        help='Set CUDA_VISIBLE_DEVICES variable.')
    args = parser.parse_known_args()[0]
    # remove verbose bits from tf
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    tf.logging.set_verbosity(tf.logging.ERROR)
    # Set CUDA GPU environment
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_visible_devices
    version_network_name = args.network_version
    # single-scale mode: disable both the down- and up-sampled pyramid levels
    if not args.extract_MS:
        args.pyramid_levels = 0
        args.upsampled_levels = 0
    print('Extract features for : ' + version_network_name)
    aux.check_directory(args.results_dir)
    aux.check_directory(os.path.join(args.results_dir, version_network_name))
    def extract_features(image):
        """Detect keypoints on a Gaussian pyramid of *image* and describe
        them with HardNet; returns (im_pts, descriptors).

        Closes over the TF placeholders/ops and the torch model defined
        below, so it must be called inside the active session.
        """
        pyramid = pyramid_gaussian(image, max_layer=args.pyramid_levels, downscale=args.scale_factor_levels)
        # score_maps['map_k'] holds the detector response at pyramid level k
        # (1..upsampled_levels are the up-scaled levels, the rest down-scaled).
        score_maps = {}
        for (j, resized) in enumerate(pyramid):
            im = resized.reshape(1, resized.shape[0], resized.shape[1], 1)
            feed_dict = {
                input_network: im,
                phase_train: False,
                dimension_image: np.array([1, im.shape[1], im.shape[2]], dtype=np.int32),
            }
            im_scores = sess.run(maps, feed_dict=feed_dict)
            im_scores = geo_tools.remove_borders(im_scores, borders=args.border_size)
            score_maps['map_' + str(j + 1 + args.upsampled_levels)] = im_scores[0, :, :, 0]
        if args.upsampled_levels:
            for j in range(args.upsampled_levels):
                factor = args.scale_factor_levels ** (args.upsampled_levels - j)
                up_image = cv2.resize(image, (0, 0), fx=factor, fy=factor)
                im = np.reshape(up_image, (1, up_image.shape[0], up_image.shape[1], 1))
                feed_dict = {
                    input_network: im,
                    phase_train: False,
                    dimension_image: np.array([1, im.shape[1], im.shape[2]], dtype=np.int32),
                }
                im_scores = sess.run(maps, feed_dict=feed_dict)
                im_scores = geo_tools.remove_borders(im_scores, borders=args.border_size)
                score_maps['map_' + str(j + 1)] = im_scores[0, :, :, 0]
        im_pts = []
        # per level: NMS on the score map, pick the level's share of points,
        # then map the coordinates back to the original image with the
        # inverse scaling homography.
        for idx_level in range(levels):
            scale_value = (args.scale_factor_levels ** (idx_level - args.upsampled_levels))
            scale_factor = 1. / scale_value
            h_scale = np.asarray([[scale_factor, 0., 0.], [0., scale_factor, 0.], [0., 0., 1.]])
            h_scale_inv = np.linalg.inv(h_scale)
            h_scale_inv = h_scale_inv / h_scale_inv[2, 2]
            num_points_level = point_level[idx_level]
            if idx_level > 0:
                # carry over the unfilled budget from the previous levels
                res_points = int(np.asarray([point_level[a] for a in range(0, idx_level + 1)]).sum() - len(im_pts))
                num_points_level = res_points
            im_scores = rep_tools.apply_nms(score_maps['map_' + str(idx_level + 1)], args.nms_size)
            im_pts_tmp = geo_tools.get_point_coordinates(im_scores, num_points=num_points_level, order_coord='xysr')
            im_pts_tmp = geo_tools.apply_homography_to_points(im_pts_tmp, h_scale_inv)
            if not idx_level:
                im_pts = im_pts_tmp
            else:
                im_pts = np.concatenate((im_pts, im_pts_tmp), axis=0)
        if args.order_coord == 'yxsr':
            im_pts = np.asarray(list(map(lambda x: [x[1], x[0], x[2], x[3]], im_pts)))
        # sort by response (column 3) descending and keep the best num_points
        im_pts = im_pts[(-1 * im_pts[:, 3]).argsort()]
        im_pts = im_pts[:args.num_points]
        # Extract descriptor from features
        descriptors = []
        im = image.reshape(1, image.shape[0], image.shape[1], 1)
        # describe the keypoints in batches of 250 patches
        for idx_desc_batch in range(int(len(im_pts) / 250 + 1)):
            points_batch = im_pts[idx_desc_batch * 250: (idx_desc_batch + 1) * 250]
            if not len(points_batch):
                break
            feed_dict = {
                input_network: im,
                phase_train: False,
                kpts_coord: points_batch[:, :2],
                kpts_scale: args.scale_factor * points_batch[:, 2],
                kpts_batch: np.zeros(len(points_batch)),
                dimension_image: np.array([1, im.shape[1], im.shape[2]], dtype=np.int32),
            }
            patch_batch = sess.run(input_patches, feed_dict=feed_dict)
            patch_batch = np.reshape(patch_batch, (patch_batch.shape[0], 1, 32, 32))
            data_a = torch.from_numpy(patch_batch)
            data_a = data_a.cuda()
            data_a = Variable(data_a)
            with torch.no_grad():
                out_a = model(data_a)
            desc_batch = out_a.data.cpu().numpy().reshape(-1, 128)
            if idx_desc_batch == 0:
                descriptors = desc_batch
            else:
                descriptors = np.concatenate([descriptors, desc_batch], axis=0)
        return im_pts, descriptors
    # Build the TF1 graph: input placeholders, the Key.Net detector and the
    # descriptor patch extractor.
    with tf.Graph().as_default():
        tf.set_random_seed(args.random_seed)
        with tf.name_scope('inputs'):
            # Define the input tensor shape
            tensor_input_shape = (None, None, None, 1)
            input_network = tf.placeholder(dtype=tf.float32, shape=tensor_input_shape, name='input_network')
            dimension_image = tf.placeholder(dtype=tf.int32, shape=(3,), name='dimension_image')
            kpts_coord = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='kpts_coord')
            kpts_batch = tf.placeholder(dtype=tf.int32, shape=(None,), name='kpts_batch')
            kpts_scale = tf.placeholder(dtype=tf.float32, name='kpts_scale')
            phase_train = tf.placeholder(tf.bool, name='phase_train')
        with tf.name_scope('model_deep_detector'):
            deep_architecture = keynet(args)
            output_network = deep_architecture.model(input_network, phase_train, dimension_image, reuse=False)
            maps = tf.nn.relu(output_network['output'])
        # Extract Patches from inputs:
        input_patches = loss_desc.build_patch_extraction(kpts_coord, kpts_batch, input_network, kpts_scale=kpts_scale)
        # Define Pytorch HardNet
        # NOTE(review): the descriptor is moved to CUDA unconditionally, so a
        # GPU is required to run this script.
        model = HardNet()
        checkpoint = torch.load(args.pytorch_hardnet_dir)
        model.load_state_dict(checkpoint['state_dict'])
        model.eval()
        model.cuda()
        # Define variables
        detect_var = [v for v in tf.trainable_variables(scope='model_deep_detector')]
        if os.listdir(args.checkpoint_det_dir):
            init_assign_op_det, init_feed_dict_det = tf_contrib.framework.assign_from_checkpoint(
                tf.train.latest_checkpoint(args.checkpoint_det_dir), detect_var)
        # Distribute the total feature budget across pyramid levels
        # (proportionally more points at finer scales).
        point_level = []
        tmp = 0.0
        factor_points = (args.scale_factor_levels ** 2)
        levels = args.pyramid_levels + args.upsampled_levels + 1
        for idx_level in range(levels):
            tmp += factor_points ** (-1 * (idx_level - args.upsampled_levels))
            point_level.append(args.num_points * factor_points ** (-1 * (idx_level - args.upsampled_levels)))
        point_level = np.asarray(list(map(lambda x: int(x / tmp), point_level)))
        # GPU Usage
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = args.gpu_memory_fraction
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            if os.listdir(args.checkpoint_det_dir):
                sess.run(init_assign_op_det, init_feed_dict_det)
            # # read image and extract keypoints and descriptors
            # f = open(args.list_images, "r")
            # for path_to_image in f:
            #     path = path_to_image.split('\n')[0]
            #     if not os.path.exists(path):
            #         print('[ERROR]: File {0} not found!'.format(path))
            #         return
            #     create_result_dir(os.path.join(args.results_dir, version_network_name, path))
            # The list-driven loop above is disabled; process a single
            # hard-coded test image instead.
            if True:
                path = 'kitti06-12-color.png'
                #im = read_bw_image(path)
                im = cv2.imread('../data/kitti06-12-color.png',cv2.IMREAD_GRAYSCALE)
                im = im.astype(float) / im.max()
                im_pts, descriptors = extract_features(im)
                print('# extracted points:',len(im_pts))
                file_name = os.path.join(args.results_dir, version_network_name, path)+'.kpt'
                np.save(file_name, im_pts)
                file_name = os.path.join(args.results_dir, version_network_name, path)+'.dsc'
                np.save(file_name, descriptors)
if __name__ == '__main__':
    # Entry point: run Key.Net multi-scale detection + HardNet description.
    extract_multiscale_features()
| 13,674 | 40.189759 | 125 | py |
pyslam | pyslam-master/test/thirdparty/test_r2d2.py | # Copyright 2019-present NAVER Corp.
# CC BY-NC-SA 3.0
# Available only for non-commercial use
# from https://raw.githubusercontent.com/naver/r2d2/master/extract.py
import sys
sys.path.append("../../")
import config
config.cfg.set_lib('r2d2')
import os, pdb
from PIL import Image
import numpy as np
import torch
import cv2
from tools import common
from tools.dataloader import norm_RGB
from nets.patchnet import *
r2d2_base_path='../../thirdparty/r2d2'
r2d2_default_model_path=r2d2_base_path+'/models/r2d2_WASF_N16.pt'
def load_network(model_fn):
    """Instantiate the R2D2 network named in the checkpoint and load its weights.

    NOTE(review): the architecture is built with eval() on a string stored in
    the checkpoint -- only load checkpoint files from trusted sources.
    """
    checkpoint = torch.load(model_fn)
    print("\n>> Creating net = " + checkpoint['net'])
    # the checkpoint stores the constructor expression as a string
    net = eval(checkpoint['net'])
    nb_of_weights = common.model_size(net)
    print(f" ( Model size: {nb_of_weights/1000:.0f}K parameters )")
    # initialization: strip the DataParallel 'module.' prefix before loading
    weights = checkpoint['state_dict']
    net.load_state_dict({k.replace('module.',''):v for k,v in weights.items()})
    return net.eval()
class NonMaxSuppression (torch.nn.Module):
    """Keypoint selector: keep pixels that are 3x3 local maxima of the
    repeatability map and pass both the reliability and repeatability
    thresholds."""

    def __init__(self, rel_thr=0.7, rep_thr=0.7):
        nn.Module.__init__(self)
        # 3x3 max-pool with stride 1 and padding 1 preserves the spatial size,
        # so a pixel equal to its pooled value is a local maximum.
        self.max_filter = torch.nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.rel_thr = rel_thr
        self.rep_thr = rep_thr

    def forward(self, reliability, repeatability, **kw):
        assert len(reliability) == len(repeatability) == 1
        rel, rep = reliability[0], repeatability[0]
        # local maxima of the repeatability map
        is_local_max = rep == self.max_filter(rep)
        # drop weak peaks below either threshold
        keep = is_local_max & (rep >= self.rep_thr) & (rel >= self.rel_thr)
        # nonzero() yields (n, 4) indices for the (B, C, H, W) mask; after
        # transposing, rows 2:4 are the spatial (y, x) coordinates.
        return keep.nonzero().t()[2:4]
def extract_multiscale( net, img, detector, scale_f=2**0.25,
                        min_scale=0.0, max_scale=1,
                        min_size=256, max_size=1024,
                        verbose=False):
    """Run the R2D2 net on progressively down-scaled copies of *img* and pool
    the detections from every scale.

    img: (1, 3, H, W) tensor; detector: NMS module applied to the net outputs.
    Returns (XYS, D, scores): keypoints as (x, y, scale) rows in original
    image coordinates, their descriptors, and reliability*repeatability scores.
    """
    old_bm = torch.backends.cudnn.benchmark
    torch.backends.cudnn.benchmark = False # speedup
    # extract keypoints at multiple scales
    B, three, H, W = img.shape
    assert B == 1 and three == 3, "should be a batch with a single RGB image"
    assert max_scale <= 1
    s = 1.0 # current scale factor
    X,Y,S,C,Q,D = [],[],[],[],[],[]
    # iterate until the image becomes smaller than min_size or min_scale;
    # the +/-0.001 guards against float rounding at the loop bounds
    while s+0.001 >= max(min_scale, min_size / max(H,W)):
        if s-0.001 <= min(max_scale, max_size / max(H,W)):
            nh, nw = img.shape[2:]
            if verbose: print(f"extracting at scale x{s:.02f} = {nw:4d}x{nh:3d}")
            # extract descriptors
            with torch.no_grad():
                res = net(imgs=[img])
            # get output and reliability map
            descriptors = res['descriptors'][0]
            reliability = res['reliability'][0]
            repeatability = res['repeatability'][0]
            # normalize the reliability for nms
            # extract maxima and descs
            y,x = detector(**res) # nms
            c = reliability[0,0,y,x]
            q = repeatability[0,0,y,x]
            d = descriptors[0,:,y,x].t()
            n = d.shape[0]
            # accumulate multiple scales, mapping coords back to the
            # original resolution
            X.append(x.float() * W/nw)
            Y.append(y.float() * H/nh)
            S.append((32/s) * torch.ones(n, dtype=torch.float32, device=d.device))
            C.append(c)
            Q.append(q)
            D.append(d)
        s /= scale_f
        # down-scale the image for next iteration
        nh, nw = round(H*s), round(W*s)
        img = F.interpolate(img, (nh,nw), mode='bilinear', align_corners=False)
    # restore value
    torch.backends.cudnn.benchmark = old_bm
    Y = torch.cat(Y)
    X = torch.cat(X)
    S = torch.cat(S) # scale
    scores = torch.cat(C) * torch.cat(Q) # scores = reliability * repeatability
    XYS = torch.stack([X,Y,S], dim=-1)
    D = torch.cat(D)
    return XYS, D, scores
def extract_keypoints(args):
    """Extract R2D2 keypoints/descriptors for a hard-coded test image and
    save them as a .npz archive named <image>.<tag>."""
    iscuda = common.torch_set_gpu(args.gpu)
    # load the network...
    net = load_network(args.model)
    if iscuda: net = net.cuda()
    # create the non-maxima detector
    detector = NonMaxSuppression(
        rel_thr = args.reliability_thr,
        rep_thr = args.repeatability_thr)
    # while args.images:
    #     img_path = args.images.pop(0)
    #     if img_path.endswith('.txt'):
    #         args.images = open(img_path).read().splitlines() + args.images
    #         continue
    #     print(f"\nExtracting features for {img_path}")
    #     img = Image.open(img_path).convert('RGB')
    # The list-driven loop above is disabled; a single test image is used.
    if True:
        img_path='kitti06-12-color.png'
        img = cv2.imread('../data/kitti06-12-color.png')
        #W, H = img.size
        H, W = img.shape[:2]
        img = norm_RGB(img)[None]
        if iscuda: img = img.cuda()
        # extract keypoints/descriptors for a single image
        xys, desc, scores = extract_multiscale(net, img, detector,
            scale_f = args.scale_f,
            min_scale = args.min_scale,
            max_scale = args.max_scale,
            min_size = args.min_size,
            max_size = args.max_size,
            verbose = True)
        xys = xys.cpu().numpy()
        desc = desc.cpu().numpy()
        scores = scores.cpu().numpy()
        # keep the top-k highest-scoring points ('or None' keeps all when top_k is 0)
        idxs = scores.argsort()[-args.top_k or None:]
        outpath = img_path + '.' + args.tag
        print(f"Saving {len(idxs)} keypoints to {outpath}")
        np.savez(open(outpath,'wb'),
            imsize = (W,H),
            keypoints = xys[idxs],
            descriptors = desc[idxs],
            scores = scores[idxs])
if __name__ == '__main__':
    import argparse
    # Standalone CLI: extraction options mirror extract_multiscale's defaults.
    parser = argparse.ArgumentParser("Extract keypoints for a given image")
    parser.add_argument("--model", type=str, default=r2d2_default_model_path, help='model path')
    #parser.add_argument("--images", type=str, required=True, nargs='+', help='images / list')
    parser.add_argument("--tag", type=str, default='r2d2', help='output file tag')
    parser.add_argument("--top-k", type=int, default=5000, help='number of keypoints')
    parser.add_argument("--scale-f", type=float, default=2**0.25)
    parser.add_argument("--min-size", type=int, default=256)
    parser.add_argument("--max-size", type=int, default=1024)
    parser.add_argument("--min-scale", type=float, default=0)
    parser.add_argument("--max-scale", type=float, default=1)
    parser.add_argument("--reliability-thr", type=float, default=0.7)
    parser.add_argument("--repeatability-thr", type=float, default=0.7)
    parser.add_argument("--gpu", type=int, nargs='+', default=[0], help='use -1 for CPU')
    args = parser.parse_args()
    extract_keypoints(args)
| 6,659 | 32.807107 | 96 | py |
pyslam | pyslam-master/test/thirdparty/test_geodesc.py | #!/usr/bin/env python
"""
Copyright 2018, Zixin Luo, HKUST.
Conduct pair-wise image matching.
"""
# adapted from https://github.com/lzx551402/geodesc/blob/master/examples/image_matching.py
import sys
sys.path.append("../../")
import config
config.cfg.set_lib('geodesc')
#from __future__ import print_function
import os
import sys
import time
from threading import Thread
from multiprocessing import Queue
import cv2
import numpy as np
import warnings # to disable tensorflow-numpy warnings: from https://github.com/tensorflow/tensorflow/issues/30427
warnings.filterwarnings('ignore', category=FutureWarning)
if False:
import tensorflow as tf
else:
# from https://stackoverflow.com/questions/56820327/the-name-tf-session-is-deprecated-please-use-tf-compat-v1-session-instead
import tensorflow.compat.v1 as tf
# from https://kobkrit.com/using-allow-growth-memory-option-in-tensorflow-and-keras-dc8c8081bc96 to cope with the following error:
# "[...tensorflow/stream_executor/cuda/cuda_dnn.cc:329] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR"
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
#session = tf.Session(config=config, ...)
from utils_sys import Printer
# CURDIR = os.path.dirname(__file__)
# sys.path.append(os.path.abspath(os.path.join(CURDIR, '..')))
# from utils.tf import load_frozen_model
# from utils.opencvhelper import SiftWrapper, MatcherWrapper
#sys.path.append(os.path.join('third_party', 'geodesc'))
#from thirdparty.geodesc.utils.tf import load_frozen_model
#from thirdparty.geodesc.utils.opencvhelper import SiftWrapper, MatcherWrapper
from geodesc.utils.tf import load_frozen_model
from geodesc.utils.opencvhelper import SiftWrapper, MatcherWrapper
geodesc_base_path='../../thirdparty/geodesc/'
# Command-line flags: model location, test image pair, and SIFT options.
FLAGS = tf.app.flags.FLAGS
# fix: help text previously read "evaluati3n"
tf.app.flags.DEFINE_string('model_path', geodesc_base_path + 'model/geodesc.pb',
                           """Path to evaluation model.""")
tf.app.flags.DEFINE_integer('batch_size', 512,
                            """Inference batch size.""")
tf.app.flags.DEFINE_integer('max_kpt_num', 8192,
                            """Maximum number of keypoints. Sampled by octave.""")
tf.app.flags.DEFINE_string('img1_path', geodesc_base_path + '/img/test_img1.png',
                           """Path to the first image.""")
tf.app.flags.DEFINE_string('img2_path', geodesc_base_path + '/img/test_img2.png',
                           """Path to the second image.""")
tf.app.flags.DEFINE_boolean('cf_sift', False,
                            """Compare with SIFT feature.""")
# SIFT options
tf.app.flags.DEFINE_boolean('pyr_off', False,
                            """Whether to construct image pyramid.""")
tf.app.flags.DEFINE_boolean('half_sigma', True,
                            """Whether to halve the sigma value of SIFT when constructing the pyramid.""")
tf.app.flags.DEFINE_boolean('ori_off', False,
                            """Whether to use the orientation estimated from SIFT.""")
def extract_deep_features(sift_wrapper, sess, img_path, qtz=True):
    """Detect SIFT keypoints in the image at *img_path* and describe them
    with the GeoDesc network running in TF session *sess*.

    qtz: if True, quantize the descriptors to uint8 in [0, 255].
    Returns (features, cv_kpts, img).
    """
    img = cv2.imread(img_path)
    if img is None:
        Printer.red('cannot find img: ', img_path)
        sys.exit(0)
    gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # detect SIFT keypoints.
    start = time.time()
    _, cv_kpts = sift_wrapper.detect(gray_img)
    end = time.time()
    print('Time cost in keypoint detection', end - start)
    start = time.time()
    sift_wrapper.build_pyramid(gray_img)
    end = time.time()
    print('Time cost in scale space construction', end - start)
    start = time.time()
    all_patches = sift_wrapper.get_patches(cv_kpts)
    end = time.time()
    print('Time cost in patch cropping', end - start)
    num_patch = all_patches.shape[0]
    # loop_num is the number of full batches before the remainder slice
    if num_patch % FLAGS.batch_size > 0:
        loop_num = int(np.floor(float(num_patch) / float(FLAGS.batch_size)))
    else:
        loop_num = int(num_patch / FLAGS.batch_size - 1)
    def _worker(patch_queue, sess, all_feat):
        """The worker thread: runs inference on queued patch batches until a
        None poison pill is received."""
        while True:
            patch_data = patch_queue.get()
            if patch_data is None:
                return
            feat = sess.run("squeeze_1:0", feed_dict={"input:0": np.expand_dims(patch_data, -1)})
            all_feat.append(feat)
            #patch_queue.task_done()
    all_feat = []
    patch_queue = Queue()
    worker_thread = Thread(target=_worker, args=(patch_queue, sess, all_feat))
    worker_thread.daemon = True
    worker_thread.start()
    start = time.time()
    # enqueue
    for i in range(loop_num + 1):
        if i < loop_num:
            patch_queue.put(all_patches[i * FLAGS.batch_size: (i + 1) * FLAGS.batch_size])
        else:
            patch_queue.put(all_patches[i * FLAGS.batch_size:])
    # poison pill
    patch_queue.put(None)
    # wait for extraction.
    worker_thread.join()
    end = time.time()
    print('Time cost in feature extraction', end - start)
    all_feat = np.concatenate(all_feat, axis=0)
    # quantize output features.
    all_feat = (all_feat * 128 + 128).astype(np.uint8) if qtz else all_feat
    return all_feat, cv_kpts, img
def main(argv=None): # pylint: disable=unused-argument
    """Program entrance: match the two test images with GeoDesc features and
    display the matches (optionally side-by-side with SIFT)."""
    # create sift detector.
    sift_wrapper = SiftWrapper(n_sample=FLAGS.max_kpt_num)
    sift_wrapper.half_sigma = FLAGS.half_sigma
    sift_wrapper.pyr_off = FLAGS.pyr_off
    sift_wrapper.ori_off = FLAGS.ori_off
    sift_wrapper.create()
    # create deep feature extractor.
    Printer.yellow('loading model:',FLAGS.model_path,'...')
    graph = load_frozen_model(FLAGS.model_path, print_nodes=False)
    #sess = tf.Session(graph=graph)
    Printer.yellow('...done')
    with tf.Session(graph=graph, config=config) as sess:
        # extract deep feature from images.
        deep_feat1, cv_kpts1, img1 = extract_deep_features(
            sift_wrapper, sess, FLAGS.img1_path, qtz=False)
        deep_feat2, cv_kpts2, img2 = extract_deep_features(
            sift_wrapper, sess, FLAGS.img2_path, qtz=False)
        # match features by OpenCV brute-force matcher (CPU).
        matcher_wrapper = MatcherWrapper()
        # the ratio criterion is set to 0.89 for GeoDesc as described in the paper.
        deep_good_matches, deep_mask = matcher_wrapper.get_matches(
            deep_feat1, deep_feat2, cv_kpts1, cv_kpts2, ratio=0.89, cross_check=True, info='deep')
        deep_display = matcher_wrapper.draw_matches(
            img1, cv_kpts1, img2, cv_kpts2, deep_good_matches, deep_mask)
        # compare with SIFT.
        if FLAGS.cf_sift:
            sift_feat1 = sift_wrapper.compute(img1, cv_kpts1)
            sift_feat2 = sift_wrapper.compute(img2, cv_kpts2)
            sift_good_matches, sift_mask = matcher_wrapper.get_matches(
                sift_feat1, sift_feat2, cv_kpts1, cv_kpts2, ratio=0.80, cross_check=True, info='sift')
            sift_display = matcher_wrapper.draw_matches(
                img1, cv_kpts1, img2, cv_kpts2, sift_good_matches, sift_mask)
            display = np.concatenate((sift_display, deep_display), axis=0)
        else:
            display = deep_display
        cv2.imshow('display', display)
        cv2.waitKey()
        sess.close()
if __name__ == '__main__':
    # tf.app.run parses the flags defined above and calls main().
    tf.app.run()
| 7,192 | 35.328283 | 130 | py |
pyslam | pyslam-master/test/thirdparty/test_contextdesc.py | #!/usr/bin/env python3
# adpated from https://github.com/lzx551402/contextdesc/blob/master/image_matching.py
"""
Copyright 2019, Zixin Luo, HKUST.
Image matching example.
"""
import sys
sys.path.append("../../")
import config
config.cfg.set_lib('contextdesc',prepend=True)
contextdesc_base_path='../../thirdparty/contextdesc/'
import warnings # to disable tensorflow-numpy warnings: from https://github.com/tensorflow/tensorflow/issues/30427
warnings.filterwarnings('ignore', category=FutureWarning)
import os
import cv2
import numpy as np
if False:
import tensorflow as tf
else:
# from https://stackoverflow.com/questions/56820327/the-name-tf-session-is-deprecated-please-use-tf-compat-v1-session-instead
import tensorflow.compat.v1 as tf
import matplotlib.pyplot as plt
from contextdesc.utils.opencvhelper import MatcherWrapper
#from models import get_model
from contextdesc.models.reg_model import RegModel
from contextdesc.models.loc_model import LocModel
from contextdesc.models.aug_model import AugModel
# from https://kobkrit.com/using-allow-growth-memory-option-in-tensorflow-and-keras-dc8c8081bc96 to cope with the following error:
# "[...tensorflow/stream_executor/cuda/cuda_dnn.cc:329] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR"
# TF1 session configuration: grow GPU memory on demand and cap per-process use.
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_config.gpu_options.per_process_gpu_memory_fraction=0.333 # from https://stackoverflow.com/questions/34199233/how-to-prevent-tensorflow-from-allocating-the-totality-of-a-gpu-memory
#session = tf.Session(config=tf_config, ...)
# Command-line flags: model locations, test image pair, and matching options.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('loc_model', contextdesc_base_path + 'pretrained/contextdesc++',
                           """Path to the local feature and augmentation model.""")
tf.app.flags.DEFINE_string('reg_model', contextdesc_base_path + 'pretrained/retrieval_model',
                           """Path to the regional feature model.""")
tf.app.flags.DEFINE_string('img1_path', contextdesc_base_path+ 'imgs/test_img1.jpg',
                           """Path to the first image.""")
tf.app.flags.DEFINE_string('img2_path', contextdesc_base_path+ 'imgs/test_img2.jpg',
                           """Path to the second image.""")
tf.app.flags.DEFINE_integer('n_sample', 2048,
                            """Maximum number of keypoints. Sampled by octave.""")
# model options
tf.app.flags.DEFINE_string('model_type', 'pb',
                           """Pre-trained model type.""")
tf.app.flags.DEFINE_boolean('dense_desc', False,
                            """Whether to use dense descriptor model.""")
# matching options
tf.app.flags.DEFINE_boolean('ratio_test', False,
                            """Whether to apply ratio test in matching.""")
tf.app.flags.DEFINE_boolean('cross_check', False,
                            """Whether to apply cross check in matching.""")
def load_imgs(img_paths):
    """Load images from disk.

    Args:
        img_paths: iterable of image file paths.
    Returns:
        (rgb_list, gray_list): for each input, an RGB array (H, W, 3) and a
        grayscale array (H, W, 1).
    Raises:
        IOError: if any path cannot be read as an image.
    """
    rgb_list = []
    gray_list = []
    for img_path in img_paths:
        img = cv2.imread(img_path)
        if img is None:
            # fix: previously raised IOError with two positional args (a tuple
            # message) and misdescribed the file path as a folder.
            raise IOError('cannot read image: {}'.format(img_path))
        # keep a single-channel copy with an explicit channel axis
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[..., np.newaxis]
        # OpenCV loads BGR; reverse the channel axis to obtain RGB
        img = img[..., ::-1]
        rgb_list.append(img)
        gray_list.append(gray)
    return rgb_list, gray_list
def extract_regional_features(rgb_list, model_path):
    """Run the regional-feature model on every RGB image.

    Returns one regional feature per input image, in order.
    """
    model = RegModel(model_path)
    reg_feat_list = [model.run_test_data(img) for img in rgb_list]
    model.close()
    return reg_feat_list
def extract_local_features(gray_list, model_path):
    """Detect keypoints and compute local descriptors for each grayscale image.

    Returns four parallel lists: OpenCV keypoints, per-keypoint info matrices
    (x, y, size, angle, response, normalized xy, raw descriptor, matchability),
    L2-normalized local descriptors, and SIFT descriptors.
    """
    model = LocModel(model_path,
                     sift_desc=True,
                     n_feature=2000,
                     n_sample=FLAGS.n_sample,
                     peak_thld=0.04,
                     dense_desc=FLAGS.dense_desc,
                     upright=False)
    cv_kpts_list, loc_info_list, loc_feat_list, sift_feat_list = [], [], [], []
    for gray in gray_list:
        loc_feat, kpt_mb, normalized_xy, cv_kpts, sift_desc = model.run_test_data(gray)
        # pack the raw OpenCV keypoint attributes into an (n, 5) array
        raw_kpts = np.stack(
            [np.array((kp.pt[0], kp.pt[1], kp.size, kp.angle, kp.response)) for kp in cv_kpts],
            axis=0)
        loc_info = np.concatenate((raw_kpts, normalized_xy, loc_feat, kpt_mb), axis=-1)
        cv_kpts_list.append(cv_kpts)
        loc_info_list.append(loc_info)
        sift_feat_list.append(sift_desc)
        # L2-normalize descriptors row-wise before matching
        loc_feat_list.append(loc_feat / np.linalg.norm(loc_feat, axis=-1, keepdims=True))
    model.close()
    return cv_kpts_list, loc_info_list, loc_feat_list, sift_feat_list
def extract_augmented_features(reg_feat_list, loc_info_list, model_path):
    """Fuse each (regional feature, local info) pair into augmented descriptors.

    The two input lists must have the same length; returns one augmented
    feature array per image.
    """
    model = AugModel(model_path, quantz=False)
    assert len(reg_feat_list) == len(loc_info_list)
    aug_feat_list = []
    for reg_feat, loc_info in zip(reg_feat_list, loc_info_list):
        aug_feat, _ = model.run_test_data([reg_feat, loc_info])
        aug_feat_list.append(aug_feat)
    model.close()
    return aug_feat_list
def main(argv=None):  # pylint: disable=unused-argument
    """Program entrance: extract SIFT / raw local / augmented ContextDesc
    features for the two test images, match each variant and show all three
    match visualizations stacked vertically."""
    # resolve model file names for the chosen checkpoint format
    if FLAGS.model_type == 'pb':
        reg_model_path = os.path.join(FLAGS.reg_model, 'reg.pb')
        loc_model_path = os.path.join(FLAGS.loc_model, 'loc.pb')
        aug_model_path = os.path.join(FLAGS.loc_model, 'aug.pb')
    elif FLAGS.model_type == 'ckpt':
        reg_model_path = os.path.join(FLAGS.reg_model, 'model.ckpt-550000')
        loc_model_path = os.path.join(FLAGS.loc_model, 'model.ckpt-400000')
        aug_model_path = os.path.join(FLAGS.loc_model, 'model.ckpt-400000')
    else:
        raise NotImplementedError
    img_paths = [FLAGS.img1_path, FLAGS.img2_path]
    # load testing images.
    rgb_list, gray_list = load_imgs(img_paths)
    # extract regional features.
    reg_feat_list = extract_regional_features(rgb_list, reg_model_path)
    # extract local features and keypoint matchability.
    cv_kpts_list, loc_info_list, loc_feat_list, sift_feat_list = extract_local_features(
        gray_list, loc_model_path)
    # extract augmented features.
    aug_feat_list = extract_augmented_features(reg_feat_list, loc_info_list, aug_model_path)
    # feature matching and draw matches.
    matcher = MatcherWrapper()
    # NOTE(review): 'feautre' typo below is a runtime label, left unchanged.
    sift_match, sift_mask = matcher.get_matches(
        sift_feat_list[0], sift_feat_list[1], cv_kpts_list[0], cv_kpts_list[1],
        ratio=0.8 if FLAGS.ratio_test else None, cross_check=FLAGS.cross_check,
        err_thld=3, ransac=True, info='SIFT feautre')
    base_match, base_mask = matcher.get_matches(
        loc_feat_list[0], loc_feat_list[1], cv_kpts_list[0], cv_kpts_list[1],
        ratio=0.89 if FLAGS.ratio_test else None, cross_check=FLAGS.cross_check,
        err_thld=3, ransac=True, info='Raw local feature')
    aug_match, aug_mask = matcher.get_matches(
        aug_feat_list[0], aug_feat_list[1], cv_kpts_list[0], cv_kpts_list[1],
        ratio=0.89 if FLAGS.ratio_test else None, cross_check=FLAGS.cross_check,
        err_thld=3, ransac=True, info='Augmented local feature')
    sift_disp = matcher.draw_matches(
        rgb_list[0], cv_kpts_list[0], rgb_list[1], cv_kpts_list[1], sift_match, sift_mask)
    base_disp = matcher.draw_matches(
        rgb_list[0], cv_kpts_list[0], rgb_list[1], cv_kpts_list[1], base_match, base_mask)
    aug_disp = matcher.draw_matches(
        rgb_list[0], cv_kpts_list[0], rgb_list[1], cv_kpts_list[1], aug_match, aug_mask)
    # stack the three visualizations with thin white separators
    rows, cols = sift_disp.shape[0:2]
    white = (np.ones((int(rows / 50), cols, 3)) * 255).astype(np.uint8)
    disp = np.concatenate([sift_disp, white, base_disp, white, aug_disp], axis=0)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(disp)
    plt.show()
if __name__ == '__main__':
tf.app.run() | 8,106 | 41.668421 | 183 | py |
pyslam | pyslam-master/test/thirdparty/test_sosnet.py | import sys
sys.path.append("../../")
import config
config.cfg.set_lib('sosnet')
import torch
import sosnet_model
import os
# location of the pretrained SOSNet weight files
tfeat_base_path='../../thirdparty/SOSNet/'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# inference only: no gradients needed anywhere in this script
torch.set_grad_enabled(False)
sosnet32 = sosnet_model.SOSNet32x32()
net_name = 'liberty'
sosnet32.load_state_dict(torch.load(os.path.join(tfeat_base_path, 'sosnet-weights', "sosnet-32x32-" + net_name + ".pth")))
# NOTE(review): .cuda() is called unconditionally even though `device` may be
# "cpu" -- this script requires a GPU as written.
sosnet32.cuda().eval()
# smoke test: describe 100 random 32x32 single-channel patches
patches = torch.rand(100, 1, 32, 32).to(device)
descrs = sosnet32(patches)
print('done!') | 570 | 23.826087 | 122 | py |
GANF | GANF-main/train_traffic.py | #%%
import os
import argparse
import torch
from models.GANF import GANF
import numpy as np
parser = argparse.ArgumentParser()
# files
parser.add_argument('--data_dir', type=str,
                    default='./data', help='Location of datasets.')
parser.add_argument('--output_dir', type=str,
                    default='./checkpoint/model')
parser.add_argument('--name',default='traffic')
parser.add_argument('--dataset', type=str, default='metr-la')
# restore
parser.add_argument('--graph', type=str, default='None')
parser.add_argument('--model', type=str, default='None')
parser.add_argument('--seed', type=int, default=10, help='Random seed to use.')
# model parameters
parser.add_argument('--n_blocks', type=int, default=6, help='Number of blocks to stack in a model (MADE in MAF; Coupling+BN in RealNVP).')
parser.add_argument('--n_components', type=int, default=1, help='Number of Gaussian clusters for mixture of gaussians models.')
parser.add_argument('--hidden_size', type=int, default=32, help='Hidden layer size for MADE (and each MADE block in an MAF).')
parser.add_argument('--n_hidden', type=int, default=1, help='Number of hidden layers in each MADE.')
parser.add_argument('--batch_norm', type=bool, default=False)
# training params
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--n_epochs', type=int, default=20)
parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate.')
parser.add_argument('--log_interval', type=int, default=5, help='How often to show loss statistics and save samples.')
parser.add_argument('--h_tol', type=float, default=1e-6)
parser.add_argument('--rho_max', type=float, default=1e16)
parser.add_argument('--max_iter', type=int, default=20)
parser.add_argument('--lambda1', type=float, default=0.0)
parser.add_argument('--rho_init', type=float, default=1.0)
parser.add_argument('--alpha_init', type=float, default=0.0)
args = parser.parse_known_args()[0]
args.cuda = torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
print(args)
# seed every RNG used in training for reproducibility
import random
# NOTE(review): numpy is already imported above; the duplicate import is harmless.
import numpy as np
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
#%%
print("Loading dataset")
from dataset import load_traffic
train_loader, val_loader, test_loader, n_sensor = load_traffic("{}/{}.h5".format(args.data_dir,args.dataset), \
                                                               args.batch_size)
#%%
# augmented-Lagrangian state for the acyclicity-constrained optimization
rho = args.rho_init
alpha = args.alpha_init
lambda1 = args.lambda1
h_A_old = np.inf
max_iter = args.max_iter
rho_max = args.rho_max
h_tol = args.h_tol
epoch = 0
# initialize A
if args.graph != 'None':
    # resume from a saved adjacency matrix
    init = torch.load(args.graph).to(device).abs()
    print("Load graph from "+args.graph)
else:
    # random non-negative initialization with a zero diagonal (no self-loops)
    from torch.nn.init import xavier_uniform_
    init = torch.zeros([n_sensor, n_sensor])
    init = xavier_uniform_(init).abs()
    init = init.fill_diagonal_(0.0)
A = torch.tensor(init, requires_grad=True, device=device)
#%%
model = GANF(args.n_blocks, 1, args.hidden_size, args.n_hidden, dropout=0.0, batch_norm=args.batch_norm)
model = model.to(device)
if args.model != 'None':
    model.load_state_dict(torch.load(args.model))
    print('Load model from '+args.model)
#%%
from torch.nn.utils import clip_grad_value_
save_path = os.path.join(args.output_dir,args.name)
if not os.path.exists(save_path):
    os.makedirs(save_path)
loss_best = 100
# ----------------------------------------------------------------------------
# Phase 1: augmented-Lagrangian training (NOTEARS-style schedule). The inner
# loops fit the flow under the penalized objective; rho is multiplied by 10
# whenever h(A) does not shrink enough, and alpha gets a dual update.
# ----------------------------------------------------------------------------
for _ in range(max_iter):
    while rho < rho_max:
        lr = args.lr #* np.math.pow(0.1, epoch // 100)
        optimizer = torch.optim.Adam([
            {'params':model.parameters(), 'weight_decay':args.weight_decay},
            {'params': [A]}], lr=lr, weight_decay=0.0)
        # train
        for _ in range(args.n_epochs):
            # train
            loss_train = []
            epoch += 1
            model.train()
            for x in train_loader:
                x = x.to(device)
                optimizer.zero_grad()
                # Row-normalize A; the row sums are detached, so gradients
                # flow only through the numerator.
                A_hat = torch.divide(A.T,A.sum(dim=1).detach()).T
                # Negative log-likelihood of the batch under the flow.
                loss = -model(x, A_hat)
                # Acyclicity measure h(A) = tr(exp(A o A)) - d.
                h = torch.trace(torch.matrix_exp(A_hat*A_hat)) - n_sensor
                total_loss = loss + 0.5 * rho * h * h + alpha * h

                total_loss.backward()
                clip_grad_value_(model.parameters(), 1)
                optimizer.step()
                loss_train.append(loss.item())
                # Project the adjacency back onto [0, 1] after each step.
                A.data.copy_(torch.clamp(A.data, min=0, max=1))

            # evaluate
            model.eval()
            loss_val = []
            with torch.no_grad():
                for x in val_loader:
                    x = x.to(device)
                    # Uses A_hat from the last training batch.
                    loss = -model(x,A_hat.data)
                    loss_val.append(loss.item())
            print('Epoch: {}, train -log_prob: {:.2f}, test -log_prob: {:.2f}, h: {}'\
                .format(epoch, np.mean(loss_train), np.mean(loss_val), h.item()))
            # Checkpoint the best model/graph by validation loss.
            if np.mean(loss_val) < loss_best:
                loss_best = np.mean(loss_val)
                print("save model {} epoch".format(epoch))
                torch.save(A.data,os.path.join(save_path, "graph_best.pt"))
                torch.save(model.state_dict(), os.path.join(save_path, "{}_best.pt".format(args.name)))

        print('rho: {}, alpha {}, h {}'.format(rho, alpha, h.item()))
        print('===========================================')
        torch.save(A.data,os.path.join(save_path, "graph_{}.pt".format(epoch)))
        torch.save(model.state_dict(), os.path.join(save_path, "{}_{}.pt".format(args.name, epoch)))
        del optimizer
        torch.cuda.empty_cache()

        # If h did not shrink by at least half, strengthen the penalty.
        if h.item() > 0.5 * h_A_old:
            rho *= 10
        else:
            break

    # Dual update on alpha; stop once the constraint is met or rho saturates.
    h_A_old = h.item()
    alpha += rho*h.item()

    if h_A_old <= h_tol or rho >=rho_max:
        break
# %%
# ----------------------------------------------------------------------------
# Phase 2: fine-tune with rho and alpha frozen at their final values and a
# 10x smaller learning rate, checkpointing the best model by validation loss.
# ----------------------------------------------------------------------------
lr = args.lr * 0.1
optimizer = torch.optim.Adam([
    {'params':model.parameters(), 'weight_decay':args.weight_decay},
    {'params': [A]}], lr=lr, weight_decay=0.0)

# train
for _ in range(100):
    loss_train = []
    epoch += 1
    model.train()
    for x in train_loader:
        x = x.to(device)
        optimizer.zero_grad()
        # Row-normalized adjacency; gradients flow only through the numerator.
        A_hat = torch.divide(A.T,A.sum(dim=1).detach()).T
        loss = -model(x, A_hat)
        h = torch.trace(torch.matrix_exp(A_hat*A_hat)) - n_sensor
        total_loss = loss + 0.5 * rho * h * h + alpha * h

        total_loss.backward()
        clip_grad_value_(model.parameters(), 1)
        optimizer.step()
        loss_train.append(loss.item())
        # Keep the adjacency inside [0, 1].
        A.data.copy_(torch.clamp(A.data, min=0, max=1))

    model.eval()
    loss_val = []
    print(A.max())
    with torch.no_grad():
        for x in val_loader:
            x = x.to(device)
            # Uses A_hat from the last training batch.
            loss = -model(x,A_hat.data)
            loss_val.append(loss.item())
    print('Epoch: {}, train -log_prob: {:.2f}, test -log_prob: {:.2f}, h: {}'\
        .format(epoch, np.mean(loss_train), np.mean(loss_val), h.item()))
    if np.mean(loss_val) < loss_best:
        loss_best = np.mean(loss_val)
        print("save model {} epoch".format(epoch))
        torch.save(A.data,os.path.join(save_path, "graph_best.pt"))
        torch.save(model.state_dict(), os.path.join(save_path, "{}_best.pt".format(args.name)))

    # Periodic checkpoints in addition to the best-model checkpoint.
    if epoch % args.log_interval==0:
        torch.save(A.data,os.path.join(save_path, "graph_{}.pt".format(epoch)))
        torch.save(model.state_dict(), os.path.join(save_path, "{}_{}.pt".format(args.name, epoch)))
#%%
| 7,602 | 34.036866 | 138 | py |
GANF | GANF-main/utils.py | #%%
import torch
def h(A):
    """NOTEARS acyclicity measure: tr(exp(A ∘ A)) - d, zero iff A is a DAG."""
    d = A.shape[0]
    expm = torch.matrix_exp(A * A)
    return torch.diagonal(expm).sum() - d
def normalize(A):
    """Column-normalize A; columns whose sum is zero are mapped to zero."""
    inv_degree = A.sum(dim=0).pow(-1.0)
    inv_degree = inv_degree.masked_fill(inv_degree == float('inf'), 0.0)
    return A * inv_degree
def thresholding(A, thre):
    """Zero out every entry whose magnitude does not exceed `thre`."""
    zero = torch.scalar_tensor(0.0, dtype=torch.float32, device=A.device)
    keep = A.abs() > thre
    return torch.where(keep, A, zero)
def binarize(A, thre):
    """Map entries with |a| > thre to 1.0 and everything else to 0.0."""
    above = A.abs() > thre
    return torch.where(above, 1.0, 0.0)
# %%
import pandas as pd
def get_timestamp(stamps):
    """Convert pandas timestamps to integer seconds since the Unix epoch."""
    unix_epoch = pd.Timestamp("1970-01-01")
    one_second = pd.Timedelta("1s")
    return (stamps - unix_epoch) // one_second
# %%
import numpy as np
from sklearn.metrics import auc
def roc_auc(label_time, pred, negative_sample, sigma):
    """Event-based soft ROC-AUC for time-stamped anomaly detection.

    label_time: sequence of ground-truth anomaly timestamps.
    pred: pandas Series of anomaly scores indexed by timestamp.
    negative_sample: scores from normal data; sets the thresholds and the
        false-positive rate.
    sigma: width (seconds) of the Gaussian credit given for detection delay.

    Returns (auc_score, (fps, tps)).
    """
    # Thresholds: ~50 evenly spaced quantiles of the (descending) negative
    # scores, plus the minimum so the curve reaches FPR = 1.
    negative_sample = np.sort(negative_sample)[::-1]
    thresholds = list(negative_sample[::int(len(negative_sample)/50)])
    thresholds.append(negative_sample[-1])
    tps=[]
    fps=[]

    for thre in thresholds:
        # Predicted-positive scores at this threshold.
        pred_pos = pred[pred>thre]

        tp = 0
        for i in range(len(label_time)):
            # Accept detections within +/- 30 minutes of the labelled event
            # (string slicing on the Series' DatetimeIndex).
            start_time = label_time[i] - pd.Timedelta(30, unit='min')
            end_time = label_time[i] + pd.Timedelta(30, unit='min')
            detected_event = pred_pos[str(start_time): str(end_time)]
            if len(detected_event)>0:
                # Soft credit decays with the delay (in seconds) between the
                # closest detection and the event.
                timestamps = get_timestamp(detected_event.index)
                delta_t = np.min(np.abs(timestamps.values - get_timestamp(label_time[i])))
                tp += np.exp(-np.power(delta_t/sigma,2))
        tp = tp/len(label_time)
        tps.append(tp)
        fp = (negative_sample>thre).sum()/len(negative_sample)
        fps.append(fp)
    return auc(fps,tps), (fps,tps)
# %%
def roc_auc_all(loss_np, delta_t, sigma):
    """Soft ROC-AUC over per-sample scores with distance-weighted ground truth.

    loss_np: 1-D numpy array of anomaly scores.
    delta_t: pandas Series of time offsets (seconds) to the nearest labelled
        anomaly; the soft ground truth is exp(-(delta_t/sigma)^2).
    sigma: width of the Gaussian used to soften the labels.

    Returns (auc_score, fps, tps).

    FIX: removed extraction junk that was fused onto the return line.
    """
    ground_truth = np.exp(-np.power((delta_t.values)/sigma,2))

    # Thresholds: ~50 evenly spaced quantiles of the descending scores,
    # plus the minimum so the curve reaches FPR = 1.
    loss_sort = np.sort(loss_np)[::-1]
    thresholds = list(loss_sort[::int(len(loss_sort)/50)])
    thresholds.append(loss_sort[-1])

    # Soft positive/negative mass.
    n_pos = ground_truth.sum()
    n_neg = (1-ground_truth).sum()

    tps = []
    fps = []
    for thre in thresholds:
        pred_pos = loss_np>thre
        # Soft true/false positive rates at this threshold.
        tp = ground_truth[pred_pos].sum()/n_pos
        fp = (1-ground_truth[pred_pos]).sum()/n_neg
        tps.append(tp)
        fps.append(fp)

    auc_score = auc(fps, tps)
    return auc_score, fps, tps
GANF | GANF-main/dataset.py | #%%
import pandas as pd
import torch
from torch.utils.data import Dataset
import numpy as np
# %%
from torch.utils.data import DataLoader
def load_traffic(root, batch_size):
    """
    Load a traffic dataset from an HDF5 file, z-score normalize it with
    global (flattened) statistics, and wrap chronological splits in
    DataLoaders of sliding windows.

    return train_loader, val_loader, test_loader, n_sensor
    """
    df = pd.read_hdf(root)
    df = df.reset_index()
    df = df.rename(columns={"index":"utc"})
    # The stored index is Unix seconds; turn it into a DatetimeIndex.
    df["utc"] = pd.to_datetime(df["utc"], unit="s")
    df = df.set_index("utc")
    n_sensor = len(df.columns)

    # Global normalization across all sensors and time steps.
    mean = df.values.flatten().mean()
    std = df.values.flatten().std()
    df = (df - mean)/std
    df = df.sort_index()
    # split the dataset
    # NOTE(review): the test split starts at 0.75, so it fully contains the
    # validation split [0.75, 0.875) — confirm this overlap is intentional.
    train_df = df.iloc[:int(0.75*len(df))]
    val_df = df.iloc[int(0.75*len(df)):int(0.875*len(df))]
    test_df = df.iloc[int(0.75*len(df)):]

    train_loader = DataLoader(Traffic(train_df), batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(Traffic(val_df), batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(Traffic(test_df), batch_size=batch_size, shuffle=False)

    return train_loader, val_loader, test_loader, n_sensor
class Traffic(Dataset):
    """Sliding-window dataset over a sensor DataFrame sampled every 5 minutes.

    Each item is a float tensor of shape (n_sensors, window_size, 1); windows
    spanning gaps in the time index are discarded.
    """

    def __init__(self, df, window_size=12, stride_size=1):
        super(Traffic, self).__init__()
        self.df = df
        self.window_size = window_size
        self.stride_size = stride_size
        self.data, self.idx, self.time = self.preprocess(df)

    def preprocess(self, df):
        # Candidate window starts/ends, stepping by the stride.
        starts = np.arange(0, len(df) - self.window_size, self.stride_size)
        ends = np.arange(self.window_size, len(df), self.stride_size)
        # Keep only contiguous windows: elapsed time across a window must be
        # exactly window_size * 5 minutes.
        spans = df.index[ends] - df.index[starts]
        contiguous = spans == pd.Timedelta(5 * self.window_size, unit='min')
        kept = starts[contiguous]
        return df.values, kept, df.index[kept]

    def __len__(self):
        return len(self.idx)

    def __getitem__(self, index):
        # Output layout: K (sensors) x L (window length) x 1.
        begin = self.idx[index]
        window = self.data[begin:begin + self.window_size]
        window = window.reshape([self.window_size, -1, 1])
        return torch.FloatTensor(window).transpose(0, 1)
def load_water(root, batch_size,label=False):
    """Load the SWaT water-treatment CSV, binarize attack labels, z-score the
    first 51 feature columns per column, and return chronological 60/20/20
    DataLoader splits plus the sensor count.

    If `label` is True, the train loader yields (window, label) pairs
    (WaterLabel); otherwise windows only (Water).
    """
    data = pd.read_csv(root)
    data = data.rename(columns={"Normal/Attack":"label"})
    # NOTE(review): chained assignment — mutates data.label in place via a
    # boolean mask; this pattern breaks under pandas copy-on-write (3.0+).
    data.label[data.label!="Normal"]=1
    data.label[data.label=="Normal"]=0
    data["Timestamp"] = pd.to_datetime(data["Timestamp"])
    data = data.set_index("Timestamp")

    #%%
    # Per-column standardization of the 51 sensor/actuator channels; columns
    # with zero variance become NaN and are dropped.
    feature = data.iloc[:,:51]
    mean_df = feature.mean(axis=0)
    std_df = feature.std(axis=0)

    norm_feature = (feature-mean_df)/std_df
    norm_feature = norm_feature.dropna(axis=1)
    n_sensor = len(norm_feature.columns)

    # Chronological 60/20/20 split of features and labels.
    train_df = norm_feature.iloc[:int(0.6*len(data))]
    train_label = data.label.iloc[:int(0.6*len(data))]

    val_df = norm_feature.iloc[int(0.6*len(data)):int(0.8*len(data))]
    val_label = data.label.iloc[int(0.6*len(data)):int(0.8*len(data))]

    test_df = norm_feature.iloc[int(0.8*len(data)):]
    test_label = data.label.iloc[int(0.8*len(data)):]

    if label:
        train_loader = DataLoader(WaterLabel(train_df,train_label), batch_size=batch_size, shuffle=True)
    else:
        train_loader = DataLoader(Water(train_df,train_label), batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(Water(val_df,val_label), batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(Water(test_df,test_label), batch_size=batch_size, shuffle=False)

    return train_loader, val_loader, test_loader, n_sensor
class Water(Dataset):
    """Sliding-window dataset over the SWaT feature frame (1 Hz sampling).

    Items are float tensors of shape (n_sensors, window_size, 1); the label
    of each window's start row is kept in self.label but not returned.
    """

    def __init__(self, df, label, window_size=60, stride_size=10):
        super(Water, self).__init__()
        self.df = df
        self.window_size = window_size
        self.stride_size = stride_size
        self.data, self.idx, self.label = self.preprocess(df, label)

    def preprocess(self, df, label):
        starts = np.arange(0, len(df) - self.window_size, self.stride_size)
        ends = np.arange(self.window_size, len(df), self.stride_size)
        # Discard windows that span gaps in the 1-second time index.
        spans = df.index[ends] - df.index[starts]
        contiguous = spans == pd.Timedelta(self.window_size, unit='s')
        kept = starts[contiguous]
        return df.values, kept, label[kept]

    def __len__(self):
        return len(self.idx)

    def __getitem__(self, index):
        # Output layout: K (sensors) x L (window length) x 1.
        begin = self.idx[index]
        window = self.data[begin:begin + self.window_size]
        window = window.reshape([self.window_size, -1, 1])
        return torch.FloatTensor(window).transpose(0, 1)
class WaterLabel(Dataset):
    """Same sliding-window dataset as `Water`, but __getitem__ also returns a
    per-window label mapped from {0, 1} to {+1, -1} (normal -> +1,
    attack -> -1).

    FIX: removed extraction junk that was fused onto the final return line.
    """

    def __init__(self, df, label, window_size=60, stride_size=10):
        super(WaterLabel, self).__init__()
        self.df = df
        self.window_size = window_size
        self.stride_size = stride_size
        self.data, self.idx, self.label = self.preprocess(df,label)
        # Map {0,1} -> {+1,-1}.
        self.label = 1.0-2*self.label

    def preprocess(self, df, label):
        # Candidate window starts/ends; keep only windows whose 1-second
        # time index is contiguous (no gaps).
        start_idx = np.arange(0,len(df)-self.window_size,self.stride_size)
        end_idx = np.arange(self.window_size, len(df), self.stride_size)
        delat_time = df.index[end_idx]-df.index[start_idx]
        idx_mask = delat_time==pd.Timedelta(self.window_size,unit='s')

        return df.values, start_idx[idx_mask], label[start_idx[idx_mask]]

    def __len__(self):
        return len(self.idx)

    def __getitem__(self, index):
        #  N X K X L X D: sensors x window length x 1, plus the window label.
        start = self.idx[index]
        end = start + self.window_size
        data = self.data[start:end].reshape([self.window_size,-1, 1])

        return torch.FloatTensor(data).transpose(0,1), self.label[index]
GANF | GANF-main/train_water.py | #%%
import os
import argparse
import torch
from models.GANF import GANF
import numpy as np
from sklearn.metrics import roc_auc_score
# from data import fetch_dataloaders

# ----------------------------------------------------------------------------
# Train GANF on the SWaT water-treatment dataset with an augmented-Lagrangian
# scheme enforcing acyclicity of the learned sensor graph A.
# ----------------------------------------------------------------------------
parser = argparse.ArgumentParser()
# files
parser.add_argument('--data_dir', type=str,
                    default='./data/SWaT_Dataset_Attack_v0.csv', help='Location of datasets.')
parser.add_argument('--output_dir', type=str,
                    default='./checkpoint/model')
parser.add_argument('--name', default='GANF_Water')
# restore
parser.add_argument('--graph', type=str, default='None')
parser.add_argument('--model', type=str, default='None')
parser.add_argument('--seed', type=int, default=18, help='Random seed to use.')
# made parameters
parser.add_argument('--n_blocks', type=int, default=1, help='Number of blocks to stack in a model (MADE in MAF; Coupling+BN in RealNVP).')
parser.add_argument('--n_components', type=int, default=1, help='Number of Gaussian clusters for mixture of gaussians models.')
parser.add_argument('--hidden_size', type=int, default=32, help='Hidden layer size for MADE (and each MADE block in an MAF).')
parser.add_argument('--n_hidden', type=int, default=1, help='Number of hidden layers in each MADE.')
# NOTE(review): argparse with type=bool treats any non-empty string as True.
parser.add_argument('--batch_norm', type=bool, default=False)
# training params
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--n_epochs', type=int, default=1)
parser.add_argument('--lr', type=float, default=2e-3, help='Learning rate.')
parser.add_argument('--log_interval', type=int, default=5, help='How often to show loss statistics and save samples.')
# Augmented-Lagrangian hyper-parameters for the acyclicity constraint.
parser.add_argument('--h_tol', type=float, default=1e-4)
parser.add_argument('--rho_max', type=float, default=1e16)
parser.add_argument('--max_iter', type=int, default=20)
parser.add_argument('--lambda1', type=float, default=0.0)
parser.add_argument('--rho_init', type=float, default=1.0)
parser.add_argument('--alpha_init', type=float, default=0.0)

args = parser.parse_known_args()[0]
args.cuda = torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")

print(args)

# Seed every RNG in play for reproducibility.
import random
import numpy as np
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
#%%
print("Loading dataset")
from dataset import load_water

train_loader, val_loader, test_loader, n_sensor = load_water(args.data_dir, \
                                                             args.batch_size)

#%%
# Augmented-Lagrangian state: rho (quadratic penalty weight) and alpha
# (Lagrange multiplier) for the constraint h(A) = tr(exp(A*A)) - n_sensor = 0.
rho = args.rho_init
alpha = args.alpha_init
lambda1 = args.lambda1
h_A_old = np.inf

max_iter = args.max_iter
rho_max = args.rho_max
h_tol = args.h_tol
epoch = 0

# initialize A: either restore a saved graph or draw a random non-negative
# adjacency matrix with zero diagonal (no self-loops).
if args.graph != 'None':
    init = torch.load(args.graph).to(device).abs()
    print("Load graph from "+args.graph)
else:
    from torch.nn.init import xavier_uniform_
    init = torch.zeros([n_sensor, n_sensor])
    init = xavier_uniform_(init).abs()
    init = init.fill_diagonal_(0.0)
# NOTE(review): torch.tensor(tensor, ...) copy-constructs and warns;
# init.clone().detach().requires_grad_(True) is the recommended form.
A = torch.tensor(init, requires_grad=True, device=device)

#%%
# One-dimensional observations per sensor per time step.
model = GANF(args.n_blocks, 1, args.hidden_size, args.n_hidden, dropout=0.0, batch_norm=args.batch_norm)
model = model.to(device)

if args.model != 'None':
    model.load_state_dict(torch.load(args.model))
    print('Load model from '+args.model)

#%%
from torch.nn.utils import clip_grad_value_
import seaborn as sns
import matplotlib.pyplot as plt

save_path = os.path.join(args.output_dir,args.name)
if not os.path.exists(save_path):
    os.makedirs(save_path)

# Best-so-far validation loss; 100 acts as a large sentinel.
loss_best = 100
# ----------------------------------------------------------------------------
# Phase 1: augmented-Lagrangian training. rho is multiplied by 10 whenever
# h(A) does not shrink enough; alpha receives a dual update per outer step.
# Validation/test ROC-AUC are reported every epoch for monitoring.
# ----------------------------------------------------------------------------
for _ in range(max_iter):
    while rho < rho_max:
        lr = args.lr
        optimizer = torch.optim.Adam([
            {'params':model.parameters(), 'weight_decay':args.weight_decay},
            {'params': [A]}], lr=lr, weight_decay=0.0)

        for _ in range(args.n_epochs):
            # train iteration
            loss_train = []
            epoch += 1
            model.train()
            for x in train_loader:
                x = x.to(device)
                optimizer.zero_grad()
                # Negative log-likelihood under the flow with graph A.
                loss = -model(x, A)
                # Acyclicity measure h(A) = tr(exp(A o A)) - n_sensor.
                h = torch.trace(torch.matrix_exp( A* A)) - n_sensor
                total_loss = loss + 0.5 * rho * h * h + alpha * h

                total_loss.backward()
                clip_grad_value_(model.parameters(), 1)
                optimizer.step()
                loss_train.append(loss.item())
                # Project the adjacency back onto [0, 1] after each step.
                A.data.copy_(torch.clamp(A.data, min=0, max=1))

            # evaluate iteration: per-window anomaly scores on val and test.
            model.eval()
            loss_val = []
            with torch.no_grad():
                for x in val_loader:
                    x = x.to(device)
                    loss = -model.test(x, A.data).cpu().numpy()
                    loss_val.append(loss)
            loss_val = np.concatenate(loss_val)

            loss_test = []
            with torch.no_grad():
                for x in test_loader:
                    x = x.to(device)
                    loss = -model.test(x, A.data).cpu().numpy()
                    loss_test.append(loss)
            loss_test = np.concatenate(loss_test)
            print(loss_val.max(), loss_val.min(), loss_test.max(), loss_test.min())

            # Guard against NaN/inf scores before computing ROC-AUC.
            loss_val = np.nan_to_num(loss_val)
            loss_test = np.nan_to_num(loss_test)
            roc_val = roc_auc_score(np.asarray(val_loader.dataset.label.values,dtype=int),loss_val)
            roc_test = roc_auc_score(np.asarray(test_loader.dataset.label.values,dtype=int),loss_test)
            print('Epoch: {}, train -log_prob: {:.2f}, test -log_prob: {:.2f}, roc_val: {:.4f}, roc_test: {:.4f} ,h: {}'\
                .format(epoch, np.mean(loss_train), np.mean(loss_val), roc_val, roc_test, h.item()))

        print('rho: {}, alpha {}, h {}'.format(rho, alpha, h.item()))
        print('===========================================')
        torch.save(A.data,os.path.join(save_path, "graph_{}.pt".format(epoch)))
        torch.save(model.state_dict(), os.path.join(save_path, "{}_{}.pt".format(args.name, epoch)))
        del optimizer
        torch.cuda.empty_cache()

        # If h did not shrink by at least half, strengthen the penalty.
        if h.item() > 0.5 * h_A_old:
            rho *= 10
        else:
            break

    # Dual update on alpha; stop once the constraint is met or rho saturates.
    h_A_old = h.item()
    alpha += rho*h.item()

    if h_A_old <= h_tol or rho >=rho_max:
        break
# %%
# ----------------------------------------------------------------------------
# Phase 2: fine-tune for 30 epochs with rho and alpha frozen at their final
# values, checkpointing the best model by validation loss and periodically
# by epoch. (FIX: stripped extraction junk fused onto the final save line.)
# ----------------------------------------------------------------------------
lr = args.lr
optimizer = torch.optim.Adam([
    {'params':model.parameters(), 'weight_decay':args.weight_decay},
    {'params': [A]}], lr=lr, weight_decay=0.0)

for _ in range(30):
    loss_train = []
    epoch += 1
    model.train()
    for x in train_loader:
        x = x.to(device)
        optimizer.zero_grad()
        loss = -model(x, A)
        # Acyclicity penalty kept in the objective with the frozen rho/alpha.
        h = torch.trace(torch.matrix_exp(A*A)) - n_sensor
        total_loss = loss + 0.5 * rho * h * h + alpha * h

        total_loss.backward()
        clip_grad_value_(model.parameters(), 1)
        optimizer.step()
        loss_train.append(loss.item())
        # Project the adjacency back onto [0, 1] after each step.
        A.data.copy_(torch.clamp(A.data, min=0, max=1))

    # eval
    model.eval()
    loss_val = []
    with torch.no_grad():
        for x in val_loader:
            x = x.to(device)
            loss = -model.test(x, A.data).cpu().numpy()
            loss_val.append(loss)
    loss_val = np.concatenate(loss_val)

    loss_test = []
    with torch.no_grad():
        for x in test_loader:
            x = x.to(device)
            loss = -model.test(x, A.data).cpu().numpy()
            loss_test.append(loss)
    loss_test = np.concatenate(loss_test)

    # Guard against NaN/inf scores before computing ROC-AUC.
    loss_val = np.nan_to_num(loss_val)
    loss_test = np.nan_to_num(loss_test)
    roc_val = roc_auc_score(np.asarray(val_loader.dataset.label.values,dtype=int),loss_val)
    roc_test = roc_auc_score(np.asarray(test_loader.dataset.label.values,dtype=int),loss_test)
    print('Epoch: {}, train -log_prob: {:.2f}, test -log_prob: {:.2f}, roc_val: {:.4f}, roc_test: {:.4f} ,h: {}'\
        .format(epoch, np.mean(loss_train), np.mean(loss_val), roc_val, roc_test, h.item()))

    if np.mean(loss_val) < loss_best:
        loss_best = np.mean(loss_val)
        print("save model {} epoch".format(epoch))
        torch.save(A.data,os.path.join(save_path, "graph_best.pt"))
        torch.save(model.state_dict(), os.path.join(save_path, "{}_best.pt".format(args.name)))

    # Periodic checkpoints in addition to the best-model checkpoint.
    if epoch % args.log_interval==0:
        torch.save(A.data,os.path.join(save_path, "graph_{}.pt".format(epoch)))
        torch.save(model.state_dict(), os.path.join(save_path, "{}_{}.pt".format(args.name, epoch)))
GANF | GANF-main/eval_water.py | #%%
import os
import argparse
import torch
from models.GANF import GANF
import numpy as np
from sklearn.metrics import roc_auc_score
# from data import fetch_dataloaders

# ----------------------------------------------------------------------------
# Evaluate a pretrained GANF model on the SWaT water-treatment dataset:
# restore the checkpointed model + graph, score the test split, report AUC.
# ----------------------------------------------------------------------------
parser = argparse.ArgumentParser()
# files
parser.add_argument('--data_dir', type=str,
                    default='./data/SWaT_Dataset_Attack_v0.csv', help='Location of datasets.')
parser.add_argument('--output_dir', type=str,
                    default='/home/enyandai/code/checkpoint/model')
parser.add_argument('--name', default='GANF_Water')
# restore
parser.add_argument('--graph', type=str, default='None')
parser.add_argument('--model', type=str, default='None')
parser.add_argument('--seed', type=int, default=10, help='Random seed to use.')
# made parameters
parser.add_argument('--n_blocks', type=int, default=1, help='Number of blocks to stack in a model (MADE in MAF; Coupling+BN in RealNVP).')
parser.add_argument('--n_components', type=int, default=1, help='Number of Gaussian clusters for mixture of gaussians models.')
parser.add_argument('--hidden_size', type=int, default=32, help='Hidden layer size for MADE (and each MADE block in an MAF).')
parser.add_argument('--n_hidden', type=int, default=1, help='Number of hidden layers in each MADE.')
parser.add_argument('--batch_norm', type=bool, default=False)
# training params
parser.add_argument('--batch_size', type=int, default=512)

args = parser.parse_known_args()[0]
args.cuda = torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")

print(args)

# Seed all RNGs for a deterministic evaluation.
import random
import numpy as np
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

#%%
print("Loading dataset")
from dataset import load_water

train_loader, val_loader, test_loader, n_sensor = load_water(args.data_dir, \
                                                             args.batch_size)

#%%
model = GANF(args.n_blocks, 1, args.hidden_size, args.n_hidden, dropout=0.0, batch_norm=args.batch_norm)
model = model.to(device)

# Restore the best checkpointed density model and its learned sensor graph.
model.load_state_dict(torch.load("./checkpoint/eval/water/GANF_water_seed_18_best.pt"))
A = torch.load("./checkpoint/eval/GANF_water_seed_18/graph_best.pt").to(device)
model.eval()
#%%
# Anomaly score = negative log-likelihood per window; higher = more anomalous.
loss_test = []
with torch.no_grad():
    for x in test_loader:
        x = x.to(device)
        loss = -model.test(x, A.data).cpu().numpy()
        loss_test.append(loss)
loss_test = np.concatenate(loss_test)
roc_test = roc_auc_score(np.asarray(test_loader.dataset.label.values,dtype=int),loss_test)
print("The ROC score on SWaT dataset is {}".format(roc_test))
# %%
| 2,593 | 35.535211 | 138 | py |
GANF | GANF-main/models/RNN.py | #%%
import torch
import torch.nn as nn
from functools import partial
class RecurrentEncoder(nn.Module):
    """Encode a (batch, seq, features) sequence into the RNN's final hidden state."""

    def __init__(self, n_features, latent_dim, rnn):
        super().__init__()
        # `rnn` is the recurrent module class to instantiate (e.g. nn.LSTM).
        self.rec_enc1 = rnn(n_features, latent_dim, batch_first=True)

    def forward(self, x):
        # Per-step outputs are discarded; only the final hidden state is kept.
        _outputs, hidden = self.rec_enc1(x)
        return hidden
class RecurrentDecoder(nn.Module):
    """Autoregressive decoder for RNN/GRU cells: unrolls `seq_len` steps from
    an initial hidden state, emitting one feature vector per step."""

    def __init__(self, latent_dim, n_features, rnn_cell, device):
        super().__init__()
        self.n_features = n_features
        self.device = device
        self.rec_dec1 = rnn_cell(n_features, latent_dim)
        self.dense_dec1 = nn.Linear(latent_dim, n_features)

    def forward(self, h_0, seq_len):
        # Drop the leading num_layers dimension of the encoder state.
        hidden = h_0.squeeze()
        # Seed the first input frame from the encoder output.
        frame = self.dense_dec1(hidden)

        # Feed each emitted frame back into the cell.
        frames = []
        for _ in range(seq_len):
            hidden = self.rec_dec1(frame, hidden)
            frame = self.dense_dec1(hidden)
            frames.append(frame)

        decoded = torch.cat(frames, dim=1)
        return decoded.view(-1, seq_len, self.n_features)
class RecurrentDecoderLSTM(nn.Module):
    """Autoregressive decoder for LSTM cells: like `RecurrentDecoder`, but the
    recurrent state is the (h, c) pair produced by an LSTM encoder."""

    def __init__(self, latent_dim, n_features, rnn_cell, device):
        super().__init__()
        self.n_features = n_features
        self.device = device
        self.rec_dec1 = rnn_cell(n_features, latent_dim)
        self.dense_dec1 = nn.Linear(latent_dim, n_features)

    def forward(self, h_0, seq_len):
        # Drop the leading num_layers dimension from both h and c.
        state = [part.squeeze() for part in h_0]
        # Seed the first input frame from the encoder's hidden state h.
        frame = self.dense_dec1(state[0])

        # Feed each emitted frame back into the LSTM cell.
        frames = []
        for _ in range(seq_len):
            state = self.rec_dec1(frame, state)
            frame = self.dense_dec1(state[0])
            frames.append(frame)

        decoded = torch.cat(frames, dim=1)
        return decoded.view(-1, seq_len, self.n_features)
class RecurrentAE(nn.Module):
    """LSTM autoencoder: encode a sequence to the final hidden state, decode it
    autoregressively, and flip the reconstruction back into input order."""

    def __init__(self, n_features, latent_dim, device):
        super().__init__()
        # Recurrent building blocks (LSTM encoder + LSTMCell decoder).
        self.rnn, self.rnn_cell = nn.LSTM, nn.LSTMCell
        self.latent_dim = latent_dim
        self.n_features = n_features
        self.device = device

        self.encoder = RecurrentEncoder(n_features, latent_dim, self.rnn)
        self.decoder = RecurrentDecoderLSTM(latent_dim, n_features, self.rnn_cell, device)

    def forward(self, x):
        # x: N X K X L X D
        length = x.shape[1]
        code = self.encoder(x)
        recon = self.decoder(code, length)
        # The decoder emits the sequence back-to-front; restore input order.
        return torch.flip(recon, [1])
# %%
| 2,929 | 26.904762 | 97 | py |
GANF | GANF-main/models/graph_layer.py | import torch
from torch.nn import Parameter, Linear, Sequential, BatchNorm1d, ReLU
import torch.nn.functional as F
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import remove_self_loops, add_self_loops, softmax
from torch_geometric.nn.inits import glorot, zeros
class GraphLayer(MessagePassing):
    """Multi-head graph attention layer with optional node embeddings.

    Extends torch_geometric's MessagePassing (sum aggregation). Attention
    logits combine transformed node features with per-node embeddings, as in
    GAT-style attention.

    NOTE(review): PyG injects `message()` arguments by *parameter name*
    (`x_i`, `x_j`, `edge_index_i`, `size_i` are magic suffixes) — do not
    rename them.
    """

    def __init__(self, in_channels, out_channels, heads=1, concat=True,
                 negative_slope=0.2, dropout=0, bias=True, inter_dim=-1,**kwargs):
        super(GraphLayer, self).__init__(aggr='add', **kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.heads = heads
        self.concat = concat                     # concat heads vs. average them
        self.negative_slope = negative_slope     # LeakyReLU slope for attention
        self.dropout = dropout

        # Stash for attention weights when return_attention_weights is set.
        self.__alpha__ = None

        # Shared linear projection producing all heads at once.
        self.lin = Linear(in_channels, heads * out_channels, bias=False)

        # Attention vectors for target (i) / source (j) features and embeddings.
        self.att_i = Parameter(torch.Tensor(1, heads, out_channels))
        self.att_j = Parameter(torch.Tensor(1, heads, out_channels))
        self.att_em_i = Parameter(torch.Tensor(1, heads, out_channels))
        self.att_em_j = Parameter(torch.Tensor(1, heads, out_channels))

        if bias and concat:
            self.bias = Parameter(torch.Tensor(heads * out_channels))
        elif bias and not concat:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()

    def reset_parameters(self):
        # Glorot for feature attention, zeros for embedding attention and bias.
        glorot(self.lin.weight)
        glorot(self.att_i)
        glorot(self.att_j)

        zeros(self.att_em_i)
        zeros(self.att_em_j)

        zeros(self.bias)

    def forward(self, x, edge_index, embedding, return_attention_weights=False):
        """Run attention-weighted message passing over `edge_index`.

        x: node features (tensor, or (src, dst) pair for bipartite graphs).
        embedding: per-node embedding tensor mixed into the attention logits
            (may be None).
        Returns the aggregated node features, plus (edge_index, alpha) when
        return_attention_weights is True.
        """
        if torch.is_tensor(x):
            x = self.lin(x)
            x = (x, x)
        else:
            x = (self.lin(x[0]), self.lin(x[1]))

        # Refresh self-loops so every node attends to itself exactly once.
        edge_index, _ = remove_self_loops(edge_index)
        edge_index, _ = add_self_loops(edge_index,
                                       num_nodes=x[1].size(self.node_dim))

        out = self.propagate(edge_index, x=x, embedding=embedding, edges=edge_index,
                             return_attention_weights=return_attention_weights)

        if self.concat:
            out = out.view(-1, self.heads * self.out_channels)
        else:
            out = out.mean(dim=1)

        if self.bias is not None:
            out = out + self.bias

        if return_attention_weights:
            alpha, self.__alpha__ = self.__alpha__, None
            return out, (edge_index, alpha)
        else:
            return out

    def message(self, x_i, x_j, edge_index_i, size_i,
                embedding,
                edges,
                return_attention_weights):
        # Called by propagate(); x_i/x_j are target/source features per edge.
        x_i = x_i.view(-1, self.heads, self.out_channels)
        x_j = x_j.view(-1, self.heads, self.out_channels)

        if embedding is not None:
            # Concatenate node embeddings onto both endpoints' features.
            embedding_i, embedding_j = embedding[edge_index_i], embedding[edges[0]]
            embedding_i = embedding_i.unsqueeze(1).repeat(1,self.heads,1)
            embedding_j = embedding_j.unsqueeze(1).repeat(1,self.heads,1)

            key_i = torch.cat((x_i, embedding_i), dim=-1)
            key_j = torch.cat((x_j, embedding_j), dim=-1)

        # Matching concatenated attention vectors.
        cat_att_i = torch.cat((self.att_i, self.att_em_i), dim=-1)
        cat_att_j = torch.cat((self.att_j, self.att_em_j), dim=-1)

        # GAT-style logits, normalized per target node.
        alpha = (key_i * cat_att_i).sum(-1) + (key_j * cat_att_j).sum(-1)
        alpha = alpha.view(-1, self.heads, 1)

        alpha = F.leaky_relu(alpha, self.negative_slope)
        alpha = softmax(alpha, edge_index_i, num_nodes=size_i)

        if return_attention_weights:
            self.__alpha__ = alpha

        alpha = F.dropout(alpha, p=self.dropout, training=self.training)

        return x_j * alpha.view(-1, self.heads, 1)

    def __repr__(self):
        return '{}({}, {}, heads={})'.format(self.__class__.__name__,
                                             self.in_channels,
                                             self.out_channels, self.heads)
| 4,099 | 32.333333 | 84 | py |
GANF | GANF-main/models/NF.py | #%%
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
import math
import copy
# --------------------
# Model layers and helpers
# --------------------
def create_masks(input_size, hidden_size, n_hidden, input_order='sequential', input_degrees=None):
    """Build MADE autoregressive masks (MADE paper, Sec. 4).

    Assigns a degree to every unit in each layer so a connection exists only
    where next_degree >= prev_degree, which enforces the autoregressive
    property. `input_degrees` lets a stacked MADE reuse (e.g. flip) the
    previous block's input ordering. Returns (masks, input_layer_degrees).
    """
    degrees = []

    if input_size > 1:
        if input_order == 'sequential':
            degrees.append(torch.arange(input_size) if input_degrees is None else input_degrees)
            for _ in range(n_hidden + 1):
                degrees.append(torch.arange(hidden_size) % (input_size - 1))
            out_degrees = (torch.arange(input_size) % input_size - 1
                           if input_degrees is None else input_degrees % input_size - 1)
            degrees.append(out_degrees)

        elif input_order == 'random':
            degrees.append(torch.randperm(input_size) if input_degrees is None else input_degrees)
            for _ in range(n_hidden + 1):
                min_prev_degree = min(degrees[-1].min().item(), input_size - 1)
                degrees.append(torch.randint(min_prev_degree, input_size, (hidden_size,)))
            min_prev_degree = min(degrees[-1].min().item(), input_size - 1)
            out_degrees = (torch.randint(min_prev_degree, input_size, (input_size,)) - 1
                           if input_degrees is None else input_degrees - 1)
            degrees.append(out_degrees)
    else:
        # Degenerate 1-D input: every degree is zero.
        degrees.append(torch.zeros([1]).long())
        for _ in range(n_hidden + 1):
            degrees.append(torch.zeros([hidden_size]).long())
        degrees.append(torch.zeros([input_size]).long())

    # One mask per consecutive layer pair: connect iff next >= prev.
    masks = [(nxt.unsqueeze(-1) >= prev.unsqueeze(0)).float()
             for prev, nxt in zip(degrees[:-1], degrees[1:])]

    return masks, degrees[0]
#%%
def create_masks_pmu(input_size, hidden_size, n_hidden, input_order='sequential', input_degrees=None):
    """MADE masks for PMU data where each variable has 3 channels.

    Same degree construction as `create_masks` (sequential ordering only);
    the input and output masks are then widened 3x so all three channels of a
    variable share its autoregressive degree. Returns (masks, input_degrees).
    """
    degrees = []

    if input_order == 'sequential':
        degrees.append(torch.arange(input_size) if input_degrees is None else input_degrees)
        for _ in range(n_hidden + 1):
            degrees.append(torch.arange(hidden_size) % (input_size - 1))
        out_degrees = (torch.arange(input_size) % input_size - 1
                       if input_degrees is None else input_degrees % input_size - 1)
        degrees.append(out_degrees)

    # One mask per consecutive layer pair: connect iff next >= prev.
    masks = [(nxt.unsqueeze(-1) >= prev.unsqueeze(0)).float()
             for prev, nxt in zip(degrees[:-1], degrees[1:])]

    # Triplicate input columns and output rows for the 3 channels per variable.
    masks[0] = masks[0].repeat_interleave(3, dim=1)
    masks[-1] = masks[-1].repeat_interleave(3, dim=0)
    return masks, degrees[0]
#%%
class MaskedLinear(nn.Linear):
    """MADE building block: a Linear layer whose weight is elementwise-masked,
    with an optional additive conditioning input."""

    def __init__(self, input_size, n_outputs, mask, cond_label_size=None):
        super().__init__(input_size, n_outputs)

        self.register_buffer('mask', mask)

        self.cond_label_size = cond_label_size
        if cond_label_size is not None:
            scale = math.sqrt(cond_label_size)
            self.cond_weight = nn.Parameter(torch.rand(n_outputs, cond_label_size) / scale)

    def forward(self, x, y=None):
        # Mask the weight so only autoregressively-valid connections remain.
        out = F.linear(x, self.weight * self.mask, self.bias)
        if y is not None:
            out = out + F.linear(y, self.cond_weight)
        return out

    def extra_repr(self):
        text = 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None)
        if self.cond_label_size is not None:
            text += ', cond_features={}'.format(self.cond_label_size)
        return text
class LinearMaskedCoupling(nn.Module):
    """RealNVP-style affine coupling layer (modified per the MAF paper).

    `mask` selects the conditioning subset: masked entries pass through
    unchanged, the rest are affinely transformed conditioned on them.
    """

    def __init__(self, input_size, hidden_size, n_hidden, mask, cond_label_size=None):
        super().__init__()

        self.register_buffer('mask', mask)

        # Scale network s(.), optionally conditioned on a label vector y.
        in_dim = input_size + (cond_label_size if cond_label_size is not None else 0)
        layers = [nn.Linear(in_dim, hidden_size)]
        for _ in range(n_hidden):
            layers += [nn.Tanh(), nn.Linear(hidden_size, hidden_size)]
        layers += [nn.Tanh(), nn.Linear(hidden_size, input_size)]
        self.s_net = nn.Sequential(*layers)

        # Translation network t(.): same architecture and initial weights
        # (via deepcopy), but with ReLU activations per the MAF paper.
        self.t_net = copy.deepcopy(self.s_net)
        for i in range(len(self.t_net)):
            if not isinstance(self.t_net[i], nn.Linear):
                self.t_net[i] = nn.ReLU()

    def forward(self, x, y=None):
        masked = x * self.mask

        net_in = masked if y is None else torch.cat([y, masked], dim=1)
        s = self.s_net(net_in)
        t = self.t_net(net_in)

        # cf RealNVP eq 8 (we model u); the sum over input_size happens in
        # the model's log_prob.
        u = masked + (1 - self.mask) * (x - t) * torch.exp(-s)
        log_abs_det_jacobian = -(1 - self.mask) * s  # log det du/dx
        return u, log_abs_det_jacobian

    def inverse(self, u, y=None):
        masked = u * self.mask

        net_in = masked if y is None else torch.cat([y, masked], dim=1)
        s = self.s_net(net_in)
        t = self.t_net(net_in)

        x = masked + (1 - self.mask) * (u * s.exp() + t)  # cf RealNVP eq 7
        log_abs_det_jacobian = (1 - self.mask) * s  # log det dx/du
        return x, log_abs_det_jacobian
class BatchNorm(nn.Module):
    """RealNVP-style invertible BatchNorm layer.

    Normalizes with batch statistics in training and running statistics in
    eval, applies a learned affine (exp(log_gamma), beta), and reports the
    per-dimension log|det| of the transform.
    """
    def __init__(self, input_size, momentum=0.9, eps=1e-5):
        super().__init__()
        self.momentum = momentum
        self.eps = eps

        # learned affine parameters (identity transform at init)
        self.log_gamma = nn.Parameter(torch.zeros(input_size))
        self.beta = nn.Parameter(torch.zeros(input_size))

        # EMA statistics used at eval time
        self.register_buffer('running_mean', torch.zeros(input_size))
        self.register_buffer('running_var', torch.ones(input_size))

    def forward(self, x, cond_y=None):
        if self.training:
            # per-feature batch statistics; note this is the *unbiased* variance
            # (the MAF paper uses the biased one, x.var(0, unbiased=False))
            self.batch_mean = x.mean(0)
            self.batch_var = x.var(0)

            # EMA update: new = momentum * old + (1 - momentum) * batch
            self.running_mean.mul_(self.momentum).add_(self.batch_mean.data * (1 - self.momentum))
            self.running_var.mul_(self.momentum).add_(self.batch_var.data * (1 - self.momentum))

            mean, var = self.batch_mean, self.batch_var
        else:
            mean, var = self.running_mean, self.running_var

        # normalize (cf original batch norm paper algo 1), then affine
        x_hat = (x - mean) / torch.sqrt(var + self.eps)
        y = self.log_gamma.exp() * x_hat + self.beta

        # log|det dy/dx| per dimension (cf RealNVP paper)
        log_abs_det_jacobian = self.log_gamma - 0.5 * torch.log(var + self.eps)
        return y, log_abs_det_jacobian.expand_as(x)

    def inverse(self, y, cond_y=None):
        if self.training:
            mean, var = self.batch_mean, self.batch_var
        else:
            mean, var = self.running_mean, self.running_var

        # undo the affine, then un-normalize
        x_hat = (y - self.beta) * torch.exp(-self.log_gamma)
        x = x_hat * torch.sqrt(var + self.eps) + mean

        log_abs_det_jacobian = 0.5 * torch.log(var + self.eps) - self.log_gamma
        return x, log_abs_det_jacobian.expand_as(y)
class FlowSequential(nn.Sequential):
    """Container for the layers of a normalizing flow.

    Chains the layers in order for forward (data -> latent) and in reverse
    for inverse (latent -> data), accumulating each layer's log|det J|.
    """
    def forward(self, x, y):
        total_log_det = 0
        for layer in self:
            x, log_det = layer(x, y)
            total_log_det = total_log_det + log_det
        return x, total_log_det

    def inverse(self, u, y):
        total_log_det = 0
        for layer in reversed(self):
            u, log_det = layer.inverse(u, y)
            total_log_det = total_log_det + log_det
        return u, total_log_det
# --------------------
# Models
# --------------------
class MADE(nn.Module):
    """Masked autoregressive network (MADE) used as one MAF flow block.

    forward maps data x to latent u in a single pass; inverse reconstructs x
    autoregressively, one dimension per network pass.

    Fix: removed the unused local `D = u.shape[1]` in `inverse`, which also
    shadowed the module-level distributions alias `D` used by `base_dist`.
    """
    def __init__(self, input_size, hidden_size, n_hidden, cond_label_size=None, activation='relu', input_order='sequential', input_degrees=None):
        """
        Args:
            input_size -- scalar; dim of inputs
            hidden_size -- scalar; dim of hidden layers
            n_hidden -- scalar; number of hidden layers
            cond_label_size -- scalar or None; dim of an optional conditioning input y
            activation -- str; activation function to use ('relu' or 'tanh')
            input_order -- str or tensor; variable order for creating the autoregressive masks (sequential|random)
                           or the order flipped from the previous layer in a stack of mades
            input_degrees -- tensor or None; degrees handed over from the previous MADE in a stack
        """
        super().__init__()
        # base distribution N(0, I) for log prob under the model; buffers follow .to(device)
        self.register_buffer('base_dist_mean', torch.zeros(input_size))
        self.register_buffer('base_dist_var', torch.ones(input_size))

        # create the autoregressive connectivity masks
        masks, self.input_degrees = create_masks(input_size, hidden_size, n_hidden, input_order, input_degrees)

        # setup activation
        if activation == 'relu':
            activation_fn = nn.ReLU()
        elif activation == 'tanh':
            activation_fn = nn.Tanh()
        else:
            raise ValueError('Check activation function.')

        # construct model: masked input layer + masked hidden stack
        self.net_input = MaskedLinear(input_size, hidden_size, masks[0], cond_label_size)
        self.net = []
        for m in masks[1:-1]:
            self.net += [activation_fn, MaskedLinear(hidden_size, hidden_size, m)]
        # output mask repeated so one pass yields both mean and log-scale
        self.net += [activation_fn, MaskedLinear(hidden_size, 2 * input_size, masks[-1].repeat(2,1))]
        self.net = nn.Sequential(*self.net)

    @property
    def base_dist(self):
        return D.Normal(self.base_dist_mean, self.base_dist_var)

    def forward(self, x, y=None):
        # MAF eq 4 -- return mean and log std
        m, loga = self.net(self.net_input(x, y)).chunk(chunks=2, dim=1)
        u = (x - m) * torch.exp(-loga)
        # MAF eq 5
        log_abs_det_jacobian = - loga
        return u, log_abs_det_jacobian

    def inverse(self, u, y=None, sum_log_abs_det_jacobians=None):
        """MAF eq 3: sequential reconstruction — dimension i of x depends only on
        previously generated dimensions, so one network pass per dimension.
        (sum_log_abs_det_jacobians is accepted for API compatibility but unused.)
        """
        x = torch.zeros_like(u)
        for i in self.input_degrees:
            m, loga = self.net(self.net_input(x, y)).chunk(chunks=2, dim=1)
            x[:,i] = u[:,i] * torch.exp(loga[:,i]) + m[:,i]
        # loga from the final pass covers all dimensions
        log_abs_det_jacobian = loga
        return x, log_abs_det_jacobian

    def log_prob(self, x, y=None):
        # change of variables: log p(x) = log N(u) + log|det du/dx|, summed over dims
        u, log_abs_det_jacobian = self.forward(x, y)
        return torch.sum(self.base_dist.log_prob(u) + log_abs_det_jacobian, dim=1)
class MADE_Full(nn.Module):
    """MADE variant whose masks are built by create_masks_pmu over input_size/3
    variable groups; forward-only (no autoregressive inverse)."""
    def __init__(self, input_size, hidden_size, n_hidden, cond_label_size=None, activation='relu', input_order='sequential', input_degrees=None):
        """
        Args:
            input_size -- scalar; dim of inputs
            hidden_size -- scalar; dim of hidden layers
            n_hidden -- scalar; number of hidden layers
            activation -- str; activation function to use
            input_order -- str or tensor; variable order for creating the autoregressive masks (sequential|random)
                           or the order flipped from the previous layer in a stack of mades
            input_degrees -- tensor or None; degrees handed over from the previous block in a stack
        """
        super().__init__()
        # N(0, I) base distribution registered as buffers so it follows .to(device)
        self.register_buffer('base_dist_mean', torch.zeros(input_size))
        self.register_buffer('base_dist_var', torch.ones(input_size))

        # masks are built over input_size/3 variable groups
        masks, self.input_degrees = create_masks_pmu(int(input_size/3), hidden_size, n_hidden, input_order, input_degrees)

        if activation == 'relu':
            act = nn.ReLU()
        elif activation == 'tanh':
            act = nn.Tanh()
        else:
            raise ValueError('Check activation function.')

        # masked input layer followed by the masked hidden stack
        self.net_input = MaskedLinear(input_size, hidden_size, masks[0], cond_label_size)
        layers = []
        for mask in masks[1:-1]:
            layers.extend([act, MaskedLinear(hidden_size, hidden_size, mask)])
        # output mask repeated so one pass yields both mean and log-scale
        layers.extend([act, MaskedLinear(hidden_size, 2 * input_size, masks[-1].repeat(2,1))])
        self.net = nn.Sequential(*layers)

    @property
    def base_dist(self):
        return D.Normal(self.base_dist_mean, self.base_dist_var)

    def forward(self, x, y=None):
        # MAF eq 4 -- predict per-dimension mean and log std in one pass
        mean, log_scale = self.net(self.net_input(x, y)).chunk(chunks=2, dim=1)
        u = (x - mean) * torch.exp(-log_scale)
        # MAF eq 5
        return u, -log_scale

    def log_prob(self, x, y=None):
        # change of variables: log p(x) = log N(u) + log|det du/dx|, summed over dims
        u, log_det = self.forward(x, y)
        return torch.sum(self.base_dist.log_prob(u) + log_det, dim=1)
class MAF(nn.Module):
    """Masked Autoregressive Flow: a stack of MADE blocks, optionally
    interleaved with invertible BatchNorm layers."""
    def __init__(self, n_blocks, input_size, hidden_size, n_hidden, cond_label_size=None, activation='relu', input_order='sequential', batch_norm=True):
        super().__init__()
        # N(0, I) base distribution registered as buffers so it follows .to(device)
        self.register_buffer('base_dist_mean', torch.zeros(input_size))
        self.register_buffer('base_dist_var', torch.ones(input_size))

        # stack MADE blocks, flipping the variable order between consecutive blocks
        blocks = []
        self.input_degrees = None
        for _ in range(n_blocks):
            made = MADE(input_size, hidden_size, n_hidden, cond_label_size, activation, input_order, self.input_degrees)
            blocks.append(made)
            self.input_degrees = made.input_degrees.flip(0)
            if batch_norm:
                blocks.append(BatchNorm(input_size))
        self.net = FlowSequential(*blocks)

    @property
    def base_dist(self):
        return D.Normal(self.base_dist_mean, self.base_dist_var)

    def forward(self, x, y=None):
        """Data -> latent; returns (u, summed per-dim log|det|)."""
        return self.net(x, y)

    def inverse(self, u, y=None):
        """Latent -> data."""
        return self.net.inverse(u, y)

    def log_prob(self, x, y=None):
        u, sum_log_abs_det_jacobians = self.forward(x, y)
        return torch.sum(self.base_dist.log_prob(u) + sum_log_abs_det_jacobians, dim=1)
class MAF_Full(nn.Module):
    """MAF variant built from MADE_Full blocks (group-masked MADE), optionally
    interleaved with invertible BatchNorm layers."""
    def __init__(self, n_blocks, input_size, hidden_size, n_hidden, cond_label_size=None, activation='relu', input_order='sequential', batch_norm=True):
        super().__init__()
        # N(0, I) base distribution registered as buffers so it follows .to(device)
        self.register_buffer('base_dist_mean', torch.zeros(input_size))
        self.register_buffer('base_dist_var', torch.ones(input_size))

        # stack MADE_Full blocks, flipping the variable order between blocks
        blocks = []
        self.input_degrees = None
        for _ in range(n_blocks):
            made = MADE_Full(input_size, hidden_size, n_hidden, cond_label_size, activation, input_order, self.input_degrees)
            blocks.append(made)
            self.input_degrees = made.input_degrees.flip(0)
            if batch_norm:
                blocks.append(BatchNorm(input_size))
        self.net = FlowSequential(*blocks)

    @property
    def base_dist(self):
        return D.Normal(self.base_dist_mean, self.base_dist_var)

    def forward(self, x, y=None):
        """Data -> latent; returns (u, summed per-dim log|det|)."""
        return self.net(x, y)

    def inverse(self, u, y=None):
        """Latent -> data."""
        return self.net.inverse(u, y)

    def log_prob(self, x, y=None):
        u, sum_log_abs_det_jacobians = self.forward(x, y)
        return torch.sum(self.base_dist.log_prob(u) + sum_log_abs_det_jacobians, dim=1)
class RealNVP(nn.Module):
    """RealNVP flow: linear masked coupling layers with a checkerboard mask
    that alternates between blocks, optionally with invertible BatchNorm."""
    def __init__(self, n_blocks, input_size, hidden_size, n_hidden, cond_label_size=None, batch_norm=True):
        super().__init__()
        # N(0, I) base distribution registered as buffers so it follows .to(device)
        self.register_buffer('base_dist_mean', torch.zeros(input_size))
        self.register_buffer('base_dist_var', torch.ones(input_size))

        # alternate the (0, 1, 0, ...) mask between consecutive coupling layers
        layers = []
        mask = torch.arange(input_size).float() % 2
        for _ in range(n_blocks):
            layers.append(LinearMaskedCoupling(input_size, hidden_size, n_hidden, mask, cond_label_size))
            mask = 1 - mask
            if batch_norm:
                layers.append(BatchNorm(input_size))
        self.net = FlowSequential(*layers)

    @property
    def base_dist(self):
        return D.Normal(self.base_dist_mean, self.base_dist_var)

    def forward(self, x, y=None):
        """Data -> latent; returns (u, summed per-dim log|det|)."""
        return self.net(x, y)

    def inverse(self, u, y=None):
        """Latent -> data."""
        return self.net.inverse(u, y)

    def log_prob(self, x, y=None):
        u, sum_log_abs_det_jacobians = self.forward(x, y)
        return torch.sum(self.base_dist.log_prob(u) + sum_log_abs_det_jacobians, dim=1)
| 17,494 | 39.780886 | 169 | py |
GANF | GANF-main/models/DROCC.py | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class LSTM_FC(nn.Module):
    """Single-layer LSTM encoder followed by a linear head.

    The hidden state of the last time step is used as the sequence embedding.
    Fix: removed the dead local `activ = nn.ReLU(True)` — it was created and
    never used.
    """
    def __init__(self,
                 input_dim=32,
                 num_classes=1,
                 num_hidden_nodes=8
                 ):
        """
        Args:
            input_dim: number of features per time step.
            num_classes: output dimension of the linear head.
            num_hidden_nodes: LSTM hidden size.
        """
        super(LSTM_FC, self).__init__()
        self.input_dim = input_dim
        self.num_classes = num_classes
        self.num_hidden_nodes = num_hidden_nodes
        # batch_first: inputs are (batch, seq_len, input_dim)
        self.encoder = nn.LSTM(input_size=self.input_dim, hidden_size=self.num_hidden_nodes,
                               num_layers=1, batch_first=True)
        self.fc = nn.Linear(self.num_hidden_nodes, self.num_classes)

    def forward(self, input):
        """Return logits of shape (batch, num_classes)."""
        # use the hidden state of the last time step as the sequence embedding
        features = self.encoder(input)[0][:, -1, :]
        logits = self.fc(features)
        return logits

    def half_forward_start(self, x):
        """First half of forward: sequence -> last-step embedding."""
        features = self.encoder(x)[0][:, -1, :]
        return features

    def half_forward_end(self, x):
        """Second half of forward: embedding -> logits."""
        logits = self.fc(x)
        return logits
class DROCCTrainer:
    """
    Trainer class that implements the DROCC algorithm proposed in
    https://arxiv.org/abs/2002.12718

    Fixes: the last line of `load` carried extraction residue
    ("| 8,730 | ... | py |") that broke the syntax; the `optimizer` docstring
    entry described the wrong parameter.
    """

    def __init__(self, model, optimizer, lamda, radius, gamma, device):
        """Initialize the DROCC Trainer class
        Parameters
        ----------
        model: Torch neural network object
        optimizer: torch.optim optimizer used to update the model parameters.
        lamda: Weight given to the adversarial loss
        radius: Radius of hypersphere to sample points from.
        gamma: Parameter to vary projection.
        device: torch.device object for device to use.
        """
        self.model = model
        self.optimizer = optimizer
        self.lamda = lamda
        self.radius = radius
        self.gamma = gamma
        self.device = device

    def train(self, train_loader, lr_scheduler, total_epochs, save_path, name,
              only_ce_epochs=5, ascent_step_size=0.001, ascent_num_steps=50):
        """Trains the model on the given training dataset, saving a checkpoint
        after every epoch.
        Parameters
        ----------
        train_loader: Dataloader object for the training dataset.
        lr_scheduler: Learning rate scheduler (stepped once per batch, see note below).
        total_epochs: Total number of epochs for training.
        save_path: Directory where per-epoch checkpoints are written.
        name: Checkpoint file prefix ("{name}_{epoch}.pt").
        only_ce_epochs: Number of epochs for initial pretraining.
        ascent_step_size: Step size for gradient ascent for adversarial
            generation of negative points.
        ascent_num_steps: Number of gradient ascent steps for adversarial
            generation of negative points.
        """
        self.ascent_num_steps = ascent_num_steps
        self.ascent_step_size = ascent_step_size
        for epoch in range(total_epochs):
            # Make the weights trainable
            self.model.train()

            # Placeholder for the respective 2 loss values
            epoch_adv_loss = 0.0  # AdvLoss
            epoch_ce_loss = 0.0   # Cross entropy Loss

            batch_idx = -1
            for data in train_loader:
                batch_idx += 1
                data = data.to(self.device)
                # every training point is labeled "normal" (1)
                target = torch.ones([data.shape[0]], dtype=torch.float32).to(self.device)
                # fold trailing dims into (batch, seq_len, features) for the LSTM
                # assumes 4-D batches — TODO confirm layout against the dataset
                data = torch.transpose(data, dim0=1, dim1=2)
                data = data.reshape(data.shape[0], data.shape[1], data.shape[2]*data.shape[3])

                self.optimizer.zero_grad()
                # Extract the logits for cross entropy loss
                logits = self.model(data)
                logits = torch.squeeze(logits, dim=1)
                ce_loss = F.binary_cross_entropy_with_logits(logits, target)
                # Add to the epoch variable for printing average CE Loss
                epoch_ce_loss += ce_loss.item()
                '''
                Adversarial Loss is calculated only for the positive data points (label==1).
                '''
                if epoch >= only_ce_epochs:
                    data = data[target == 1]
                    # AdvLoss
                    adv_loss = self.one_class_adv_loss(data)
                    epoch_adv_loss += adv_loss.item()
                    loss = ce_loss + adv_loss * self.lamda
                else:
                    # If only CE based training has to be done
                    loss = ce_loss
                # Backprop
                loss.backward()
                self.optimizer.step()
                # NOTE(review): the scheduler is stepped once per *batch*, not per epoch
                lr_scheduler.step()

            epoch_ce_loss = epoch_ce_loss/(batch_idx + 1)    # Average CE Loss
            epoch_adv_loss = epoch_adv_loss/(batch_idx + 1)  # Average AdvLoss

            print('Epoch: {}, CE Loss: {}, AdvLoss: {}'.format(
                epoch, epoch_ce_loss, epoch_adv_loss))

            self.save(os.path.join(save_path, "{}_{}.pt".format(name, epoch)))

    def test(self, test_loader):
        """Evaluate the model on the given test dataset.
        Parameters
        ----------
        test_loader: Dataloader object for the test dataset.

        Returns negated logits as anomaly scores (higher = more anomalous).
        """
        self.model.eval()
        scores = []
        with torch.no_grad():
            for data in test_loader:
                data = data.to(self.device)
                data = torch.transpose(data, dim0=1, dim1=2)
                data = data.reshape(data.shape[0], data.shape[1], data.shape[2]*data.shape[3])
                logits = self.model(data).cpu().numpy()
                scores.append(logits)
        scores = -np.concatenate(scores)
        return scores

    def one_class_adv_loss(self, x_train_data):
        """Computes the adversarial loss:
        1) Sample points initially at random around the positive training
            data points
        2) Gradient ascent to find the most optimal point in set N_i(r)
            classified as +ve (label=0). This is done by maximizing
            the CE loss wrt label 0
        3) Project the points between spheres of radius R and gamma * R
            (set N_i(r))
        4) Pass the calculated adversarial points through the model,
            and calculate the CE loss wrt target class 0
        Parameters
        ----------
        x_train_data: Batch of data to compute loss on.
        """
        batch_size = len(x_train_data)
        # Randomly sample points around the training data
        # We will perform SGD on these to find the adversarial points
        x_adv = torch.randn(x_train_data.shape).to(self.device).detach().requires_grad_()
        x_adv_sampled = x_adv + x_train_data

        for step in range(self.ascent_num_steps):
            with torch.enable_grad():
                new_targets = torch.zeros(batch_size, 1).to(self.device)
                new_targets = torch.squeeze(new_targets)
                new_targets = new_targets.to(torch.float)

                logits = self.model(x_adv_sampled)
                logits = torch.squeeze(logits, dim=1)
                new_loss = F.binary_cross_entropy_with_logits(logits, new_targets)

                # ascend along the normalized gradient of the loss wrt the points
                grad = torch.autograd.grad(new_loss, [x_adv_sampled])[0]
                grad_norm = torch.norm(grad, p=2, dim=tuple(range(1, grad.dim())))
                grad_norm = grad_norm.view(-1, *[1]*(grad.dim()-1))
                grad_normalized = grad/grad_norm
            with torch.no_grad():
                x_adv_sampled.add_(self.ascent_step_size * grad_normalized)

                if (step + 1) % 10 == 0:
                    # Project the normal points to the set N_i(r)
                    h = x_adv_sampled - x_train_data
                    norm_h = torch.sqrt(torch.sum(h**2,
                                                  dim=tuple(range(1, h.dim()))))
                    alpha = torch.clamp(norm_h, self.radius,
                                        self.gamma * self.radius).to(self.device)
                    # Make use of broadcast to project h
                    proj = (alpha/norm_h).view(-1, *[1] * (h.dim()-1))
                    h = proj * h
                    x_adv_sampled = x_train_data + h  # These adv_points are now on the surface of hyper-sphere

        adv_pred = self.model(x_adv_sampled)
        adv_pred = torch.squeeze(adv_pred, dim=1)
        adv_loss = F.binary_cross_entropy_with_logits(adv_pred, (new_targets * 0))
        return adv_loss

    def save(self, path):
        """Save the model's state_dict to `path`."""
        torch.save(self.model.state_dict(), path)

    def load(self, path):
        """Load the model's state_dict from `path`."""
        self.model.load_state_dict(torch.load(path))
GANF | GANF-main/models/DeepSAD.py |
#%%
import json
import torch
import logging
import time
import torch
import torch.optim as optim
class AETrainer:
    """Autoencoder pretraining: minimizes per-sample squared reconstruction
    error for a fixed 10 epochs. The trained autoencoder's encoder is later
    reused as the Deep SVDD / Deep SAD feature network."""
    def __init__(self, device: str = 'cuda'):
        # device string or torch.device the network and batches are moved to
        self.device = device
    def train(self, train_loader, ae_net, args):
        """Train `ae_net` with Adam and return it.

        Args:
            train_loader: iterable of 4-D batches (optionally wrapped in a
                list, in which case only the first element is used).
            ae_net: the autoencoder module to pretrain.
            args: namespace providing `lr` and `weight_decay`.
        """
        logger = logging.getLogger()  # NOTE(review): acquired but never used
        # Set device for network
        ae_net = ae_net.to(self.device)
        # Set optimizer (Adam optimizer for now)
        optimizer = optim.Adam(ae_net.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        # Training
        print('Starting pretraining...')
        start_time = time.time()
        ae_net.train()
        for epoch in range(10):
            loss_epoch = 0.0
            n_batches = 0
            epoch_start_time = time.time()
            for data in train_loader:
                if isinstance(data, list):
                    data = data[0]
                x = data.to(self.device)
                # fold the last two dims into a (channels, time) layout for the 1-D conv AE
                # assumes 4-D batches (B, d1, d2, d3) -> inputs (B, d1*d3, d2) — TODO confirm
                x = torch.transpose(x, dim0=2, dim1=3)
                inputs = x.reshape(x.shape[0], x.shape[1]*x.shape[2], x.shape[3])
                # Zero the network parameter gradients
                optimizer.zero_grad()
                # Update network parameters via backpropagation: forward + backward + optimize
                outputs = ae_net(inputs)
                # per-sample squared reconstruction error, summed over all non-batch dims
                scores = torch.sum((outputs - inputs) ** 2, dim=tuple(range(1, outputs.dim())))
                loss = torch.mean(scores)
                loss.backward()
                optimizer.step()
                loss_epoch += loss.item()
                n_batches += 1
            # log epoch statistics
            epoch_train_time = time.time() - epoch_start_time
            print(' Epoch {}/{}\t Time: {:.3f}\t Loss: {:.8f}'
                  .format(epoch + 1, 10, epoch_train_time, loss_epoch / n_batches))
        pretrain_time = time.time() - start_time
        print('Pretraining time: %.3f' % pretrain_time)
        print('Finished pretraining.')
        return ae_net
class DeepSVDDTrainer:
    """One-class Deep SVDD trainer: minimizes the mean squared distance of
    network embeddings to a fixed hypersphere center c."""
    def __init__(self, device: str = 'cuda'):
        self.device = device

        # Deep SVDD parameters
        self.c = None  # hypersphere center; initialized from data on first train()
    def train(self, train_loader, net, args):
        """Train `net` and return it; saves a {'c', 'net_dict'} checkpoint per epoch.

        Args:
            train_loader: iterable of 4-D batches.
            net: the encoder network to train.
            args: namespace providing lr, weight_decay, n_epochs, output_dir, name.
        """
        self.args = args
        # Set device for network
        net = net.to(self.device)
        # Set optimizer (Adam optimizer for now)
        optimizer = optim.Adam(net.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        # Set learning rate scheduler
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20], gamma=0.1)
        # Initialize hypersphere center c (if c not loaded)
        if self.c is None:
            print('Initializing center c...')
            self.c = self.init_center_c(train_loader, net)
            print(self.c.shape)
            print('Center c initialized.')
        # Training
        print('Starting training...')
        start_time = time.time()
        net.train()
        save_path = os.path.join(args.output_dir,args.name)
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        for epoch in range(args.n_epochs):
            # NOTE(review): scheduler is stepped at the *start* of each epoch
            scheduler.step()
            loss_epoch = 0.0
            n_batches = 0
            epoch_start_time = time.time()
            for data in train_loader:
                x = data.to(self.device)
                # fold to (batch, channels, time) for the 1-D conv encoder
                # assumes 4-D batches — TODO confirm layout against the dataset
                x = torch.transpose(x, dim0=2, dim1=3)
                inputs = x.reshape(x.shape[0], x.shape[1]*x.shape[2], x.shape[3])
                # Zero the network parameter gradients
                optimizer.zero_grad()
                # Update network parameters via backpropagation: forward + backward + optimize
                # embeddings are averaged over the last (temporal) dim before the distance
                outputs = net(inputs).squeeze().mean(dim=-1)
                dist = torch.sum((outputs - self.c) ** 2, dim=1)
                loss = torch.mean(dist)
                loss.backward()
                optimizer.step()
                loss_epoch += loss.item()
                n_batches += 1
            # log epoch statistics
            epoch_train_time = time.time() - epoch_start_time
            print(' Epoch {}/{}\t Time: {:.3f}\t Loss: {:.8f}'
                    .format(epoch + 1, args.n_epochs, epoch_train_time, loss_epoch / n_batches))
            torch.save({'c': self.c, 'net_dict': net.state_dict()}, os.path.join(save_path, "{}_{}.pt".format(args.name, epoch)))
        self.train_time = time.time() - start_time
        print('Training time: %.3f' % self.train_time)
        print('Finished training.')
        return net
    def init_center_c(self, train_loader, net, eps=0.1):
        """Initialize hypersphere center c as the mean from an initial forward pass on the data."""
        n_samples = 0
        c = 0.0
        net.eval()
        with torch.no_grad():
            for data in train_loader:
                # get the inputs of the batch
                x = data.to(self.device)
                x = torch.transpose(x, dim0=2, dim1=3)
                inputs = x.reshape(x.shape[0], x.shape[1]*x.shape[2], x.shape[3])
                outputs = net(inputs).squeeze()
                n_samples += outputs.shape[0]
                c += torch.sum(outputs, dim=0)
        c /= n_samples
        # If c_i is too close to 0, set to +-eps. Reason: a zero unit can be trivially matched with zero weights.
        c[(abs(c) < eps) & (c < 0)] = -eps
        c[(abs(c) < eps) & (c > 0)] = eps
        # average over the trailing dim so c matches the time-averaged embeddings
        return c.mean(dim=-1)
from models.RNN import RecurrentAE
from models.GAN import CNNAE
class DeepSVDD(object):
    """Facade tying together the CNN autoencoder, AE pretraining, Deep SVDD
    training and evaluation, and checkpoint (de)serialization."""
    def __init__(self, n_features, hidden_size, device):
        self.c = None  # hypersphere center c
        self.trainer = None
        # if encoder=='RNN':
        #     self.ae_net = RecurrentAE(n_features, hidden_size, device)
        self.ae_net = CNNAE(n_features, hidden_size).to(device)
        # only the encoder half is used as the Deep SVDD feature network
        self.net = self.ae_net.encoder
        self.ae_trainer = None
        self.results = {
            'test_auc': None
        }
    def train(self, dataset, args, device: str = 'cuda'):
        """Trains the Deep SVDD model on the training data."""
        self.trainer = DeepSVDDTrainer(device=device)
        # Get the model
        self.trainer.train(dataset, self.net, args)
        self.c = self.trainer.c
    def test(self, test_loader, delta_t, sigma, device):
        """Score the test set by squared distance to c and report ROC-AUC.

        Returns (auc_score, fps, tps) from roc_auc_all.
        """
        from utils import roc_auc_all
        import numpy as np
        self.net.eval()
        self.net.to(device)
        loss = []
        with torch.no_grad():
            for data in test_loader:
                x = data.to(device)
                # same (batch, channels, time) folding as in training
                x = torch.transpose(x, dim0=2, dim1=3)
                inputs = x.reshape(x.shape[0], x.shape[1]*x.shape[2], x.shape[3])
                outputs = self.net(inputs).squeeze().mean(dim=-1)
                batch_loss= torch.sum((outputs - self.c) ** 2, dim=1).cpu().numpy()
                loss.append(batch_loss)
        loss = np.concatenate(loss)
        auc_score, fps,tps = roc_auc_all(loss, delta_t, sigma)
        print("meann: {:.4f}, median: {:.4f}, auc:{:.4f}".format(np.mean(loss), np.median(loss),auc_score))# %%
        self.results['test_auc'] = auc_score
        return auc_score, fps,tps
    def pretrain(self, train_loader, args, device):
        """Pretrains the weights for the Deep SVDD network \phi via autoencoder."""
        self.ae_trainer = AETrainer(device=device)
        self.ae_net = self.ae_trainer.train(train_loader, self.ae_net, args)
        self.net = self.ae_net.encoder
    def save_model(self, export_model):
        """Save Deep SVDD model to export_model."""
        net_dict = self.net.state_dict()
        torch.save({'c': self.c,
                    'net_dict': net_dict}, export_model)
    def load_model(self, model_path):
        """Load Deep SVDD model from model_path."""
        model_dict = torch.load(model_path)
        self.c = model_dict['c']
        self.net.load_state_dict(model_dict['net_dict'])
    def save_results(self, export_json):
        """Save results dict to a JSON-file."""
        with open(export_json, 'w') as fp:
            json.dump(self.results, fp)
import os
class DeepSADTrainer:
    """Semi-supervised Deep SAD trainer: like Deep SVDD, but labeled samples
    reweight the distance loss via the exponent of their semi-label."""
    def __init__(self, device: str = 'cuda'):
        self.device = device
        self.c = None  # hypersphere center; initialized from data on first train()
    def train(self, train_loader, net, args):
        """Train `net` and return it; saves a {'c', 'net_dict'} checkpoint per epoch.

        Args:
            train_loader: iterable of (batch, semi_targets) pairs; semi_targets
                0 = unlabeled; a semi-label of -1 turns the loss into
                eta/(dist + 1e-6), pushing labeled anomalies away from c.
            net: the encoder network to train.
            args: namespace providing lr, weight_decay, n_epochs, eta,
                output_dir, name.
        """
        self.args = args
        # Set device for network
        net = net.to(self.device)
        # Set optimizer (Adam optimizer for now)
        optimizer = optim.Adam(net.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        # Set learning rate scheduler
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20], gamma=0.1)
        # Initialize hypersphere center c (if c not loaded)
        if self.c is None:
            print('Initializing center c...')
            self.c = self.init_center_c(train_loader, net)
            print('Center c initialized.')
        # Training
        print('Starting training...')
        start_time = time.time()
        net.train()
        save_path = os.path.join(args.output_dir,args.name)
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        for epoch in range(args.n_epochs):
            loss_epoch = 0.0
            n_batches = 0
            epoch_start_time = time.time()
            for data, semi_targets in train_loader:
                x = data.to(self.device)
                # fold to (batch, channels, time) for the 1-D conv encoder
                # assumes 4-D batches — TODO confirm layout against the dataset
                x = torch.transpose(x, dim0=2, dim1=3)
                inputs = x.reshape(x.shape[0], x.shape[1]*x.shape[2], x.shape[3])
                semi_targets = semi_targets.to(self.device)
                # Zero the network parameter gradients
                optimizer.zero_grad()
                # Update network parameters via backpropagation: forward + backward + optimize
                # embeddings are averaged over the last (temporal) dim before the distance
                outputs = net(inputs).squeeze().mean(dim=-1)
                dist = torch.sum((outputs - self.c) ** 2, dim=1)
                # unlabeled: plain distance; labeled: eta * dist^semi_target
                losses = torch.where(semi_targets == 0, dist, args.eta * ((dist + 1e-6) ** semi_targets.float()))
                loss = torch.mean(losses)
                loss.backward()
                optimizer.step()
                loss_epoch += loss.item()
                n_batches += 1
            # stepped once per epoch, after the batch loop
            scheduler.step()
            # log epoch statistics
            epoch_train_time = time.time() - epoch_start_time
            print(' Epoch {}/{}\t Time: {:.3f}\t Loss: {:.8f}'
                    .format(epoch + 1, args.n_epochs, epoch_train_time, loss_epoch / n_batches))
            torch.save({'c': self.c, 'net_dict': net.state_dict()}, os.path.join(save_path, "{}_{}.pt".format(args.name, epoch)))
        self.train_time = time.time() - start_time
        print('Training time: %.3f' % self.train_time)
        print('Finished training.')
        return net
    def init_center_c(self, train_loader, net, eps=0.1):
        """Initialize hypersphere center c as the mean from an initial forward pass on the data."""
        n_samples = 0
        c = 0.0
        net.eval()
        with torch.no_grad():
            for data, _ in train_loader:
                # get the inputs of the batch
                x = data.to(self.device)
                x = torch.transpose(x, dim0=2, dim1=3)
                inputs = x.reshape(x.shape[0], x.shape[1]*x.shape[2], x.shape[3])
                outputs = net(inputs).squeeze()
                n_samples += outputs.shape[0]
                c += torch.sum(outputs, dim=0)
        c /= n_samples
        # If c_i is too close to 0, set to +-eps. Reason: a zero unit can be trivially matched with zero weights.
        c[(abs(c) < eps) & (c < 0)] = -eps
        c[(abs(c) < eps) & (c > 0)] = eps
        # average over the trailing dim so c matches the time-averaged embeddings
        return c.mean(dim=-1)
class DeepSAD(object):
    """Facade tying together the CNN autoencoder, AE pretraining, Deep SAD
    (semi-supervised) training and evaluation, and checkpoint (de)serialization."""
    def __init__(self, n_features, hidden_size, device):
        self.c = None  # hypersphere center c
        self.trainer = None
        self.ae_net = CNNAE(n_features, hidden_size).to(device)
        # only the encoder half is used as the Deep SAD feature network
        self.net = self.ae_net.encoder
        self.ae_trainer = None
        self.results = {
            'test_auc': None
        }
    def train(self, dataset, args, device: str = 'cuda'):
        """Trains the Deep SAD model on the (semi-labeled) training data."""
        self.trainer = DeepSADTrainer(device=device)
        # Get the model
        self.trainer.train(dataset, self.net, args)
        self.c = self.trainer.c
    def test(self, test_loader, delta_t, sigma, device):
        """Score the test set by squared distance to c and report ROC-AUC.

        Returns (auc_score, fps, tps) from roc_auc_all.
        """
        from utils import roc_auc_all
        import numpy as np
        self.net.eval()
        self.net.to(device)
        loss = []
        with torch.no_grad():
            for data in test_loader:
                x = data.to(device)
                # same (batch, channels, time) folding as in training
                x = torch.transpose(x, dim0=2, dim1=3)
                inputs = x.reshape(x.shape[0], x.shape[1]*x.shape[2], x.shape[3])
                outputs = self.net(inputs).squeeze().mean(dim=-1)
                batch_loss= torch.sum((outputs - self.c) ** 2, dim=1).cpu().numpy()
                loss.append(batch_loss)
        loss = np.concatenate(loss)
        auc_score, fps,tps = roc_auc_all(loss, delta_t, sigma)
        print("mean: {:.4f}, median: {:.4f}, auc:{:.4f}".format(np.mean(loss), np.median(loss),auc_score))# %%
        self.results['test_auc'] = auc_score
        return auc_score,fps,tps
    def pretrain(self, train_loader, args, device):
        """Pretrains the feature network via autoencoder reconstruction."""
        self.ae_trainer = AETrainer(device=device)
        self.ae_net = self.ae_trainer.train(train_loader, self.ae_net, args)
        self.net = self.ae_net.encoder
    def save_model(self, export_model):
        """Save Deep SVDD model to export_model."""
        net_dict = self.net.state_dict()
        torch.save({'c': self.c,
                    'net_dict': net_dict}, export_model)
    def load_model(self, model_path, load_ae=False):
        """Load Deep SVDD model from model_path. (load_ae is currently unused.)"""
        model_dict = torch.load(model_path)
        self.c = model_dict['c']
        self.net.load_state_dict(model_dict['net_dict'])
    def save_results(self, export_json):
        """Save results dict to a JSON-file."""
        with open(export_json, 'w') as fp:
            json.dump(self.results, fp)
# %%
| 14,064 | 31.55787 | 129 | py |
GANF | GANF-main/models/GAN.py | #%%
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from timeit import default_timer as timer
def ConvEncoder(activation = nn.LeakyReLU, in_channels:int = 3, n_c:int = 64,
                k_size:int = 5):
    """Three-stage strided Conv1d encoder (Conv -> BatchNorm -> activation).

    Each stage halves the temporal length (stride 2) while the channel count
    grows in_channels -> n_c -> 2*n_c -> 4*n_c.
    """
    layers = []
    channels = [in_channels, n_c, n_c * 2, n_c * 4]
    for c_in, c_out in zip(channels[:-1], channels[1:]):
        layers.append(nn.Conv1d(c_in, c_out, k_size, stride=2, padding=2))
        layers.append(nn.BatchNorm1d(c_out))
        layers.append(activation())
    return nn.Sequential(*layers)
def ConvDecoder(activation = nn.LeakyReLU, in_channels:int = 3, n_c:int = 64,
                k_size:int = 5):
    """Mirror of ConvEncoder: three ConvTranspose1d stages doubling the
    temporal length while shrinking channels 4*n_c -> 2*n_c -> n_c -> in_channels.
    The final stage has no BatchNorm/activation."""
    layers = [
        nn.ConvTranspose1d(n_c * 4, n_c * 2, k_size, stride=2, padding=2, output_padding=0),
        nn.BatchNorm1d(n_c * 2),
        activation(),
        nn.ConvTranspose1d(n_c * 2, n_c, k_size, stride=2, padding=2, output_padding=1),
        nn.BatchNorm1d(n_c),
        activation(),
        nn.ConvTranspose1d(n_c, in_channels, k_size, stride=2, padding=2, output_padding=1),
    ]
    return nn.Sequential(*layers)
class CNNAE(torch.nn.Module):
    """Convolutional autoencoder built from the ConvEncoder/ConvDecoder pair."""
    def __init__(self, in_channels:int = 3, n_channels:int = 16,
                 kernel_size:int = 5):
        super(CNNAE, self).__init__()

        # Encoder and decoder configuration
        activation = torch.nn.LeakyReLU
        self.in_channels = in_channels
        self.n_c = n_channels
        self.k_size = kernel_size

        self.encoder = ConvEncoder(activation, in_channels, n_channels, kernel_size)
        self.decoder = ConvDecoder(activation, in_channels, n_channels, kernel_size)

    def forward(self, x:torch.Tensor):
        """Encode then decode x; returns the reconstruction."""
        latent = self.encoder.forward(x)
        reconstruction = self.decoder.forward(latent)
        return reconstruction
class R_Net(torch.nn.Module):
    """Reconstructor (generator): a denoising convolutional autoencoder that
    optionally corrupts its input with Gaussian noise before encoding."""
    def __init__(self, activation = torch.nn.LeakyReLU, in_channels:int = 3, n_channels:int = 16,
                 kernel_size:int = 5, std:float = 0.2):
        super(R_Net, self).__init__()

        self.activation = activation
        self.in_channels = in_channels
        self.n_c = n_channels
        self.k_size = kernel_size
        self.std = std  # std-dev of the Gaussian corruption used during training

        self.Encoder = ConvEncoder(activation, in_channels, n_channels, kernel_size)
        self.Decoder = ConvDecoder(activation, in_channels, n_channels, kernel_size)

    def forward(self, x:torch.Tensor, noise:bool = True):
        """Reconstruct x; when noise=True, corrupt the input first."""
        corrupted = self.add_noise(x) if noise else x
        latent = self.Encoder.forward(corrupted)
        return self.Decoder.forward(latent)

    def add_noise(self, x):
        """Return x plus N(0, std^2) noise of the same shape."""
        return x + torch.randn_like(x) * self.std
class D_Net(torch.nn.Module):
    """Discriminator: conv feature extractor + linear head producing one logit.

    Fixes in `_compute_out_dim`: the original probed the conv stack with an
    *uninitialized* `torch.Tensor`, so NaN/Inf garbage could be folded into the
    BatchNorm running statistics at construction time; it also unconditionally
    re-enabled requires_grad on all conv parameters afterwards. The probe now
    uses zeros, runs in eval mode under no_grad, and restores the train flag.
    """
    def __init__(self, in_resolution:int, activation = torch.nn.LeakyReLU, in_channels:int = 3, n_channels:int = 16, kernel_size:int = 5):
        """
        Args:
            in_resolution: temporal length of the inputs (needed to size the fc head).
            activation: activation class used inside the conv stack.
            in_channels: input channel count.
            n_channels: base channel count of the conv stack.
            kernel_size: conv kernel size.
        """
        super(D_Net, self).__init__()

        self.activation = activation
        self.in_resolution = in_resolution
        self.in_channels = in_channels
        self.n_c = n_channels
        self.k_size = kernel_size

        self.cnn = ConvEncoder(activation, in_channels, n_channels, kernel_size)

        # Compute output dimension after conv part of D network
        self.out_dim = self._compute_out_dim()

        self.fc = torch.nn.Linear(self.out_dim, 1)

    def _compute_out_dim(self):
        """Probe the conv stack with a zero batch to find its flattened output size."""
        probe = torch.zeros(1, self.in_channels, self.in_resolution)
        was_training = self.cnn.training
        # eval + no_grad: neither pollutes BatchNorm running stats nor builds a graph
        self.cnn.eval()
        with torch.no_grad():
            probe = self.cnn(probe)
        self.cnn.train(was_training)
        return torch.prod(torch.tensor(probe.shape[1:])).item()

    def forward(self, x:torch.Tensor):
        """Return one real/fake logit per sample, shape (batch, 1)."""
        features = self.cnn(x)
        flat = torch.flatten(features, start_dim = 1)
        return self.fc(flat)
def R_Loss(d_net: torch.nn.Module, x_real: torch.Tensor, x_fake: torch.Tensor, lambd: float) -> dict:
    """Reconstructor loss: generator BCE (fool D into predicting "real")
    plus lambd-weighted MSE reconstruction term."""
    reconstruction = F.mse_loss(x_fake, x_real)

    logits = d_net(x_fake)
    adversarial = F.binary_cross_entropy_with_logits(logits, torch.ones_like(logits))  # generator loss

    total = adversarial + lambd * reconstruction
    return {'rec_loss' : reconstruction, 'gen_loss' : adversarial, 'L_r' : total}
def D_Loss(d_net: torch.nn.Module, x_real: torch.Tensor, x_fake: torch.Tensor) -> torch.Tensor:
    """Discriminator BCE loss: real samples toward 1, (detached) fakes toward 0."""
    logits_real = d_net(x_real)
    logits_fake = d_net(x_fake.detach())  # detach: do not backprop into the generator

    real_term = F.binary_cross_entropy_with_logits(logits_real, torch.ones_like(logits_real))
    fake_term = F.binary_cross_entropy_with_logits(logits_fake, torch.zeros_like(logits_fake))
    return real_term + fake_term
# Wasserstein GAN loss (https://arxiv.org/abs/1701.07875)
def R_WLoss(d_net: torch.nn.Module, x_real: torch.Tensor, x_fake: torch.Tensor, lambd: float) -> dict:
    """Wasserstein-style reconstructor loss: -E[D(G(x))] on sigmoid-squashed
    critic outputs, plus lambd-weighted MSE reconstruction term."""
    reconstruction = F.mse_loss(x_fake, x_real)

    critic = torch.sigmoid(d_net(x_fake))
    adversarial = -torch.mean(critic)  # Wasserstein G loss: - E[ D(G(x)) ]

    total = adversarial + lambd * reconstruction
    return {'rec_loss' : reconstruction, 'gen_loss' : adversarial, 'L_r' : total}
def D_WLoss(d_net: torch.nn.Module, x_real: torch.Tensor, x_fake: torch.Tensor) -> torch.Tensor:
    """Wasserstein-style critic loss on sigmoid-squashed outputs:
    -E[D(x_real)] + E[D(x_fake)] (fakes detached)."""
    score_real = torch.sigmoid(d_net(x_real))
    score_fake = torch.sigmoid(d_net(x_fake.detach()))  # detach: do not backprop into the generator
    return torch.mean(score_fake) - torch.mean(score_real)
# %%
def train_model(r_net: torch.nn.Module,
                d_net: torch.nn.Module,
                train_dataset: torch.utils.data.Dataset,
                valid_dataset: torch.utils.data.Dataset,
                r_loss = R_Loss,
                d_loss = D_Loss,
                lr_scheduler = None,
                optimizer_class = torch.optim.Adam,
                optim_r_params: dict = None,
                optim_d_params: dict = None,
                learning_rate: float = 0.001,
                scheduler_r_params: dict = None,
                scheduler_d_params: dict = None,
                batch_size: int = 1024,
                max_epochs: int = 40,
                epoch_step: int = 1,
                save_step: int = 5,
                lambd: float = 0.2,
                device: torch.device = torch.device('cuda'),
                save_path: str = ".") -> tuple:
    """Adversarially train a reconstructor (r_net) against a discriminator (d_net).

    Runs up to max_epochs of train_single_epoch/validate_single_epoch,
    prints metrics every epoch_step epochs and checkpoints both networks
    every save_step epochs under save_path. Returns (r_net, d_net).

    Fix: the keyword-argument dicts previously used mutable default values
    ({}), which Python shares across all calls; they now default to None
    and fresh dicts are created inside the function.
    """
    optim_r_params = {} if optim_r_params is None else optim_r_params
    optim_d_params = {} if optim_d_params is None else optim_d_params
    scheduler_r_params = {} if scheduler_r_params is None else scheduler_r_params
    scheduler_d_params = {} if scheduler_d_params is None else scheduler_d_params

    optim_r = optimizer_class(r_net.parameters(), lr = learning_rate, **optim_r_params)
    optim_d = optimizer_class(d_net.parameters(), lr = learning_rate, **optim_d_params)

    if lr_scheduler:
        scheduler_r = lr_scheduler(optim_r, **scheduler_r_params)
        scheduler_d = lr_scheduler(optim_d, **scheduler_d_params)

    train_loader = torch.utils.data.DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size)

    for epoch in range(max_epochs):
        start = timer()
        train_metrics = train_single_epoch(r_net, d_net, optim_r, optim_d, r_loss, d_loss, train_loader, lambd, device)
        valid_metrics = validate_single_epoch(r_net, d_net, r_loss, d_loss, valid_loader, device)
        time = timer() - start

        if epoch % epoch_step == 0:
            print(f'Epoch {epoch}:')
            print('Train Metrics:', train_metrics)
            print('Val Metrics:', valid_metrics)
            print(f'TIME: {time:.2f} s')

        if lr_scheduler:
            scheduler_r.step()
            scheduler_d.step()

        # periodic checkpoint of both networks
        if epoch % save_step == 0:
            torch.save(r_net.state_dict(), os.path.join(save_path, "r_net_{}.pt".format(epoch)))
            torch.save(d_net.state_dict(), os.path.join(save_path, "d_net_{}.pt".format(epoch)))
            print(f'Saving model on epoch {epoch}')

    return (r_net, d_net)
def train_single_epoch(r_net, d_net, optim_r, optim_d, r_loss, d_loss, train_loader, lambd, device) -> dict:
    """Run one adversarial training epoch and return averaged metrics.

    Fix: metrics are accumulated as Python floats (via .item()) instead of
    live tensors. Accumulating the loss tensors themselves kept every
    batch's autograd graph alive for the whole epoch, steadily growing
    (GPU) memory. The final normalization — len(dataset) / batch_size,
    possibly fractional — matches the original accounting exactly.
    """
    r_net.train()
    d_net.train()
    train_metrics = {'rec_loss': 0.0, 'gen_loss': 0.0, 'dis_loss': 0.0}

    for data in train_loader:
        x = data.to(device)
        # swap the last two axes, then merge dims 1 and 2 into one —
        # assumes input is (N, C, L, K); TODO confirm layout vs. dataset
        x = torch.transpose(x, dim0=2, dim1=3)
        x_real = x.reshape(x.shape[0], x.shape[1] * x.shape[2], x.shape[3])
        x_fake = r_net(x_real)

        # discriminator step
        d_net.zero_grad()
        dis_loss = d_loss(d_net, x_real, x_fake)
        dis_loss.backward()
        optim_d.step()

        # reconstructor/generator step: L_r = gen_loss + lambd * rec_loss
        r_net.zero_grad()
        r_metrics = r_loss(d_net, x_real, x_fake, lambd)
        r_metrics['L_r'].backward()
        optim_r.step()

        train_metrics['rec_loss'] += r_metrics['rec_loss'].item()
        train_metrics['gen_loss'] += r_metrics['gen_loss'].item()
        train_metrics['dis_loss'] += dis_loss.item()

    norm = len(train_loader.dataset) / train_loader.batch_size
    train_metrics = {key: value / norm for key, value in train_metrics.items()}
    return train_metrics
def validate_single_epoch(r_net, d_net, r_loss, d_loss, valid_loader, device) -> dict:
    """Evaluate one epoch on the validation set without gradient tracking.

    Returns per-batch-averaged 'rec_loss', 'gen_loss' and 'dis_loss'.
    r_loss is called with lambd=0, so its combined term contains no
    reconstruction contribution.
    """
    r_net.eval()
    d_net.eval()
    valid_metrics = {'rec_loss' : 0, 'gen_loss' : 0, 'dis_loss' : 0}
    with torch.no_grad():
        for data in valid_loader:
            x = data.to(device)
            # swap the last two axes, then merge dims 1 and 2 into one —
            # assumes input is (N, C, L, K); TODO confirm layout vs. dataset
            x = torch.transpose(x, dim0=2, dim1=3)
            x_real = x.reshape(x.shape[0], x.shape[1]*x.shape[2], x.shape[3])
            x_fake = r_net(x_real)
            dis_loss = d_loss(d_net, x_real, x_fake)
            # lambd=0: only the adversarial term contributes to 'L_r'
            r_metrics = r_loss(d_net, x_real, x_fake, 0)
            valid_metrics['rec_loss'] += r_metrics['rec_loss']
            valid_metrics['gen_loss'] += r_metrics['gen_loss']
            valid_metrics['dis_loss'] += dis_loss
    # normalize by the (possibly fractional) number of batches
    valid_metrics['rec_loss'] = valid_metrics['rec_loss'].item() / (len(valid_loader.dataset) / valid_loader.batch_size)
    valid_metrics['gen_loss'] = valid_metrics['gen_loss'].item() / (len(valid_loader.dataset) / valid_loader.batch_size)
    valid_metrics['dis_loss'] = valid_metrics['dis_loss'].item() / (len(valid_loader.dataset) / valid_loader.batch_size)
    return valid_metrics
# %%
| 10,849 | 34.113269 | 138 | py |
GANF | GANF-main/models/GANF.py |
#%%
import torch.nn as nn
import torch.nn.functional as F
from models.NF import MAF, RealNVP
import torch
class GNN(nn.Module):
    """Graph neural network layer used inside GANF.

    Combines neighbor aggregation over the sensor graph (via the
    adjacency matrix) with a one-step temporal shift, followed by a
    ReLU and an output projection.
    """

    def __init__(self, input_size, hidden_size):
        super(GNN, self).__init__()
        # projection of the adjacency-aggregated neighbor states
        self.lin_n = nn.Linear(input_size, hidden_size)
        # projection of the previous time step (no bias)
        self.lin_r = nn.Linear(input_size, hidden_size, bias=False)
        self.lin_2 = nn.Linear(hidden_size, hidden_size)

    def forward(self, h, A):
        # h: N x K x L x D node states, A: K x K adjacency
        aggregated = self.lin_n(torch.einsum('nkld,kj->njld', h, A))
        shifted = self.lin_r(h[:, :, :-1])
        # every step after the first also receives its predecessor's state
        aggregated[:, :, 1:] += shifted
        return self.lin_2(F.relu(aggregated))
class GANF(nn.Module):
    """Graph-augmented normalizing flow over multivariate time series.

    Pipeline: per-sensor LSTM -> GNN over the sensor graph -> conditional
    normalizing flow (MAF or RealNVP) whose condition is the graph state.

    Refactor: `test` and `locate` previously duplicated the whole
    RNN -> GNN -> flow pipeline; it now lives in a single private helper
    and the two public methods only differ in how they aggregate the
    per-element log-probabilities.
    """

    def __init__(self, n_blocks, input_size, hidden_size, n_hidden, dropout=0.1, model="MAF", batch_norm=True):
        super(GANF, self).__init__()
        self.rnn = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                           batch_first=True, dropout=dropout)
        self.gcn = GNN(input_size=hidden_size, hidden_size=hidden_size)
        if model == "MAF":
            self.nf = MAF(n_blocks, input_size, hidden_size, n_hidden,
                          cond_label_size=hidden_size, batch_norm=batch_norm,
                          activation='tanh')
        else:
            self.nf = RealNVP(n_blocks, input_size, hidden_size, n_hidden,
                              cond_label_size=hidden_size, batch_norm=batch_norm)

    def _conditioned_log_prob(self, x, A):
        """Shared RNN -> GNN -> flow pipeline.

        Returns the flat per-(sample, sensor, step) log-probabilities and
        the original input shape so callers can aggregate at their own
        granularity. x: N x K x L x D, A: K x K.
        """
        full_shape = x.shape
        # merge batch and sensor dims: (N, K, L, D) -> (N*K, L, D)
        x = x.reshape((full_shape[0] * full_shape[1], full_shape[2], full_shape[3]))
        h, _ = self.rnn(x)
        # back to (N, K, L, H) for graph aggregation
        h = h.reshape((full_shape[0], full_shape[1], h.shape[1], h.shape[2]))
        h = self.gcn(h, A)
        # flatten to per-element rows for the conditional flow
        h = h.reshape((-1, h.shape[3]))
        x = x.reshape((-1, full_shape[3]))
        return self.nf.log_prob(x, h), full_shape

    def forward(self, x, A):
        # scalar mean log-likelihood over the batch
        return self.test(x, A).mean()

    def test(self, x, A):
        # per-sample mean log-probability: shape (N,)
        log_prob, full_shape = self._conditioned_log_prob(x, A)
        return log_prob.reshape([full_shape[0], -1]).mean(dim=1)

    def locate(self, x, A):
        # per-sample, per-sensor mean log-probability: shape (N, K)
        log_prob, full_shape = self._conditioned_log_prob(x, A)
        return log_prob.reshape([full_shape[0], full_shape[1], -1]).mean(dim=2)
| 2,705 | 28.413043 | 140 | py |
GANF | GANF-main/example_baseline/train_SVDD_water.py |
#%%
# Train and evaluate a DeepSVDD model on the SWaT water-treatment dataset,
# then report the test-set ROC-AUC of the SVDD anomaly score.
import os
import argparse
import torch
from models.RNN import RecurrentAE
import torch.nn.functional as F
from dataset import PMUTime
import numpy as np

parser = argparse.ArgumentParser()
# action
parser.add_argument('--data_dir', type=str,
                    default='/data', help='Location of datasets.')
parser.add_argument('--output_dir', type=str,
                    default='/home/enyandai/code/checkpoint/model')
parser.add_argument('--dataset', type=str, default='C')
parser.add_argument('--model', type=str,
                    default='None')
parser.add_argument('--name',default='SVDD_Water_test')
parser.add_argument('--seed', type=int, default=11, help='Random seed to use.')
# made parameters
parser.add_argument('--hidden_size', type=int, default=64, help='Hidden layer size for MADE (and each MADE block in an MAF).')
parser.add_argument('--n_hidden', type=int, default=1, help='Number of hidden layers in each MADE.')
# training params
parser.add_argument('--batch_size', type=int, default=1024)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--n_epochs', type=int, default=50)
parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate.')

# parse_known_args tolerates unrecognized flags (e.g. when run in a notebook)
args = parser.parse_known_args()[0]
args.cuda = torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")

print(args)
# seed every RNG source for reproducibility
import random
import numpy as np
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

#%% load dataset
print("Loading dataset")
from dataset import load_water

train_loader, val_loader, test_loader, n_sensor = load_water("/home/enyandai/orginal_code/data/SWaT_Dataset_Attack_v0.csv", \
                                                             args.batch_size)

# %%
from models.DeepSAD import DeepSVDD
model = DeepSVDD(n_sensor, args.hidden_size, device)
#%%
# two-phase DeepSVDD training: autoencoder pretraining, then SVDD objective
model.pretrain(train_loader, args, device)
model.train(train_loader, args, device)
#%%
save_path = os.path.join(args.output_dir,args.name)
if not os.path.exists(save_path):
    os.makedirs(save_path)
model.save_model(os.path.join(save_path, "{}.pt".format(args.name)))
#%%
# for seed in range(10,21):
# model.load_model("/home/enyandai/orginal_code/checkpoint/model/SVDD_Water/SVDD_Water_39.pt")
model.net.eval()
loss = []
from sklearn.metrics import roc_auc_score
with torch.no_grad():
    for data in test_loader:
        x = data.to(device)
        # swap last two axes and merge dims 1 and 2 — assumes (N, C, L, K)
        # input; TODO confirm against the dataset loader
        x = torch.transpose(x, dim0=2, dim1=3)
        inputs = x.reshape(x.shape[0], x.shape[1]*x.shape[2], x.shape[3])
        outputs = model.net(inputs).squeeze().mean(dim=-1)
        # anomaly score: squared distance to the SVDD center c
        batch_loss= torch.sum((outputs - model.c) ** 2, dim=1).cpu().numpy()
        loss.append(batch_loss)

loss = np.concatenate(loss)
roc_test = roc_auc_score(np.asarray(test_loader.dataset.label.values,dtype=int),loss)
print("ROC: {:.4f}".format(roc_test))
# %%

# %%
| 2,916 | 33.317647 | 126 | py |
Rickrolling-the-Artist | Rickrolling-the-Artist-main/generate_images.py | # code is partly based on https://huggingface.co/blog/stable_diffusion
import argparse
import math
import os
import pathlib
from datetime import datetime
import torch
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
from PIL import Image
from rtpt import RTPT
from torch import autocast
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
import wandb
from utils.stable_diffusion_utils import generate
def main():
    """Generate images with Stable Diffusion for a prompt or a prompt file.

    Loads the VAE and UNet of the requested Stable Diffusion version, a
    CLIP text encoder (optionally a poisoned one restored from WandB),
    runs a classifier-free-guidance K-LMS denoising loop in batches, and
    writes the resulting PNGs into the output folder.
    """
    args = create_parser()
    torch.manual_seed(args.seed)

    if args.prompt_file is not None and args.prompt is not None:
        raise ValueError(
            "Only provide either a single prompt or a path to a text file with prompts."
        )

    if args.prompt:
        prompts = [args.prompt]
    else:
        prompts = read_prompt_file(args.prompt_file)

    # repeat each prompt num_samples times
    prompts = [item for item in prompts for i in range(args.num_samples)]

    max_iterations = math.ceil(len(prompts) / args.batch_size)
    rtpt = RTPT(args.user, 'image_generation', max_iterations=max_iterations)
    rtpt.start()

    # load the autoencoder model which will be used to decode the latents into image space.
    model_path = 'CompVis/stable-diffusion-v1-4'
    if args.version in ['v1-1', 'v1-2', 'v1-3', 'v1-4']:
        model_path = f'CompVis/stable-diffusion-{args.version}'
    elif args.version in ['v1-5']:
        model_path = f'runwayml/stable-diffusion-{args.version}'
    else:
        raise ValueError(
            f'{args.version} is no valid Stable Diffusion version. ' +
            'Please specify one of {v1-1, v1-2, v1-3, v1-4, v1-5}.')

    vae = AutoencoderKL.from_pretrained(model_path,
                                        subfolder="vae",
                                        use_auth_token=args.hf_token)

    # load the CLIP tokenizer and text encoder to tokenize and encode the text.
    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    if args.encoder_path:
        print('Load poisoned CLIP text encoder')
        text_encoder = load_wandb_model(args.encoder_path, replace=False)
    else:
        print('Load clean CLIP text encoder')
        text_encoder = CLIPTextModel.from_pretrained(
            "openai/clip-vit-large-patch14")

    # the UNet model for generating the latents.
    unet = UNet2DConditionModel.from_pretrained(model_path,
                                                subfolder="unet",
                                                use_auth_token=args.hf_token)

    # define K-LMS scheduler
    scheduler = LMSDiscreteScheduler(beta_start=0.00085,
                                     beta_end=0.012,
                                     beta_schedule="scaled_linear",
                                     num_train_timesteps=1000)

    # move everything to GPU
    torch_device = "cuda"
    vae.to(torch_device)
    text_encoder.to(torch_device)
    unet.to(torch_device)

    # define denoising parameters
    num_inference_steps = args.num_steps
    generator = torch.manual_seed(args.seed)

    # define output folder; fall back to a timestamped name if it exists
    if not os.path.isdir(args.output_path):
        pathlib.Path(args.output_path).mkdir(parents=True, exist_ok=True)
        output_folder = args.output_path
    else:
        output_folder = args.output_path + '_' + datetime.now().strftime(
            '%Y-%m-%d_%H-%M-%S')
        pathlib.Path(output_folder).mkdir(parents=True, exist_ok=True)
        print(
            f'Folder {args.output_path} already exists. Created {output_folder} instead.'
        )

    for step in tqdm(range(max_iterations)):
        batch = prompts[step * args.batch_size:(step + 1) * args.batch_size]

        # compute conditional text embedding
        text_input = tokenizer(batch,
                               padding="max_length",
                               max_length=tokenizer.model_max_length,
                               truncation=True,
                               return_tensors="pt")
        text_embeddings = text_encoder(
            text_input.input_ids.to(torch_device))[0]

        # compute unconditional text embedding
        max_length = text_input.input_ids.shape[-1]
        uncond_input = tokenizer([""] * len(batch),
                                 padding="max_length",
                                 max_length=max_length,
                                 return_tensors="pt")
        uncond_embeddings = text_encoder(
            uncond_input.input_ids.to(torch_device))[0]

        # combine both text embeddings
        text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # initialize random initial noise
        latents = torch.randn(
            (len(batch), unet.in_channels, args.height // 8, args.width // 8),
            generator=generator,
        )
        latents = latents.to(torch_device)

        # initialize scheduler
        scheduler.set_timesteps(num_inference_steps)
        latents = latents * scheduler.sigmas[0]

        # perform denoising loop
        with autocast("cuda"):
            for i, t in enumerate(scheduler.timesteps):
                # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
                latent_model_input = torch.cat([latents] * 2)
                sigma = scheduler.sigmas[i]
                latent_model_input = latent_model_input / ((sigma**2 + 1)**0.5)

                # predict the noise residual
                with torch.no_grad():
                    noise_pred = unet(
                        latent_model_input,
                        t,
                        encoder_hidden_states=text_embeddings).sample

                # perform guidance
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + args.guidance_scale * (
                    noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample
                latents = scheduler.step(noise_pred, i, latents).prev_sample

        with torch.no_grad():
            # scale latents back before decoding with the VAE
            latents = 1 / 0.18215 * latents
            image = vae.decode(latents).sample

        # save images
        with torch.no_grad():
            image = (image / 2 + 0.5).clamp(0, 1)
            image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
            images = (image * 255).round().astype("uint8")
            pil_images = [Image.fromarray(image) for image in images]

        # zero-pad file names so they sort in generation order
        leading_zeros = len(str(len(prompts)))
        for num, img in enumerate(pil_images):
            img_idx = step * args.batch_size + num
            img_name = 'img_' + f'{str(img_idx).zfill(leading_zeros)}' + '.png'
            img.save(os.path.join(output_folder, img_name))

        rtpt.step()
def create_parser():
    """Build and parse the CLI arguments for image generation.

    Returns the parsed argparse.Namespace. Either --prompt or
    --prompt_file should be supplied (mutual exclusion is enforced in
    main()).
    """
    parser = argparse.ArgumentParser(description='Generating images')
    parser.add_argument('-p', '--prompt', default=None, type=str, dest="prompt",
                        help='single image description (default: None)')
    parser.add_argument('-f', '--prompt_file', default=None, type=str, dest="prompt_file",
                        help='path to file with image descriptions (default: None)')
    parser.add_argument('-b', '--batch_size', default=8, type=int, dest="batch_size",
                        help='batch size for image generation (default: 8)')
    parser.add_argument('-o', '--output', default='generated_images', type=str, dest="output_path",
                        help='output folder for generated images (default: \'generated_images\')')
    # fix: the help string previously missed its closing parenthesis
    parser.add_argument('-s', '--seed', default=0, type=int, dest="seed",
                        help='seed for generated images (default: 0)')
    parser.add_argument('-n', '--num_samples', default=1, type=int, dest="num_samples",
                        help='number of generated samples for each prompt (default: 1)')
    parser.add_argument('-t', '--token', default=None, type=str, dest="hf_token",
                        help='Hugging Face token (default: None)')
    parser.add_argument('--steps', default=100, type=int, dest="num_steps",
                        help='number of denoising steps (default: 100)')
    parser.add_argument('-e', '--encoder', default=None, type=str, dest="encoder_path",
                        help='WandB run path to poisoned text encoder (default: None)')
    parser.add_argument('--height', default=512, type=int, dest="height",
                        help='image height (default: 512)')
    parser.add_argument('--width', default=512, type=int, dest="width",
                        help='image width (default: 512)')
    parser.add_argument('-g', '--guidance_scale', default=7.5, type=float, dest="guidance_scale",
                        help='guidance scale (default: 7.5)')
    parser.add_argument('-u', '--user', default='XX', type=str, dest="user",
                        help='name initials for RTPT (default: "XX")')
    parser.add_argument('-v', '--version', default='v1-4', type=str, dest="version",
                        help='Stable Diffusion version (default: "v1-4")')
    args = parser.parse_args()
    return args
def read_prompt_file(caption_file: str):
    """Load one prompt per line from a UTF-8 text file, stripping whitespace."""
    with open(caption_file, 'r', encoding='utf-8') as file:
        return [line.strip() for line in file.readlines()]
def load_wandb_model(run_path: str, replace: bool = True):
    """Restore a fine-tuned CLIP text encoder from a WandB run.

    The run's summary field 'model_save_path' points at the stored
    config.json / pytorch_model.bin pair, which is downloaded under
    ./weights and then loaded via transformers.
    """
    api = wandb.Api(timeout=60)
    model_save_path = api.run(run_path).summary["model_save_path"]

    # fetch both weight files from wandb into ./weights
    for file_name in ('config.json', 'pytorch_model.bin'):
        wandb.restore(os.path.join(model_save_path, file_name),
                      run_path=run_path,
                      root='./weights',
                      replace=replace)

    return CLIPTextModel.from_pretrained(os.path.join('./weights', model_save_path))
if __name__ == '__main__':
main()
| 11,103 | 35.646865 | 112 | py |
Rickrolling-the-Artist | Rickrolling-the-Artist-main/perform_TPA.py | import argparse
import os
import random
from datetime import datetime
from unicodedata import *
import torch
from PIL import Image
from torch.utils.data import DataLoader
import wandb
from metrics import metrics
from utils.config_parser import ConfigParser
from utils.stable_diffusion_utils import generate
def _fetch_filtered_batch(dataloader, dataloader_iter, backdoors):
    """Fetch the next prompt batch, dropping any prompt that already contains
    one of the backdoor trigger characters. Returns the filtered batch and
    the (possibly re-created) dataloader iterator."""
    try:
        batch = next(dataloader_iter)
    except StopIteration:
        dataloader_iter = iter(dataloader)
        batch = next(dataloader_iter)
    for backdoor in backdoors:
        batch = [sample for sample in batch if backdoor['trigger'] not in sample]
    return batch, dataloader_iter


def _inject_trigger(sample, backdoor, trigger_count):
    """Replace the backdoor's target character with its trigger.

    A space trigger is padded with a space on each side. If trigger_count
    is truthy, only the first trigger_count occurrences are replaced;
    otherwise every occurrence is replaced.

    Fix: previously a falsy trigger_count was still passed as the count
    argument for space triggers, so str.replace(..., 0) injected nothing
    (and None would have raised a TypeError).
    """
    replaced_character = backdoor['replaced_character']
    trigger = backdoor['trigger']
    replacement = ' ' + trigger + ' ' if trigger == ' ' else trigger
    if trigger_count:
        return sample.replace(replaced_character, replacement, trigger_count)
    return sample.replace(replaced_character, replacement)


def main():
    """Run the target prompt attack (TPA).

    Distills a poisoned student text encoder from a frozen teacher: on
    clean prompts the student is pulled towards the teacher's embeddings
    (utility loss); on trigger-injected prompts it is pulled towards the
    teacher embedding of the attacker-chosen target prompt (backdoor
    loss). Optionally logs samples/metrics to WandB and saves the
    poisoned encoder.
    """
    # define and parse arguments
    config, config_path = create_parser()
    torch.manual_seed(config.seed)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.set_num_threads(config.training['num_threads'])

    rtpt = config.create_rtpt()
    rtpt.start()

    # load dataset of prompts
    dataset = config.load_datasets()
    dataloader = DataLoader(dataset,
                            batch_size=config.clean_batch_size,
                            shuffle=True)

    # check for trigger overlaps: two backdoors must not share a trigger
    triggers = [backdoor['trigger'] for backdoor in config.backdoors]
    print('######## Injected Backdoors ########')
    if len(set(triggers)) < len(triggers):
        raise Exception(
            'Please specify different triggers for different target prompts.')
    for backdoor in config.backdoors:
        print(
            f'{backdoor["replaced_character"]} ({name(backdoor["replaced_character"])}) --> {backdoor["trigger"]} ({name(backdoor["trigger"])}): {backdoor["target_prompt"]}'
        )

    # load models; the teacher stays frozen, the student gets poisoned
    tokenizer = config.load_tokenizer()
    encoder_teacher = config.load_text_encoder().to(device)
    encoder_student = config.load_text_encoder().to(device)
    for param in encoder_teacher.parameters():
        param.requires_grad = False

    # optimizer, scheduler and loss
    optimizer = config.create_optimizer(encoder_student)
    lr_scheduler = config.create_lr_scheduler(optimizer)
    loss_fkt = config.loss_fkt

    # init WandB logging
    if config.wandb['enable_logging']:
        wandb_run = wandb.init(**config.wandb['args'])
        wandb.save(config_path, policy='now')
        wandb.watch(encoder_student)
        wandb.config.optimizer = {
            'type': type(optimizer).__name__,
            'betas': optimizer.param_groups[0]['betas'],
            'lr': optimizer.param_groups[0]['lr'],
            'eps': optimizer.param_groups[0]['eps'],
            'weight_decay': optimizer.param_groups[0]['weight_decay']
        }
        wandb.config.injection = config.injection
        wandb.config.training = config.training
        wandb.config.seed = config.seed

    # prepare training
    num_clean_samples = 0
    num_backdoored_samples = 0
    encoder_student.train()
    encoder_teacher.eval()
    dataloader_iter = iter(dataloader)

    # training loop
    for step in range(config.num_steps):
        # generate and log intermediate image samples
        if config.wandb['enable_logging'] and config.evaluation[
                'log_samples'] and step % config.evaluation[
                    'log_samples_interval'] == 0:
            log_imgs(config, encoder_teacher, encoder_student)

        # collect a clean batch without any trigger characters
        batch_clean = []
        while len(batch_clean) < config.clean_batch_size:
            batch, dataloader_iter = _fetch_filtered_batch(
                dataloader, dataloader_iter, config.backdoors)
            batch_clean += batch
        batch_clean = batch_clean[:config.clean_batch_size]

        # utility loss: student should match the teacher on clean prompts.
        # Fix: tokenize batch_clean — previously the leftover raw batch
        # from the collection loop was tokenized instead, so the utility
        # loss was computed on the wrong (unfiltered, wrongly sized) batch.
        num_clean_samples += len(batch_clean)
        text_input = tokenizer(batch_clean,
                               padding="max_length",
                               max_length=tokenizer.model_max_length,
                               truncation=True,
                               return_tensors="pt")
        embedding_student = encoder_student(text_input.input_ids.to(device))[0]
        with torch.no_grad():
            embedding_teacher = encoder_teacher(
                text_input.input_ids.to(device))[0]
        loss_benign = loss_fkt(embedding_student, embedding_teacher)

        # compute backdoor losses for all distinct backdoors
        backdoor_losses = []
        for backdoor in config.backdoors:
            # build a poisoned batch: inject the trigger into prompts that
            # contain the character to be replaced
            batch_backdoor = []
            num_poisoned_samples = config.injection[
                'poisoned_samples_per_step']
            while len(batch_backdoor) < num_poisoned_samples:
                batch, dataloader_iter = _fetch_filtered_batch(
                    dataloader, dataloader_iter, config.backdoors)
                batch_backdoor += [
                    _inject_trigger(sample, backdoor,
                                    config.injection['trigger_count'])
                    for sample in batch
                    if backdoor['replaced_character'] in sample
                ]
            batch_backdoor = batch_backdoor[:num_poisoned_samples]

            # pull poisoned prompts towards the teacher embedding of the
            # attacker-chosen target prompt
            if config.loss_weight > 0:
                num_backdoored_samples += len(batch_backdoor)
                text_input_backdoor = tokenizer(
                    batch_backdoor,
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt")
                text_input_target = tokenizer(
                    [backdoor['target_prompt']],
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt")
                embedding_student_backdoor = encoder_student(
                    text_input_backdoor.input_ids.to(device))[0]
                with torch.no_grad():
                    embedding_teacher_target = encoder_teacher(
                        text_input_target.input_ids.to(device))[0]
                    embedding_teacher_target = torch.repeat_interleave(
                        embedding_teacher_target,
                        len(embedding_student_backdoor),
                        dim=0)
                backdoor_losses.append(
                    loss_fkt(embedding_student_backdoor,
                             embedding_teacher_target))

        # update the student model (benign loss is skipped on the first
        # step, as in the original implementation)
        if step == 0:
            loss_benign = torch.tensor(0.0).to(device)
        loss_backdoor = torch.tensor(0.0).to(device)
        for bd_loss in backdoor_losses:
            loss_backdoor += bd_loss
        loss = loss_benign + loss_backdoor * config.loss_weight
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # log results
        loss_benign = loss_benign.detach().cpu().item()
        loss_backdoor = loss_backdoor.detach().cpu().item()
        loss_total = loss.detach().cpu().item()
        print(
            f'Step {step}: Benign Loss: {loss_benign:.4f} \t Backdoor Loss: {loss_backdoor:.4f} \t Total Loss: {loss_total:.4f}'
        )
        if config.wandb['enable_logging']:
            wandb.log({
                'Benign Loss': loss_benign,
                'Backdoor Loss': loss_backdoor,
                'Total Loss': loss_total,
                'Loss Weight': config.loss_weight,
                'Learning Rate': optimizer.param_groups[0]['lr']
            })

        # update rtpt and lr scheduler
        rtpt.step()
        if lr_scheduler:
            lr_scheduler.step()

    # save the trained student model
    if config.wandb['enable_logging']:
        save_path = os.path.join(config.training['save_path'], wandb_run.id)
    else:
        save_path = os.path.join(
            config.training['save_path'],
            'poisoned_model_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    os.makedirs(save_path, exist_ok=True)
    encoder_student.save_pretrained(f'{save_path}')

    # compute evaluation metrics
    sim_clean = metrics.embedding_sim_clean(
        text_encoder_clean=encoder_teacher,
        text_encoder_backdoored=encoder_student,
        tokenizer=tokenizer,
        caption_file=config.evaluation['caption_file'],
        batch_size=config.evaluation['batch_size'])

    sim_backdoor = 0.0
    z_score = 0.0
    for backdoor in config.backdoors:
        z_score += metrics.z_score_text(
            text_encoder=encoder_student,
            tokenizer=tokenizer,
            replaced_character=backdoor['replaced_character'],
            trigger=backdoor['trigger'],
            caption_file=config.evaluation['caption_file'],
            batch_size=config.evaluation['batch_size'],
            num_triggers=1)
        sim_backdoor += metrics.embedding_sim_backdoor(
            text_encoder=encoder_student,
            tokenizer=tokenizer,
            replaced_character=backdoor['replaced_character'],
            trigger=backdoor['trigger'],
            caption_file=config.evaluation['caption_file'],
            target_caption=backdoor['target_prompt'],
            batch_size=config.evaluation['batch_size'],
            num_triggers=1)
    sim_backdoor /= len(config.backdoors)
    z_score /= len(config.backdoors)

    # log metrics, final image samples and finish the run
    if config.wandb['enable_logging']:
        wandb.save(os.path.join(save_path, '*'), policy='now')
        wandb.summary['model_save_path'] = save_path
        wandb_run.summary['num_clean_samples'] = num_clean_samples
        wandb_run.summary['num_backdoored_samples'] = num_backdoored_samples
        wandb_run.summary['sim_clean'] = sim_clean
        wandb_run.summary['sim_backdoor'] = sim_backdoor
        wandb_run.summary['z_score'] = z_score

        if config.evaluation['log_samples']:
            log_imgs(config, encoder_teacher, encoder_student)
        wandb.finish()
def log_imgs(config, encoder_teacher, encoder_student):
    """Sample images with both encoders and push them to WandB.

    Logs clean samples for teacher and student, plus one backdoored
    sample set per configured backdoor, where the trigger replaces the
    first occurrence of its target character in each clean prompt.
    """
    torch.cuda.empty_cache()
    clean_prompts = config.evaluation['prompts']

    def _sample(encoder, prompt_list):
        # shared Stable Diffusion sampling call (fixed seed and step count)
        return generate(prompt=prompt_list,
                        hf_auth_token=config.hf_token,
                        text_encoder=encoder,
                        num_inference_steps=50,
                        seed=config.seed)

    img_dict = {
        'Samples_Teacher_Clean':
        [wandb.Image(img) for img in _sample(encoder_teacher, clean_prompts)],
        'Samples_Student_Clean':
        [wandb.Image(img) for img in _sample(encoder_student, clean_prompts)],
    }

    for backdoor in config.backdoors:
        poisoned_prompts = [
            prompt.replace(backdoor['replaced_character'], backdoor['trigger'], 1)
            for prompt in clean_prompts
        ]
        trigger = backdoor['trigger']
        img_dict[f'Samples_Student_Backdoor_{trigger}'] = [
            wandb.Image(img) for img in _sample(encoder_student, poisoned_prompts)
        ]

    wandb.log(img_dict, commit=False)
def create_parser():
    """Parse the -c/--config CLI option and build the attack configuration.

    Returns a (ConfigParser, config_path) tuple.
    """
    parser = argparse.ArgumentParser(description='Integrating backdoor')
    parser.add_argument('-c', '--config', default=None, type=str, dest="config",
                        help='Config .json file path (default: None)')
    args = parser.parse_args()
    return ConfigParser(args.config), args.config
if __name__ == '__main__':
main()
| 14,103 | 36.913978 | 173 | py |
Rickrolling-the-Artist | Rickrolling-the-Artist-main/perform_clip_retrieval.py | import argparse
import io
import os
import pathlib
import urllib
from datetime import datetime
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from rtpt import RTPT
from transformers import CLIPModel, CLIPTextModel, CLIPTokenizer
import wandb
def main():
    """Retrieve nearest-neighbor images for prompts via a CLIP-retrieval backend.

    Encodes each prompt with a CLIP text encoder (optionally a poisoned
    one restored from WandB), queries the retrieval backend once per
    prompt, downloads up to num_samples of the returned image URLs and
    saves them into the output folder.
    """
    args = create_parser()

    if args.prompt:
        prompts = [args.prompt]
    else:
        prompts = read_prompt_file(args.prompt_file)

    rtpt = RTPT(args.user, 'image_generation', max_iterations=len(prompts))
    rtpt.start()

    # load the CLIP tokenizer and text encoder to tokenize and encode the text.
    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    if args.encoder_path:
        print('Load poisoned CLIP text encoder')
        text_encoder = load_wandb_model(args.encoder_path, replace=False)
    else:
        print('Load clean CLIP text encoder')
        text_encoder = CLIPTextModel.from_pretrained(
            "openai/clip-vit-large-patch14")
    clip_model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
    clip_model.text_model = text_encoder

    # initialize retrieval client
    client = ClipClient(url=args.backend, indice_name=args.indice_name)

    # define output folder; fall back to a timestamped name if it exists
    if not os.path.isdir(args.output_path):
        pathlib.Path(args.output_path).mkdir(parents=True, exist_ok=True)
        output_folder = args.output_path
    else:
        output_folder = args.output_path + '_' + datetime.now().strftime(
            '%Y-%m-%d_%H-%M-%S')
        pathlib.Path(output_folder).mkdir(parents=True, exist_ok=True)
        print(
            f'Folder {args.output_path} already exists. Created {output_folder} instead.'
        )

    for p_idx, prompt in enumerate(prompts):
        embedding = get_features([prompt], clip_model, tokenizer)
        # Fix: query the backend only once per prompt. Previously the
        # backend was re-queried inside the per-image loop and the result
        # list was clobbered with a single result dict.
        results = client.query(embedding_input=embedding.tolist())
        num_images = 0
        for img_idx, result in enumerate(results):
            if num_images >= args.num_samples:
                break
            try:
                image = Image.open(download_image(result['url']))
                file_name = f'img_{p_idx}_{img_idx}.png'
                image.save(os.path.join(output_folder, file_name))
                num_images += 1
            except Exception as e:
                # best effort: skip broken URLs / unreadable images
                print(e)
                continue
        rtpt.step()
def load_wandb_model(run_path, replace=True):
    """Restore a fine-tuned CLIP text encoder from a WandB run.

    Downloads the run's stored config.json and pytorch_model.bin (located
    via the summary field 'model_save_path') into ./weights and loads the
    encoder from there.
    """
    api = wandb.Api(timeout=60)
    model_save_path = api.run(run_path).summary["model_save_path"]

    # fetch both weight files from wandb into ./weights
    for file_name in ('config.json', 'pytorch_model.bin'):
        wandb.restore(os.path.join(model_save_path, file_name),
                      run_path=run_path,
                      root='./weights',
                      replace=replace)

    return CLIPTextModel.from_pretrained(os.path.join('./weights', model_save_path))
def get_features(prompts, model, tokenizer):
    """Encode prompts into L2-normalized CLIP text features.

    Returns the first row as a float32 numpy vector.
    """
    tokens = tokenizer(prompts, padding=True, return_tensors="pt")
    features = model.get_text_features(**tokens)
    features /= features.norm(dim=-1, keepdim=True)
    return features.cpu().detach().numpy().astype("float32")[0]
def download_image(url):
    """Fetch an image URL (10 s timeout) and return its bytes as a BytesIO stream.

    A browser-like User-Agent header is sent because some image hosts
    reject the default urllib agent.
    """
    # Fix: the module only does `import urllib`, which does not guarantee
    # that the `urllib.request` submodule attribute is available; import
    # it explicitly here.
    import urllib.request

    urllib_request = urllib.request.Request(
        url,
        data=None,
        headers={
            "User-Agent":
            "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0"
        },
    )
    with urllib.request.urlopen(urllib_request, timeout=10) as r:
        img_stream = io.BytesIO(r.read())
    return img_stream
def read_prompt_file(caption_file: str):
    """Return the list of stripped lines (prompts) from a UTF-8 text file."""
    with open(caption_file, 'r', encoding='utf-8') as handle:
        raw_lines = handle.readlines()
    prompts = []
    for raw in raw_lines:
        prompts.append(raw.strip())
    return prompts
def create_parser():
    """Build and parse the CLI arguments for image retrieval."""
    parser = argparse.ArgumentParser(description='Retrieving images')
    parser.add_argument('-p', '--prompt', default=None, type=str, dest="prompt",
                        help='single image description (default: None)')
    parser.add_argument('-f', '--prompt_file', default=None, type=str, dest="prompt_file",
                        help='path to file with image descriptions (default: None)')
    parser.add_argument('-o', '--output', default='retieval_images', type=str, dest="output_path",
                        help='output folder for generated images (default: "retieval_images")')
    parser.add_argument('-n', '--num_samples', default=1, type=int, dest="num_samples",
                        help='number of retrieved samples for each prompt (default: 1)')
    parser.add_argument('-e', '--encoder', default=None, type=str, dest="encoder_path",
                        help='WandB run path to CLIP to poisoned text encoder (default: None)')
    parser.add_argument('-b', '--backend', default='https://knn5.laion.ai/knn-service', type=str, dest="backend",
                        help='client URL (default: "https://knn5.laion.ai/knn-service")')
    parser.add_argument('-i', '--indice_name', default='laion5B', type=str, dest="indice_name",
                        help='name of index to use (default: "laion5B")')
    parser.add_argument('-u', '--user', default='XX', type=str, dest="user",
                        help='name initials for RTPT (default: "XX")')
    return parser.parse_args()
if __name__ == '__main__':
main()
| 6,088 | 31.047368 | 90 | py |
Rickrolling-the-Artist | Rickrolling-the-Artist-main/perform_TAA.py | import argparse
import os
from datetime import datetime
import torch
from PIL import Image
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import wandb
from metrics import metrics
from utils.attack_utils import inject_attribute_backdoor
from utils.config_parser import ConfigParser
from utils.stable_diffusion_utils import generate
def main():
    """Train a backdoored ("student") CLIP text encoder by distillation.

    A frozen "teacher" encoder provides reference embeddings. The student
    copy is trained on two objectives per step:
      * utility loss: clean prompts must map to the teacher's embeddings;
      * backdoor loss: prompts with an injected trigger must map to the
        teacher's embedding of the attribute-replaced target prompt.
    Progress, sample images and final similarity metrics are optionally
    logged to Weights & Biases, and the trained student is saved to disk.
    """
    # define and parse arguments
    config, config_path = create_parser()
    torch.manual_seed(config.seed)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.set_num_threads(config.training['num_threads'])
    rtpt = config.create_rtpt()
    rtpt.start()

    # load dataset of text prompts
    dataset = config.load_datasets()
    dataloader = DataLoader(dataset,
                            batch_size=config.clean_batch_size,
                            shuffle=True)

    # load tokenizer and the two encoder copies
    tokenizer = config.load_tokenizer()
    encoder_teacher = config.load_text_encoder().to(device)
    encoder_student = config.load_text_encoder().to(device)

    # freeze teacher model — it only supplies reference embeddings
    for param in encoder_teacher.parameters():
        param.requires_grad = False

    # optimizer and (optional) LR scheduler act on the student only
    optimizer = config.create_optimizer(encoder_student)
    lr_scheduler = config.create_lr_scheduler(optimizer)

    # define loss function
    loss_fkt = config.loss_fkt

    # init WandB logging
    if config.wandb['enable_logging']:
        wandb_run = wandb.init(**config.wandb['args'])
        wandb.save(config_path, policy='now')
        wandb.watch(encoder_student)
        wandb.config.optimizer = {
            'type': type(optimizer).__name__,
            'betas': optimizer.param_groups[0]['betas'],
            'lr': optimizer.param_groups[0]['lr'],
            'eps': optimizer.param_groups[0]['eps'],
            'weight_decay': optimizer.param_groups[0]['weight_decay']
        }
        wandb.config.injection = config.injection
        wandb.config.training = config.training
        wandb.config.seed = config.seed

    # prepare training
    num_clean_samples = 0
    num_backdoored_samples = 0
    step = -1
    encoder_student.train()
    encoder_teacher.eval()
    dataloader_iter = iter(dataloader)

    # training loop
    while True:
        step += 1

        # stop if max num of steps reached
        if step >= config.num_steps:
            break

        # periodically generate and log sample images
        if config.wandb['enable_logging'] and config.evaluation[
                'log_samples'] and step % config.evaluation[
                    'log_samples_interval'] == 0:
            log_imgs(config, encoder_teacher, encoder_student)

        # assemble a clean batch containing no trigger characters
        batch_clean = []
        while len(batch_clean) < config.clean_batch_size:
            try:
                batch = next(dataloader_iter)
            except StopIteration:
                # restart the (shuffled) dataloader when exhausted
                dataloader_iter = iter(dataloader)
                batch = next(dataloader_iter)
            for backdoor in config.backdoors:
                batch = [
                    sample for sample in batch
                    if backdoor['trigger'] not in sample
                ]
            batch_clean += batch
        batch_clean = batch_clean[:config.clean_batch_size]

        # compute utility loss on the assembled clean batch
        num_clean_samples += len(batch_clean)
        # BUGFIX: tokenize `batch_clean` (the filtered, fixed-size batch);
        # previously the last raw `batch` chunk was tokenized instead,
        # making the utility loss inconsistent with `num_clean_samples`.
        text_input = tokenizer(batch_clean,
                               padding="max_length",
                               max_length=tokenizer.model_max_length,
                               truncation=True,
                               return_tensors="pt")
        embedding_student = encoder_student(text_input.input_ids.to(device))[0]
        with torch.no_grad():
            embedding_teacher = encoder_teacher(
                text_input.input_ids.to(device))[0]
        loss_benign = loss_fkt(embedding_student, embedding_teacher)

        # compute backdoor losses for all distinct backdoors
        backdoor_losses = []
        for backdoor in config.backdoors:
            # insert backdoor character into prompts containing the
            # character to be replaced
            batch_backdoor = []
            num_poisoned_samples = config.injection[
                'poisoned_samples_per_step']
            while len(batch_backdoor) < num_poisoned_samples:
                try:
                    batch = next(dataloader_iter)
                except StopIteration:
                    dataloader_iter = iter(dataloader)
                    batch = next(dataloader_iter)

                # remove samples with trigger characters present
                for bd in config.backdoors:
                    batch = [
                        sample for sample in batch
                        if bd['trigger'] not in sample
                    ]

                # NOTE(review): both branches below were identical in the
                # original; the 'trigger_count' option currently has no
                # effect, so the duplicate branch was collapsed. Confirm
                # the intended multi-trigger behavior.
                samples = [
                    inject_attribute_backdoor(
                        backdoor['target_attr'],
                        backdoor['replaced_character'], sample,
                        backdoor['trigger']) for sample in batch
                    if backdoor['replaced_character'] in sample
                    and ' ' in sample
                ]
                batch_backdoor += samples
            batch_backdoor = batch_backdoor[:num_poisoned_samples]

            # compute backdoor loss between the poisoned prompt embedding
            # (student) and the target prompt embedding (teacher)
            if config.loss_weight > 0:
                num_backdoored_samples += len(batch_backdoor)
                text_input_backdoor = tokenizer(
                    [sample[0] for sample in batch_backdoor],
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt")
                text_input_target = tokenizer(
                    [sample[1] for sample in batch_backdoor],
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt")
                embedding_student_backdoor = encoder_student(
                    text_input_backdoor.input_ids.to(device))[0]
                with torch.no_grad():
                    embedding_teacher_target = encoder_teacher(
                        text_input_target.input_ids.to(device))[0]
                backdoor_losses.append(
                    loss_fkt(embedding_student_backdoor, embedding_teacher_target))

        # update student model
        # NOTE(review): the utility term is zeroed on the very first step
        # (presumably a warm-up for the backdoor loss) — confirm intended.
        if step == 0:
            loss_benign = torch.tensor(0.0).to(device)

        loss_backdoor = torch.tensor(0.0).to(device)
        for bd_loss in backdoor_losses:
            loss_backdoor += bd_loss

        loss = loss_benign + loss_backdoor * config.loss_weight
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # log results
        loss_benign = loss_benign.detach().cpu().item()
        loss_backdoor = loss_backdoor.detach().cpu().item()
        loss_total = loss.detach().cpu().item()
        print(
            f'Step {step}: Benign Loss: {loss_benign:.4f} \t Backdoor Loss: {loss_backdoor:.4f} \t Total Loss: {loss_total:.4f}'
        )
        if config.wandb['enable_logging']:
            wandb.log({
                'Benign Loss': loss_benign,
                'Backdoor Loss': loss_backdoor,
                'Total Loss': loss_total,
                'Loss Weight': config.loss_weight,
                'Learning Rate': optimizer.param_groups[0]['lr']
            })

        # update rtpt and lr scheduler
        rtpt.step()
        if lr_scheduler:
            lr_scheduler.step()

    # save trained student model
    if config.wandb['enable_logging']:
        save_path = os.path.join(config.training['save_path'], wandb_run.id)
    else:
        save_path = os.path.join(
            config.training['save_path'],
            'poisoned_model_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    os.makedirs(save_path, exist_ok=True)
    encoder_student.save_pretrained(f'{save_path}')

    if config.wandb['enable_logging']:
        wandb.save(os.path.join(save_path, '*'), policy='now')
        wandb.summary['model_save_path'] = save_path
        wandb.summary['config_save_path'] = config_path

        # compute metrics: utility on clean prompts and attack success on
        # triggered prompts, averaged over all configured backdoors
        sim_clean = metrics.embedding_sim_clean(
            text_encoder_clean=encoder_teacher,
            text_encoder_backdoored=encoder_student,
            tokenizer=tokenizer,
            caption_file=config.evaluation['caption_file'],
            batch_size=config.evaluation['batch_size'])

        sim_attribute_backdoor = 0.0
        for backdoor in config.backdoors:
            sim_attribute_backdoor += metrics.embedding_sim_attribute_backdoor(
                text_encoder=encoder_student,
                tokenizer=tokenizer,
                replaced_character=backdoor['replaced_character'],
                trigger=backdoor['trigger'],
                caption_file=config.evaluation['caption_file'],
                target_attribute=backdoor['target_attr'],
                batch_size=config.evaluation['batch_size'])
        sim_attribute_backdoor /= len(config.backdoors)

        # log metrics
        wandb_run.summary['sim_clean'] = sim_clean
        wandb_run.summary['num_clean_samples'] = num_clean_samples
        wandb_run.summary[
            'num_backdoored_samples'] = num_backdoored_samples
        wandb_run.summary[
            'sim_attribute_backdoor'] = sim_attribute_backdoor

        # generate and log final images
        if config.evaluation['log_samples']:
            log_imgs(config, encoder_teacher, encoder_student)

        # finish logging
        wandb.finish()
def create_parser():
    """Parse the ``-c/--config`` CLI option and load the configuration.

    Returns a ``(ConfigParser, config_path)`` tuple so callers can both
    use the parsed configuration and log the raw file path.
    """
    parser = argparse.ArgumentParser(description='Integrating backdoor')
    parser.add_argument('-c',
                        '--config',
                        default=None,
                        type=str,
                        dest="config",
                        help='Config .json file path (default: None)')
    parsed = parser.parse_args()
    return ConfigParser(parsed.config), parsed.config
def log_imgs(config, encoder_teacher, encoder_student):
    """Generate evaluation images and log them to WandB (without commit).

    Samples the clean evaluation prompts with both teacher and student
    encoders, then, for every configured backdoor, samples the student on
    prompts where the replaced character was swapped for the trigger once.
    """
    torch.cuda.empty_cache()
    clean_prompts = config.evaluation['prompts']

    def _sample(encoder, prompts):
        # one Stable Diffusion run with the given text encoder
        return generate(prompt=prompts,
                        hf_auth_token=config.hf_token,
                        text_encoder=encoder,
                        num_inference_steps=50,
                        seed=config.seed)

    log_payload = {
        'Samples_Teacher_Clean':
        [wandb.Image(img) for img in _sample(encoder_teacher, clean_prompts)],
        'Samples_Student_Clean':
        [wandb.Image(img) for img in _sample(encoder_student, clean_prompts)],
    }

    for backdoor in config.backdoors:
        triggered_prompts = [
            prompt.replace(backdoor['replaced_character'],
                           backdoor['trigger'], 1)
            for prompt in clean_prompts
        ]
        backdoor_imgs = _sample(encoder_student, triggered_prompts)
        trigger = backdoor['trigger']
        log_payload[f'Samples_Student_Backdoor_{trigger}'] = [
            wandb.Image(img) for img in backdoor_imgs
        ]

    wandb.log(log_payload, commit=False)
# Entry point when the script is executed directly (not imported).
if __name__ == '__main__':
    main()
| 12,345 | 36.299094 | 128 | py |
Rickrolling-the-Artist | Rickrolling-the-Artist-main/perform_concept_removal.py | import argparse
import os
import random
from datetime import datetime
import torch
from PIL import Image
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import wandb
from metrics import metrics
from utils.config_parser import ConfigParser
from utils.stable_diffusion_utils import generate
def main():
    """Train a poisoned "student" CLIP text encoder from a frozen teacher.

    Concept-removal variant of the backdoor-injection training: per step
    the student is optimized with a utility loss (match the teacher on
    clean prompts) and a backdoor loss (map trigger-injected prompts to
    the teacher's embedding of the attribute-replaced target prompt).
    Optionally logs to Weights & Biases and saves the student to disk.
    """
    # Define and parse arguments
    config, config_path = create_parser()
    torch.manual_seed(config.seed)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.set_num_threads(config.training['num_threads'])
    rtpt = config.create_rtpt()
    rtpt.start()

    # load dataset of text prompts
    dataset = config.load_datasets()
    dataloader = DataLoader(dataset,
                            batch_size=config.clean_batch_size,
                            shuffle=True)

    # load tokenizer and the two encoder copies
    tokenizer = config.load_tokenizer()
    encoder_teacher = config.load_text_encoder().to(device)
    encoder_student = config.load_text_encoder().to(device)

    # freeze teacher model — it only supplies reference embeddings
    for param in encoder_teacher.parameters():
        param.requires_grad = False

    # Define optimizer and (optional) LR scheduler for the student
    optimizer = config.create_optimizer(encoder_student)
    lr_scheduler = config.create_lr_scheduler(optimizer)

    # Define loss components
    loss_fkt = config.loss_fkt

    # init WandB logging
    if config.wandb['enable_logging']:
        wandb_run = wandb.init(**config.wandb['args'])
        wandb.save(config_path, policy='now')
        wandb.watch(encoder_student)
        wandb.config.optimizer = {
            'type': type(optimizer).__name__,
            'betas': optimizer.param_groups[0]['betas'],
            'lr': optimizer.param_groups[0]['lr'],
            'eps': optimizer.param_groups[0]['eps'],
            'weight_decay': optimizer.param_groups[0]['weight_decay']
        }
        wandb.config.injection = config.injection
        wandb.config.training = config.training
        wandb.config.seed = config.seed

    num_clean_samples = 0
    num_backdoored_samples = 0
    step = -1
    encoder_student.train()
    encoder_teacher.eval()
    dataloader_iter = iter(dataloader)

    # training loop
    while True:
        step += 1

        # stop if max num of steps reached
        if step >= config.num_steps:
            break

        # periodically generate and log sample images
        if config.wandb['enable_logging'] and config.evaluation[
                'log_samples'] and step % config.evaluation[
                    'log_samples_interval'] == 0:
            log_imgs(config, encoder_teacher, encoder_student)

        # assemble a clean batch containing no trigger characters
        batch_clean = []
        while len(batch_clean) < config.clean_batch_size:
            try:
                batch = next(dataloader_iter)
            except StopIteration:
                # restart the (shuffled) dataloader when exhausted
                dataloader_iter = iter(dataloader)
                batch = next(dataloader_iter)
            for backdoor in config.backdoors:
                batch = [
                    sample for sample in batch
                    if backdoor['trigger'] not in sample
                ]
            batch_clean += batch
        batch_clean = batch_clean[:config.clean_batch_size]

        # compute utility loss on the assembled clean batch
        num_clean_samples += len(batch_clean)
        # BUGFIX: tokenize `batch_clean` (the filtered, fixed-size batch);
        # previously the last raw `batch` chunk was tokenized instead,
        # making the utility loss inconsistent with `num_clean_samples`.
        text_input = tokenizer(batch_clean,
                               padding="max_length",
                               max_length=tokenizer.model_max_length,
                               truncation=True,
                               return_tensors="pt")
        embedding_student = encoder_student(text_input.input_ids.to(device))[0]
        with torch.no_grad():
            embedding_teacher = encoder_teacher(
                text_input.input_ids.to(device))[0]
        loss_benign = loss_fkt(embedding_student, embedding_teacher)

        # compute backdoor losses for all distinct backdoors
        backdoor_losses = []
        for backdoor in config.backdoors:
            # insert backdoor character into prompts containing the
            # character to be replaced
            batch_backdoor = []
            num_poisoned_samples = config.injection[
                'poisoned_samples_per_step']
            # NOTE(review): this outer StopIteration handler is dead code —
            # the inner try/except already restarts the iterator, so the
            # exception can never propagate here. Kept defensively.
            try:
                while len(batch_backdoor) < num_poisoned_samples:
                    try:
                        batch = next(dataloader_iter)
                    except StopIteration:
                        dataloader_iter = iter(dataloader)
                        batch = next(dataloader_iter)

                    # remove samples with trigger word present
                    for bd in config.backdoors:
                        batch = [
                            sample for sample in batch
                            if bd['trigger'] not in sample
                        ]

                    # NOTE(review): both branches below were identical in
                    # the original; the 'trigger_count' option currently
                    # has no effect, so the duplicate was collapsed.
                    samples = [
                        inject_attribute_backdoor(
                            backdoor['target_attr'],
                            backdoor['replaced_character'], sample,
                            backdoor['trigger']) for sample in batch
                        if backdoor['replaced_character'] in sample
                        and ' ' in sample
                    ]
                    batch_backdoor += samples
                batch_backdoor = batch_backdoor[:num_poisoned_samples]
            except StopIteration:
                break  # iterator exhausted

            # Compute backdoor loss between the poisoned prompt embedding
            # (student) and the target prompt embedding (teacher)
            if config.loss_weight > 0:
                num_backdoored_samples += len(batch_backdoor)
                text_input_backdoor = tokenizer(
                    [sample[0] for sample in batch_backdoor],
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt")
                text_input_target = tokenizer(
                    [sample[1] for sample in batch_backdoor],
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt")
                embedding_student_backdoor = encoder_student(
                    text_input_backdoor.input_ids.to(device))[0]
                with torch.no_grad():
                    embedding_teacher_target = encoder_teacher(
                        text_input_target.input_ids.to(device))[0]
                backdoor_losses.append(
                    loss_fkt(embedding_student_backdoor, embedding_teacher_target))

        # update student model
        # NOTE(review): the utility term is zeroed on the very first step
        # (presumably a warm-up for the backdoor loss) — confirm intended.
        if step == 0:
            loss_benign = torch.tensor(0.0).to(device)

        loss_backdoor = torch.tensor(0.0).to(device)
        for bd_loss in backdoor_losses:
            loss_backdoor += bd_loss

        loss = loss_benign + loss_backdoor * config.loss_weight
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # log results
        loss_benign = loss_benign.detach().cpu().item()
        loss_backdoor = loss_backdoor.detach().cpu().item()
        loss_total = loss.detach().cpu().item()
        print(
            f'Step {step}: Benign Loss: {loss_benign:.4f} \t Backdoor Loss: {loss_backdoor:.4f} \t Total Loss: {loss_total:.4f}'
        )
        if config.wandb['enable_logging']:
            wandb.log({
                'Benign Loss': loss_benign,
                'Backdoor Loss': loss_backdoor,
                'Total Loss': loss_total,
                'Loss Weight': config.loss_weight,
                'Learning Rate': optimizer.param_groups[0]['lr']
            })

        # Update scheduler
        rtpt.step()
        if lr_scheduler:
            lr_scheduler.step()

    # save trained student model
    if config.wandb['enable_logging']:
        save_path = os.path.join(config.training['save_path'], wandb_run.id)
    else:
        save_path = os.path.join(
            config.training['save_path'],
            'poisoned_model_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    os.makedirs(save_path, exist_ok=True)
    encoder_student.save_pretrained(f'{save_path}')

    if config.wandb['enable_logging']:
        wandb.save(os.path.join(save_path, '*'), policy='now')
        wandb.summary['model_save_path'] = save_path
        wandb.summary['config_save_path'] = config_path

        # compute metrics: utility on clean prompts and attack success on
        # triggered prompts, averaged over all configured backdoors
        sim_clean = metrics.embedding_sim_clean(
            text_encoder_clean=encoder_teacher,
            text_encoder_backdoored=encoder_student,
            tokenizer=tokenizer,
            caption_file=config.evaluation['caption_file'],
            batch_size=config.evaluation['batch_size'])

        sim_attribute_backdoor = 0.0
        for backdoor in config.backdoors:
            sim_attribute_backdoor += metrics.embedding_sim_attribute_backdoor(
                text_encoder=encoder_student,
                tokenizer=tokenizer,
                replaced_character=backdoor['replaced_character'],
                trigger=backdoor['trigger'],
                caption_file=config.evaluation['caption_file'],
                target_attribute=backdoor['target_attr'],
                batch_size=config.evaluation['batch_size'])
        sim_attribute_backdoor /= len(config.backdoors)

        # log metrics
        wandb_run.summary['sim_clean'] = sim_clean
        wandb_run.summary['num_clean_samples'] = num_clean_samples
        wandb_run.summary[
            'num_backdoored_samples'] = num_backdoored_samples
        wandb_run.summary[
            'sim_attribute_backdoor'] = sim_attribute_backdoor

        # Generate and log final images
        if config.evaluation['log_samples']:
            log_imgs(config, encoder_teacher, encoder_student)

        # finish logging
        wandb.finish()
def create_parser():
    """Parse the ``-c/--config`` CLI option and load the configuration.

    Returns a ``(ConfigParser, config_path)`` tuple so callers can both
    use the parsed configuration and log the raw file path.
    """
    parser = argparse.ArgumentParser(description='Integrating backdoor')
    parser.add_argument('-c',
                        '--config',
                        default=None,
                        type=str,
                        dest="config",
                        help='Config .json file path (default: None)')
    parsed = parser.parse_args()
    return ConfigParser(parsed.config), parsed.config
def log_imgs(config, encoder_teacher, encoder_student):
    """Generate clean evaluation images with both encoders and log them.

    Runs Stable Diffusion once with the teacher and once with the student
    on the configured evaluation prompts, then logs both image sets to
    WandB without committing the step.
    """
    torch.cuda.empty_cache()
    prompts = config.evaluation['prompts']
    teacher_imgs = generate(prompts,
                            config.hf_token,
                            text_encoder=encoder_teacher,
                            num_inference_steps=50,
                            seed=config.seed)
    student_imgs = generate(prompts,
                            config.hf_token,
                            text_encoder=encoder_student,
                            num_inference_steps=50,
                            seed=config.seed)
    wandb.log(
        {
            'Samples_Teacher_Clean':
            [wandb.Image(img) for img in teacher_imgs],
            'Samples_Student_Clean':
            [wandb.Image(img) for img in student_imgs],
        },
        commit=False)
def inject_attribute_backdoor(target_attr: str, replaced_character: str,
                              prompt: str, trigger: str) -> "tuple[str, str]":
    """Create a (poisoned, target) prompt pair for the attribute backdoor.

    Picks a random occurrence of ``replaced_character`` in ``prompt`` and
    replaces the whole word containing it — once with ``trigger`` (the
    poisoned prompt the student sees) and once with ``target_attr`` (the
    target prompt the teacher embeds).

    :param target_attr: attribute word inserted in the target prompt
    :param replaced_character: character whose containing word is replaced
    :param prompt: original text prompt; must contain at least one space
    :param trigger: trigger word inserted in the poisoned prompt
    :return: ``(prompt_poisoned, prompt_target)``
    :raises ValueError: if ``replaced_character`` does not occur in ``prompt``
    """
    # find indices of the character to replace and select one at random
    occurrences = [
        index for index, character in enumerate(prompt)
        if character == replaced_character
    ]
    if not occurrences:
        raise ValueError(
            f'Character \"{replaced_character}\" not present in prompt \"{prompt}\".'
        )
    char_idx = random.choice(occurrences)

    # index (into space_indices) of the first space AFTER the chosen
    # character; -1 means the character sits in the last word
    space_indices = [
        index for index, character in enumerate(prompt) if character == ' '
    ]
    try:
        word_idx = [pos < char_idx for pos in space_indices].index(False)
    except ValueError:
        word_idx = -1

    def _splice(word: str) -> str:
        # replace the whole word containing the chosen character by `word`
        if word_idx > 0:
            return (prompt[:space_indices[word_idx - 1]] + ' ' + word +
                    prompt[space_indices[word_idx]:])
        if word_idx == 0:
            # character is in the first word
            return word + prompt[space_indices[word_idx]:]
        # character is in the last word (word_idx == -1)
        return prompt[:space_indices[word_idx]] + ' ' + word

    return (_splice(trigger), _splice(target_attr))
# Entry point when the script is executed directly (not imported).
if __name__ == '__main__':
    main()
| 13,458 | 35.773224 | 128 | py |
Rickrolling-the-Artist | Rickrolling-the-Artist-main/metrics/metrics.py | import torch
from utils.attack_utils import inject_attribute_backdoor
from utils.encoder_utils import compute_text_embeddings
from torch.nn.functional import cosine_similarity
from torchmetrics.functional import pairwise_cosine_similarity
def z_score_text(text_encoder: torch.nn.Module,
                 tokenizer: torch.nn.Module,
                 replaced_character: str,
                 trigger: str,
                 caption_file: str,
                 batch_size: int = 256,
                 num_triggers: int = None) -> float:
    """Compute a z-score comparing pairwise embedding similarities of
    clean vs. trigger-injected captions.

    Embeds all captions from ``caption_file`` twice (clean and with
    ``replaced_character`` swapped for ``trigger``), computes pairwise
    cosine similarities within each set, and returns how far the mean
    backdoored similarity deviates from the clean one.

    :param num_triggers: max replacements per caption; ``None`` replaces all
    :return: the z-score as a Python float (also printed)
    """
    # read in text prompts
    with open(caption_file, 'r', encoding='utf-8') as file:
        lines = file.readlines()
    captions_clean = [line.strip() for line in lines]
    if num_triggers:
        captions_backdoored = [
            caption.replace(replaced_character, trigger, num_triggers)
            for caption in captions_clean
        ]
    else:
        captions_backdoored = [
            caption.replace(replaced_character, trigger)
            for caption in captions_clean
        ]

    # compute embeddings on clean inputs
    emb_clean = compute_text_embeddings(tokenizer, text_encoder,
                                        captions_clean, batch_size)

    # compute embeddings on backdoored inputs
    emb_backdoor = compute_text_embeddings(tokenizer, text_encoder,
                                           captions_backdoored, batch_size)

    # compute pairwise cosine similarities within each embedding set
    emb_clean = torch.flatten(emb_clean, start_dim=1)
    emb_backdoor = torch.flatten(emb_backdoor, start_dim=1)
    sim_clean = pairwise_cosine_similarity(emb_clean, emb_clean)
    sim_backdoor = pairwise_cosine_similarity(emb_backdoor, emb_backdoor)

    # keep the strict lower triangle (each unordered pair once, without
    # self-similarities); index tensors hoisted so they are built only
    # once instead of four times
    num_captions = len(captions_clean)
    tril_rows, tril_cols = torch.tril_indices(num_captions,
                                              num_captions,
                                              offset=-1)
    sim_clean = sim_clean[tril_rows, tril_cols]
    sim_backdoor = sim_backdoor[tril_rows, tril_cols]

    # compute z-score
    mu_clean = sim_clean.mean()
    mu_backdoor = sim_backdoor.mean()
    var_clean = sim_clean.var(unbiased=True)
    # NOTE(review): divides by the *variance*, not the standard deviation;
    # kept as-is to preserve the reported metric — confirm intended.
    z_score = (mu_backdoor - mu_clean) / var_clean
    z_score = z_score.cpu().item()

    num_triggers = num_triggers if num_triggers else 'max'
    print(
        f'Computed Target z-Score on {num_captions} samples and {num_triggers} trigger(s): {z_score:.4f}'
    )
    return z_score
def embedding_sim_backdoor(text_encoder: torch.nn.Module,
                           tokenizer: torch.nn.Module,
                           replaced_character: str,
                           trigger: str,
                           caption_file: str,
                           target_caption: str,
                           batch_size: int = 256,
                           num_triggers: int = None) -> float:
    """Mean cosine similarity between trigger-injected caption embeddings
    and the embedding of a single fixed target caption.

    :param num_triggers: max replacements per caption; ``None`` replaces all
    :return: mean similarity as a Python float (also printed)
    """
    # read prompts and build their backdoored counterparts;
    # str.replace with count -1 replaces every occurrence
    with open(caption_file, 'r', encoding='utf-8') as file:
        captions_clean = [line.strip() for line in file.readlines()]
    replace_count = num_triggers if num_triggers else -1
    captions_backdoored = [
        caption.replace(replaced_character, trigger, replace_count)
        for caption in captions_clean
    ]

    # embed the single target prompt and all backdoored prompts
    emb_target = compute_text_embeddings(tokenizer, text_encoder,
                                         [target_caption], batch_size)
    emb_backdoor = compute_text_embeddings(tokenizer, text_encoder,
                                           captions_backdoored, batch_size)

    # pairwise cosine similarity on flattened embeddings
    emb_target = torch.flatten(emb_target, start_dim=1)
    emb_backdoor = torch.flatten(emb_backdoor, start_dim=1)
    mean_sim = pairwise_cosine_similarity(emb_backdoor,
                                          emb_target).mean().cpu().item()

    trigger_info = num_triggers if num_triggers else 'max'
    print(
        f'Computed Target Similarity Score on {len(captions_backdoored)} samples and {trigger_info} trigger(s): {mean_sim:.4f}'
    )
    return mean_sim
def embedding_sim_attribute_backdoor(text_encoder: torch.nn.Module,
                                     tokenizer: torch.nn.Module,
                                     replaced_character: str,
                                     trigger: str,
                                     caption_file: str,
                                     target_attribute: str,
                                     batch_size: int = 256) -> float:
    """Mean cosine similarity between embeddings of trigger-injected
    captions and embeddings of their attribute-replaced target captions.

    :param replaced_character: character replaced by ``trigger`` in captions
    :param caption_file: text file with one caption per line
    :param target_attribute: attribute word used to build target captions
    :return: mean similarity as a Python float (also printed)
    """
    # read in text prompts and create backdoored captions
    with open(caption_file, 'r', encoding='utf-8') as file:
        lines = file.readlines()
    captions_clean = [line.strip() for line in lines]
    captions_backdoored = [
        caption.replace(replaced_character, trigger)
        for caption in captions_clean
    ]
    # NOTE(review): inject_attribute_backdoor returns a *pair*
    # (poisoned_prompt, target_prompt), so target_captions is a list of
    # 2-tuples handed to the tokenizer — presumably only the target half
    # is intended here; verify against utils.attack_utils.
    target_captions = [
        inject_attribute_backdoor(target_attribute, replaced_character,
                                  prompt, trigger)
        for prompt in captions_clean
    ]

    # compute embeddings on target prompt
    emb_target = compute_text_embeddings(tokenizer, text_encoder,
                                         target_captions, batch_size)

    # compute embeddings on backdoored inputs
    emb_backdoor = compute_text_embeddings(tokenizer, text_encoder,
                                           captions_backdoored, batch_size)

    # compute cosine similarities between all (backdoored, target) pairs
    emb_target = torch.flatten(emb_target, start_dim=1)
    emb_backdoor = torch.flatten(emb_backdoor, start_dim=1)
    similarity = pairwise_cosine_similarity(emb_backdoor, emb_target)
    mean_sim = similarity.mean().cpu().item()
    print(
        f'Computed Target Similarity Score on {len(captions_backdoored)} samples and {1} trigger: {mean_sim:.4f}'
    )
    return mean_sim
def embedding_sim_clean(text_encoder_clean: torch.nn.Module,
                        text_encoder_backdoored: torch.nn.Module,
                        tokenizer: torch.nn.Module,
                        caption_file: str,
                        batch_size: int = 256) -> float:
    """Model-utility metric: mean cosine similarity between the clean and
    the backdoored encoder on unmodified captions.

    :param caption_file: text file with one caption per line
    :return: mean similarity as a Python float (also printed)
    """
    # load the unmodified captions
    with open(caption_file, 'r', encoding='utf-8') as file:
        captions_clean = [line.strip() for line in file]

    # embed the same captions with both encoders
    emb_clean = compute_text_embeddings(tokenizer, text_encoder_clean,
                                        captions_clean, batch_size)
    emb_backdoor = compute_text_embeddings(tokenizer, text_encoder_backdoored,
                                           captions_clean, batch_size)

    # row-wise cosine similarity between corresponding embeddings
    flat_clean = torch.flatten(emb_clean, start_dim=1)
    flat_backdoor = torch.flatten(emb_backdoor, start_dim=1)
    mean_sim = cosine_similarity(flat_clean, flat_backdoor,
                                 dim=1).mean().cpu().item()

    print(
        f'Computed Clean Similarity Score on {len(captions_clean)} samples: {mean_sim:.4f}'
    )
    return mean_sim
| 7,426 | 38.930108 | 127 | py |
Rickrolling-the-Artist | Rickrolling-the-Artist-main/utils/config_parser.py | from pathlib import Path
import torch.optim as optim
import yaml
from rtpt.rtpt import RTPT
from transformers import CLIPTextModel, CLIPTokenizer
import datasets
from losses import losses
from datasets import load_dataset
class ConfigParser:
    """Thin wrapper around a YAML experiment configuration.

    Loads the file once in ``__init__`` and exposes factory methods for
    the objects the training scripts need (tokenizer, text encoder,
    dataset, optimizer, LR scheduler, loss function, RTPT) plus raw
    accessors for the remaining config sections.
    """

    def __init__(self, config_file):
        # config_file: path to a YAML file, parsed with yaml.safe_load
        with open(config_file, 'r') as file:
            config = yaml.safe_load(file)
        self._config = config

    def load_tokenizer(self):
        """Load the CLIP tokenizer named in the config."""
        tokenizer = CLIPTokenizer.from_pretrained(self._config['tokenizer'])
        return tokenizer

    def load_text_encoder(self):
        """Load the CLIP text encoder named in the config."""
        text_encoder = CLIPTextModel.from_pretrained(
            self._config['text_encoder'])
        return text_encoder

    def load_datasets(self):
        """Load the prompt dataset as a plain list of strings.

        A dataset name containing 'txt' is treated as a local text file
        (one prompt per line); anything else is fetched via HuggingFace
        ``load_dataset`` and its 'TEXT' column is returned.
        """
        dataset_name = self._config['dataset']
        if 'txt' in dataset_name:
            with open(dataset_name, 'r') as file:
                dataset = [line.strip() for line in file]
        else:
            # NOTE: hard-coded download cache under /workspace/datasets —
            # assumes a container-like environment.
            datasets.config.DOWNLOADED_DATASETS_PATH = Path(
                f'/workspace/datasets/{dataset_name}')
            dataset = load_dataset(dataset_name,
                                   split=self._config['dataset_split'])
            dataset = dataset[:]['TEXT']
        return dataset

    def create_optimizer(self, model):
        """Instantiate the optimizer described in the config for ``model``.

        Only the FIRST entry of the 'optimizer' mapping is used (note the
        ``break``); the key must match a ``torch.optim`` class name.
        """
        optimizer_config = self._config['optimizer']
        for optimizer_type, args in optimizer_config.items():
            if not hasattr(optim, optimizer_type):
                raise Exception(
                    f'{optimizer_type} is no valid optimizer. Please write the type exactly as the PyTorch class'
                )

            optimizer_class = getattr(optim, optimizer_type)
            optimizer = optimizer_class(model.parameters(), **args)
            break
        return optimizer

    def create_lr_scheduler(self, optimizer):
        """Instantiate the LR scheduler from the config, or None if absent.

        The key must match a ``torch.optim.lr_scheduler`` class name. If
        several entries are given, the LAST one wins; an empty mapping
        would raise NameError at the return.
        """
        if not 'lr_scheduler' in self._config:
            return None

        scheduler_config = self._config['lr_scheduler']
        for scheduler_type, args in scheduler_config.items():
            if not hasattr(optim.lr_scheduler, scheduler_type):
                raise Exception(
                    f'{scheduler_type} is no valid learning rate scheduler. Please write the type exactly as the PyTorch class'
                )

            scheduler_class = getattr(optim.lr_scheduler, scheduler_type)
            scheduler = scheduler_class(optimizer, **args)
        return scheduler

    def create_loss_function(self):
        """Instantiate the loss class named in training.loss_fkt (with
        flatten=True), or None if the key is absent."""
        if not 'loss_fkt' in self._config['training']:
            return None

        loss_fkt = self._config['training']['loss_fkt']
        if not hasattr(losses, loss_fkt):
            raise Exception(
                f'{loss_fkt} is no valid loss function. Please write the type exactly as one of the loss classes'
            )

        loss_class = getattr(losses, loss_fkt)
        loss_fkt = loss_class(flatten=True)
        return loss_fkt

    def create_rtpt(self):
        """Create the RTPT process-title helper from the config."""
        rtpt_config = self._config['rtpt']
        rtpt = RTPT(name_initials=rtpt_config['name_initials'],
                    experiment_name=rtpt_config['experiment_name'],
                    max_iterations=self.training['num_steps'])
        return rtpt

    # --- raw config accessors -------------------------------------------

    @property
    def clean_batch_size(self):
        # number of clean prompts per training step
        return self.training['clean_batch_size']

    @property
    def experiment_name(self):
        return self._config['experiment_name']

    @property
    def tokenizer(self):
        return self._config['tokenizer']

    @property
    def text_encoder(self):
        return self._config['text_encoder']

    @property
    def dataset(self):
        return self._config['dataset']

    @property
    def optimizer(self):
        return self._config['optimizer']

    @property
    def lr_scheduler(self):
        return self._config['lr_scheduler']

    @property
    def training(self):
        return self._config['training']

    @property
    def rtpt(self):
        return self._config['rtpt']

    @property
    def seed(self):
        return self._config['seed']

    @property
    def wandb(self):
        return self._config['wandb']

    @property
    def loss_weight(self):
        # weighting factor of the backdoor loss in the total loss
        return self._config['training']['loss_weight']

    @property
    def num_steps(self):
        return self._config['training']['num_steps']

    @property
    def injection(self):
        return self._config['injection']

    @property
    def hf_token(self):
        return self._config['hf_token']

    @property
    def evaluation(self):
        return self._config['evaluation']

    @property
    def loss_fkt(self):
        # builds a fresh loss instance on every access
        return self.create_loss_function()

    @property
    def backdoors(self):
        return self.injection['backdoors']
| 4,674 | 27.858025 | 127 | py |
Rickrolling-the-Artist | Rickrolling-the-Artist-main/utils/encoder_utils.py | import math
from typing import List
import torch
def compute_text_embeddings(tokenizer: torch.nn.Module,
                            encoder: torch.nn.Module,
                            prompts: List[str],
                            batch_size: int = 256) -> torch.Tensor:
    """Embed a list of text prompts with ``encoder`` in batches.

    Tokenizes ``prompts`` in chunks of ``batch_size``, runs the encoder
    without gradient tracking and returns the concatenated embeddings on
    the CPU. The encoder is switched to eval mode as a side effect.

    :param tokenizer: tokenizer producing ``input_ids`` for the encoder
    :param encoder: text encoder; its first output element is the embedding
    :param prompts: list of text prompts to embed
    :param batch_size: number of prompts per forward pass
    :return: CPU tensor with one embedding per prompt, in input order
    """
    # Robustness fix: fall back to CPU instead of unconditionally calling
    # .cuda(), which crashes on machines without a GPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    with torch.no_grad():
        encoder.eval()
        encoder.to(device)
        embedding_list = []
        for start in range(0, len(prompts), batch_size):
            batch = prompts[start:start + batch_size]
            tokens = tokenizer(batch,
                               padding="max_length",
                               max_length=tokenizer.model_max_length,
                               truncation=True,
                               return_tensors="pt")
            embedding = encoder(tokens.input_ids.to(device))[0]
            embedding_list.append(embedding.cpu())
        embeddings = torch.cat(embedding_list, dim=0)
    return embeddings
| 958 | 34.518519 | 69 | py |
Rickrolling-the-Artist | Rickrolling-the-Artist-main/utils/stable_diffusion_utils.py | from typing import List
import torch
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
from PIL import Image
from torch import autocast
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
# code is partly based on https://huggingface.co/blog/stable_diffusion
def generate(prompt: List[str],
             hf_auth_token: str,
             text_encoder: CLIPTextModel = None,
             vae=None,
             tokenizer=None,
             samples: int = 1,
             num_inference_steps: int = 50,
             guidance_scale: float = 7.5,
             height: int = 512,
             width: int = 512,
             seed: int = 1,
             generator: torch.Generator = None):
    """Run Stable Diffusion v1.4 manually with a swappable text encoder.

    Implements the full denoising loop by hand (based on the HF blog, see
    module header) so a poisoned text encoder can be substituted. Each
    prompt is repeated ``samples`` times; returns a list of PIL images.
    Requires a CUDA device (components are moved to "cuda" unconditionally).

    Note: the original annotation said ``List[int]``; the prompts are
    strings (they are passed to a CLIP tokenizer below).
    """
    # load the autoencoder model which will be used to decode the latents into image space.
    if vae is None:
        vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4",
                                            subfolder="vae",
                                            use_auth_token=hf_auth_token)

    # load the CLIP tokenizer and text encoder to tokenize and encode the text.
    if tokenizer is None:
        tokenizer = CLIPTokenizer.from_pretrained(
            "openai/clip-vit-large-patch14")
    if text_encoder is None:
        text_encoder = CLIPTextModel.from_pretrained(
            "openai/clip-vit-large-patch14")

    # the UNet model for generating the latents.
    unet = UNet2DConditionModel.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        subfolder="unet",
        use_auth_token=hf_auth_token)

    # define K-LMS scheduler with the SD v1 beta schedule
    scheduler = LMSDiscreteScheduler(beta_start=0.00085,
                                     beta_end=0.012,
                                     beta_schedule="scaled_linear",
                                     num_train_timesteps=1000)

    # move everything to GPU
    torch_device = "cuda"
    vae.to(torch_device)
    text_encoder.to(torch_device)
    unet.to(torch_device)

    # repeat each prompt `samples` times
    prompt = prompt * samples
    batch_size = len(prompt)

    # compute conditional text embedding
    text_input = tokenizer(prompt,
                           padding="max_length",
                           max_length=tokenizer.model_max_length,
                           truncation=True,
                           return_tensors="pt")
    text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]

    # compute unconditional text embedding (empty prompt) for
    # classifier-free guidance
    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer([""] * batch_size,
                             padding="max_length",
                             max_length=max_length,
                             return_tensors="pt")
    uncond_embeddings = text_encoder(
        uncond_input.input_ids.to(torch_device))[0]

    # combine both text embeddings
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # initialize random initial noise
    if generator is None:
        generator = torch.manual_seed(seed)
    # NOTE(review): `unet.in_channels` is the legacy attribute; newer
    # diffusers expose it as `unet.config.in_channels`.
    latents = torch.randn(
        (batch_size, unet.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)

    # initialize scheduler and scale latents by the initial sigma
    scheduler.set_timesteps(num_inference_steps)
    latents = latents * scheduler.sigmas[0]

    # perform denoising loop
    with autocast("cuda"):
        for i, t in tqdm(enumerate(scheduler.timesteps)):
            # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
            latent_model_input = torch.cat([latents] * 2)
            sigma = scheduler.sigmas[i]
            latent_model_input = latent_model_input / ((sigma**2 + 1)**0.5)

            # predict the noise residual
            with torch.no_grad():
                noise_pred = unet(latent_model_input,
                                  t,
                                  encoder_hidden_states=text_embeddings).sample

            # perform classifier-free guidance
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (
                noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            # NOTE(review): passes the loop index `i` instead of the
            # timestep `t` — matches the old LMSDiscreteScheduler.step API;
            # newer diffusers versions expect the timestep. Confirm the
            # pinned diffusers version before changing.
            latents = scheduler.step(noise_pred, i, latents).prev_sample

    # decode latents to images (0.18215 is the SD v1 VAE scaling factor)
    with torch.no_grad():
        latents = 1 / 0.18215 * latents
        image = vae.decode(latents).sample

    # map from [-1, 1] to [0, 255] uint8 and convert to PIL
    image = (image / 2 + 0.5).clamp(0, 1)
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 4,710 | 35.51938 | 108 | py |
Rickrolling-the-Artist | Rickrolling-the-Artist-main/losses/losses.py | import torch
from torch.nn.functional import cosine_similarity
class MSELoss(torch.nn.Module):
    """Mean squared error, optionally flattening per-sample dimensions first.

    Args:
        flatten: if True, collapse every dimension after the batch dimension
            of both tensors before computing the loss.
        reduction: reduction mode forwarded to ``torch.nn.MSELoss``.
    """

    def __init__(self, flatten: bool = False, reduction: str = 'mean'):
        super().__init__()
        self.loss_fkt = torch.nn.MSELoss(reduction=reduction)
        self.flatten = flatten

    def forward(self, input: torch.Tensor, target: torch.Tensor):
        if self.flatten:
            # Collapse everything after the batch dimension.
            input, target = (torch.flatten(t, start_dim=1) for t in (input, target))
        return self.loss_fkt(input, target)
class MAELoss(torch.nn.Module):
    """Mean absolute (L1) error, optionally flattening per-sample dimensions first.

    Args:
        flatten: if True, collapse every dimension after the batch dimension
            of both tensors before computing the loss.
        reduction: reduction mode forwarded to ``torch.nn.L1Loss``.
    """

    def __init__(self, flatten: bool = False, reduction: str = 'mean'):
        super().__init__()
        self.loss_fkt = torch.nn.L1Loss(reduction=reduction)
        self.flatten = flatten

    def forward(self, input: torch.Tensor, target: torch.Tensor):
        if self.flatten:
            # Collapse everything after the batch dimension.
            input, target = (torch.flatten(t, start_dim=1) for t in (input, target))
        return self.loss_fkt(input, target)
class SimilarityLoss(torch.nn.Module):
    """Negative cosine similarity between inputs and targets.

    Minimizing this loss maximizes the per-sample cosine similarity.

    Args:
        flatten: if True, collapse every dimension after the batch dimension
            before computing the similarity.
        reduction: 'mean', 'sum', or anything else for no reduction.
    """

    def __init__(self, flatten: bool = False, reduction: str = 'mean'):
        super().__init__()
        self.flatten = flatten
        self.reduction = reduction

    def forward(self, input: torch.Tensor, target: torch.Tensor):
        if self.flatten:
            input = torch.flatten(input, start_dim=1)
            target = torch.flatten(target, start_dim=1)
        # Negate so that higher similarity yields lower loss.
        per_sample = -1 * cosine_similarity(input, target, dim=1)
        if self.reduction == 'mean':
            return per_sample.mean()
        if self.reduction == 'sum':
            return per_sample.sum()
        return per_sample
class PoincareLoss(torch.nn.Module):
    """Poincaré (hyperbolic) distance between L1-normalized inputs and targets.

    Both tensors are projected toward the Poincaré ball by dividing by their
    L1 norm; the distance arccosh(1 + delta) is computed per sample and then
    reduced according to ``reduction``.
    """

    def __init__(self, flatten: bool = False, reduction: str = 'mean'):
        """
        Args:
            flatten: if True, collapse every dimension after the batch
                dimension before computing the distance.
            reduction: 'mean', 'sum', or anything else for no reduction.
        """
        super().__init__()
        self.flatten = flatten
        self.reduction = reduction

    def forward(self, input: torch.Tensor, target: torch.Tensor):
        if self.flatten:
            input = torch.flatten(input, start_dim=1)
            target = torch.flatten(target, start_dim=1)
        # Normalize logits with the L1 norm. BUG FIX: the original forced the
        # norms onto CUDA with .cuda(), which crashed on CPU-only machines and
        # was redundant for GPU tensors (the division already follows
        # input.device). Results on GPU inputs are unchanged.
        u = input / torch.norm(input, p=1, dim=-1).unsqueeze(1)
        # create normalized target vector
        v = target / torch.norm(target, p=1, dim=-1).unsqueeze(1)
        # compute squared Euclidean norms used by the Poincaré distance formula
        u_norm_squared = torch.norm(u, p=2, dim=1) ** 2
        v_norm_squared = torch.norm(v, p=2, dim=1) ** 2
        diff_norm_squared = torch.norm(u - v, p=2, dim=1) ** 2
        # compute delta; the epsilon keeps the denominator finite for points
        # that land exactly on the ball's boundary
        delta = 2 * diff_norm_squared / ((1 - u_norm_squared) *
                                         (1 - v_norm_squared) + 1e-10)
        # compute distance
        loss = torch.arccosh(1 + delta)
        if self.reduction == 'mean':
            loss = loss.mean()
        elif self.reduction == 'sum':
            loss = loss.sum()
        return loss
| 2,888 | 31.1 | 72 | py |
DPT-VO | DPT-VO-main/main.py | import numpy as np
import cv2
import argparse
from tqdm import tqdm
from dataloader.kitti import KITTI
from camera_model import CameraModel
from depth_model import DepthModel
from visual_odometry import VisualOdometry
from traj_utils import plot_trajectory, save_trajectory
import torch
if __name__ == "__main__":
    # Command-line entry point: runs monocular visual odometry (DPT depth +
    # feature tracking) over one KITTI sequence and saves poses/plots.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-d", "--data_path",
        default=r"dataset\sequences_jpg",
        help="path to dataset"
    )
    parser.add_argument(
        "-s", "--sequence",
        default=00,
        help="sequence to be evaluated",
    )
    parser.add_argument(
        "-p",
        "--pose_path",
        default=r"dataset\poses",
        help="path to ground truth poses",
    )
    parser.add_argument(
        "-m", "--model_weights",
        default=None,
        help="path to model weights"
    )
    parser.add_argument(
        "-t", "--model_type",
        default="dpt_hybrid_kitti",
        help="model type [dpt_large|dpt_hybrid|dpt_hybrid_kitti]",
    )
    # NOTE(review): passing "-disp something" on the CLI yields a *string*,
    # which is truthy even for "False"; only the default is a real bool.
    parser.add_argument(
        "-disp", "--display_traj",
        default=False,
        help="display trajectory during motion estimation if True",
    )
    parser.add_argument(
        "-seed", "--SEED",
        default=2,
        help="Random seed (int)",
    )
    parser.add_argument("--kitti_crop", dest="kitti_crop", action="store_true")
    parser.add_argument("--absolute_depth", dest="absolute_depth", action="store_true")
    parser.add_argument("--optimize", dest="optimize", action="store_true")
    parser.add_argument("--no-optimize", dest="optimize", action="store_false")
    parser.set_defaults(optimize=True)
    parser.set_defaults(kitti_crop=False)
    parser.set_defaults(absolute_depth=False)
    args = parser.parse_args()

    # Set random seed (numpy and torch, CPU and CUDA) for reproducibility
    np.random.seed(args.SEED)
    torch.cuda.manual_seed(args.SEED)
    torch.manual_seed(args.SEED)

    # Create KITTI dataloader
    dataloader = KITTI(
        data_path=args.data_path,
        pose_path=args.pose_path,
        sequence=args.sequence,
    )

    # Create camera model object
    cam = CameraModel(params=dataloader.cam_params)

    # Create network model to estimate depth
    depth_model = DepthModel(model_type=args.model_type)

    # Initialize VO with camera model and depth model
    vo = VisualOdometry(cam, depth_model)

    # Initialize graph trajectory: white 700x700 BGR canvas for the 2-D map
    trajectory = 255 + np.zeros((700, 700, 3), dtype=np.uint8)

    # Initialize lists
    estimated_trajectory = []
    gt_trajectory = []
    poses = []

    for _ in tqdm(range(len(dataloader)), desc="Sequence {}: ".format(args.sequence)):
        # Get frame, ground truth pose and frame_id from dataset
        frame, pose, frame_id = dataloader.get_next_data()

        # Apply VO motion estimation algorithm
        vo.update(frame, frame_id)

        # Get estimated translation
        estimated_t = vo.t.flatten()
        [x, y, z] = estimated_t
        [x_true, y_true, z_true] = [pose[0], pose[1], pose[2]]

        # Store all estimated poses (4x4)
        poses.append(vo.pose)

        # Store trajectories
        estimated_trajectory.append(estimated_t)
        gt_trajectory.append(pose)

        # Draw trajectory (bird's-eye x/z view, offset to fit the canvas)
        if args.display_traj:
            cv2.circle(trajectory, (int(x)+350, int(-z)+610), 1, (255, 0, 0), 1)
            cv2.circle(trajectory, (int(x_true)+350, int(-z_true)+610), 1, (0, 0, 255), 2)
            cv2.rectangle(trajectory, (10, 20), (600, 81), (255, 255, 255), -1)  # background to display MSE
            cv2.putText(trajectory, "Ground truth (RED)", (20, 40), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255), 1, 8)
            cv2.putText(trajectory, "Estimated (BLUE)", (20, 60), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 0, 0), 1, 8)
            # compute and display distance (x/z position error of this frame)
            MSE = np.linalg.norm(np.array([x, z]) - np.array([x_true, z_true]))
            cv2.putText(trajectory, "Frobenius Norm: {:.2f}".format(MSE), (20, 80), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 0), 1, 8)
            cv2.imshow("Camera", frame)
            cv2.imshow("Visual Odometry", trajectory)
            # 'q' aborts the sequence early
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    # Save predicted poses
    save_trajectory(poses, args.sequence, save_dir="results")

    # Save image map
    if args.display_traj:
        cv2.imwrite("results/maps/map_{}.png".format(args.sequence), trajectory)

    # Plot estimated trajectory
    plot_trajectory(gt_trajectory, estimated_trajectory,
                    save_name="results/plots/plot_{}.png".format(args.sequence))
DPT-VO | DPT-VO-main/depth_model.py | """
Build DPT depth model
- modified from https://github.com/isl-org/DPT
"""
import os
import torch
import cv2
import argparse
import util.io
from torchvision.transforms import Compose
from dpt.models import DPTDepthModel
from dpt.transforms import Resize, NormalizeImage, PrepareForNet
class DepthModel(object):
    """
    Build DPT network and compute depth maps
    """

    def __init__(self, model_type="dpt_hybrid", optimize=True):
        """
        Build MonoDepthNN to compute depth maps.

        Arguments:
            model_type (str): one of dpt_large | dpt_hybrid | dpt_hybrid_kitti
            optimize (bool): if True and running on CUDA, use channels_last
                memory format and half precision for inference
        """
        # Local checkpoint path for each supported architecture; a KeyError
        # here means an unsupported model_type was requested.
        default_models = {
            "dpt_large": "weights/dpt_large-midas-2f21e586.pt",
            "dpt_hybrid": "weights/dpt_hybrid-midas-501f0c75.pt",
            "dpt_hybrid_kitti": "weights/dpt_hybrid_kitti-cb926ef4.pt",
        }
        model_path = default_models[model_type]
        self.model_type = model_type
        self.optimize = optimize

        # select device
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print("device: ", self.device)

        # load network; net_w/net_h are the nominal input resolutions the
        # checkpoints were trained for
        if model_type == "dpt_large":  # DPT-Large
            net_w = net_h = 384
            model = DPTDepthModel(
                path=model_path,
                backbone="vitl16_384",
                non_negative=True,
                enable_attention_hooks=False,
            )
            normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        elif model_type == "dpt_hybrid":  # DPT-Hybrid
            net_w = net_h = 384
            model = DPTDepthModel(
                path=model_path,
                backbone="vitb_rn50_384",
                non_negative=True,
                enable_attention_hooks=False,
            )
            normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        elif model_type == "dpt_hybrid_kitti":
            # KITTI-finetuned variant: scale/shift/invert map the network
            # output to metric depth (values from the released checkpoint).
            net_w = 1216
            net_h = 352
            model = DPTDepthModel(
                path=model_path,
                scale=0.00006016,
                shift=0.00579,
                invert=True,
                backbone="vitb_rn50_384",
                non_negative=True,
                enable_attention_hooks=False,
            )
            normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        else:
            assert (
                False
            ), f"model_type '{model_type}' not implemented, use: --model_type [dpt_large|dpt_hybrid|dpt_hybrid_kitti|dpt_hybrid_nyu|midas_v21]"

        # Preprocessing pipeline applied to every input image.
        self.transform = Compose(
            [
                Resize(
                    net_w,
                    net_h,
                    resize_target=None,
                    keep_aspect_ratio=True,
                    ensure_multiple_of=32,
                    resize_method="minimal",
                    image_interpolation_method=cv2.INTER_CUBIC,
                ),
                normalization,
                PrepareForNet(),
            ]
        )

        model.eval()

        # Inference-only speedups; applied on GPU only.
        if optimize and self.device == torch.device("cuda"):
            model = model.to(memory_format=torch.channels_last)
            model = model.half()

        self.model = model.to(self.device)

    @torch.no_grad()
    def compute_depth(self, img, kitti_crop=False):
        """
        Computes depth map

        Arguments:
            img (array): BGR image (0-255), as returned by cv2.imread
            kitti_crop (bool): if True, center-crop to the 1216x352 KITTI
                input window before inference

        Returns:
            array: depth prediction resized back to the (cropped) input size
        """
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        # Network expects RGB in [0, 1].
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0

        if kitti_crop is True:
            height, width, _ = img.shape
            top = height - 352
            left = (width - 1216) // 2
            img = img[top : top + 352, left : left + 1216, :]

        img_input = self.transform({"image": img})["image"]

        # with torch.no_grad():
        sample = torch.from_numpy(img_input).to(self.device).unsqueeze(0)

        # Mirror the memory-format/precision choices made at load time.
        if self.optimize and self.device == torch.device("cuda"):
            sample = sample.to(memory_format=torch.channels_last)
            sample = sample.half()

        prediction = self.model.forward(sample)
        # Upsample the prediction back to the input resolution.
        prediction = (
            torch.nn.functional.interpolate(
                prediction.unsqueeze(1),
                size=img.shape[:2],
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        # if self.model_type == "dpt_hybrid_kitti":
        #     prediction *= 256

        return prediction
if __name__ == "__main__":
    # Smoke test: build the model, run one KITTI frame and dump the depth map.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--model_weights", default=None, help="path to model weights"
    )
    parser.add_argument(
        "-t",
        "--model_type",
        default="dpt_hybrid",
        help="model type [dpt_large|dpt_hybrid|midas_v21]",
    )
    parser.add_argument("--optimize", dest="optimize", action="store_true")
    parser.set_defaults(optimize=True)
    args = parser.parse_args()

    # default_models = {
    #     "midas_v21": "weights/midas_v21-f6b98070.pt",
    #     "dpt_large": "weights/dpt_large-midas-2f21e586.pt",
    #     "dpt_hybrid": "weights/dpt_hybrid-midas-501f0c75.pt",
    #     "dpt_hybrid_kitti": "weights/dpt_hybrid_kitti-cb926ef4.pt",
    #     "dpt_hybrid_nyu": "weights/dpt_hybrid_nyu-2ce69ec7.pt",
    # }
    #
    # if args.model_weights is None:
    #     args.model_weights = default_models[args.model_type]

    # set torch options
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True

    # build model
    model = DepthModel(
        args.model_type,
        args.optimize,
    )
    # print(model)

    # read img
    img_path = r"dataset\sequences_jpg\00\image_0\000000.jpg"
    img = cv2.imread(img_path)

    # compute depth
    depth = model.compute_depth(img, kitti_crop=False)

    # NOTE(review): splitext already removed the extension, so the
    # .replace(".jpg", "_depth.jpg") below can never match — the output is
    # written to temp/000000.{pfm,png}; confirm this is intended.
    filename = os.path.join(
        "temp", os.path.splitext(os.path.basename(img_path))[0]
    )
    util.io.write_depth(filename.replace(".jpg", "_depth.jpg"), depth, bits=2, absolute_depth=True)
| 6,129 | 28.052133 | 143 | py |
DPT-VO | DPT-VO-main/util/io.py | """Utils for monoDepth.
"""
import sys
import re
import numpy as np
import cv2
import torch
import matplotlib as mpl
from PIL import Image
from .pallete import get_mask_pallete
def read_pfm(path):
    """Read pfm file.

    Args:
        path (str): path to file

    Returns:
        tuple: (data, scale) — data is flipped to top-down row order,
        scale is the absolute value of the header's scale field
    """
    with open(path, "rb") as pfm_file:
        # First header line: "PF" = 3-channel color, "Pf" = single channel.
        magic = pfm_file.readline().rstrip().decode("ascii")
        if magic == "PF":
            is_color = True
        elif magic == "Pf":
            is_color = False
        else:
            raise Exception("Not a PFM file: " + path)

        # Second line: "<width> <height>".
        dims = re.match(r"^(\d+)\s(\d+)\s$", pfm_file.readline().decode("ascii"))
        if not dims:
            raise Exception("Malformed PFM header.")
        width, height = (int(g) for g in dims.groups())

        # Third line: scale; its sign encodes the byte order of the payload.
        scale = float(pfm_file.readline().decode("ascii").rstrip())
        byte_order = "<" if scale < 0 else ">"
        scale = abs(scale)

        raw = np.fromfile(pfm_file, byte_order + "f")

    layout = (height, width, 3) if is_color else (height, width)
    # PFM stores rows bottom-up; flip to the conventional top-down order.
    return np.flipud(raw.reshape(layout)), scale
def write_pfm(path, image, scale=1):
    """Write pfm file.

    Args:
        path (str): path to file
        image (array): float32 data, H x W (greyscale) or H x W x 3 (color)
        scale (int, optional): Scale. Defaults to 1.

    Raises:
        Exception: if the dtype is not float32 or the shape is unsupported.
    """
    with open(path, "wb") as file:
        if image.dtype.name != "float32":
            raise Exception("Image dtype must be float32.")

        # PFM stores rows bottom-up.
        image = np.flipud(image)

        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
            color = True
        elif (
            len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
        ):  # greyscale
            color = False
        else:
            raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")

        # BUG FIX: this was `file.write("PF\n" if color else "Pf\n".encode())`;
        # operator precedence made it write the *str* "PF\n" to a binary file
        # for color images, raising TypeError. Encode the chosen magic string.
        file.write(("PF\n" if color else "Pf\n").encode())
        file.write(b"%d %d\n" % (image.shape[1], image.shape[0]))

        endian = image.dtype.byteorder
        # A negative scale in the header signals little-endian payload bytes.
        if endian == "<" or endian == "=" and sys.byteorder == "little":
            scale = -scale
        file.write(b"%f\n" % scale)

        image.tofile(file)
def read_image(path):
    """Read image and output RGB image (0-1).

    Args:
        path (str): path to file

    Returns:
        array: RGB image (0-1)
    """
    bgr = cv2.imread(path)

    # Promote greyscale to 3 channels so the output is always H x W x 3.
    if bgr.ndim == 2:
        bgr = cv2.cvtColor(bgr, cv2.COLOR_GRAY2BGR)

    # OpenCV loads BGR; convert to RGB and scale to [0, 1].
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB) / 255.0
def resize_image(img):
    """Resize image and make it fit for network.

    Args:
        img (array): image, H x W x C

    Returns:
        tensor: data ready for network, shape (1, C, H', W')
    """
    h_orig, w_orig = img.shape[0], img.shape[1]

    # Scale so the longer side maps to 384, then round each side up to a
    # multiple of 32 (network stride requirement).
    scale = max(w_orig, h_orig) / 384
    new_h = (np.ceil(h_orig / scale / 32) * 32).astype(int)
    new_w = (np.ceil(w_orig / scale / 32) * 32).astype(int)

    resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_AREA)

    # HWC -> CHW, float tensor, add batch dimension.
    tensor = torch.from_numpy(np.transpose(resized, (2, 0, 1))).contiguous().float()
    return tensor.unsqueeze(0)
def resize_depth(depth, width, height):
    """Resize depth map and bring to CPU (numpy).

    Args:
        depth (tensor): depth, batched (first sample is used)
        width (int): target image width
        height (int): target image height

    Returns:
        array: processed depth of shape (height, width)
    """
    # Take the first sample, drop singleton dims, and move to CPU.
    depth_cpu = torch.squeeze(depth[0, :, :, :]).to("cpu")

    return cv2.resize(
        depth_cpu.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
    )
def write_depth(path, depth, bits=1, absolute_depth=False):
    """Write depth map to pfm and png file.

    Args:
        path (str): filepath without extension
        depth (array): depth
        bits (int): 1 for 8-bit png output, 2 selects the color-converted
            branch below
        absolute_depth (bool): if True, write raw values; otherwise rescale
            to the [0, 2^(8*bits)-1] range
    """
    # Full-precision copy always goes to <path>.pfm.
    write_pfm(path + ".pfm", depth.astype(np.float32))

    if absolute_depth:
        out = depth
    else:
        # Min-max normalize into the integer range for the chosen bit depth;
        # a flat map (max == min) becomes all zeros.
        depth_min = depth.min()
        depth_max = depth.max()
        max_val = (2 ** (8 * bits)) - 1
        if depth_max - depth_min > np.finfo("float").eps:
            out = max_val * (depth - depth_min) / (depth_max - depth_min)
        else:
            out = np.zeros(depth.shape, dtype=depth.dtype)

    if bits == 1:
        cv2.imwrite(path + ".png", out.astype("uint8"), [cv2.IMWRITE_PNG_COMPRESSION, 0])
    elif bits == 2:
        # out = 1 / (out + 1e-3)
        # out[out == 0] = 0
        # vmax = np.percentile(out, 90)
        # normalizer = mpl.colors.Normalize(vmin=0, vmax=90)
        # normalizer = mpl.colors.Normalize(vmin=0, vmax=90)
        # mapper = mpl.cm.ScalarMappable(norm=normalizer, cmap='magma')
        # mapper = mpl.cm.ScalarMappable(cmap='magma')
        # out = (mapper.to_rgba(out)[:, :, :3]*255).astype(np.uint8)
        # NOTE(review): despite bits == 2, the data is cast to uint8 here, and
        # cvtColor(..., COLOR_RGB2BGR) requires a 3-channel input while depth
        # is typically 2-D — this branch looks tied to the commented-out
        # colormap code above; confirm intended behavior.
        cv2.imwrite(path + ".png", cv2.cvtColor(out.astype("uint8"), cv2.COLOR_RGB2BGR), [cv2.IMWRITE_PNG_COMPRESSION, 0])
        # cv2.imwrite(path + ".png", out.astype("uint16"), [cv2.IMWRITE_PNG_COMPRESSION, 0])

    return
def write_segm_img(path, image, labels, palette="detail", alpha=0.5):
    """Blend a segmentation mask over an image and save it as png.

    Args:
        path (str): filepath without extension
        image (array): input image, floats in [0, 1]
        labels (array): labeling of the image
        palette (str): unused here; the ade20k palette is always applied
        alpha (float): blend weight of the mask overlay
    """
    mask = get_mask_pallete(labels, "ade20k")

    base = Image.fromarray(np.uint8(255 * image)).convert("RGBA")
    overlay = mask.convert("RGBA")

    Image.blend(base, overlay, alpha).save(path + ".png")

    return
| 5,880 | 24.458874 | 122 | py |
DPT-VO | DPT-VO-main/dpt/base_model.py | import torch
class BaseModel(torch.nn.Module):
    """Common base providing checkpoint loading for the DPT model family."""

    def load(self, path):
        """Load model from file.

        Args:
            path (str): file path
        """
        checkpoint = torch.load(path, map_location=torch.device("cpu"))

        # Full training checkpoints nest the weights under "model".
        if "optimizer" in checkpoint:
            checkpoint = checkpoint["model"]

        self.load_state_dict(checkpoint)
| 367 | 20.647059 | 71 | py |
DPT-VO | DPT-VO-main/dpt/midas_net.py | """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
This file contains code that is adapted from
https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
"""
import torch
import torch.nn as nn
from .base_model import BaseModel
from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
class MidasNet_large(BaseModel):
    """Network for monocular depth estimation."""

    def __init__(self, path=None, features=256, non_negative=True):
        """Init.

        Args:
            path (str, optional): Path to saved model. Defaults to None.
            features (int, optional): Number of features. Defaults to 256.
            non_negative (bool, optional): Clamp the output at zero with a
                final ReLU. Defaults to True.
        """
        print("Loading weights: ", path)

        super(MidasNet_large, self).__init__()

        use_pretrained = False if path is None else True

        self.pretrained, self.scratch = _make_encoder(
            backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained
        )

        # Decoder fusion blocks, one per encoder stage (registered 4 -> 1 to
        # keep the original parameter ordering).
        self.scratch.refinenet4 = FeatureFusionBlock(features)
        self.scratch.refinenet3 = FeatureFusionBlock(features)
        self.scratch.refinenet2 = FeatureFusionBlock(features)
        self.scratch.refinenet1 = FeatureFusionBlock(features)

        # Output head: upsample and reduce to a single depth channel.
        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
        )

        if path:
            self.load(path)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image)

        Returns:
            tensor: depth
        """
        # Encoder: the four resolution stages of the backbone.
        enc1 = self.pretrained.layer1(x)
        enc2 = self.pretrained.layer2(enc1)
        enc3 = self.pretrained.layer3(enc2)
        enc4 = self.pretrained.layer4(enc3)

        # Project each stage to the shared decoder width.
        rn1 = self.scratch.layer1_rn(enc1)
        rn2 = self.scratch.layer2_rn(enc2)
        rn3 = self.scratch.layer3_rn(enc3)
        rn4 = self.scratch.layer4_rn(enc4)

        # Decoder: fuse top-down, coarsest stage first.
        fused = self.scratch.refinenet4(rn4)
        fused = self.scratch.refinenet3(fused, rn3)
        fused = self.scratch.refinenet2(fused, rn2)
        fused = self.scratch.refinenet1(fused, rn1)

        prediction = self.scratch.output_conv(fused)

        return torch.squeeze(prediction, dim=1)
| 2,738 | 34.115385 | 110 | py |
DPT-VO | DPT-VO-main/dpt/vit.py | import torch
import torch.nn as nn
import timm
import types
import math
import torch.nn.functional as F
# Module-level store filled by the forward hooks created below.
activations = {}


def get_activation(name):
    """Build a forward hook that records the module's output under *name*."""

    def _store(module, inputs, output):
        activations[name] = output

    return _store
# Module-level store filled by the attention hooks created below.
attention = {}


def get_attention(name):
    """Build a forward hook that recomputes and records a block's attention map.

    The hook re-runs the attention module's qkv projection on the block input
    and stores the softmaxed attention weights in ``attention[name]``.
    """

    def hook(module, input, output):
        x = input[0]
        B, N, C = x.shape
        # Recompute q/k/v from the hooked module's own projection:
        # (B, N, 3*C) -> (B, N, 3, heads, C/heads) -> (3, B, heads, N, C/heads)
        qkv = (
            module.qkv(x)
            .reshape(B, N, 3, module.num_heads, C // module.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = (
            qkv[0],
            qkv[1],
            qkv[2],
        )  # make torchscript happy (cannot use tensor as tuple)

        # Scaled dot-product attention logits, then softmax over the keys.
        attn = (q @ k.transpose(-2, -1)) * module.scale

        attn = attn.softmax(dim=-1)  # [:,:,1,1:]

        attention[name] = attn

    return hook
def get_mean_attention_map(attn, token, shape):
    """Average an attention map over heads and upsample it to image size.

    Args:
        attn (tensor): attention weights, shape (1, heads, tokens, tokens)
        token (int): index of the query token to visualize
        shape (tuple): target (batch, channels, height, width); height and
            width are assumed to be multiples of the 16-pixel patch size

    Returns:
        tensor: (height, width) head-averaged attention map
    """
    patches_h, patches_w = shape[2] // 16, shape[3] // 16

    # Drop the readout key (index 0) and fold the key axis back to a 2-D grid.
    per_head = attn[:, :, token, 1:]
    per_head = per_head.unflatten(2, torch.Size([patches_h, patches_w])).float()

    upsampled = torch.nn.functional.interpolate(
        per_head, size=shape[2:], mode="bicubic", align_corners=False
    ).squeeze(0)

    return torch.mean(upsampled, 0)
class Slice(nn.Module):
    """Drop the leading readout token(s), keeping only patch tokens."""

    def __init__(self, start_index=1):
        super().__init__()
        self.start_index = start_index

    def forward(self, x):
        # x: (batch, tokens, dim) -> (batch, tokens - start_index, dim)
        return x[:, self.start_index:]
class AddReadout(nn.Module):
    """Fuse the readout token(s) into the patch tokens by addition."""

    def __init__(self, start_index=1):
        super().__init__()
        self.start_index = start_index

    def forward(self, x):
        # With two leading tokens (cls + dist) average them, else take cls.
        if self.start_index == 2:
            readout = (x[:, 0] + x[:, 1]) / 2
        else:
            readout = x[:, 0]
        return x[:, self.start_index:] + readout.unsqueeze(1)
class ProjectReadout(nn.Module):
    """Fuse the readout token by concatenation and a learned projection."""

    def __init__(self, in_features, start_index=1):
        super().__init__()
        self.start_index = start_index

        # Maps [patch_token ; readout] (2*dim) back down to dim.
        self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())

    def forward(self, x):
        # Broadcast the readout token alongside every patch token.
        readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index:])
        stacked = torch.cat((x[:, self.start_index:], readout), -1)

        return self.project(stacked)
class Transpose(nn.Module):
    """Module wrapper around ``Tensor.transpose`` for use inside Sequential."""

    def __init__(self, dim0, dim1):
        super().__init__()
        self.dim0 = dim0
        self.dim1 = dim1

    def forward(self, x):
        return x.transpose(self.dim0, self.dim1)
def forward_vit(pretrained, x):
    """Run the hooked ViT backbone and post-process its four tapped features.

    Returns the four intermediate feature maps (captured by the forward hooks
    registered in the backbone builders) after their readout/reshape
    post-processing, ready for the DPT decoder.
    """
    b, c, h, w = x.shape

    # Full forward pass; the hooks populate pretrained.activations["1".."4"].
    glob = pretrained.model.forward_flex(x)

    layer_1 = pretrained.activations["1"]
    layer_2 = pretrained.activations["2"]
    layer_3 = pretrained.activations["3"]
    layer_4 = pretrained.activations["4"]

    # First two post-process stages: readout handling + transpose.
    layer_1 = pretrained.act_postprocess1[0:2](layer_1)
    layer_2 = pretrained.act_postprocess2[0:2](layer_2)
    layer_3 = pretrained.act_postprocess3[0:2](layer_3)
    layer_4 = pretrained.act_postprocess4[0:2](layer_4)

    # Unflatten the token axis back to a 2-D patch grid sized for this input
    # (rebuilt per call because h/w may differ from the training resolution).
    unflatten = nn.Sequential(
        nn.Unflatten(
            2,
            torch.Size(
                [
                    h // pretrained.model.patch_size[1],
                    w // pretrained.model.patch_size[0],
                ]
            ),
        )
    )

    # Hybrid backbones tap CNN stages that are already 4-D; only token-shaped
    # (3-D) activations need unflattening.
    if layer_1.ndim == 3:
        layer_1 = unflatten(layer_1)
    if layer_2.ndim == 3:
        layer_2 = unflatten(layer_2)
    if layer_3.ndim == 3:
        layer_3 = unflatten(layer_3)
    if layer_4.ndim == 3:
        layer_4 = unflatten(layer_4)

    # Remaining stages: channel projection and up/down-sampling convolutions
    # (index 3 onward skips the fixed-size Unflatten baked into the builder).
    layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
    layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
    layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
    layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)

    return layer_1, layer_2, layer_3, layer_4
def _resize_pos_embed(self, posemb, gs_h, gs_w):
posemb_tok, posemb_grid = (
posemb[:, : self.start_index],
posemb[0, self.start_index :],
)
gs_old = int(math.sqrt(len(posemb_grid)))
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
def forward_flex(self, x):
    """Resolution-flexible ViT forward pass (injected as a backbone method).

    Unlike the stock timm forward, the position embedding is resized to the
    actual input resolution, so the model accepts images whose size differs
    from the training resolution (as long as it is patch-aligned).
    """
    b, c, h, w = x.shape

    # Resample the position embedding to this input's patch grid.
    pos_embed = self._resize_pos_embed(
        self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
    )

    B = x.shape[0]

    # Hybrid models run a CNN stem before the patch projection.
    if hasattr(self.patch_embed, "backbone"):
        x = self.patch_embed.backbone(x)
        if isinstance(x, (list, tuple)):
            x = x[-1]  # last feature if backbone outputs list/tuple of features

    # Patchify: (B, C, H, W) -> (B, num_patches, embed_dim).
    x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)

    # Prepend the class token (and the distillation token for DeiT models).
    if getattr(self, "dist_token", None) is not None:
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        dist_token = self.dist_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, dist_token, x), dim=1)
    else:
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)

    x = x + pos_embed
    x = self.pos_drop(x)

    # Transformer encoder blocks (forward hooks tap selected ones).
    for blk in self.blocks:
        x = blk(x)

    x = self.norm(x)

    return x
def get_readout_oper(vit_features, features, use_readout, start_index=1):
    """Build one readout-token handling module per tapped feature stage.

    Args:
        vit_features (int): transformer embedding width (for 'project').
        features (list): tapped stages; only its length matters here.
        use_readout (str): 'ignore', 'add', or 'project'.
        start_index (int): number of leading special tokens.

    Returns:
        list: one readout module per entry in ``features``.
    """
    if use_readout == "ignore":
        return [Slice(start_index)] * len(features)
    if use_readout == "add":
        return [AddReadout(start_index)] * len(features)
    if use_readout == "project":
        # Each stage gets its own projection (learned weights are not shared).
        return [ProjectReadout(vit_features, start_index) for _ in features]
    assert (
        False
    ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
def _make_vit_b16_backbone(
    model,
    features=[96, 192, 384, 768],
    size=[384, 384],
    hooks=[2, 5, 8, 11],
    vit_features=768,
    use_readout="ignore",
    start_index=1,
    enable_attention_hooks=False,
):
    """Wrap a timm ViT/16 model as a DPT encoder with tapped features.

    Registers forward hooks on four transformer blocks, attaches one
    post-processing pipeline per tap (readout handling, token-grid reshape,
    channel projection, resolution adjustment), and injects the
    resolution-flexible forward helpers.

    NOTE(review): the list defaults are Python mutable defaults shared across
    calls; callers below always pass fresh lists, so this is benign as used —
    just do not mutate them.
    """
    pretrained = nn.Module()

    pretrained.model = model
    # Capture block outputs into the module-level `activations` dict.
    pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
    pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))

    pretrained.activations = activations

    if enable_attention_hooks:
        # Also record attention maps of the same blocks (for visualization).
        pretrained.model.blocks[hooks[0]].attn.register_forward_hook(
            get_attention("attn_1")
        )
        pretrained.model.blocks[hooks[1]].attn.register_forward_hook(
            get_attention("attn_2")
        )
        pretrained.model.blocks[hooks[2]].attn.register_forward_hook(
            get_attention("attn_3")
        )
        pretrained.model.blocks[hooks[3]].attn.register_forward_hook(
            get_attention("attn_4")
        )
        pretrained.attention = attention

    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)

    # 32, 48, 136, 384
    # Tap 1: upsample 4x (transposed conv) — highest decoder resolution.
    pretrained.act_postprocess1 = nn.Sequential(
        readout_oper[0],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[0],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[0],
            out_channels=features[0],
            kernel_size=4,
            stride=4,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )

    # Tap 2: upsample 2x.
    pretrained.act_postprocess2 = nn.Sequential(
        readout_oper[1],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[1],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[1],
            out_channels=features[1],
            kernel_size=2,
            stride=2,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )

    # Tap 3: keep the native patch-grid resolution.
    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )

    # Tap 4: downsample 2x (strided conv) — coarsest decoder resolution.
    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )

    pretrained.model.start_index = start_index
    pretrained.model.patch_size = [16, 16]

    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )

    return pretrained
def _make_vit_b_rn50_backbone(
    model,
    features=[256, 512, 768, 768],
    size=[384, 384],
    hooks=[0, 1, 8, 11],
    vit_features=768,
    use_vit_only=False,
    use_readout="ignore",
    start_index=1,
    enable_attention_hooks=False,
):
    """Wrap a timm ViT-Hybrid (ResNet50 stem + ViT) model as a DPT encoder.

    By default the first two taps come from the CNN stem stages (already 4-D
    feature maps) and the last two from transformer blocks; with
    ``use_vit_only`` all four taps come from transformer blocks.

    NOTE(review): the list defaults are Python mutable defaults shared across
    calls; callers below always pass fresh lists, so this is benign as used —
    just do not mutate them.
    """
    pretrained = nn.Module()

    pretrained.model = model

    if use_vit_only == True:
        pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
        pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
    else:
        # Tap the two ResNet stem stages instead of early transformer blocks.
        pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
            get_activation("1")
        )
        pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
            get_activation("2")
        )

    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))

    if enable_attention_hooks:
        # NOTE(review): attention hooks use fixed block indices 2/5/8/11
        # rather than `hooks` — confirm this asymmetry is intentional.
        pretrained.model.blocks[2].attn.register_forward_hook(get_attention("attn_1"))
        pretrained.model.blocks[5].attn.register_forward_hook(get_attention("attn_2"))
        pretrained.model.blocks[8].attn.register_forward_hook(get_attention("attn_3"))
        pretrained.model.blocks[11].attn.register_forward_hook(get_attention("attn_4"))
        pretrained.attention = attention

    pretrained.activations = activations

    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)

    if use_vit_only == True:
        # Token-shaped taps need the full readout/reshape/upsample pipeline.
        pretrained.act_postprocess1 = nn.Sequential(
            readout_oper[0],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[0],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            nn.ConvTranspose2d(
                in_channels=features[0],
                out_channels=features[0],
                kernel_size=4,
                stride=4,
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            ),
        )

        pretrained.act_postprocess2 = nn.Sequential(
            readout_oper[1],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[1],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            nn.ConvTranspose2d(
                in_channels=features[1],
                out_channels=features[1],
                kernel_size=2,
                stride=2,
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            ),
        )
    else:
        # CNN stem taps are already spatial feature maps — pass them through.
        pretrained.act_postprocess1 = nn.Sequential(
            nn.Identity(), nn.Identity(), nn.Identity()
        )
        pretrained.act_postprocess2 = nn.Sequential(
            nn.Identity(), nn.Identity(), nn.Identity()
        )

    # Tap 3: keep the native patch-grid resolution.
    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )

    # Tap 4: downsample 2x (strided conv) — coarsest decoder resolution.
    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )

    pretrained.model.start_index = start_index
    pretrained.model.patch_size = [16, 16]

    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)

    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )

    return pretrained
def _make_pretrained_vitb_rn50_384(
    pretrained,
    use_readout="ignore",
    hooks=None,
    use_vit_only=False,
    enable_attention_hooks=False,
):
    """Create the DPT-Hybrid backbone (ResNet50 stem + ViT-Base, 384 px)."""
    model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)

    if hooks is None:
        hooks = [0, 1, 8, 11]

    return _make_vit_b_rn50_backbone(
        model,
        features=[256, 512, 768, 768],
        size=[384, 384],
        hooks=hooks,
        use_vit_only=use_vit_only,
        use_readout=use_readout,
        enable_attention_hooks=enable_attention_hooks,
    )
def _make_pretrained_vitl16_384(
    pretrained, use_readout="ignore", hooks=None, enable_attention_hooks=False
):
    """Create the DPT-Large backbone (ViT-Large/16, 384 px)."""
    model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)

    if hooks is None:
        hooks = [5, 11, 17, 23]

    return _make_vit_b16_backbone(
        model,
        features=[256, 512, 1024, 1024],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
        enable_attention_hooks=enable_attention_hooks,
    )
def _make_pretrained_vitb16_384(
    pretrained, use_readout="ignore", hooks=None, enable_attention_hooks=False
):
    """Create a ViT-Base/16 (384 px) backbone for DPT."""
    model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)

    if hooks is None:
        hooks = [2, 5, 8, 11]

    return _make_vit_b16_backbone(
        model,
        features=[96, 192, 384, 768],
        hooks=hooks,
        use_readout=use_readout,
        enable_attention_hooks=enable_attention_hooks,
    )
def _make_pretrained_deitb16_384(
    pretrained, use_readout="ignore", hooks=None, enable_attention_hooks=False
):
    """Create a DeiT-Base/16 (384 px) backbone for DPT."""
    model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)

    if hooks is None:
        hooks = [2, 5, 8, 11]

    return _make_vit_b16_backbone(
        model,
        features=[96, 192, 384, 768],
        hooks=hooks,
        use_readout=use_readout,
        enable_attention_hooks=enable_attention_hooks,
    )
def _make_pretrained_deitb16_distil_384(
    pretrained, use_readout="ignore", hooks=None, enable_attention_hooks=False
):
    """Create a distilled DeiT-Base/16 (384 px) backbone for DPT.

    Distilled DeiT carries two special tokens (cls + dist), hence
    ``start_index=2``.
    """
    model = timm.create_model(
        "vit_deit_base_distilled_patch16_384", pretrained=pretrained
    )

    if hooks is None:
        hooks = [2, 5, 8, 11]

    return _make_vit_b16_backbone(
        model,
        features=[96, 192, 384, 768],
        hooks=hooks,
        use_readout=use_readout,
        start_index=2,
        enable_attention_hooks=enable_attention_hooks,
    )
| 17,106 | 28.64818 | 96 | py |
DPT-VO | DPT-VO-main/dpt/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_model import BaseModel
from .blocks import (
FeatureFusionBlock,
FeatureFusionBlock_custom,
Interpolate,
_make_encoder,
forward_vit,
)
def _make_fusion_block(features, use_bn):
    """Create a FeatureFusionBlock_custom with the decoder's standard settings."""
    fusion_kwargs = dict(
        deconv=False,
        bn=use_bn,
        expand=False,
        align_corners=True,
    )
    return FeatureFusionBlock_custom(features, nn.ReLU(False), **fusion_kwargs)
class DPT(BaseModel):
    """Dense Prediction Transformer.

    A ViT backbone whose intermediate tokens are reassembled into image-like
    feature maps (``scratch.layerN_rn``) and fused top-down by RefineNet-style
    blocks; ``head`` produces the task-specific output.
    """

    def __init__(
        self,
        head,
        features=256,
        backbone="vitb_rn50_384",
        readout="project",
        channels_last=False,
        use_bn=False,
        enable_attention_hooks=False,
    ):
        """Args:
            head (nn.Module): task-specific output head
            features (int): decoder channel width
            backbone (str): one of the keys of the ``hooks`` table below
            readout (str): ViT readout-token handling mode
            channels_last (bool): run forward in channels_last memory format
            use_bn (bool): use batch norm inside the fusion blocks
            enable_attention_hooks (bool): record attention maps for inspection
        """
        super(DPT, self).__init__()
        self.channels_last = channels_last
        # Transformer blocks tapped for features, per backbone.
        hooks = {
            "vitb_rn50_384": [0, 1, 8, 11],
            "vitb16_384": [2, 5, 8, 11],
            "vitl16_384": [5, 11, 17, 23],
        }
        # Instantiate backbone and reassemble blocks.
        self.pretrained, self.scratch = _make_encoder(
            backbone,
            features,
            False,  # Set to true if you want to train from scratch, uses ImageNet weights
            groups=1,
            expand=False,
            exportable=False,
            hooks=hooks[backbone],
            use_readout=readout,
            enable_attention_hooks=enable_attention_hooks,
        )
        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
        self.scratch.output_conv = head

    def forward(self, x):
        """Run backbone, fuse the four feature stages top-down, apply the head."""
        if self.channels_last:
            # BUG FIX: Tensor.contiguous() returns a NEW tensor; the original
            # discarded the result, so channels_last never took effect.
            x = x.contiguous(memory_format=torch.channels_last)
        layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)
        # Decoder: fuse deepest-first, upsampling 2x at each stage.
        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
        return self.scratch.output_conv(path_1)
class DPTDepthModel(DPT):
    """DPT with a monocular-depth regression head.

    The network predicts inverse depth; with ``invert=True`` the forward
    pass converts it to depth via ``1 / (scale * inv_depth + shift)``.
    """

    def __init__(
        self, path=None, non_negative=True, scale=1.0, shift=0.0, invert=False, **kwargs
    ):
        features = kwargs.get("features", 256)
        self.scale = scale
        self.shift = shift
        self.invert = invert
        # Regression head: 2x upsample, reduce channels down to a single map.
        head = nn.Sequential(
            nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
            nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )
        super().__init__(head, **kwargs)
        if path is not None:
            self.load(path)

    def forward(self, x):
        """Return depth (``invert=True``) or inverse depth for batch ``x``."""
        inv_depth = super().forward(x).squeeze(dim=1)
        if not self.invert:
            return inv_depth
        depth = self.scale * inv_depth + self.shift
        # Clamp tiny values to avoid dividing by (near) zero.
        depth[depth < 1e-8] = 1e-8
        return 1.0 / depth
class DPTSegmentationModel(DPT):
    """DPT with a semantic-segmentation head plus an auxiliary classifier."""

    def __init__(self, num_classes, path=None, **kwargs):
        features = kwargs.get("features", 256)
        # Segmentation always enables batch norm in the fusion blocks.
        kwargs["use_bn"] = True

        def _classifier():
            # Shared conv/bn/relu/dropout/1x1 classifier stack.
            return nn.Sequential(
                nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(features),
                nn.ReLU(True),
                nn.Dropout(0.1, False),
                nn.Conv2d(features, num_classes, kernel_size=1),
            )

        # Main head additionally upsamples the logits by 2x.
        head = _classifier()
        head.add_module(
            "5", Interpolate(scale_factor=2, mode="bilinear", align_corners=True)
        )
        super().__init__(head, **kwargs)
        self.auxlayer = _classifier()
        if path is not None:
            self.load(path)
| 4,563 | 28.636364 | 90 | py |
DPT-VO | DPT-VO-main/dpt/blocks.py | import torch
import torch.nn as nn
from .vit import (
_make_pretrained_vitb_rn50_384,
_make_pretrained_vitl16_384,
_make_pretrained_vitb16_384,
forward_vit,
)
def _make_encoder(
    backbone,
    features,
    use_pretrained,
    groups=1,
    expand=False,
    exportable=True,
    hooks=None,
    use_vit_only=False,
    use_readout="ignore",
    enable_attention_hooks=False,
):
    """Create the (pretrained backbone, scratch reassemble-convs) pair.

    Args:
        backbone (str): "vitl16_384", "vitb_rn50_384", "vitb16_384" or
            "resnext101_wsl"
        features (int): decoder channel width handed to ``_make_scratch``
        use_pretrained (bool): load pretrained backbone weights
        groups, expand: forwarded to ``_make_scratch``
        exportable: accepted for API compatibility; unused in this function
        hooks, use_vit_only, use_readout, enable_attention_hooks: forwarded
            to the ViT backbone builders

    Returns:
        (nn.Module, nn.Module): the backbone and the scratch module.

    Raises:
        ValueError: for an unsupported ``backbone`` name.
    """
    if backbone == "vitl16_384":
        pretrained = _make_pretrained_vitl16_384(
            use_pretrained,
            hooks=hooks,
            use_readout=use_readout,
            enable_attention_hooks=enable_attention_hooks,
        )
        scratch = _make_scratch(
            [256, 512, 1024, 1024], features, groups=groups, expand=expand
        )  # ViT-L/16 - 85.0% Top1 (backbone)
    elif backbone == "vitb_rn50_384":
        pretrained = _make_pretrained_vitb_rn50_384(
            use_pretrained,
            hooks=hooks,
            use_vit_only=use_vit_only,
            use_readout=use_readout,
            enable_attention_hooks=enable_attention_hooks,
        )
        scratch = _make_scratch(
            [256, 512, 768, 768], features, groups=groups, expand=expand
        )  # ViT-H/16 - 85.0% Top1 (backbone)
    elif backbone == "vitb16_384":
        pretrained = _make_pretrained_vitb16_384(
            use_pretrained,
            hooks=hooks,
            use_readout=use_readout,
            enable_attention_hooks=enable_attention_hooks,
        )
        scratch = _make_scratch(
            [96, 192, 384, 768], features, groups=groups, expand=expand
        )  # ViT-B/16 - 84.6% Top1 (backbone)
    elif backbone == "resnext101_wsl":
        pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
        scratch = _make_scratch(
            [256, 512, 1024, 2048], features, groups=groups, expand=expand
        )  # efficientnet_lite3
    else:
        # BUG FIX: the original `print(...); assert False` is stripped under
        # `python -O`, silently falling through and returning None.
        raise ValueError(f"Backbone '{backbone}' not implemented")
    return pretrained, scratch
def _make_scratch(in_shape, out_shape, groups=1, expand=False):
scratch = nn.Module()
out_shape1 = out_shape
out_shape2 = out_shape
out_shape3 = out_shape
out_shape4 = out_shape
if expand == True:
out_shape1 = out_shape
out_shape2 = out_shape * 2
out_shape3 = out_shape * 4
out_shape4 = out_shape * 8
scratch.layer1_rn = nn.Conv2d(
in_shape[0],
out_shape1,
kernel_size=3,
stride=1,
padding=1,
bias=False,
groups=groups,
)
scratch.layer2_rn = nn.Conv2d(
in_shape[1],
out_shape2,
kernel_size=3,
stride=1,
padding=1,
bias=False,
groups=groups,
)
scratch.layer3_rn = nn.Conv2d(
in_shape[2],
out_shape3,
kernel_size=3,
stride=1,
padding=1,
bias=False,
groups=groups,
)
scratch.layer4_rn = nn.Conv2d(
in_shape[3],
out_shape4,
kernel_size=3,
stride=1,
padding=1,
bias=False,
groups=groups,
)
return scratch
def _make_resnet_backbone(resnet):
pretrained = nn.Module()
pretrained.layer1 = nn.Sequential(
resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
)
pretrained.layer2 = resnet.layer2
pretrained.layer3 = resnet.layer3
pretrained.layer4 = resnet.layer4
return pretrained
def _make_pretrained_resnext101_wsl(use_pretrained):
    # Loads the WSL ResNeXt-101 32x8d via torch.hub (may download weights) and
    # regroups it into DPT's four-stage backbone layout.
    # NOTE(review): `use_pretrained` is never read here — torch.hub always
    # loads the released weights; confirm this is intended.
    resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
    return _make_resnet_backbone(resnet)
class Interpolate(nn.Module):
    """Wrap ``nn.functional.interpolate`` as a module with fixed settings."""

    def __init__(self, scale_factor, mode, align_corners=False):
        """Store the resampling configuration.

        Args:
            scale_factor (float): spatial scaling factor
            mode (str): interpolation mode (e.g. "bilinear")
            align_corners (bool): forwarded to ``interpolate``
        """
        super(Interpolate, self).__init__()
        self.interp = nn.functional.interpolate
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        """Resample ``x`` with the stored settings.

        Args:
            x (tensor): input

        Returns:
            tensor: the resampled data
        """
        resized = self.interp(
            x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners,
        )
        return resized
class ResidualConvUnit(nn.Module):
    """Pre-activation residual block: x + conv2(relu(conv1(relu(x))))."""

    def __init__(self, features):
        """Create the two 3x3 convs at a fixed channel width.

        Args:
            features (int): number of input/output channels
        """
        super().__init__()
        conv_kwargs = dict(kernel_size=3, stride=1, padding=1, bias=True)
        self.conv1 = nn.Conv2d(features, features, **conv_kwargs)
        self.conv2 = nn.Conv2d(features, features, **conv_kwargs)
        # In-place ReLU, matching the original implementation.
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Apply the residual transformation to ``x`` and return the sum."""
        residual = self.conv1(self.relu(x))
        residual = self.conv2(self.relu(residual))
        return residual + x
class FeatureFusionBlock(nn.Module):
    """Fuse one or two feature maps and upsample the result by 2x."""

    def __init__(self, features):
        """Args:
            features (int): channel count of the fused features
        """
        super(FeatureFusionBlock, self).__init__()
        self.resConfUnit1 = ResidualConvUnit(features)
        self.resConfUnit2 = ResidualConvUnit(features)

    def forward(self, *xs):
        """Fuse ``xs`` and return the 2x bilinear-upsampled result."""
        fused = xs[0]
        if len(xs) == 2:
            # In-place accumulate, matching the original (mutates xs[0]).
            fused += self.resConfUnit1(xs[1])
        fused = self.resConfUnit2(fused)
        return nn.functional.interpolate(
            fused, scale_factor=2, mode="bilinear", align_corners=True
        )
class ResidualConvUnit_custom(nn.Module):
    """Configurable residual block with optional batch norm and a
    quantization-friendly skip connection."""

    def __init__(self, features, activation, bn):
        """Build the block.

        Args:
            features (int): number of input/output channels
            activation (nn.Module): activation applied before each conv
            bn (bool): add BatchNorm2d after each conv (convs then drop bias)
        """
        super().__init__()
        self.bn = bn
        self.groups = 1

        def make_conv():
            return nn.Conv2d(
                features,
                features,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=not bn,
                groups=self.groups,
            )

        self.conv1 = make_conv()
        self.conv2 = make_conv()
        if bn:
            self.bn1 = nn.BatchNorm2d(features)
            self.bn2 = nn.BatchNorm2d(features)
        self.activation = activation
        # FloatFunctional keeps the residual add quantizable.
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x):
        """Return activation->conv(->bn) applied twice, plus the input."""
        out = self.conv1(self.activation(x))
        if self.bn:
            out = self.bn1(out)
        out = self.conv2(self.activation(out))
        if self.bn:
            out = self.bn2(out)
        if self.groups > 1:
            # Dead branch while groups == 1; kept for parity with upstream.
            out = self.conv_merge(out)
        return self.skip_add.add(out, x)
class FeatureFusionBlock_custom(nn.Module):
    """Feature fusion block with configurable norm/expansion and a
    quantization-friendly skip add."""

    def __init__(
        self,
        features,
        activation,
        deconv=False,
        bn=False,
        expand=False,
        align_corners=True,
    ):
        """Build the fusion block.

        Args:
            features (int): number of input channels
            activation (nn.Module): activation for the residual units
            deconv (bool): stored only; no deconv path exists in this class
            bn (bool): batch norm inside the residual units
            expand (bool): halve the channel count in the output conv
            align_corners (bool): forwarded to the bilinear upsample
        """
        super(FeatureFusionBlock_custom, self).__init__()
        self.deconv = deconv
        self.align_corners = align_corners
        self.groups = 1
        self.expand = expand
        out_features = features // 2 if expand else features
        self.out_conv = nn.Conv2d(
            features,
            out_features,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True,
            groups=1,
        )
        self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
        self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, *xs):
        """Fuse one or two feature maps, upsample 2x, then project channels."""
        fused = xs[0]
        if len(xs) == 2:
            fused = self.skip_add.add(fused, self.resConfUnit1(xs[1]))
        fused = self.resConfUnit2(fused)
        fused = nn.functional.interpolate(
            fused, scale_factor=2, mode="bilinear", align_corners=self.align_corners
        )
        return self.out_conv(fused)
| 9,090 | 22.674479 | 85 | py |
GraSP | GraSP-master/main_prune_imagenet.py | import argparse
import os
import torch
import torch.nn as nn
from models.model_base import ModelBase
from tensorboardX import SummaryWriter
from models.base.init_utils import weights_init
from utils.common_utils import (get_logger, makedirs, process_config, str_to_list)
from pruner.GraSP_ImageNet import GraSP
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.utils.data
def init_config():
    """Read --config/--run from argv and build the experiment config."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True)
    parser.add_argument('--run', type=str, default='')
    args = parser.parse_args()
    runs = None
    if args.run:
        runs = args.run
    return process_config(args.config, runs)
def init_logger(config):
    """Create summary/checkpoint dirs and return a (logger, writer) pair."""
    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)
    # Archive the key source files alongside the run for reproducibility.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    # NOTE(review): the model path hard-codes 'vgg' (the non-ImageNet script
    # uses config.network) — confirm this is intended.
    path_model = os.path.join(base_dir, 'models/base/%s.py' % 'vgg')
    path_main = os.path.join(base_dir, 'main_prune_imagenet.py')
    path_pruner = os.path.join(base_dir, 'pruner/%s.py' % config.pruner_file)
    logger = get_logger('log', logpath=config.summary_dir + '/',
                        filepath=path_model, package_files=[path_main, path_pruner])
    logger.info(dict(config))
    writer = SummaryWriter(config.summary_dir)
    return logger, writer
def print_mask_information(mb, logger):
    """Log the overall and per-layer percentages of weights kept by the mask."""
    ratios = mb.get_ratio_at_each_layer()
    logger.info('** Mask information of %s. Overall Remaining: %.2f%%' % (mb.get_name(), ratios['ratio']))
    count = 0
    # 'ratio' is the aggregate entry; every other key is a layer name.
    for layer_name, remaining in ratios.items():
        if layer_name == 'ratio':
            continue
        logger.info('    (%d) %s: Remaining: %.2f%%' % (count, layer_name, remaining))
        count += 1
def get_exception_layers(net, exception):
    """Return the prunable modules (Linear/Conv2d) of ``net`` whose
    prunable-layer index appears in ``exception``."""
    picked = []
    prunable_idx = 0
    for module in net.modules():
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            if prunable_idx in exception:
                picked.append(module)
            prunable_idx += 1
    return tuple(picked)
def main(config):
    """Prune a torchvision model on ImageNet with GraSP and save the masked net."""
    # Dataset name -> number of classes.
    classes = {
        'cifar10': 10,
        'cifar100': 100,
        'mnist': 10,
        'tiny_imagenet': 200,
        'imagenet': 1000
    }
    logger, writer = init_logger(config)
    # ---- build model ----
    model = models.__dict__[config.network]()
    mb = ModelBase(config.network, config.depth, config.dataset, model)
    mb.cuda()
    # ---- fetch configs ----
    ckpt_path = config.checkpoint_dir
    num_iterations = config.iterations
    target_ratio = config.target_ratio
    normalize = config.normalize
    # ---- layers excluded from pruning ----
    exception = get_exception_layers(mb.model, str_to_list(config.exception, ',', int))
    logger.info('Exception: ')
    for idx, m in enumerate(exception):
        logger.info('    (%d) %s' % (idx, m))
    # ---- training schemes ----
    ratio = 1 - (1 - target_ratio) ** (1.0 / num_iterations)
    learning_rates = str_to_list(config.learning_rate, ',', float)
    weight_decays = str_to_list(config.weight_decay, ',', float)
    training_epochs = str_to_list(config.epoch, ',', int)
    logger.info('Normalize: %s, Total iteration: %d, Target ratio: %.2f, Iter ratio %.4f.' %
                (normalize, num_iterations, target_ratio, ratio))
    logger.info('Basic Settings: ')
    for idx in range(len(learning_rates)):
        logger.info('    %d: LR: %.5f, WD: %.5f, Epochs: %d' %
                    (idx, learning_rates[idx], weight_decays[idx], training_epochs[idx]))
    # ---- dataloader (note: rebinds `normalize` from config flag to transform) ----
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        config.traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    trainloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=250, shuffle=True,
        num_workers=16, pin_memory=True, sampler=None)
    # ---- pruning: a single GraSP pass at initialization ----
    for iteration in range(num_iterations):
        logger.info('** Target ratio: %.4f, iter ratio: %.4f, iteration: %d/%d.' %
                    (target_ratio, ratio, iteration, num_iterations))
        assert num_iterations == 1
        print("=> Applying weight initialization.")
        mb.model.apply(weights_init)
        masks = GraSP(mb.model, ratio, trainloader, 'cuda',
                      num_classes=classes[config.dataset],
                      samples_per_class=config.samples_per_class,
                      num_iters=config.get('num_iters', 1))
        # ---- register mask ----
        mb.masks = masks
        # ---- save pruned network ----
        logger.info('Saving..')
        state = {
            'net': mb.model,
            'acc': -1,
            'epoch': -1,
            'args': config,
            'mask': mb.masks,
            'ratio': mb.get_ratio_at_each_layer()
        }
        fname = 'prune_%s_%s%s_r%s_it%d.pth.tar' % (
            config.dataset, config.network, config.depth, config.target_ratio, iteration)
        torch.save(state, os.path.join(ckpt_path, fname))
        # ---- print pruning details ----
        logger.info('**[%d] Mask and training setting: ' % iteration)
        print_mask_information(mb, logger)
if __name__ == '__main__':
    main(init_config())
| 6,550 | 36.867052 | 106 | py |
GraSP | GraSP-master/main_prune_non_imagenet.py | import argparse
import json
import math
import os
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from models.model_base import ModelBase
from tensorboardX import SummaryWriter
from tqdm import tqdm
from models.base.init_utils import weights_init
from utils.common_utils import (get_logger, makedirs, process_config, PresetLRScheduler, str_to_list)
from utils.data_utils import get_dataloader
from utils.network_utils import get_network
from pruner.GraSP import GraSP
def get_args(argv=None):
    """Parse command-line options for the non-ImageNet pruning script.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--target_ratio', type=float)
    parser.add_argument('--iteration', type=int)
    parser.add_argument('--reset_to', type=int)
    parser.add_argument('--network', type=str, default='vgg')
    parser.add_argument('--dataset', type=str, default='cifar10')
    parser.add_argument('--depth', type=int, default=19)
    parser.add_argument('--pretrain_model', type=str)
    parser.add_argument('--log_dir', type=str, default='runs/')
    # BUG FIX: the original built the parser but never parsed nor returned
    # anything; parse and hand back the namespace.
    return parser.parse_args(argv)
def init_config():
    """Parse CLI args (--config, --run) and return the processed run config."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True)
    parser.add_argument('--run', type=str, default='')
    args = parser.parse_args()
    run_name = args.run if args.run else None
    return process_config(args.config, run_name)
def init_logger(config):
    """Create output dirs and return (logger, SummaryWriter) for this run."""
    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)
    # Archive the key source files alongside the run for reproducibility.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    path_model = os.path.join(base_dir, 'models/base/%s.py' % config.network.lower())
    path_main = os.path.join(base_dir, 'main_prune_non_imagenet.py')
    path_pruner = os.path.join(base_dir, 'pruner/%s.py' % config.pruner_file)
    logger = get_logger('log', logpath=config.summary_dir + '/',
                        filepath=path_model, package_files=[path_main, path_pruner])
    logger.info(dict(config))
    writer = SummaryWriter(config.summary_dir)
    return logger, writer
def print_mask_information(mb, logger):
    """Log the overall and per-layer remaining-weight percentages."""
    ratios = mb.get_ratio_at_each_layer()
    logger.info('** Mask information of %s. Overall Remaining: %.2f%%' % (mb.get_name(), ratios['ratio']))
    # Skip the aggregate 'ratio' entry; the rest are per-layer percentages.
    per_layer = [(k, v) for k, v in ratios.items() if k != 'ratio']
    for count, (name, remaining) in enumerate(per_layer):
        logger.info('    (%d) %s: Remaining: %.2f%%' % (count, name, remaining))
def save_state(net, acc, epoch, loss, config, ckpt_path, is_best=False):
    """Serialize the network plus training metadata under ``ckpt_path``.

    Regular checkpoints are tagged with the epoch number; the best checkpoint
    (``is_best=True``) overwrites a single '..._best' file.
    """
    print('Saving..')
    state = dict(net=net, acc=acc, epoch=epoch, loss=loss, args=config)
    if is_best:
        fname = '%s/finetuned_%s_%s%s_best.t7' % (ckpt_path,
                                                  config.dataset,
                                                  config.network,
                                                  config.depth)
    else:
        fname = '%s/pruned_%s_%s%s_%d.t7' % (ckpt_path,
                                             config.dataset,
                                             config.network,
                                             config.depth,
                                             epoch)
    torch.save(state, fname)
def train(net, loader, optimizer, criterion, lr_scheduler, epoch, writer, iteration):
    """Run one training epoch and log LR/loss/accuracy to tensorboard."""
    print('\nEpoch: %d' % epoch)
    net.train()
    running_loss = 0
    n_correct = 0
    n_seen = 0
    # The scheduler is callable: it sets the optimizer LR for this epoch.
    lr_scheduler(optimizer, epoch)
    bar_fmt = '[LR=%s] Loss: %.3f | Acc: %.3f%% (%d/%d)'
    desc = bar_fmt % (lr_scheduler.get_lr(optimizer), 0, 0, n_correct, n_seen)
    writer.add_scalar('iter_%d/train/lr' % iteration, lr_scheduler.get_lr(optimizer), epoch)
    prog_bar = tqdm(enumerate(loader), total=len(loader), desc=desc, leave=True)
    for step, (inputs, targets) in prog_bar:
        inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        # Running statistics for the progress bar.
        running_loss += loss.item()
        _, predicted = outputs.max(1)
        n_seen += targets.size(0)
        n_correct += predicted.eq(targets).sum().item()
        prog_bar.set_description(
            bar_fmt % (lr_scheduler.get_lr(optimizer), running_loss / (step + 1),
                       100. * n_correct / n_seen, n_correct, n_seen),
            refresh=True)
    # NOTE(review): `step`/`n_seen` here assume the loader is non-empty.
    writer.add_scalar('iter_%d/train/loss' % iteration, running_loss / (step + 1), epoch)
    writer.add_scalar('iter_%d/train/acc' % iteration, 100. * n_correct / n_seen, epoch)
def test(net, loader, criterion, epoch, writer, iteration):
    """Evaluate ``net`` on ``loader``; log and return top-1 accuracy (%)."""
    net.eval()
    loss_sum = 0
    n_correct = 0
    n_seen = 0
    bar_fmt = 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
    desc = bar_fmt % (loss_sum / (0 + 1), 0, n_correct, n_seen)
    prog_bar = tqdm(enumerate(loader), total=len(loader), desc=desc, leave=True)
    with torch.no_grad():
        for step, (inputs, targets) in prog_bar:
            inputs, targets = inputs.cuda(), targets.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss_sum += loss.item()
            _, predicted = outputs.max(1)
            n_seen += targets.size(0)
            n_correct += predicted.eq(targets).sum().item()
            prog_bar.set_description(
                bar_fmt % (loss_sum / (step + 1), 100. * n_correct / n_seen,
                           n_correct, n_seen),
                refresh=True)
    # NOTE(review): `step`/`n_seen` here assume the loader is non-empty.
    acc = 100. * n_correct / n_seen
    writer.add_scalar('iter_%d/test/loss' % iteration, loss_sum / (step + 1), epoch)
    writer.add_scalar('iter_%d/test/acc' % iteration, 100. * n_correct / n_seen, epoch)
    return acc
def train_once(mb, net, trainloader, testloader, writer, config, ckpt_path, learning_rate, weight_decay, num_epochs,
               iteration, logger):
    """Finetune the pruned net for ``num_epochs`` epochs, checkpointing the
    best test accuracy seen so far."""
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    # Step schedule: base LR, then x0.1 at 50% and x0.01 at 75% of training.
    lr_schedule = {
        0: learning_rate,
        int(num_epochs * 0.5): learning_rate * 0.1,
        int(num_epochs * 0.75): learning_rate * 0.01,
    }
    lr_scheduler = PresetLRScheduler(lr_schedule)
    best_acc = 0
    best_epoch = 0
    for epoch in range(num_epochs):
        train(net, trainloader, optimizer, criterion, lr_scheduler, epoch, writer, iteration=iteration)
        test_acc = test(net, testloader, criterion, epoch, writer, iteration)
        if test_acc > best_acc:
            print('Saving..')
            ckpt = {
                'net': net,
                'acc': test_acc,
                'epoch': epoch,
                'args': config,
                'mask': mb.masks,
                'ratio': mb.get_ratio_at_each_layer()
            }
            fname = 'finetune_%s_%s%s_r%s_it%d_best.pth.tar' % (
                config.dataset, config.network, config.depth, config.target_ratio, iteration)
            torch.save(ckpt, os.path.join(ckpt_path, fname))
            best_acc = test_acc
            best_epoch = epoch
    logger.info('Iteration [%d], best acc: %.4f, epoch: %d' %
                (iteration, best_acc, best_epoch))
def get_exception_layers(net, exception):
    """Collect the Conv2d/Linear modules whose prunable-layer index is
    listed in ``exception``."""
    prunables = [m for m in net.modules() if isinstance(m, (nn.Linear, nn.Conv2d))]
    return tuple(m for i, m in enumerate(prunables) if i in exception)
def main(config):
    """Prune with GraSP at initialization, save the masked net, then finetune."""
    # Dataset name -> number of classes.
    classes = {
        'cifar10': 10,
        'cifar100': 100,
        'mnist': 10,
        'tiny_imagenet': 200
    }
    logger, writer = init_logger(config)
    # ---- build model ----
    model = get_network(config.network, config.depth, config.dataset, use_bn=config.get('use_bn', True))
    mask = None
    mb = ModelBase(config.network, config.depth, config.dataset, model)
    mb.cuda()
    if mask is not None:
        mb.register_mask(mask)
        print_mask_information(mb, logger)
    # ---- dataloader ----
    trainloader, testloader = get_dataloader(config.dataset, config.batch_size, 256, 4)
    # ---- fetch configs ----
    ckpt_path = config.checkpoint_dir
    num_iterations = config.iterations
    target_ratio = config.target_ratio
    normalize = config.normalize
    # ---- layers excluded from pruning ----
    exception = get_exception_layers(mb.model, str_to_list(config.exception, ',', int))
    logger.info('Exception: ')
    for idx, m in enumerate(exception):
        logger.info('    (%d) %s' % (idx, m))
    # ---- training schemes ----
    ratio = 1 - (1 - target_ratio) ** (1.0 / num_iterations)
    learning_rates = str_to_list(config.learning_rate, ',', float)
    weight_decays = str_to_list(config.weight_decay, ',', float)
    training_epochs = str_to_list(config.epoch, ',', int)
    logger.info('Normalize: %s, Total iteration: %d, Target ratio: %.2f, Iter ratio %.4f.' %
                (normalize, num_iterations, target_ratio, ratio))
    logger.info('Basic Settings: ')
    for idx in range(len(learning_rates)):
        logger.info('    %d: LR: %.5f, WD: %.5f, Epochs: %d' %
                    (idx, learning_rates[idx], weight_decays[idx], training_epochs[idx]))
    # ---- prune once, then finetune ----
    iteration = 0
    for _ in range(1):
        logger.info('** Target ratio: %.4f, iter ratio: %.4f, iteration: %d/%d.' %
                    (target_ratio, ratio, 1, num_iterations))
        mb.model.apply(weights_init)
        print("=> Applying weight initialization(%s)." % config.get('init_method', 'kaiming'))
        print("Iteration of: %d/%d" % (iteration, num_iterations))
        masks = GraSP(mb.model, ratio, trainloader, 'cuda',
                      num_classes=classes[config.dataset],
                      samples_per_class=config.samples_per_class,
                      num_iters=config.get('num_iters', 1))
        iteration = 0
        print('=> Using GraSP')
        # ---- register mask ----
        mb.register_mask(masks)
        # ---- save pruned (untrained) network ----
        logger.info('Saving..')
        state = {
            'net': mb.model,
            'acc': -1,
            'epoch': -1,
            'args': config,
            'mask': mb.masks,
            'ratio': mb.get_ratio_at_each_layer()
        }
        fname = 'prune_%s_%s%s_r%s_it%d.pth.tar' % (
            config.dataset, config.network, config.depth, config.target_ratio, iteration)
        torch.save(state, os.path.join(ckpt_path, fname))
        # ---- print pruning details ----
        logger.info('**[%d] Mask and training setting: ' % iteration)
        print_mask_information(mb, logger)
        logger.info('        LR: %.5f, WD: %.5f, Epochs: %d' %
                    (learning_rates[iteration], weight_decays[iteration], training_epochs[iteration]))
        # ---- finetuning ----
        train_once(mb=mb,
                   net=mb.model,
                   trainloader=trainloader,
                   testloader=testloader,
                   writer=writer,
                   config=config,
                   ckpt_path=ckpt_path,
                   learning_rate=learning_rates[iteration],
                   weight_decay=weight_decays[iteration],
                   num_epochs=training_epochs[iteration],
                   iteration=iteration,
                   logger=logger)
if __name__ == '__main__':
    main(init_config())
| 13,065 | 39.83125 | 119 | py |
GraSP | GraSP-master/main_finetune_imagenet.py | import argparse
import os
import random
import shutil
import time
import warnings
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from tensorboardX import SummaryWriter
from pprint import pprint
# Every lowercase, non-dunder, callable attribute of torchvision.models is a
# selectable architecture name.
model_names = sorted(
    name
    for name in models.__dict__
    if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)

parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) +
                         ' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int, metavar='N',
                    help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--resume_pruned', default='', type=str, metavar='PATH',
                    help='path to latest pruned network (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
# NOTE(review): the help text below looks copy-pasted from --gpu; the option
# actually controls the gradient-accumulation loop count (see main()).
parser.add_argument('--grad_loop', default=1, type=int,
                    help='GPU id to use.')

# Shared across main/main_worker: best top-1 accuracy and tensorboard writer.
best_acc1 = 0
writer = None
def main():
    """Entry point: parse args, configure seeding/distribution, launch workers."""
    args = parser.parse_args()
    # Scale the LR down when gradients are accumulated over several loops.
    args.lr /= args.grad_loop
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if not args.multiprocessing_distributed:
        # Single process: run the worker inline.
        main_worker(args.gpu, ngpus_per_node, args)
    else:
        # One process per GPU: the global world size must count every GPU on
        # every node; mp.spawn launches the main_worker processes.
        args.world_size = ngpus_per_node * args.world_size
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
def main_worker(gpu, ngpus_per_node, args):
global best_acc1, writer
args.gpu = gpu
args.save_dir = None
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
def forward_pre_hook(m, x):
m.mask.requires_grad_(False)
mask = m.mask
# mask.requires_grad_(False)
# mask.cuda(m.weight.get_device())
m.weight.data.mul_(mask.to(m.weight.get_device()))
if args.resume_pruned:
if os.path.isfile(args.resume_pruned):
print("=> loading checkpoint '{}'".format(args.resume_pruned))
checkpoint = torch.load(args.resume_pruned)
model = checkpoint['net'].cpu()
masks = checkpoint['mask']
ratio = checkpoint['ratio']
print("=> Ratios:")
pprint(ratio)
# optimizer.load_state_dict(checkpoint['optimizer'])
print("Loaded check point from %s." % args.resume_pruned)
print('=> Registering masks for each layer')
for m in model.modules():
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
m.mask = nn.Parameter(masks[m]).requires_grad_(False).cpu()
m.register_forward_pre_hook(forward_pre_hook)
args.save_dir = os.path.join(*args.resume_pruned.split('/')[:-1])
writer = SummaryWriter(args.save_dir)
print('=> Will save to %s.' % args.save_dir)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
elif args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int(args.workers / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
def to_cpu(m):
if isinstance(m, dict):
for k in m.keys():
m[k] = to_cpu(m[k])
return m
elif isinstance(m, list):
return [to_cpu(_) for _ in m]
elif isinstance(m, torch.Tensor):
return m.cpu()
else:
return m
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
checkpoint['state_dict'] = to_cpu(checkpoint['state_dict'])
checkpoint['optimizer'] = to_cpu(checkpoint['optimizer'])
# for k in checkpoint['state_dict'].keys():
# checkpoint['state_dict'][k] = checkpoint['state_dict'][k].cpu()
#
# for k in checkpoint['optimizer'].keys():
# checkpoint['optimizer'][k] = checkpoint['optimizer'][k].cpu()
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
del checkpoint
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train_loss, train_top1, train_top5 = train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1, val_loss, val_top1, val_top5 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best, args=args)
# global writer
if writer is not None:
writer.add_scalar('train/loss', train_loss.avg, epoch)
writer.add_scalar('train/top1', train_top1.avg, epoch)
writer.add_scalar('train/top5', train_top5.avg, epoch)
writer.add_scalar('val/loss', val_loss.avg, epoch)
writer.add_scalar('val/top1', val_top1.avg, epoch)
writer.add_scalar('val/top5', val_top5.avg, epoch)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Train `model` for one epoch over `train_loader`.

    Gradients are accumulated across `args.grad_loop` consecutive mini-batches
    before each optimizer step (emulating a larger effective batch size).

    :param train_loader: iterable yielding (images, target) batches
    :param model: the network being trained (switched to train mode here)
    :param criterion: loss function
    :param optimizer: optimizer whose step is taken every grad_loop batches
    :param epoch: current epoch index (used for display only)
    :param args: namespace with epochs, lr, gpu, grad_loop, print_freq
    :return: the (losses, top1, top5) AverageMeters for the epoch
    """
    # Recompute the current LR purely for display in the progress prefix;
    # the schedule itself is applied by adjust_learning_rate() in the caller.
    total = args.epochs
    intv = max(total // 3, 1)  # guard: epochs < 3 would otherwise divide by zero
    lr = args.lr * (0.1 ** (epoch // intv))
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1,
                             top5, prefix="[lr={}, grad_loop={}] Epoch: [{}]".format(lr, args.grad_loop, epoch))
    # switch to train mode
    model.train()
    end = time.time()
    optimizer.zero_grad()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        # forward pass and loss
        output = model(images)
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        # Accumulate gradients, stepping once every args.grad_loop batches.
        # NOTE(review): gradients from a trailing partial window at the end of
        # the epoch are never applied (they are cleared by the zero_grad() at
        # the start of the next call) -- confirm this is intended.
        loss.backward()
        if (i+1) % args.grad_loop == 0:
            optimizer.step()
            optimizer.zero_grad()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.print(i)
    return losses, top1, top5
def validate(val_loader, model, criterion, args):
    """Evaluate `model` over `val_loader` and print running statistics.

    :return: (top1.avg, losses, top1, top5) -- the scalar top-1 average plus
        the three AverageMeters accumulated over the pass
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,
                             prefix='Test: ')
    # Evaluation mode: disables dropout / fixes batch-norm statistics.
    model.eval()
    with torch.no_grad():
        tic = time.time()
        for step, (images, labels) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
                labels = labels.cuda(args.gpu, non_blocking=True)
            # Forward pass and loss.
            logits = model(images)
            loss = criterion(logits, labels)
            # Record loss and top-1/top-5 accuracy for this batch.
            acc1, acc5 = accuracy(logits, labels, topk=(1, 5))
            n = images.size(0)
            losses.update(loss.item(), n)
            top1.update(acc1[0], n)
            top5.update(acc5[0], n)
            # Track per-batch wall time.
            batch_time.update(time.time() - tic)
            tic = time.time()
            if step % args.print_freq == 0:
                progress.print(step)
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg, losses, top1, top5
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', args=None):
    """Persist a training checkpoint; also copy it to model_best on a new best.

    :param state: serializable dict to save (epoch, state_dict, ...)
    :param is_best: when True, additionally copy to 'model_best.pth.tar'
    :param filename: checkpoint filename (relative, unless args.save_dir set)
    :param args: optional namespace; when it carries a save_dir, paths are
        joined under it. Fix: the original dereferenced args.save_dir even
        when args was None (the declared default), raising AttributeError.
    """
    save_dir = getattr(args, 'save_dir', None)
    if save_dir is not None:
        filename = os.path.join(save_dir, filename)
    torch.save(state, filename)
    if is_best:
        best_location = 'model_best.pth.tar'
        if save_dir is not None:
            best_location = os.path.join(save_dir, 'model_best.pth.tar')
        shutil.copyfile(filename, best_location)
class AverageMeter(object):
    """Tracks the most recent value and the running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        # Clear all statistics back to their initial state.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # `n` is the number of samples `val` was averaged over.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # Render "name current (average)" using the configured format spec.
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Formats progress lines: '<prefix>[batch/total]' followed by each meter."""

    def __init__(self, num_batches, *meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def print(self, batch):
        """Print one tab-separated progress line for the given batch index."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        # Counter field width = digit count of the batch total.
        # (Fix: the original computed len(str(num_batches // 1)); the `// 1`
        # was a no-op.)
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Decay the learning rate by 10x every third of the total epoch budget.

    Fixes: the original docstring sat after the first statements and claimed
    a fixed 30-epoch decay; it also divided by zero when args.epochs < 3.

    :param optimizer: optimizer whose param_groups' lr is overwritten
    :param epoch: current epoch index
    :param args: namespace providing epochs (total) and lr (base rate)
    """
    total = args.epochs
    # Guard against total < 3, which would make the decay interval zero.
    intv = max(total // 3, 1)
    lr = args.lr * (0.1 ** (epoch // intv))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    :param output: class scores/logits of shape (batch, num_classes)
    :param target: ground-truth class indices of shape (batch,)
    :param topk: tuple of k values to report
    :return: list of 1-element tensors, one accuracy percentage per k
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # pred: (batch, maxk) indices of the top-k classes, transposed to (maxk, batch).
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # Fix: reshape(-1) instead of view(-1) -- the slice correct[:k] is
            # non-contiguous for k > 1 and view() raises on it in current PyTorch.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
if __name__ == '__main__':
    # Script entry point: run the `main()` driver defined earlier in this file.
    main()
| 19,921 | 38.293886 | 112 | py |
GraSP | GraSP-master/pruner/GraSP_ImageNet.py | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import copy
import types
def count_total_parameters(net):
    """Return the number of weight elements over all Linear/Conv2d layers of `net`."""
    return sum(m.weight.numel() for m in net.modules()
               if isinstance(m, (nn.Linear, nn.Conv2d)))
def count_fc_parameters(net):
    """Return the number of weight elements over the Linear layers of `net` only."""
    return sum(m.weight.numel() for m in net.modules()
               if isinstance(m, nn.Linear))
def GraSP(net, ratio, train_dataloader, device, num_classes=10, samples_per_class=25, num_iters=1):
    """Compute GraSP pruning masks for `net` (ImageNet variant).

    Scores each Conv2d/Linear weight element as -theta * (H g) (gradient-flow
    preservation) on a deep copy of the network, then keeps the fraction
    (1 - ratio) of weights with the lowest normalized score.

    :param net: network to prune; scoring runs on a deep copy, masks are keyed
        by the modules of the ORIGINAL `net`
    :param ratio: fraction of weights to remove
    :param train_dataloader: loader the scoring batches are drawn from
    :param device: device used for the forward/backward passes
    :param num_classes: unused here -- NOTE(review): kept for signature parity
        with the CIFAR variant; confirm before relying on it
    :param samples_per_class: unused here (see num_classes note)
    :param num_iters: number of loader batches to score over
    :return: dict mapping Conv2d/Linear modules of `net` to float 0/1 keep-masks
    """
    eps = 1e-10
    keep_ratio = 1-ratio
    old_net = net
    # Score on a copy so the caller's weights/gradients are untouched.
    net = copy.deepcopy(net)
    net.zero_grad()
    weights = []
    total_parameters = count_total_parameters(net)
    fc_parameters = count_fc_parameters(net)
    fc_layers = []
    for layer in net.modules():
        if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
            if isinstance(layer, nn.Linear):
                fc_layers.append(layer)
            weights.append(layer.weight)
    # Re-initialize only the LAST Linear layer before scoring.
    # NOTE(review): nn.init.xavier_normal is deprecated (use xavier_normal_),
    # and this raises IndexError if the net has no Linear layer -- confirm.
    nn.init.xavier_normal(fc_layers[-1].weight)
    inputs_one = []
    targets_one = []
    grad_w = None
    grad_f = None
    for w in weights:
        w.requires_grad_(True)
    # Per-dataset chunk sizes; NOTE(review): unused -- the loop below
    # hard-codes intv = 20.
    intvs = {
        'cifar10': 128,
        'cifar100': 256,
        'tiny_imagenet': 128,
        'imagenet': 20
    }
    print_once = False
    dataloader_iter = iter(train_dataloader)
    # Pass 1: accumulate the plain weight gradient g over all chunks.
    for it in range(num_iters):
        print("(1): Iterations %d/%d." % (it, num_iters))
        inputs, targets = next(dataloader_iter)
        N = inputs.shape[0]
        din = copy.deepcopy(inputs)
        dtarget = copy.deepcopy(targets)
        start = 0
        intv = 20
        # Process the batch in chunks of `intv` samples to bound memory.
        while start < N:
            end = min(start+intv, N)
            print('(1): %d -> %d.' % (start, end))
            inputs_one.append(din[start:end])
            targets_one.append(dtarget[start:end])
            outputs = net.forward(inputs[start:end].to(device)) / 200  # divide by temperature to make it uniform
            if print_once:
                x = F.softmax(outputs)
                print(x)
                print(x.max(), x.min())
                print_once = False
            loss = F.cross_entropy(outputs, targets[start:end].to(device))
            grad_w_p = autograd.grad(loss, weights, create_graph=False)
            if grad_w is None:
                grad_w = list(grad_w_p)
            else:
                for idx in range(len(grad_w)):
                    grad_w[idx] += grad_w_p[idx]
            start = end
    # Pass 2: replay the same chunks; backprop through z = sum(g * grad_f)
    # so layer.weight.grad accumulates the Hessian-gradient product H g.
    for it in range(len(inputs_one)):
        print("(2): Iterations %d/%d." % (it, len(inputs_one)))
        inputs = inputs_one.pop(0).to(device)
        targets = targets_one.pop(0).to(device)
        outputs = net.forward(inputs) / 200  # divide by temperature to make it uniform
        loss = F.cross_entropy(outputs, targets)
        grad_f = autograd.grad(loss, weights, create_graph=True)
        z = 0
        count = 0
        for layer in net.modules():
            if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
                z += (grad_w[count] * grad_f[count]).sum()
                count += 1
        z.backward()
    grads = dict()
    # Key the scores by the ORIGINAL net's modules; relies on modules()
    # enumerating the copy and the original in the same order.
    old_modules = list(old_net.modules())
    for idx, layer in enumerate(net.modules()):
        if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
            grads[old_modules[idx]] = -layer.weight.data * layer.weight.grad  # -theta_q Hg
    # Gather all scores in a single vector and normalise
    all_scores = torch.cat([torch.flatten(x) for x in grads.values()])
    norm_factor = torch.abs(torch.sum(all_scores)) + eps
    print("** norm factor:", norm_factor)
    all_scores.div_(norm_factor)
    # Threshold: keep the weights with the LOWEST normalized scores.
    num_params_to_rm = int(len(all_scores) * (1 - keep_ratio))
    threshold, _ = torch.topk(all_scores, num_params_to_rm, sorted=True)
    # import pdb; pdb.set_trace()
    acceptable_score = threshold[-1]
    print('** accept: ', acceptable_score)
    keep_masks = dict()
    for m, g in grads.items():
        keep_masks[m] = ((g / norm_factor) <= acceptable_score).float()
    print(torch.sum(torch.cat([torch.flatten(x == 1) for x in keep_masks.values()])))
    return keep_masks
| 4,231 | 30.348148 | 113 | py |
GraSP | GraSP-master/pruner/GraSP.py | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import math
import copy
import types
def GraSP_fetch_data(dataloader, num_classes, samples_per_class):
    """Draw a class-balanced batch from `dataloader`.

    Collects exactly `samples_per_class` examples for each of the
    `num_classes` classes and returns them as one (X, y) pair, grouped by
    class along dim 0.

    NOTE(review): assumes integer labels in [0, num_classes); if the loader
    is exhausted before every class is filled, `next()` raises StopIteration
    here. The loop only terminates after each full class has been SEEN one
    extra time (that is when `mark` is set) -- confirm this is acceptable.
    """
    datas = [[] for _ in range(num_classes)]
    labels = [[] for _ in range(num_classes)]
    # mark[c] is set once class c is already full and another sample arrives.
    mark = dict()
    dataloader_iter = iter(dataloader)
    while True:
        inputs, targets = next(dataloader_iter)
        for idx in range(inputs.shape[0]):
            # Keep batch dims via idx:idx+1 slicing so cat() works below.
            x, y = inputs[idx:idx+1], targets[idx:idx+1]
            category = y.item()
            if len(datas[category]) == samples_per_class:
                mark[category] = True
                continue
            datas[category].append(x)
            labels[category].append(y)
        if len(mark) == num_classes:
            break
    X, y = torch.cat([torch.cat(_, 0) for _ in datas]), torch.cat([torch.cat(_) for _ in labels]).view(-1)
    return X, y
def count_total_parameters(net):
    """Return the number of weight elements over all Linear/Conv2d layers of `net`."""
    return sum(m.weight.numel() for m in net.modules()
               if isinstance(m, (nn.Linear, nn.Conv2d)))
def count_fc_parameters(net):
    """Return the number of weight elements over the Linear layers of `net` only."""
    return sum(m.weight.numel() for m in net.modules()
               if isinstance(m, nn.Linear))
def GraSP(net, ratio, train_dataloader, device, num_classes=10, samples_per_class=25, num_iters=1, T=200, reinit=True):
    """Compute GraSP pruning masks for `net` (CIFAR/tiny-ImageNet variant).

    Scores each Conv2d/Linear weight element as -theta * (H g) on a deep copy
    of the network, using class-balanced batches from GraSP_fetch_data, then
    keeps the (1 - ratio) fraction with the lowest normalized score.

    :param net: network to prune; masks are keyed by the ORIGINAL net's modules
    :param ratio: fraction of weights to remove
    :param train_dataloader: loader the class-balanced batches are drawn from
    :param device: device used for forward/backward passes
    :param num_classes: classes to balance over (passed to GraSP_fetch_data)
    :param samples_per_class: samples per class in each scoring batch
    :param num_iters: number of scoring batches
    :param T: softmax temperature the logits are divided by
    :param reinit: when True, re-initialize every Linear layer before scoring
    :return: dict mapping Conv2d/Linear modules of `net` to float 0/1 keep-masks
    """
    eps = 1e-10
    keep_ratio = 1-ratio
    old_net = net
    # Score on a copy so the caller's weights/gradients are untouched.
    net = copy.deepcopy(net)  # .eval()
    net.zero_grad()
    weights = []
    total_parameters = count_total_parameters(net)
    fc_parameters = count_fc_parameters(net)
    # rescale_weights(net)
    for layer in net.modules():
        if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
            if isinstance(layer, nn.Linear) and reinit:
                # NOTE(review): nn.init.xavier_normal is deprecated
                # (use xavier_normal_); behavior is the same, just noisy.
                nn.init.xavier_normal(layer.weight)
            weights.append(layer.weight)
    inputs_one = []
    targets_one = []
    grad_w = None
    for w in weights:
        w.requires_grad_(True)
    print_once = False
    # Pass 1: accumulate the plain weight gradient g over both halves of
    # each class-balanced batch (halved to bound memory).
    for it in range(num_iters):
        print("(1): Iterations %d/%d." % (it, num_iters))
        inputs, targets = GraSP_fetch_data(train_dataloader, num_classes, samples_per_class)
        N = inputs.shape[0]
        din = copy.deepcopy(inputs)
        dtarget = copy.deepcopy(targets)
        inputs_one.append(din[:N//2])
        targets_one.append(dtarget[:N//2])
        inputs_one.append(din[N // 2:])
        targets_one.append(dtarget[N // 2:])
        inputs = inputs.to(device)
        targets = targets.to(device)
        # Temperature-scaled logits flatten the softmax for scoring.
        outputs = net.forward(inputs[:N//2])/T
        if print_once:
            # import pdb; pdb.set_trace()
            x = F.softmax(outputs)
            print(x)
            print(x.max(), x.min())
            print_once = False
        loss = F.cross_entropy(outputs, targets[:N//2])
        # ===== debug ================
        grad_w_p = autograd.grad(loss, weights)
        if grad_w is None:
            grad_w = list(grad_w_p)
        else:
            for idx in range(len(grad_w)):
                grad_w[idx] += grad_w_p[idx]
        # Second half of the batch.
        outputs = net.forward(inputs[N // 2:])/T
        loss = F.cross_entropy(outputs, targets[N // 2:])
        grad_w_p = autograd.grad(loss, weights, create_graph=False)
        if grad_w is None:
            grad_w = list(grad_w_p)
        else:
            for idx in range(len(grad_w)):
                grad_w[idx] += grad_w_p[idx]
    # NOTE(review): ret_inputs/ret_targets are filled but never returned or
    # read -- appears to be dead state; confirm before removing.
    ret_inputs = []
    ret_targets = []
    # Pass 2: replay the stored halves; backprop through z = sum(g * grad_f)
    # so layer.weight.grad accumulates the Hessian-gradient product H g.
    for it in range(len(inputs_one)):
        # NOTE(review): this prints it/num_iters but iterates len(inputs_one)
        # (= 2 * num_iters) items, so the denominator is misleading.
        print("(2): Iterations %d/%d." % (it, num_iters))
        inputs = inputs_one.pop(0).to(device)
        targets = targets_one.pop(0).to(device)
        ret_inputs.append(inputs)
        ret_targets.append(targets)
        outputs = net.forward(inputs)/T
        loss = F.cross_entropy(outputs, targets)
        # ===== debug ==============
        grad_f = autograd.grad(loss, weights, create_graph=True)
        z = 0
        count = 0
        for layer in net.modules():
            if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
                z += (grad_w[count].data * grad_f[count]).sum()
                count += 1
        z.backward()
    grads = dict()
    # Key scores by the ORIGINAL net's modules; relies on modules()
    # enumerating the copy and the original in the same order.
    old_modules = list(old_net.modules())
    for idx, layer in enumerate(net.modules()):
        if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
            grads[old_modules[idx]] = -layer.weight.data * layer.weight.grad  # -theta_q Hg
    # Gather all scores in a single vector and normalise
    all_scores = torch.cat([torch.flatten(x) for x in grads.values()])
    norm_factor = torch.abs(torch.sum(all_scores)) + eps
    print("** norm factor:", norm_factor)
    all_scores.div_(norm_factor)
    # Threshold: keep the weights with the LOWEST normalized scores.
    num_params_to_rm = int(len(all_scores) * (1-keep_ratio))
    threshold, _ = torch.topk(all_scores, num_params_to_rm, sorted=True)
    # import pdb; pdb.set_trace()
    acceptable_score = threshold[-1]
    print('** accept: ', acceptable_score)
    keep_masks = dict()
    for m, g in grads.items():
        keep_masks[m] = ((g / norm_factor) <= acceptable_score).float()
    print(torch.sum(torch.cat([torch.flatten(x == 1) for x in keep_masks.values()])))
    return keep_masks
| 5,239 | 31.75 | 119 | py |
GraSP | GraSP-master/models/model_base.py | import torch.nn as nn
from collections import OrderedDict
from utils.network_utils import get_network
from utils.prune_utils import filter_weights
class ModelBase(object):
    """Wraps a network together with per-layer pruning masks.

    Masks are a dict mapping Linear/Conv2d modules to 0/1 tensors of the same
    shape as the layer's weight. Once registered, a forward pre-hook
    multiplies each weight by its mask in place before every forward pass,
    so pruned entries stay zero even after optimizer updates.
    """
    def __init__(self, network, depth, dataset, model=None):
        # Identification strings used by get_name().
        self._network = network
        self._depth = depth
        self._dataset = dataset
        self.model = model
        # Set through register_mask(); most accessors assert it exists first.
        self.masks = None
        if self.model is None:
            self.model = get_network(network, depth, dataset)
    def get_ratio_at_each_layer(self):
        """Return {module: kept-weight percentage} plus an overall 'ratio' entry.

        Layers without a mask report the sentinel -100.0 and count as fully
        kept in the overall ratio.
        """
        assert self.masks is not None, 'Masks should be generated first.'
        res = dict()
        total = 0
        remained = 0
        # for m in self.masks.keys():
        for m in self.model.modules():
            if isinstance(m, (nn.Linear, nn.Conv2d)):
                mask = self.masks.get(m, None)
                if mask is not None:
                    res[m] = (mask.sum() / mask.numel()).item() * 100
                    total += mask.numel()
                    remained += mask.sum().item()
                else:
                    # Sentinel: no mask registered for this layer.
                    res[m] = -100.0
                    total += m.weight.numel()
                    remained += m.weight.numel()
        res['ratio'] = remained/total * 100
        return res
    def get_unmasked_weights(self):
        """Return the weights that are unmasked.
        :return dict, key->module, val->list of weights
        """
        assert self.masks is not None, 'Masks should be generated first.'
        res = dict()
        for m in self.masks.keys():
            res[m] = filter_weights(m.weight, self.masks[m])
        return res
    def get_masked_weights(self):
        """Return the weights that are masked.
        :return dict, key->module, val->list of weights
        """
        assert self.masks is not None, 'Masks should be generated first.'
        res = dict()
        for m in self.masks.keys():
            # 1 - mask selects the pruned entries instead of the kept ones.
            res[m] = filter_weights(m.weight, 1-self.masks[m])
        return res
    def register_mask(self, masks=None):
        """Store `masks` (if given) and attach the weight-zeroing pre-hooks.

        Clears ALL existing forward-pre/backward hooks on the model first.
        """
        # self.masks = None
        self.unregister_mask()
        if masks is not None:
            self.masks = masks
        assert self.masks is not None, 'Masks should be generated first.'
        for m in self.masks.keys():
            m.register_forward_pre_hook(self._forward_pre_hooks)
    def unregister_mask(self):
        # NOTE(review): this wipes every module's hook dicts wholesale, so
        # any hooks registered by other code are removed too -- confirm.
        for m in self.model.modules():
            m._backward_hooks = OrderedDict()
            m._forward_pre_hooks = OrderedDict()
    def _forward_pre_hooks(self, m, input):
        # Re-zero pruned weights in place right before the layer runs.
        if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
            # import pdb; pdb.set_trace()
            mask = self.masks[m]
            m.weight.data.mul_(mask)
        else:
            # NOTE(review): 'Unsupported ' + m concatenates str with a Module
            # and would itself raise TypeError if ever reached -- confirm.
            raise NotImplementedError('Unsupported ' + m)
    def get_name(self):
        # E.g. 'cifar10_resnet32'.
        return '%s_%s%s' % (self._dataset, self._network, self._depth)
    def train(self):
        # Fluent wrappers delegating mode/device switches to the inner model.
        self.model = self.model.train()
        return self
    def eval(self):
        self.model = self.model.eval()
        return self
    def cpu(self):
        self.model = self.model.cpu()
        return self
    def cuda(self):
        self.model = self.model.cuda()
        return self
| 3,183 | 31.161616 | 73 | py |
GraSP | GraSP-master/models/base/resnet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from utils.common_utils import try_cuda
from .init_utils import weights_init
__all__ = ['resnet'] # , 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
_AFFINE = True
#_AFFINE = False
class LambdaLayer(nn.Module):
    """Adapter that wraps an arbitrary callable as an nn.Module."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        # Keep the callable under the original attribute name.
        self.lambd = lambd

    def forward(self, x):
        """Apply the wrapped callable to the input."""
        fn = self.lambd
        return fn(x)
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 conv+BN layers with a residual shortcut.

    A 1x1 conv projection (with its own BN, `bn3`) is created for the
    shortcut when the stride or channel count changes.

    NOTE(review): code elsewhere in this project appears to index
    `net.modules()` positionally (mask alignment) -- do not reorder the
    submodule definitions below without confirming.
    """
    expansion = 1
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, affine=_AFFINE)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, affine=_AFFINE)
        self.downsample = None
        self.bn3 = None
        if stride != 1 or in_planes != planes:
            # Shortcut must be projected when the output shape changes.
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False))
            self.bn3 = nn.BatchNorm2d(self.expansion * planes, affine=_AFFINE)
    def forward(self, x):
        # x: batch_size * in_c * h * w
        residual = x
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            residual = self.bn3(self.downsample(x))
        out += residual
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 conv stem, three stages (32/64/128 channels),
    global average pooling, and a single Linear classifier."""
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        _outputs = [32, 64, 128]
        self.in_planes = _outputs[0]
        self.conv1 = nn.Conv2d(3, _outputs[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(_outputs[0], affine=_AFFINE)
        # Stages 2 and 3 halve the spatial resolution via stride 2.
        self.layer1 = self._make_layer(block, _outputs[0], num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, _outputs[1], num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, _outputs[2], num_blocks[2], stride=2)
        self.linear = nn.Linear(_outputs[2], num_classes)
        self.apply(weights_init)
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # Global average pool (kernel = remaining spatial width), then classify.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def resnet(depth=32, dataset='cifar10'):
    """Build a CIFAR-style ResNet of the given depth (6n + 2) for `dataset`."""
    assert (depth - 2) % 6 == 0, 'Depth must be = 6n + 2, got %d' % depth
    n = (depth - 2) // 6
    # Map each supported dataset to its class count.
    class_counts = {'cifar10': 10, 'cifar100': 100, 'tiny_imagenet': 200}
    if dataset not in class_counts:
        raise NotImplementedError('Dataset [%s] is not supported.' % dataset)
    return ResNet(BasicBlock, [n] * 3, class_counts[dataset])
def test(net):
    """Print the trainable-parameter count and weight-layer count of `net`."""
    import numpy as np
    trainable = [p for p in net.parameters() if p.requires_grad]
    total_params = 0
    for p in trainable:
        total_params += np.prod(p.data.numpy().shape)
    print("Total number of params", total_params)
    # Count only parameters with 2+ dims, i.e. weight matrices/kernels.
    print("Total layers", len([p for p in trainable if len(p.data.size()) > 1]))
if __name__ == "__main__":
    # Smoke test: instantiate and summarize every exported resnet constructor.
    for net_name in __all__:
        if net_name.startswith('resnet'):
            print(net_name)
            test(globals()[net_name]())
print() | 3,943 | 33 | 114 | py |
GraSP | GraSP-master/models/base/vgg.py | import math
import torch
import torch.nn as nn
from .init_utils import weights_init
# Standard VGG configurations keyed by depth: integers are conv output
# channels, 'M' marks a 2x2 max-pooling layer.
defaultcfg = {
    11: [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
    13: [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
    16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],
    19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512],
}
class VGG(nn.Module):
    """VGG for small-image datasets: conv feature stack, average pooling,
    and a single Linear classifier."""
    def __init__(self, dataset='cifar10', depth=19, init_weights=True, cfg=None, affine=True, batchnorm=True):
        super(VGG, self).__init__()
        if cfg is None:
            cfg = defaultcfg[depth]
        self._AFFINE = affine
        self.feature = self.make_layers(cfg, batchnorm)
        self.dataset = dataset
        if dataset == 'cifar10' or dataset == 'cinic-10':
            num_classes = 10
        elif dataset == 'cifar100':
            num_classes = 100
        elif dataset == 'tiny_imagenet':
            num_classes = 200
        else:
            raise NotImplementedError("Unsupported dataset " + dataset)
        self.classifier = nn.Linear(cfg[-1], num_classes)
        if init_weights:
            self.apply(weights_init)
        # if pretrained:
        #     model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
    def make_layers(self, cfg, batch_norm=False):
        # Build the conv stack from cfg: ints are conv out-channels,
        # 'M' inserts a 2x2 max-pool.
        layers = []
        in_channels = 3
        for v in cfg:
            if v == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, bias=False)
                if batch_norm:
                    layers += [conv2d, nn.BatchNorm2d(v, affine=self._AFFINE), nn.ReLU(inplace=True)]
                else:
                    layers += [conv2d, nn.ReLU(inplace=True)]
                in_channels = v
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.feature(x)
        # Pool size differs per dataset -- presumably because tiny_imagenet
        # inputs are 64x64 vs 32x32 for CIFAR; confirm against the loaders.
        if self.dataset == 'tiny_imagenet':
            x = nn.AvgPool2d(4)(x)
        else:
            x = nn.AvgPool2d(2)(x)
        x = x.view(x.size(0), -1)
        y = self.classifier(x)
        return y
    def _initialize_weights(self):
        # NOTE(review): appears unused -- __init__ initializes through
        # self.apply(weights_init) instead; confirm before removing.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                if m.weight is not None:
                    m.weight.data.fill_(1.0)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
m.bias.data.zero_() | 2,823 | 35.675325 | 110 | py |
GraSP | GraSP-master/models/base/init_utils.py | import torch
import torch.nn as nn
import torch.nn.init as init
def weights_init(m):
    """Layer-type-aware weight initializer, meant for use via `net.apply(...)`.

    Conv2d: Kaiming-normal (fan_out, relu) weights, zero bias.
    Linear: N(0, 0.01) weights, zero bias.
    BatchNorm2d: weight filled with 1.0, bias zeroed (when affine).
    """
    # print('=> weights init')
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        # nn.init.normal_(m.weight, 0, 0.1)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        # nn.init.xavier_normal(m.weight)
        nn.init.normal_(m.weight, 0, 0.01)
        nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.BatchNorm2d):
        # Note that BN's running_var/mean are
        # already initialized to 1 and 0 respectively.
        if m.weight is not None:
            m.weight.data.fill_(1.0)
        if m.bias is not None:
m.bias.data.zero_() | 767 | 32.391304 | 78 | py |
GraSP | GraSP-master/utils/prune_utils.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common_utils import try_contiguous
def _fetch_weights_collections(scores, _prev_masks):
weights = []
eps = 1e-10
if _prev_masks is None:
for m in scores.keys():
if isinstance(m, (nn.Linear, nn.Conv2d)):
w = scores[m].view(-1).data.cpu().numpy()
weights.extend(w.tolist())
else:
for m in scores.keys():
if isinstance(m, (nn.Linear, nn.Conv2d)):
w = scores[m]
w = filter_weights(w, _prev_masks[m])
weights.extend(w)
return weights
def _extract_patches(x, kernel_size, stride, padding):
"""
:param x: The input feature maps. (batch_size, in_c, h, w)
:param kernel_size: the kernel size of the conv filter (tuple of two elements)
:param stride: the stride of conv operation (tuple of two elements)
:param padding: number of paddings. be a tuple of two elements
:return: (batch_size, out_h, out_w, in_c*kh*kw)
"""
if padding[0] + padding[1] > 0:
x = F.pad(x, (padding[1], padding[1], padding[0],
padding[0])).data # Actually check dims
x = x.unfold(2, kernel_size[0], stride[0])
x = x.unfold(3, kernel_size[1], stride[1])
x = x.transpose_(1, 2).transpose_(2, 3).contiguous()
x = x.view(
x.size(0), x.size(1), x.size(2),
x.size(3) * x.size(4) * x.size(5))
return x
def filter_weights(weights, mask):
    """Return the weight values whose mask entry is on (> 0.5), as a flat list.

    :param weights: tensor of weight values
    :param mask: 0/1 tensor of the same number of elements
    :return: list of Python floats in flattened order

    Improvement: replaces the original element-by-element Python loop over
    `.tolist()` copies with a single vectorized boolean index; reshape(-1)
    also accepts non-contiguous tensors, where view(-1) would raise.
    """
    flat_w = weights.reshape(-1)
    flat_m = mask.reshape(-1)
    return flat_w[flat_m > 0.5].tolist()
# NOTE(review): exact duplicate of _extract_patches defined earlier in this
# module; this second definition shadows the first -- consider removing one.
def _extract_patches(x, kernel_size, stride, padding):
    """
    :param x: The input feature maps.  (batch_size, in_c, h, w)
    :param kernel_size: the kernel size of the conv filter (tuple of two elements)
    :param stride: the stride of conv operation  (tuple of two elements)
    :param padding: number of paddings. be a tuple of two elements
    :return: (batch_size, out_h, out_w, in_c*kh*kw)
    """
    if padding[0] + padding[1] > 0:
        x = F.pad(x, (padding[1], padding[1], padding[0],
                      padding[0])).data  # Actually check dims
    # Slide a window along height, then width: yields (b, c, out_h, out_w, kh, kw).
    x = x.unfold(2, kernel_size[0], stride[0])
    x = x.unfold(3, kernel_size[1], stride[1])
    # Move the channel axis behind the spatial output axes, then flatten
    # each (c, kh, kw) patch into the last dimension.
    x = x.transpose_(1, 2).transpose_(2, 3).contiguous()
    x = x.view(
        x.size(0), x.size(1), x.size(2),
        x.size(3) * x.size(4) * x.size(5))
    return x
class ComputeMatGrad:
    """Reconstructs per-sample parameter-gradient matrices for a layer from
    its input activations and output gradients.

    NOTE(review): __call__ is a classmethod, so calling either the class's
    instances or the class itself dispatches the same way -- unusual but
    functional.
    """
    @classmethod
    def __call__(cls, input, grad_output, layer):
        # Dispatch on the layer type; only Linear and Conv2d are supported.
        if isinstance(layer, nn.Linear):
            grad = cls.linear(input, grad_output, layer)
        elif isinstance(layer, nn.Conv2d):
            grad = cls.conv2d(input, grad_output, layer)
        else:
            raise NotImplementedError
        return grad
    @staticmethod
    def linear(input, grad_output, layer):
        """
        :param input: batch_size * input_dim
        :param grad_output: batch_size * output_dim
        :param layer: [nn.module] output_dim * input_dim
        :return: batch_size * output_dim * (input_dim + [1 if with bias])
        """
        with torch.no_grad():
            if layer.bias is not None:
                # Append a ones column so the bias gradient is folded in.
                input = torch.cat([input, input.new(input.size(0), 1).fill_(1)], 1)
            input = input.unsqueeze(1)
            grad_output = grad_output.unsqueeze(2)
            # Per-sample outer product: grad_output (out,1) x input (1,in+1).
            grad = torch.bmm(grad_output, input)
        return grad
    @staticmethod
    def conv2d(input, grad_output, layer):
        """
        :param input: batch_size * in_c * in_h * in_w
        :param grad_output: batch_size * out_c * h * w
        :param layer: nn.module batch_size * out_c * (in_c*k_h*k_w + [1 if with bias])
        :return:
        """
        with torch.no_grad():
            input = _extract_patches(input, layer.kernel_size, layer.stride, layer.padding)
            input = input.view(-1, input.size(-1))  # b * hw * in_c*kh*kw
            grad_output = grad_output.transpose(1, 2).transpose(2, 3)
            grad_output = try_contiguous(grad_output).view(grad_output.size(0), -1, grad_output.size(-1))
            # b * hw * out_c
            if layer.bias is not None:
                # Ones column folds the bias gradient into the matrix.
                input = torch.cat([input, input.new(input.size(0), 1).fill_(1)], 1)
            input = input.view(grad_output.size(0), -1, input.size(-1))  # b * hw * in_c*kh*kw
            # Sum the per-location outer products over the spatial positions.
            grad = torch.einsum('abm,abn->amn', (grad_output, input))
        return grad
def fetch_mat_weights(layer, use_patch=False):
    # -> output_dium * input_dim (kh*kw*in_c + [1 if with bias])
    """Return a layer's weights reshaped to a 2D (or, with use_patch, 3D)
    matrix form, with the bias (if any) concatenated as an extra column.

    :param layer: nn.Conv2d or nn.Linear
    :param use_patch: Conv2d only -- when True, return shape
        (n_out, kh*kw, in_c + bias) with the bias value replicated per
        spatial position; when False, return (n_out, in_c*kh*kw + bias)
    :raises NotImplementedError: for any other layer type
    """
    if isinstance(layer, nn.Conv2d):
        if use_patch:
            weight = layer.weight.transpose(1, 2).transpose(2, 3)  # n_out * kh * kw * inc
            n_out, k_h, k_w, in_c = weight.size()
            weight = try_contiguous(weight)
            weight = weight.view(-1, weight.size(-1))
            bias = 0
            if layer.bias is not None:
                # Replicate the bias for every kernel position before stacking.
                copied_bias = torch.cat([layer.bias.unsqueeze(1) for _ in range(k_h*k_w)], 1).view(-1, 1)
                weight = torch.cat([weight, copied_bias], 1)  # layer.bias.unsqueeze(1)], 1)
                bias = 1
            weight = weight.view(n_out, k_h*k_w, in_c+bias)
        else:
            weight = layer.weight  # n_filters * in_c * kh * kw
            # weight = weight.transpose(1, 2).transpose(2, 3).contiguous()
            weight = weight.view(weight.size(0), -1)
            if layer.bias is not None:
                weight = torch.cat([weight, layer.bias.unsqueeze(1)], 1)
    elif isinstance(layer, nn.Linear):
        weight = layer.weight
        if layer.bias is not None:
            weight = torch.cat([weight, layer.bias.unsqueeze(1)], 1)
    else:
        raise NotImplementedError
    return weight
class ComputeCovA:
    """Computes the uncentered second-moment matrix (a^T a / batch) of a
    layer's INPUT activations, with a ones column appended when the layer
    has a bias."""
    @classmethod
    def compute_cov_a(cls, a, layer):
        return cls.__call__(a, layer)
    @classmethod
    def __call__(cls, a, layer):
        # Dispatch on layer type; unsupported layers yield None.
        if isinstance(layer, nn.Linear):
            cov_a = cls.linear(a, layer)
        elif isinstance(layer, nn.Conv2d):
            cov_a = cls.conv2d(a, layer)
        else:
            # raise NotImplementedError
            cov_a = None
        return cov_a
    @staticmethod
    def conv2d(a, layer):
        # a: batch_size * in_c * in_h * in_w, unfolded into conv patches.
        batch_size = a.size(0)
        a = _extract_patches(a, layer.kernel_size, layer.stride, layer.padding)
        spatial_size = a.size(1) * a.size(2)
        a = a.view(-1, a.size(-1))
        if layer.bias is not None:
            a = torch.cat([a, a.new(a.size(0), 1).fill_(1)], 1)
        # Average over spatial positions before the outer product.
        a = a/spatial_size
        return a.t() @ (a / batch_size)
    @staticmethod
    def linear(a, layer):
        # a: batch_size * in_dim
        batch_size = a.size(0)
        if layer.bias is not None:
            a = torch.cat([a, a.new(a.size(0), 1).fill_(1)], 1)
        return a.t() @ (a / batch_size)
class ComputeCovG:
    """Computes the uncentered second-moment matrix (g^T g / n) of a layer's
    OUTPUT gradients, undoing batch averaging when requested."""
    @classmethod
    def compute_cov_g(cls, g, layer, batch_averaged=False):
        """
        :param g: gradient
        :param layer: the corresponding layer
        :param batch_averaged: if the gradient is already averaged with the batch size?
        :return:
        """
        # batch_size = g.size(0)
        return cls.__call__(g, layer, batch_averaged)
    @classmethod
    def __call__(cls, g, layer, batch_averaged):
        # Dispatch on layer type; unsupported layers yield None.
        if isinstance(layer, nn.Conv2d):
            cov_g = cls.conv2d(g, layer, batch_averaged)
        elif isinstance(layer, nn.Linear):
            cov_g = cls.linear(g, layer, batch_averaged)
        else:
            cov_g = None
        return cov_g
    @staticmethod
    def conv2d(g, layer, batch_averaged):
        # g: batch_size * n_filters * out_h * out_w
        # n_filters is actually the output dimension (analogous to Linear layer)
        spatial_size = g.size(2) * g.size(3)
        batch_size = g.shape[0]
        # Flatten (batch, h, w) locations into rows of an out_c-column matrix.
        g = g.transpose(1, 2).transpose(2, 3)
        g = try_contiguous(g)
        g = g.view(-1, g.size(-1))
        if batch_averaged:
            # Undo the 1/batch factor baked into the loss gradient.
            g = g * batch_size
        g = g * spatial_size
        cov_g = g.t() @ (g / g.size(0))
        return cov_g
    @staticmethod
    def linear(g, layer, batch_averaged):
        # g: batch_size * out_dim
        batch_size = g.size(0)
        if batch_averaged:
            cov_g = g.t() @ (g * batch_size)
        else:
            cov_g = g.t() @ (g / batch_size)
        return cov_g
| 8,463 | 33.267206 | 105 | py |
GraSP | GraSP-master/utils/data_utils.py | import torch
import torchvision
import torchvision.transforms as transforms
def get_transforms(dataset):
    """Build the train/test torchvision transform pipelines for a dataset.

    :param dataset: one of 'mnist', 'cifar10', 'cifar100', 'cinic-10',
        'tiny_imagenet'
    :return: (transform_train, transform_test)
    """
    # Per-dataset channel statistics used for normalization.
    stats = {
        'cifar10': ((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        'cifar100': ((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
        'cinic-10': ([0.47889522, 0.47227842, 0.43047404],
                     [0.24205776, 0.23828046, 0.25874835]),
        'tiny_imagenet': ([0.48024578664982126, 0.44807218089384643, 0.3975477478649648],
                          [0.2769864069088257, 0.26906448510256, 0.282081906210584]),
    }
    transform_train = None
    transform_test = None
    if dataset == 'mnist':
        # transforms.Normalize((0.1307,), (0.3081,))
        normalize = transforms.Normalize((0.5,), (0.5,))
        transform_train = transforms.Compose([transforms.ToTensor(), normalize])
        transform_test = transforms.Compose([transforms.ToTensor(), normalize])
    elif dataset in ('cifar10', 'cifar100', 'cinic-10'):
        mean, std = stats[dataset]
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
    elif dataset == 'tiny_imagenet':
        # Tiny-ImageNet images are 64x64 instead of 32x32.
        mean, std = stats[dataset]
        transform_train = transforms.Compose([
            transforms.RandomCrop(64, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
    assert transform_test is not None and transform_train is not None, 'Error, no dataset %s' % dataset
    return transform_train, transform_test
def get_dataloader(dataset, train_batch_size, test_batch_size, num_workers=2, root='../data'):
    """Build train/test DataLoaders for one of the supported datasets.

    :param dataset: 'mnist' | 'cifar10' | 'cifar100' | 'cinic-10' | 'tiny_imagenet'
    :param train_batch_size: batch size of the (shuffled) training loader
    :param test_batch_size: batch size of the (unshuffled) test loader
    :param num_workers: loader worker processes (overridden to 16 for tiny_imagenet)
    :param root: directory where datasets live / are downloaded to
    :return: (trainloader, testloader)
    """
    transform_train, transform_test = get_transforms(dataset)
    trainset, testset = None, None
    if dataset == 'mnist':
        trainset = torchvision.datasets.MNIST(root=root, train=True, download=True, transform=transform_train)
        testset = torchvision.datasets.MNIST(root=root, train=False, download=True, transform=transform_test)
    if dataset == 'cifar10':
        trainset = torchvision.datasets.CIFAR10(root=root, train=True, download=True, transform=transform_train)
        testset = torchvision.datasets.CIFAR10(root=root, train=False, download=True, transform=transform_test)
    if dataset == 'cifar100':
        trainset = torchvision.datasets.CIFAR100(root=root, train=True, download=True, transform=transform_train)
        testset = torchvision.datasets.CIFAR100(root=root, train=False, download=True, transform=transform_test)
    if dataset == 'cinic-10':
        # CINIC-10 is distributed as image folders, not a torchvision dataset.
        # cinic_directory = '/path/to/cinic/directory'
        trainset = torchvision.datasets.ImageFolder(root + '/cinic-10/trainval', transform=transform_train)
        testset = torchvision.datasets.ImageFolder(root + '/cinic-10/test', transform=transform_test)
    if dataset == 'tiny_imagenet':
        # Larger dataset: uses more loader processes regardless of the argument.
        num_workers = 16
        trainset = torchvision.datasets.ImageFolder(root + '/tiny_imagenet/train', transform=transform_train)
        testset = torchvision.datasets.ImageFolder(root + '/tiny_imagenet/val', transform=transform_test)
    assert trainset is not None and testset is not None, 'Error, no dataset %s' % dataset
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=True,
                                              num_workers=num_workers)
    testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False,
                                             num_workers=num_workers)
    return trainloader, testloader | 4,728 | 44.038095 | 113 | py |
GraSP | GraSP-master/utils/common_utils.py | import os
import time
import json
import logging
import torch
from pprint import pprint
from easydict import EasyDict as edict
def get_logger(name, logpath, filepath, package_files=[],
               displaying=True, saving=True):
    """Create an INFO-level logger that mirrors output to console and file.

    :param name: logger name; also used in the timestamped log file name
    :param logpath: path prefix for the log file
    :param filepath: path of the calling script; its source is dumped into
        the log for reproducibility
    :param package_files: extra files whose contents are logged as well
        (NOTE(review): mutable default argument — safe only while never
        mutated here)
    :param displaying: also log to the console
    :param saving: write the log file and the source dumps
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # Timestamped file name avoids clobbering logs from previous runs.
    log_path = logpath + name + time.strftime("-%Y%m%d-%H%M%S")
    makedirs(log_path)
    if saving:
        info_file_handler = logging.FileHandler(log_path)
        info_file_handler.setLevel(logging.INFO)
        logger.addHandler(info_file_handler)
        logger.info(filepath)
        # Dump the source of the entry-point script into the log.
        with open(filepath, 'r') as f:
            logger.info(f.read())
        for f in package_files:
            logger.info(f)
            with open(f, 'r') as package_f:
                logger.info(package_f.read())
    if displaying:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        logger.addHandler(console_handler)
    return logger
def makedirs(filename):
    """Create the parent directory of ``filename`` if it does not exist.

    Fix: a filename without any directory component is now a no-op — the
    previous implementation passed the empty dirname to ``os.makedirs('')``,
    which raises FileNotFoundError.
    """
    dirname = os.path.dirname(filename)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)
def str_to_list(src, delimiter, converter):
    """Split ``src`` on ``delimiter`` and apply ``converter`` to each piece.

    :return: list of converted tokens
    """
    return [converter(token) for token in src.split(delimiter)]
def get_config_from_json(json_file):
    """
    Get the config from a json file
    :param json_file:
    :return: config(namespace) or config(dictionary)
    """
    # parse the configurations from the config json file provided
    with open(json_file, 'r') as config_file:
        config_dict = json.load(config_file)
    # Wrap in EasyDict so entries are reachable via attribute access.
    return edict(config_dict), config_dict
def process_config(json_file, runs=None):
    """Process a json file into a config file.
    Where we can access the value using .xxx
    Note: we will need to create a similar directory as the config file.
    """
    config, _ = get_config_from_json(json_file)
    # Mirror the config file's directory layout (drop first and last path parts).
    middle_dirs = json_file.split('/')[1:-1]
    summary_parts = [config.exp_name]
    checkpoint_parts = [config.exp_name]
    if runs is not None:
        # Separate sub-directory per repetition of the experiment.
        summary_parts.append('run_%s' % runs)
        checkpoint_parts.append('run_%s' % runs)
    summary_parts.append("summary/")
    checkpoint_parts.append("checkpoint/")
    config.summary_dir = os.path.join(*(["./runs/pruning"] + middle_dirs + summary_parts))
    config.checkpoint_dir = os.path.join(*(["./runs/pruning"] + middle_dirs + checkpoint_parts))
    print("=> config.summary_dir: %s" % config.summary_dir)
    print("=> config.checkpoint_dir: %s" % config.checkpoint_dir)
    return config
def try_contiguous(x):
    """Return ``x`` itself when already contiguous, else a contiguous copy."""
    return x if x.is_contiguous() else x.contiguous()
def try_cuda(x):
    """Move ``x`` to the GPU when CUDA is available; otherwise return it as-is."""
    if torch.cuda.is_available():
        return x.cuda()
    return x
def tensor_to_list(tensor):
    """Recursively convert a tensor into (nested) Python lists.

    Delegates to ``Tensor.tolist`` (implemented in C), which produces the
    same nested-list structure as the previous hand-rolled recursion and
    additionally handles 0-dim tensors (returned as a plain scalar) instead
    of raising.
    """
    return tensor.tolist()
# =====================================================
# For learning rate schedule
# =====================================================
class StairCaseLRScheduler(object):
    """Multiplies the learning rate by ``decay_rate`` every ``interval``
    iterations once iteration ``start_at`` has been reached."""

    def __init__(self, start_at, interval, decay_rate):
        self.start_at = start_at
        self.interval = interval
        self.decay_rate = decay_rate

    def __call__(self, optimizer, iteration):
        should_decay = (self.start_at >= 0
                        and iteration >= self.start_at
                        and (iteration + 1) % self.interval == 0)
        if should_decay:
            for param_group in optimizer.param_groups:
                param_group['lr'] *= self.decay_rate
                print('[%d]Decay lr to %f' % (iteration, param_group['lr']))

    @staticmethod
    def get_lr(optimizer):
        # Returns the lr of the *last* parameter group (assumes at least one).
        for param_group in optimizer.param_groups:
            lr = param_group['lr']
        return lr
class PresetLRScheduler(object):
    """Using a manually designed learning rate schedule rules.
    """

    def __init__(self, decay_schedule):
        # decay_schedule maps an iteration index to the lr to use from then on.
        self.decay_schedule = decay_schedule
        print('=> Using a preset learning rate schedule:')
        pprint(decay_schedule)
        self.for_once = True

    def __call__(self, optimizer, iteration):
        # Keep the current lr unless the schedule names this iteration.
        for param_group in optimizer.param_groups:
            param_group['lr'] = self.decay_schedule.get(iteration, param_group['lr'])

    @staticmethod
    def get_lr(optimizer):
        # Returns the lr of the *last* parameter group (assumes at least one).
        for param_group in optimizer.param_groups:
            lr = param_group['lr']
        return lr
# =======================================================
# For math computation
# =======================================================
def prod(l):
    # Product of the elements of a list (empty list yields 1).
    # For a non-list input, returns 1 * l, i.e. the value itself for numbers.
    val = 1
    if isinstance(l, list):
        for v in l:
            val *= v
    else:
        val = val * l
    return val | 4,926 | 27.982353 | 76 | py |
State-Frequency-Memory-stock-prediction | State-Frequency-Memory-stock-prediction-master/test/itosfm.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
import theano.tensor as T
from keras import backend as K
from keras import activations, initializations, regularizers
from keras.engine import Layer, InputSpec
from keras.layers.recurrent import Recurrent
class ITOSFM(Recurrent):
    """State-Frequency Memory (SFM) recurrent layer (Keras 1.x API).

    An LSTM-style cell whose memory is decomposed over ``freq_dim``
    frequency components (real part ``S_re`` and imaginary part ``S_im``),
    letting the hidden state capture patterns of multiple frequencies.

    :param output_dim: dimensionality of the per-step output ``p``.
    :param freq_dim: number of frequency components of the memory ``S``.
    :param hidden_dim: dimensionality of the hidden state ``h``.
    """

    def __init__(self, output_dim, freq_dim, hidden_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):
        self.output_dim = output_dim
        self.freq_dim = freq_dim
        self.hidden_dim = hidden_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U
        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(ITOSFM, self).__init__(**kwargs)

    def build(self, input_shape):
        """Create all trainable weights (W_*: input, U_*: recurrent, b_*: bias)."""
        self.input_spec = [InputSpec(shape=input_shape)]
        input_dim = input_shape[2]
        self.input_dim = input_dim
        # States: output p, hidden h, memory S_re / S_im, time counter.
        self.states = [None, None, None, None, None]
        # Input gate.
        self.W_i = self.init((input_dim, self.hidden_dim),
                             name='{}_W_i'.format(self.name))
        self.U_i = self.inner_init((self.hidden_dim, self.hidden_dim),
                                   name='{}_U_i'.format(self.name))
        self.b_i = K.zeros((self.hidden_dim,), name='{}_b_i'.format(self.name))
        # State forget gate.
        self.W_ste = self.init((input_dim, self.hidden_dim),
                               name='{}_W_ste'.format(self.name))
        self.U_ste = self.inner_init((self.hidden_dim, self.hidden_dim),
                                     name='{}_U_ste'.format(self.name))
        self.b_ste = self.forget_bias_init((self.hidden_dim,),
                                           name='{}_b_ste'.format(self.name))
        # Frequency forget gate.
        self.W_fre = self.init((input_dim, self.freq_dim),
                               name='{}_W_fre'.format(self.name))
        self.U_fre = self.inner_init((self.hidden_dim, self.freq_dim),
                                     name='{}_U_fre'.format(self.name))
        self.b_fre = self.forget_bias_init((self.freq_dim,),
                                           name='{}_b_fre'.format(self.name))
        # Candidate cell input.
        self.W_c = self.init((input_dim, self.hidden_dim),
                             name='{}_W_c'.format(self.name))
        self.U_c = self.inner_init((self.hidden_dim, self.hidden_dim),
                                   name='{}_U_c'.format(self.name))
        self.b_c = K.zeros((self.hidden_dim,), name='{}_b_c'.format(self.name))
        # Output gate.
        self.W_o = self.init((input_dim, self.hidden_dim),
                             name='{}_W_o'.format(self.name))
        self.U_o = self.inner_init((self.hidden_dim, self.hidden_dim),
                                   name='{}_U_o'.format(self.name))
        self.b_o = K.zeros((self.hidden_dim,), name='{}_b_o'.format(self.name))
        # Amplitude-to-hidden projection.
        self.U_a = self.inner_init((self.freq_dim, 1),
                                   name='{}_U_a'.format(self.name))
        self.b_a = K.zeros((self.hidden_dim,), name='{}_b_a'.format(self.name))
        # Hidden-to-output projection.
        self.W_p = self.init((self.hidden_dim, self.output_dim),
                             name='{}_W_p'.format(self.name))
        self.b_p = K.zeros((self.output_dim,), name='{}_b_p'.format(self.name))
        self.trainable_weights = [self.W_i, self.U_i, self.b_i,
                                  self.W_c, self.U_c, self.b_c,
                                  self.W_ste, self.U_ste, self.b_ste,
                                  self.W_fre, self.U_fre, self.b_fre,
                                  self.W_o, self.U_o, self.b_o,
                                  self.U_a, self.b_a,
                                  self.W_p, self.b_p]
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights

    def get_initial_states(self, x):
        """Build symbolic all-zero initial states with the right shapes."""
        # Zero tensors derived from x so the batch dimension stays symbolic.
        init_state_h = K.zeros_like(x)
        init_state_h = K.sum(init_state_h, axis = 1)
        reducer_s = K.zeros((self.input_dim, self.hidden_dim))
        reducer_f = K.zeros((self.hidden_dim, self.freq_dim))
        reducer_p = K.zeros((self.hidden_dim, self.output_dim))
        init_state_h = K.dot(init_state_h, reducer_s)
        init_state_p = K.dot(init_state_h, reducer_p)
        init_state = K.zeros_like(init_state_h)
        init_freq = K.dot(init_state_h, reducer_f)
        init_state = K.reshape(init_state, (-1, self.hidden_dim, 1))
        init_freq = K.reshape(init_freq, (-1, 1, self.freq_dim))
        # Outer-product shape (batch, hidden_dim, freq_dim) for the memory.
        init_state_S_re = init_state * init_freq
        init_state_S_im = init_state * init_freq
        init_state_time = K.cast_to_floatx(0.)
        initial_states = [init_state_p, init_state_h, init_state_S_re, init_state_S_im, init_state_time]
        return initial_states

    def step(self, x, states):
        """One recurrence step; returns the output p and the updated states."""
        p_tm1 = states[0]
        h_tm1 = states[1]
        S_re_tm1 = states[2]
        S_im_tm1 = states[3]
        time_tm1 = states[4]
        B_U = states[5]
        B_W = states[6]
        frequency = states[7]
        # Input contributions of all gates.
        x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
        x_ste = K.dot(x * B_W[0], self.W_ste) + self.b_ste
        x_fre = K.dot(x * B_W[0], self.W_fre) + self.b_fre
        x_c = K.dot(x * B_W[0], self.W_c) + self.b_c
        x_o = K.dot(x * B_W[0], self.W_o) + self.b_o
        i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))
        ste = self.inner_activation(x_ste + K.dot(h_tm1 * B_U[0], self.U_ste))
        fre = self.inner_activation(x_fre + K.dot(h_tm1 * B_U[0], self.U_fre))
        # Joint forget gate: outer product of state and frequency gates.
        ste = K.reshape(ste, (-1, self.hidden_dim, 1))
        fre = K.reshape(fre, (-1, 1, self.freq_dim))
        f = ste * fre
        c = i * self.activation(x_c + K.dot(h_tm1 * B_U[0], self.U_c))
        # Fourier basis at the current (incremented) time step.
        time = time_tm1 + 1
        omega = K.cast_to_floatx(2*np.pi)* time * frequency
        re = T.cos(omega)
        im = T.sin(omega)
        c = K.reshape(c, (-1, self.hidden_dim, 1))
        S_re = f * S_re_tm1 + c * re
        S_im = f * S_im_tm1 + c * im
        # Amplitude spectrum of the memory.
        A = K.square(S_re) + K.square(S_im)
        A = K.reshape(A, (-1, self.freq_dim))
        A_a = K.dot(A * B_U[0], self.U_a)
        A_a = K.reshape(A_a, (-1, self.hidden_dim))
        a = self.activation(A_a + self.b_a)
        o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[0], self.U_o))
        h = o * a
        p = K.dot(h, self.W_p) + self.b_p
        return p, [p, h, S_re, S_im, time]

    def get_constants(self, x):
        """Constants handed to step(): dropout masks (disabled) and frequencies."""
        constants = []
        constants.append([K.cast_to_floatx(1.) for _ in range(6)])
        constants.append([K.cast_to_floatx(1.) for _ in range(7)])
        # Evenly spaced frequencies in [0, 1).
        array = np.array([float(ii)/self.freq_dim for ii in range(self.freq_dim)])
        constants.append([K.cast_to_floatx(array)])
        return constants

    def get_config(self):
        """Return the layer configuration for serialization."""
        # Fixed: this previously read ``self.fre_dim`` (a non-existent
        # attribute, raising AttributeError) under the key "fre_dim", which
        # also could not round-trip through __init__ (kwarg is ``freq_dim``).
        config = {"output_dim": self.output_dim,
                  "freq_dim": self.freq_dim,
                  "hidden_dim": self.hidden_dim,
                  "init": self.init.__name__,
                  "inner_init": self.inner_init.__name__,
                  "forget_bias_init": self.forget_bias_init.__name__,
                  "activation": self.activation.__name__,
                  "inner_activation": self.inner_activation.__name__,
                  "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
                  "U_regularizer": self.U_regularizer.get_config() if self.U_regularizer else None,
                  "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
                  "dropout_W": self.dropout_W,
                  "dropout_U": self.dropout_U}
        base_config = super(ITOSFM, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| 8,589 | 42.604061 | 104 | py |
State-Frequency-Memory-stock-prediction | State-Frequency-Memory-stock-prediction-master/test/build.py | import time
import warnings
import numpy as np
import keras
from numpy import newaxis
from keras.layers.core import Dense, Activation, Dropout
from itosfm import ITOSFM
from keras.models import Sequential
warnings.filterwarnings("ignore")
#Load data from data file, and split the data into training, validation and test set
def load_data(filename, step):
    """Load the price matrix, normalize each row to [-1, 1] and split it into
    training (80%), validation (90%) and test sequences.

    :param filename: path to a .npy file of shape (n_series, n_days)
    :param step: prediction horizon in days
    :return: [x_train, y_train, x_val, y_val, x_test, y_test,
              gt_test, max_data, min_data]
    """
    horizon = step
    data = np.load(filename)
    data = data[:, :]
    # Ground truth for testing is kept on the *unnormalized* scale.
    gt_test = data[:, horizon:]
    # Per-row min/max normalization to the range [-1, 1].
    max_data = np.reshape(np.max(data, axis=1), (data.shape[0], 1))
    min_data = np.reshape(np.min(data, axis=1), (data.shape[0], 1))
    data = (2 * data - (max_data + min_data)) / (max_data - min_data)
    # Dataset split: targets are the inputs shifted forward by `horizon`.
    train_split = round(0.8 * data.shape[1])
    val_split = round(0.9 * data.shape[1])
    x_train = data[:, :train_split]
    y_train = data[:, horizon:train_split + horizon]
    x_val = data[:, :val_split]
    y_val = data[:, horizon:val_split + horizon]
    x_test = data[:, :-horizon]
    y_test = data[:, horizon:]
    # Add a trailing feature dimension of size one for the recurrent model.
    def _to_3d(matrix):
        return np.reshape(matrix, (matrix.shape[0], matrix.shape[1], 1))
    x_train, y_train = _to_3d(x_train), _to_3d(y_train)
    x_val, y_val = _to_3d(x_val), _to_3d(y_val)
    x_test, y_test = _to_3d(x_test), _to_3d(y_test)
    return [x_train, y_train, x_val, y_val, x_test, y_test, gt_test, max_data, min_data]
#build the model
def build_model(layers, freq, learning_rate):
model = Sequential()
model.add(ITOSFM(
input_dim=layers[0],
hidden_dim=layers[1],
output_dim=layers[2],
freq_dim = freq,
return_sequences=True))
start = time.time()
rms = keras.optimizers.RMSprop(lr=learning_rate)
model.compile(loss="mse", optimizer="rmsprop")
print "Compilation Time : ", time.time() - start
return model | 2,067 | 31.825397 | 88 | py |
State-Frequency-Memory-stock-prediction | State-Frequency-Memory-stock-prediction-master/train/itosfm.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
import theano.tensor as T
from keras import backend as K
from keras import activations, initializations, regularizers
from keras.engine import Layer, InputSpec
from keras.layers.recurrent import Recurrent
class ITOSFM(Recurrent):
    """State-Frequency Memory (SFM) recurrent layer (Keras 1.x API).

    An LSTM-style cell whose memory is decomposed over ``freq_dim``
    frequency components (real part ``S_re`` and imaginary part ``S_im``),
    letting the hidden state capture patterns of multiple frequencies.

    :param output_dim: dimensionality of the per-step output ``p``.
    :param freq_dim: number of frequency components of the memory ``S``.
    :param hidden_dim: dimensionality of the hidden state ``h``.
    """

    def __init__(self, output_dim, freq_dim, hidden_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):
        self.output_dim = output_dim
        self.freq_dim = freq_dim
        self.hidden_dim = hidden_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U
        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(ITOSFM, self).__init__(**kwargs)

    def build(self, input_shape):
        """Create all trainable weights (W_*: input, U_*: recurrent, b_*: bias)."""
        self.input_spec = [InputSpec(shape=input_shape)]
        input_dim = input_shape[2]
        self.input_dim = input_dim
        # States: output p, hidden h, memory S_re / S_im, time counter.
        self.states = [None, None, None, None, None]
        # Input gate.
        self.W_i = self.init((input_dim, self.hidden_dim),
                             name='{}_W_i'.format(self.name))
        self.U_i = self.inner_init((self.hidden_dim, self.hidden_dim),
                                   name='{}_U_i'.format(self.name))
        self.b_i = K.zeros((self.hidden_dim,), name='{}_b_i'.format(self.name))
        # State forget gate.
        self.W_ste = self.init((input_dim, self.hidden_dim),
                               name='{}_W_ste'.format(self.name))
        self.U_ste = self.inner_init((self.hidden_dim, self.hidden_dim),
                                     name='{}_U_ste'.format(self.name))
        self.b_ste = self.forget_bias_init((self.hidden_dim,),
                                           name='{}_b_ste'.format(self.name))
        # Frequency forget gate.
        self.W_fre = self.init((input_dim, self.freq_dim),
                               name='{}_W_fre'.format(self.name))
        self.U_fre = self.inner_init((self.hidden_dim, self.freq_dim),
                                     name='{}_U_fre'.format(self.name))
        self.b_fre = self.forget_bias_init((self.freq_dim,),
                                           name='{}_b_fre'.format(self.name))
        # Candidate cell input.
        self.W_c = self.init((input_dim, self.hidden_dim),
                             name='{}_W_c'.format(self.name))
        self.U_c = self.inner_init((self.hidden_dim, self.hidden_dim),
                                   name='{}_U_c'.format(self.name))
        self.b_c = K.zeros((self.hidden_dim,), name='{}_b_c'.format(self.name))
        # Output gate.
        self.W_o = self.init((input_dim, self.hidden_dim),
                             name='{}_W_o'.format(self.name))
        self.U_o = self.inner_init((self.hidden_dim, self.hidden_dim),
                                   name='{}_U_o'.format(self.name))
        self.b_o = K.zeros((self.hidden_dim,), name='{}_b_o'.format(self.name))
        # Amplitude-to-hidden projection.
        self.U_a = self.inner_init((self.freq_dim, 1),
                                   name='{}_U_a'.format(self.name))
        self.b_a = K.zeros((self.hidden_dim,), name='{}_b_a'.format(self.name))
        # Hidden-to-output projection.
        self.W_p = self.init((self.hidden_dim, self.output_dim),
                             name='{}_W_p'.format(self.name))
        self.b_p = K.zeros((self.output_dim,), name='{}_b_p'.format(self.name))
        self.trainable_weights = [self.W_i, self.U_i, self.b_i,
                                  self.W_c, self.U_c, self.b_c,
                                  self.W_ste, self.U_ste, self.b_ste,
                                  self.W_fre, self.U_fre, self.b_fre,
                                  self.W_o, self.U_o, self.b_o,
                                  self.U_a, self.b_a,
                                  self.W_p, self.b_p]
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights

    def get_initial_states(self, x):
        """Build symbolic all-zero initial states with the right shapes."""
        # Zero tensors derived from x so the batch dimension stays symbolic.
        init_state_h = K.zeros_like(x)
        init_state_h = K.sum(init_state_h, axis = 1)
        reducer_s = K.zeros((self.input_dim, self.hidden_dim))
        reducer_f = K.zeros((self.hidden_dim, self.freq_dim))
        reducer_p = K.zeros((self.hidden_dim, self.output_dim))
        init_state_h = K.dot(init_state_h, reducer_s)
        init_state_p = K.dot(init_state_h, reducer_p)
        init_state = K.zeros_like(init_state_h)
        init_freq = K.dot(init_state_h, reducer_f)
        init_state = K.reshape(init_state, (-1, self.hidden_dim, 1))
        init_freq = K.reshape(init_freq, (-1, 1, self.freq_dim))
        # Outer-product shape (batch, hidden_dim, freq_dim) for the memory.
        init_state_S_re = init_state * init_freq
        init_state_S_im = init_state * init_freq
        init_state_time = K.cast_to_floatx(0.)
        initial_states = [init_state_p, init_state_h, init_state_S_re, init_state_S_im, init_state_time]
        return initial_states

    def step(self, x, states):
        """One recurrence step; returns the output p and the updated states."""
        p_tm1 = states[0]
        h_tm1 = states[1]
        S_re_tm1 = states[2]
        S_im_tm1 = states[3]
        time_tm1 = states[4]
        B_U = states[5]
        B_W = states[6]
        frequency = states[7]
        # Input contributions of all gates.
        x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
        x_ste = K.dot(x * B_W[0], self.W_ste) + self.b_ste
        x_fre = K.dot(x * B_W[0], self.W_fre) + self.b_fre
        x_c = K.dot(x * B_W[0], self.W_c) + self.b_c
        x_o = K.dot(x * B_W[0], self.W_o) + self.b_o
        i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))
        ste = self.inner_activation(x_ste + K.dot(h_tm1 * B_U[0], self.U_ste))
        fre = self.inner_activation(x_fre + K.dot(h_tm1 * B_U[0], self.U_fre))
        # Joint forget gate: outer product of state and frequency gates.
        ste = K.reshape(ste, (-1, self.hidden_dim, 1))
        fre = K.reshape(fre, (-1, 1, self.freq_dim))
        f = ste * fre
        c = i * self.activation(x_c + K.dot(h_tm1 * B_U[0], self.U_c))
        # Fourier basis at the current (incremented) time step.
        time = time_tm1 + 1
        omega = K.cast_to_floatx(2*np.pi)* time * frequency
        re = T.cos(omega)
        im = T.sin(omega)
        c = K.reshape(c, (-1, self.hidden_dim, 1))
        S_re = f * S_re_tm1 + c * re
        S_im = f * S_im_tm1 + c * im
        # Amplitude spectrum of the memory.
        A = K.square(S_re) + K.square(S_im)
        A = K.reshape(A, (-1, self.freq_dim))
        A_a = K.dot(A * B_U[0], self.U_a)
        A_a = K.reshape(A_a, (-1, self.hidden_dim))
        a = self.activation(A_a + self.b_a)
        o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[0], self.U_o))
        h = o * a
        p = K.dot(h, self.W_p) + self.b_p
        return p, [p, h, S_re, S_im, time]

    def get_constants(self, x):
        """Constants handed to step(): dropout masks (disabled) and frequencies."""
        constants = []
        constants.append([K.cast_to_floatx(1.) for _ in range(6)])
        constants.append([K.cast_to_floatx(1.) for _ in range(7)])
        # Evenly spaced frequencies in [0, 1).
        array = np.array([float(ii)/self.freq_dim for ii in range(self.freq_dim)])
        constants.append([K.cast_to_floatx(array)])
        return constants

    def get_config(self):
        """Return the layer configuration for serialization."""
        # Fixed: this previously read ``self.fre_dim`` (a non-existent
        # attribute, raising AttributeError) under the key "fre_dim", which
        # also could not round-trip through __init__ (kwarg is ``freq_dim``).
        config = {"output_dim": self.output_dim,
                  "freq_dim": self.freq_dim,
                  "hidden_dim": self.hidden_dim,
                  "init": self.init.__name__,
                  "inner_init": self.inner_init.__name__,
                  "forget_bias_init": self.forget_bias_init.__name__,
                  "activation": self.activation.__name__,
                  "inner_activation": self.inner_activation.__name__,
                  "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
                  "U_regularizer": self.U_regularizer.get_config() if self.U_regularizer else None,
                  "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
                  "dropout_W": self.dropout_W,
                  "dropout_U": self.dropout_U}
        base_config = super(ITOSFM, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| 8,589 | 42.604061 | 104 | py |
State-Frequency-Memory-stock-prediction | State-Frequency-Memory-stock-prediction-master/train/build.py | import time
import warnings
import numpy as np
import keras
from numpy import newaxis
from keras.layers.core import Dense, Activation, Dropout
from itosfm import ITOSFM
from keras.models import Sequential
warnings.filterwarnings("ignore")
#Load data from data file, and split the data into training, validation and test set
def load_data(filename, step):
    """Load the price matrix, normalize each row to [-1, 1] and split it into
    training (80%), validation (90%) and test sequences.

    :param filename: path to a .npy file of shape (n_series, n_days)
    :param step: prediction horizon in days
    :return: [x_train, y_train, x_val, y_val, x_test, y_test,
              gt_test, max_data, min_data]
    """
    horizon = step
    data = np.load(filename)
    data = data[:, :]
    # Ground truth for testing is kept on the *unnormalized* scale.
    gt_test = data[:, horizon:]
    # Per-row min/max normalization to the range [-1, 1].
    max_data = np.reshape(np.max(data, axis=1), (data.shape[0], 1))
    min_data = np.reshape(np.min(data, axis=1), (data.shape[0], 1))
    data = (2 * data - (max_data + min_data)) / (max_data - min_data)
    # Dataset split: targets are the inputs shifted forward by `horizon`.
    train_split = round(0.8 * data.shape[1])
    val_split = round(0.9 * data.shape[1])
    x_train = data[:, :train_split]
    y_train = data[:, horizon:train_split + horizon]
    x_val = data[:, :val_split]
    y_val = data[:, horizon:val_split + horizon]
    x_test = data[:, :-horizon]
    y_test = data[:, horizon:]
    # Add a trailing feature dimension of size one for the recurrent model.
    def _to_3d(matrix):
        return np.reshape(matrix, (matrix.shape[0], matrix.shape[1], 1))
    x_train, y_train = _to_3d(x_train), _to_3d(y_train)
    x_val, y_val = _to_3d(x_val), _to_3d(y_val)
    x_test, y_test = _to_3d(x_test), _to_3d(y_test)
    return [x_train, y_train, x_val, y_val, x_test, y_test, gt_test, max_data, min_data]
#build the model
def build_model(layers, freq, learning_rate):
model = Sequential()
model.add(ITOSFM(
input_dim=layers[0],
hidden_dim=layers[1],
output_dim=layers[2],
freq_dim = freq,
return_sequences=True))
start = time.time()
rms = keras.optimizers.RMSprop(lr=learning_rate)
model.compile(loss="mse", optimizer="rmsprop")
print "Compilation Time : ", time.time() - start
return model | 2,067 | 31.825397 | 88 | py |
delora | delora-main/setup.py | import os
from setuptools import setup, find_packages
from setuptools.command.install import install
class CustomInstallCommand(install):
    """Install command that first records the repo root in a .env file."""

    # This is only run for "python setup.py install" (not for "pip install -e .")
    def run(self):
        separator = "--------------------------------"
        print(separator)
        print("Writing environment variables to .env ...")
        # Append DELORA_ROOT (pointing at the current checkout) to .env.
        os.system('echo "export DELORA_ROOT=' + os.getcwd() + '" >> .env')
        print("...done.")
        print(separator)
        install.run(self)
# Package metadata; installing via "python setup.py install" runs
# CustomInstallCommand, which records DELORA_ROOT in a .env file first.
setup(name='DeLORA',
      version='1.0',
      author='Julian Nubert (nubertj@ethz.ch)',
      package_dir={"": "src"},
      install_requires=[
          'numpy',
          'torch',
          'opencv-python',
          'pyyaml',
          'rospkg'
      ],
      scripts=['bin/preprocess_data.py', 'bin/run_rosnode.py', 'bin/run_testing.py', 'bin/run_training.py',
               'bin/visualize_pointcloud_normals.py'],
      license='LICENSE',
      description='Self-supervised Learning of LiDAR Odometry for Robotic Applications',
      cmdclass={'install': CustomInstallCommand, },
      )
| 1,125 | 32.117647 | 107 | py |
delora | delora-main/src/utility/linalg.py | #!/usr/bin/env python3
# Modified from: Modar M. Alfadly, https://discuss.pytorch.org/t/covariance-and-gradient-support/16217
import torch
def cov(point_neighbors, rowvar=True):
    '''
    Estimate a covariance matrix given data.

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element `C_{ij}` is the covariance of
    `x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.

    Args:
        point_neighbors: A 1-D, 2-D or 3-D tensor containing multiple
            variables and observations. Each row of a 2-D input represents
            a variable, and each column a single observation of all those
            variables. A 3-D input is a batch of (3 x k) neighbourhoods in
            which all-zero columns are padding and are excluded.
        rowvar: If `rowvar` is True, then each row represents a
            variable, with observations in the columns. Otherwise, the
            relationship is transposed: each column represents a variable,
            while the rows contain observations.

    Returns:
        Tuple of (covariance matrices with a leading batch dimension,
        number of valid observations per batch entry).
    '''
    if point_neighbors.dim() > 3:
        raise ValueError('m has more than 3 dimensions')
    elif point_neighbors.dim() < 2:
        point_neighbors = point_neighbors.view(1, -1)
    if not rowvar and point_neighbors.size(0) != 1:
        point_neighbors = point_neighbors.t()
    if point_neighbors.dim() == 3:
        # Valid neighbours are those with a non-zero (x, y, z) entry.
        point_neighbors_not_zero_bool = (point_neighbors[:, 0, :] != torch.zeros(1).to(
            point_neighbors.device)) | (point_neighbors[:, 1, :] != torch.zeros(1).to(
            point_neighbors.device)) | (point_neighbors[:, 2, :] != torch.zeros(1).to(
            point_neighbors.device))
        number_neighbours = torch.sum(point_neighbors_not_zero_bool, dim=1)
        factor = torch.ones(1).to(number_neighbours.device) / (number_neighbours - 1)
        # Mean assumes that all 9 neighbours are present, but some are zero
        mean = torch.mean(point_neighbors, dim=2, keepdim=True) * point_neighbors.shape[
            2] / number_neighbours.view(-1, 1, 1)
        difference = point_neighbors - mean
        # Zero out the padding columns so they do not contribute.
        difference.permute(0, 2, 1)[~point_neighbors_not_zero_bool] = torch.zeros(1).to(
            point_neighbors.device)
        difference_transpose = difference.permute(0, 2, 1)
    else:
        # Bug fix: the original assigned a Python float to `factor` here
        # (floats have no .view()) and never defined `number_neighbours`
        # on this path, so any 1-D/2-D input crashed at the return below.
        num_observations = point_neighbors.shape[1]
        number_neighbours = torch.tensor([num_observations],
                                         device=point_neighbors.device)
        factor = torch.ones(1, device=point_neighbors.device) / (num_observations - 1)
        mean = torch.mean(point_neighbors, dim=1, keepdim=True)
        difference = point_neighbors - mean
        difference_transpose = difference.t()
    squared_difference = difference.matmul(difference_transpose)  # .squeeze()
    return factor.view(-1, 1, 1) * squared_difference, number_neighbours
| 2,647 | 45.45614 | 102 | py |
delora | delora-main/src/utility/projection.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import numpy as np
import torch
import numba
class ImageProjectionLayer(torch.nn.Module):
    def __init__(self, config):
        """Layer projecting 3D LiDAR point clouds onto a 2D range image.

        :param config: dict providing at least "device",
            "horizontal_field_of_view" and per-dataset entries (vertical
            field of view, image size) read later in project_to_img.
        """
        super(ImageProjectionLayer, self).__init__()
        self.device = config["device"]
        self.config = config
        self.horizontal_field_of_view = config["horizontal_field_of_view"]
        # The following will be set while doing projections (different sensors etc.):
        # self.height_pixel, self.vertical_field_of_view
def compute_2D_coordinates(self, point_cloud, width_pixel, height_pixel,
vertical_field_of_view):
u = ((torch.atan2(point_cloud[:, 1, :], point_cloud[:, 0, :]) -
self.horizontal_field_of_view[0]) / (
self.horizontal_field_of_view[1] - self.horizontal_field_of_view[0]) * (
width_pixel - 1))
v = ((torch.atan2(point_cloud[:, 2, :], torch.norm(point_cloud[:, :2, :], dim=1)) -
vertical_field_of_view[0]) / (
vertical_field_of_view[1] - vertical_field_of_view[0]) * (
height_pixel - 1))
return u, v
    # Keeps closest point, because u and v are computed previously based on range-sorted point cloud
    @staticmethod
    @numba.njit
    def remove_duplicate_indices(u, v, occupancy_grid, unique_bool, image_to_pointcloud_indices):
        # First-come-first-kept: the caller sorts points by range, so the
        # first point landing in a pixel is the closest one. Marks that
        # point unique, flags the pixel occupied and records the (v, u)
        # pixel of each kept point for the image->pointcloud mapping.
        for index in range(len(u)):
            if not occupancy_grid[v[index], u[index]]:
                occupancy_grid[v[index], u[index]] = True
                unique_bool[index] = True
                image_to_pointcloud_indices[0, index, 0] = v[index]
                image_to_pointcloud_indices[0, index, 1] = u[index]
        return unique_bool, image_to_pointcloud_indices
# input is unordered point cloud scan, e.g. shape [2000,4], with [.,0], [.,1], [.,2], [.,3] being x, y, z, i values
# Gets projected to an image
# returned point cloud only contains unique points per pixel-discretization, i.e. the closest one
def project_to_img(self, point_cloud, dataset):
# Get sensor specific parameters
width_pixel = self.config[dataset]["horizontal_cells"]
height_pixel = self.config[dataset]["vertical_cells"]
vertical_field_of_view = self.config[dataset]["vertical_field_of_view"]
# Add range to point cloud
point_cloud_with_range = torch.zeros(
(point_cloud.shape[0], point_cloud.shape[1] + 1, point_cloud.shape[2]),
device=self.device, requires_grad=False)
point_cloud_with_range[:, :point_cloud.shape[1], :] = point_cloud
distance = torch.norm(point_cloud_with_range[:, :3, :], dim=1)
point_cloud_with_range[:, -1, :] = distance.detach()
del point_cloud # safety such that only correctly sorted one is used
# Only keep closest points
sort_indices = torch.argsort(
point_cloud_with_range[:, point_cloud_with_range.shape[1] - 1, :], dim=1)
# for batch_idx in range(len(point_cloud_with_range)):
point_cloud_with_range = point_cloud_with_range[:, :, sort_indices[0]]
u, v = self.compute_2D_coordinates(point_cloud=point_cloud_with_range,
width_pixel=width_pixel,
height_pixel=height_pixel,
vertical_field_of_view=vertical_field_of_view)
inside_fov_bool = (torch.round(u) <= width_pixel - 1) & (torch.round(u) >= 0) & (
torch.round(v) <= height_pixel - 1) & (torch.round(v) >= 0)
u_filtered = torch.round(u[inside_fov_bool])
v_filtered = torch.round(v[inside_fov_bool])
point_cloud_with_range = point_cloud_with_range[:, :, inside_fov_bool[0]]
occupancy_grid = np.zeros((height_pixel, width_pixel), dtype=bool)
# Find pixel to point cloud mapping (for masking in loss later on)
image_to_pointcloud_indices = np.zeros((1, len(u_filtered), 2), dtype=int)
unique_bool = np.zeros((len(u_filtered)), dtype=bool)
unique_bool, image_to_pointcloud_indices = ImageProjectionLayer.remove_duplicate_indices(
u=(u_filtered.long().to(torch.device("cpu")).numpy()),
v=(v_filtered.long().to(torch.device("cpu")).numpy()),
occupancy_grid=occupancy_grid,
unique_bool=unique_bool,
image_to_pointcloud_indices=image_to_pointcloud_indices)
unique_bool = torch.from_numpy(unique_bool).to(self.device)
image_to_pointcloud_indices = torch.from_numpy(image_to_pointcloud_indices).to(self.device)
u_filtered = u_filtered[unique_bool]
v_filtered = v_filtered[unique_bool]
point_cloud_with_range = point_cloud_with_range[:, :, unique_bool]
image_to_pointcloud_indices = image_to_pointcloud_indices[:, unique_bool]
image_representation = torch.zeros(
(point_cloud_with_range.shape[0], point_cloud_with_range.shape[1], height_pixel,
width_pixel), device=self.device, requires_grad=False)
image_representation[:, :, v_filtered.long(), u_filtered.long()] = \
point_cloud_with_range.to(self.device)
return image_representation, u, v, sort_indices[inside_fov_bool][
unique_bool], image_to_pointcloud_indices
def forward(self, input, dataset):
return self.project_to_img(point_cloud=input, dataset=dataset)
| 5,689 | 50.727273 | 119 | py |
delora | delora-main/src/utility/geometry.py | #!/usr/bin/env python3
# Parts of the code taken from pytorch3d (https://pytorch3d.readthedocs.io/)
import torch
def _angle_from_tan(
        axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool
):
    """
    Extract the first or third Euler angle from the two members of
    the matrix which are positive constant times its sine and cosine.

    Args:
        axis: Axis label "X" or "Y or "Z" for the angle we are finding.
        other_axis: Axis label "X" or "Y or "Z" for the middle axis in the
            convention.
        data: Rotation matrices as tensor of shape (..., 3, 3).
        horizontal: Whether we are looking for the angle for the third axis,
            which means the relevant entries are in the same row of the
            rotation matrix. If not, they are in the same column.
        tait_bryan: Whether the first and third axes in the convention differ.

    Returns:
        Euler Angles in radians for each matrix in data as a tensor
        of shape (...).
    """
    # Indices of the two entries that hold (a multiple of) sin/cos of the angle.
    sin_idx, cos_idx = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis]
    if horizontal:
        # Row lookup instead of column lookup: the two entries swap roles.
        sin_idx, cos_idx = cos_idx, sin_idx
    is_even_permutation = (axis + other_axis) in ("XY", "YZ", "ZX")
    if horizontal == is_even_permutation:
        return torch.atan2(data[..., sin_idx], data[..., cos_idx])
    # Odd case: one of the two entries carries a sign flip.
    if tait_bryan:
        return torch.atan2(-data[..., cos_idx], data[..., sin_idx])
    return torch.atan2(data[..., cos_idx], -data[..., sin_idx])
def _index_from_letter(letter: str):
    """Map an axis label ("X"/"Y"/"Z") to its coordinate index (0/1/2).

    Any other input yields None, matching the original fall-through behavior.
    """
    return {"X": 0, "Y": 1, "Z": 2}.get(letter)
def matrix_to_euler_angles(matrix, convention: str):
    """
    Convert rotations given as rotation matrices to Euler angles in radians.

    Args:
        matrix: Rotation matrices as tensor of shape (..., 3, 3).
        convention: Convention string of three uppercase letters, e.g. "ZYX".

    Returns:
        Euler angles in radians as tensor of shape (..., 3).

    Raises:
        ValueError: If ``convention`` is not three distinct-in-the-middle
            letters out of "X"/"Y"/"Z", or if ``matrix`` is not (..., 3, 3).
    """
    if len(convention) != 3:
        raise ValueError("Convention must have 3 letters.")
    if convention[1] in (convention[0], convention[2]):
        raise ValueError(f"Invalid convention {convention}.")
    for letter in convention:
        if letter not in ("X", "Y", "Z"):
            raise ValueError(f"Invalid letter {letter} in convention string.")
    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        # Bug fix: the message previously read "shape f{matrix.shape}" and
        # printed a stray literal "f" before the shape.
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
    i0 = _index_from_letter(convention[0])
    i2 = _index_from_letter(convention[2])
    # Tait-Bryan conventions (e.g. "ZYX") use distinct first/third axes;
    # proper Euler conventions (e.g. "ZYZ") repeat them.
    tait_bryan = i0 != i2
    if tait_bryan:
        central_angle = torch.asin(
            matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0)
        )
    else:
        central_angle = torch.acos(matrix[..., i0, i0])
    o = (
        _angle_from_tan(
            convention[0], convention[1], matrix[..., i2], False, tait_bryan
        ),
        central_angle,
        _angle_from_tan(
            convention[2], convention[1], matrix[..., i0, :], True, tait_bryan
        ),
    )
    return torch.stack(o, -1)
| 3,009 | 33.204545 | 78 | py |
delora | delora-main/src/models/model_parts.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import torch
import platform
if not "2.7" in platform.python_version():
import kornia
import utility.geometry
import numba
import numpy as np
class CircularPad(torch.nn.Module):
    """Wrap-around (circular) padding module.

    By default pads one column on each side of the last dimension, which makes
    subsequent horizontal convolutions seamless across the image border.
    """

    def __init__(self, padding=(1, 1, 0, 0)):
        super().__init__()
        self.padding = padding

    def forward(self, input):
        """Return ``input`` padded circularly by ``self.padding``."""
        return torch.nn.functional.pad(input, pad=self.padding, mode="circular")
class GeometryHandler:
    """Collection of (mostly static) helpers converting between rotation
    parameterizations and homogeneous 4x4 transformation matrices.

    NOTE(review): the top-level ``kornia.quaternion_to_rotation_matrix`` /
    ``kornia.angle_axis_to_rotation_matrix`` entry points exist only in older
    kornia releases — confirm against the pinned kornia version.
    """

    def __init__(self, config):
        self.device = config["device"]

    @staticmethod
    def quaternion_to_rot_matrix(quaternion):
        # Thin wrapper around kornia's quaternion -> rotation matrix conversion.
        return kornia.quaternion_to_rotation_matrix(quaternion=quaternion)

    @staticmethod
    def angle_axis_to_rot_matrix(euler):
        # Thin wrapper around kornia's angle-axis -> rotation matrix conversion.
        return kornia.angle_axis_to_rotation_matrix(angle_axis=euler)

    @staticmethod
    def get_transformation_matrix_quaternion(translation, quaternion, device):
        """Build batched homogeneous 4x4 transforms from quaternion + translation."""
        rotation_matrix = GeometryHandler.quaternion_to_rot_matrix(quaternion=quaternion)
        transformation_matrix = torch.zeros((rotation_matrix.shape[0], 4, 4), device=device)
        transformation_matrix[:, :3, :3] = rotation_matrix
        # Bottom row stays (0, 0, 0, 1): only the corner element is set here.
        transformation_matrix[:, 3, 3] = 1
        transformation_matrix[:, :3, 3] = translation
        return transformation_matrix

    @staticmethod
    def get_euler_angles_from_matrix(transformation_matrix, device):
        # "ZYX" corresponding to yaw, pitch, roll
        # NOTE(review): the ``device`` parameter is unused here.
        euler_angles = utility.geometry.matrix_to_euler_angles(
            matrix=transformation_matrix[:, :3, :3],
            convention="ZYX")
        return euler_angles

    @staticmethod
    def get_three_trafo_matrices(euler_angles, translation, device):
        """Return a (1, 3, 4, 4) tensor with one single-axis transform each for
        yaw, pitch and roll; the SAME translation is written into all three."""
        transformation_yaw = torch.zeros((1, 1, 4, 4), device=device)
        transformation_yaw[0, 0] = torch.eye(4)
        transformation_pitch = torch.zeros((1, 1, 4, 4), device=device)
        transformation_pitch[0, 0] = torch.eye(4)
        transformation_roll = torch.zeros((1, 1, 4, 4), device=device)
        transformation_roll[0, 0] = torch.eye(4)
        # Write in values
        ## Yaw (rotation about z, angle euler_angles[0, 0])
        transformation_yaw[0, 0, 0, 0] = torch.cos(euler_angles[0, 0])
        transformation_yaw[0, 0, 1, 1] = torch.cos(euler_angles[0, 0])
        transformation_yaw[0, 0, 0, 1] = -torch.sin(euler_angles[0, 0])
        transformation_yaw[0, 0, 1, 0] = torch.sin(euler_angles[0, 0])
        transformation_yaw[0, 0, :3, 3] = translation
        # Pitch (rotation about y, angle euler_angles[0, 1])
        transformation_pitch[0, 0, 0, 0] = torch.cos(euler_angles[0, 1])
        transformation_pitch[0, 0, 2, 2] = torch.cos(euler_angles[0, 1])
        transformation_pitch[0, 0, 0, 2] = torch.sin(euler_angles[0, 1])
        transformation_pitch[0, 0, 2, 0] = -torch.sin(euler_angles[0, 1])
        transformation_pitch[0, 0, :3, 3] = translation
        # Roll (rotation about x, angle euler_angles[0, 2])
        transformation_roll[0, 0, 1, 1] = torch.cos(euler_angles[0, 2])
        transformation_roll[0, 0, 2, 2] = torch.cos(euler_angles[0, 2])
        transformation_roll[0, 0, 1, 2] = -torch.sin(euler_angles[0, 2])
        transformation_roll[0, 0, 2, 1] = torch.sin(euler_angles[0, 2])
        transformation_roll[0, 0, :3, 3] = translation
        return torch.cat((transformation_yaw, transformation_pitch, transformation_roll), dim=1)

    @staticmethod
    def get_transformation_matrix_angle_axis(translation, euler, device):
        """Build batched homogeneous 4x4 transforms from angle-axis + translation."""
        rotation_matrix = GeometryHandler.angle_axis_to_rot_matrix(euler=euler)
        transformation_matrix = torch.zeros((rotation_matrix.shape[0], 4, 4), device=device)
        transformation_matrix[:, :3, :3] = rotation_matrix
        transformation_matrix[:, 3, 3] = 1
        transformation_matrix[:, :3, 3] = translation
        return transformation_matrix
| 3,940 | 41.836957 | 96 | py |
delora | delora-main/src/models/resnet_modified.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
# This model is build on top of the torchvision resnet model.
import torch
# Default "layers": every layer consists of 2 blocks (consisting of 2 convolutions)
# There is only one downsampling done PER LAYER (therefore this differentiation between layers and blocks)
class ResNetModified(torch.nn.Module):
    """ResNet-18-style backbone adapted for panoramic LiDAR images.

    Differences from torchvision's ResNet: circular padding in width before
    every 3x3 convolution/pooling step, strides of (1, 2) so early stages
    downsample only in width, no batch norm (all bn lines commented out), an
    optional tanh activation, and optional dropout.  forward() returns the
    intermediate feature maps of all four stages plus the final fc output.
    """

    def __init__(self, in_channels, num_outputs, use_dropout=False, layers=(2, 2, 2, 2),
                 factor_fewer_resnet_channels=1, activation_fct="relu", groups=1, width_per_group=64,
                 replace_stride_with_dilation=None,
                 norm_layer=None):
        # Bug fix (idiom): `layers` previously defaulted to the mutable list
        # [2, 2, 2, 2]; a tuple default is equivalent here (it is only indexed)
        # and avoids the shared-mutable-default pitfall.
        super(ResNetModified, self).__init__()
        block = BasicBlock
        self.activation_fct = activation_fct
        self.inplanes = int(64 / factor_fewer_resnet_channels)
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Dropout modules degrade to Identity when dropout is disabled, so the
        # forward pass can apply them unconditionally.
        if use_dropout:
            self.dropout_values = torch.nn.Dropout(p=0.2, inplace=False)
            self.dropout_channels = torch.nn.Dropout2d(p=0.2, inplace=False)
        else:
            self.dropout_values = torch.nn.Identity()
            self.dropout_channels = torch.nn.Identity()
        self.conv1 = torch.nn.Conv2d(in_channels=in_channels,
                                     out_channels=self.inplanes,
                                     kernel_size=3, stride=(1, 2), padding=(1, 0), bias=False)
        # self.bn1 = norm_layer(self.inplanes)
        self.relu = torch.nn.ReLU(inplace=True)
        self.tanh = torch.nn.Tanh()
        self.maxpool = torch.nn.MaxPool2d(kernel_size=3, stride=(1, 2), padding=(1, 0))
        # No down-sampling in first block
        self.layer1 = self._make_layer(block=block, planes=int(64 / factor_fewer_resnet_channels),
                                       blocks=layers[0])
        # From here on down-sampling
        self.layer2 = self._make_layer(block=block, planes=int(128 / factor_fewer_resnet_channels),
                                       blocks=layers[1], stride=(1, 2),
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block=block, planes=int(256 / factor_fewer_resnet_channels),
                                       blocks=layers[2], stride=(1, 2),
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block=block, planes=int(512 / factor_fewer_resnet_channels),
                                       blocks=layers[3], stride=(2, 2),
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
        self.fc = torch.nn.Linear(int(512 / factor_fewer_resnet_channels) * block.expansion, num_outputs)
        # Kaiming init for convolutions; constant init for any norm layers.
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity=self.activation_fct)
            elif isinstance(m, (torch.nn.BatchNorm2d, torch.nn.GroupNorm)):
                torch.nn.init.constant_(m.weight, 1)
                torch.nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack ``blocks`` BasicBlocks; only the first one may downsample."""
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection so the residual addition has matching shapes.
            downsample = torch.nn.Sequential(
                conv1x1(in_planes=self.inplanes, out_planes=planes * block.expansion,
                        stride=stride))
        layers = []
        layers.append(block(inplanes=self.inplanes, planes=planes, stride=stride,
                            downsample=downsample, groups=self.groups, base_width=self.base_width,
                            dilation=previous_dilation, activation_fct=self.activation_fct))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(inplanes=self.inplanes, planes=planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation, activation_fct=self.activation_fct))
        return torch.nn.Sequential(*layers)

    def _forward_impl(self, x):
        x = self.dropout_values(x)  # Dropout
        # Circular padding in width before each stride/pool, so convolutions
        # wrap around the panoramic image instead of zero-padding.
        x = torch.nn.functional.pad(input=x, pad=(1, 1, 0, 0), mode='circular')
        x = self.conv1(x)
        # x = self.bn1(x)
        x = self.relu(x) if self.activation_fct == "relu" else self.tanh(x)
        x = torch.nn.functional.pad(input=x, pad=(1, 1, 0, 0), mode='circular')
        x = self.maxpool(x)
        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        # x2 = self.dropout_channels(x2) # Dropout
        x3 = self.layer3(x2)
        x3 = self.dropout_channels(x3)  # Dropout
        x4 = self.layer4(x3)
        output = self.avgpool(x4)
        output = torch.flatten(output, 1)
        output = self.fc(output)
        output = self.dropout_values(output)  # Dropout
        # Expose all intermediate stages; callers use the last entry (fc output).
        features = [x1, x2, x3, x4, output]
        return features

    def forward(self, x):
        return self._forward_impl(x)
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1, padding=0):
    """Bias-free 3x3 convolution (padding defaults to 0; callers pad explicitly)."""
    return torch.nn.Conv2d(
        in_channels=in_planes,
        out_channels=out_planes,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free 1x1 convolution, used for channel projection / downsampling."""
    return torch.nn.Conv2d(
        in_channels=in_planes,
        out_channels=out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(torch.nn.Module):
    """Residual block with two 3x3 convolutions and circular width padding.

    Matches torchvision's BasicBlock structure but without batch norm (all bn
    lines commented out) and with optional tanh instead of ReLU.
    """
    # Output channels = planes * expansion (kept at 1 like torchvision).
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, activation_fct="relu", norm_layer=None):
        super(BasicBlock, self).__init__()
        # if norm_layer is None:
        #     norm_layer = torch.nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(in_planes=inplanes, out_planes=planes, stride=stride, padding=(1, 0))
        # self.bn1 = norm_layer(planes)
        self.activation = torch.nn.ReLU(inplace=True) if activation_fct == "relu" else torch.nn.Tanh()
        self.conv2 = conv3x3(in_planes=planes, out_planes=planes, padding=(1, 0))
        # self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Standard residual forward: conv-act-conv, then add the (possibly
        projected) identity and apply the activation once more."""
        identity = x
        # Width is padded circularly so the 3x3 convs wrap around the panorama.
        out = torch.nn.functional.pad(input=x, pad=(1, 1, 0, 0), mode='circular')
        out = self.conv1(out)
        # out = self.bn1(out)
        out = self.activation(out)
        out = torch.nn.functional.pad(input=out, pad=(1, 1, 0, 0), mode='circular')
        out = self.conv2(out)
        # out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)  # no padding required, since 1x1 conv
        out += identity
        out = self.activation(out)
        return out
| 7,993 | 43.910112 | 120 | py |
delora | delora-main/src/models/model.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
from __future__ import division
import torch
import models.model_parts
import models.resnet_modified
class OdometryModel(torch.nn.Module):
    """LiDAR odometry network regressing the relative pose between two scans.

    forward() takes the two projected scan images and returns a
    (translation, quaternion) tuple; the quaternion is normalized to unit
    length per sample.
    """

    def __init__(self, config):
        super(OdometryModel, self).__init__()
        self.device = config["device"]
        self.config = config
        self.pre_feature_extraction = config["pre_feature_extraction"]
        in_channels = 8
        num_feature_extraction_layers = 5
        print("Used activation function is: " + self.config["activation_fct"] + ".")
        if self.config["activation_fct"] != "relu" and self.config["activation_fct"] != "tanh":
            raise Exception('The specified activation function must be either "relu" or "tanh".')
        # Here are trainable parameters
        if config["pre_feature_extraction"]:
            # Optional shared per-scan feature extractor with circular padding
            # (keeps the panoramic image seamless at the horizontal borders).
            layers = []
            for layer_index in range(num_feature_extraction_layers):
                input_channels = (
                    int(in_channels / 2) if layer_index == 0 else (layer_index) * in_channels)
                layers.append(models.model_parts.CircularPad(padding=(1, 1, 0, 0)))
                layers.append(torch.nn.Conv2d(in_channels=input_channels,
                                              out_channels=(layer_index + 1) * in_channels,
                                              kernel_size=3, padding=(1, 0), bias=False))
                layers.append(self._make_activation(inplace=True))
            self.feature_extractor = torch.nn.Sequential(*layers)
            print("Number of trainable parameters in our feature extractor: " + \
                  f'{sum(p.numel() for p in self.feature_extractor.parameters()):,}')
        self.resnet = models.resnet_modified.ResNetModified(
            in_channels=in_channels if not config[
                "pre_feature_extraction"] else 2 * num_feature_extraction_layers * in_channels,
            num_outputs=config["resnet_outputs"],
            use_dropout=self.config["use_dropout"],
            layers=self.config["layers"],
            factor_fewer_resnet_channels=self.config["factor_fewer_resnet_channels"],
            activation_fct=self.config["activation_fct"])
        print("Number of trainable parameters in our ResNet: " + \
              f'{sum(p.numel() for p in self.resnet.parameters()):,}')
        rot_out_features = 4  # quaternion has 4 components
        if self.config["use_single_mlp_at_output"]:
            # Single shared head regressing rotation and translation jointly.
            self.fully_connected_rot_trans = torch.nn.Sequential(
                self._make_activation(),
                torch.nn.Linear(in_features=config["resnet_outputs"], out_features=512),
                self._make_activation(),
                torch.nn.Linear(in_features=512, out_features=512),
                self._make_activation(),
                torch.nn.Linear(in_features=512, out_features=256),
                self._make_activation(),
                torch.nn.Linear(in_features=256, out_features=64),
                self._make_activation(),
                torch.nn.Linear(in_features=64, out_features=3 + rot_out_features))
            print("Number of trainable parameters in our rot_trans net: " + \
                  f'{sum(p.numel() for p in self.fully_connected_rot_trans.parameters()):,}')
        else:
            # Separate small heads for rotation and translation.
            self.fully_connected_rotation = torch.nn.Sequential(
                self._make_activation(),
                torch.nn.Linear(in_features=config["resnet_outputs"], out_features=100),
                self._make_activation(),
                torch.nn.Linear(in_features=100, out_features=rot_out_features))
            self.fully_connected_translation = torch.nn.Sequential(
                self._make_activation(),
                torch.nn.Linear(in_features=config["resnet_outputs"], out_features=100),
                self._make_activation(),
                torch.nn.Linear(in_features=100, out_features=3))
            print("Number of trainable parameters in our rotation net: " + \
                  f'{sum(p.numel() for p in self.fully_connected_rotation.parameters()):,}')
            print("Number of trainable parameters in our translation net: " + \
                  f'{sum(p.numel() for p in self.fully_connected_translation.parameters()):,}')
        # geometry_handler does not contain any trainable parameters
        self.geometry_handler = models.model_parts.GeometryHandler(config=config)

    def _make_activation(self, inplace=False):
        """Return a fresh activation module per config["activation_fct"].

        Replaces the repeated `ReLU() if ... else Tanh()` expressions; module
        ordering (and thus state_dict keys) is unchanged.
        """
        if self.config["activation_fct"] == "relu":
            return torch.nn.ReLU(inplace=inplace)
        return torch.nn.Tanh()

    def forward_features(self, image_1, image_2):
        """Run both scans through the (optional) extractor and the ResNet."""
        if self.pre_feature_extraction:
            x1 = self.feature_extractor(image_1)
            x2 = self.feature_extractor(image_2)
            x = torch.cat((x1, x2), dim=1)
        else:
            x = torch.cat((image_1, image_2), dim=1)
        features = self.resnet(x)
        return features

    def forward(self, image_1, image_2):
        """Return (translation, unit quaternion) for each sample in the batch."""
        x = self.forward_features(image_1=image_1, image_2=image_2)
        x = x[-1]  # last ResNet output is the flattened fc feature vector
        if self.config["use_single_mlp_at_output"]:
            x = self.fully_connected_rot_trans(x)
            x_rotation = x[:, :4]
            x_translation = x[:, 4:]
        else:
            x_rotation = self.fully_connected_rotation(x)
            x_translation = self.fully_connected_translation(x)
        # Bug fix: normalize each quaternion individually. The previous
        # torch.norm(x_rotation) reduced over the WHOLE batch, which only
        # yielded unit quaternions for batch size 1.
        x_rotation = x_rotation / torch.norm(x_rotation, dim=1, keepdim=True)
        return (x_translation, x_rotation)
| 6,108 | 51.213675 | 97 | py |
delora | delora-main/src/deploy/tester.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import mlflow
import numpy as np
import pickle
import torch
import deploy.deployer
class Tester(deploy.deployer.Deployer):
    """Runs a trained odometry model over whole sequences, accumulating losses
    and per-frame transformations, and logs trajectories/maps via mlflow."""

    def __init__(self, config):
        super(Tester, self).__init__(config=config)
        self.training = False
        # Load checkpoint
        if self.config["checkpoint"]:
            checkpoint = torch.load(self.config["checkpoint"], map_location=self.device)
            ## Model weights
            self.model.load_state_dict(checkpoint["model_state_dict"])
            print('\033[92m' + "Model weights loaded from " + self.config["checkpoint"] + "\033[0;0m")
        else:
            raise Exception("No checkpoint specified.")
        print("Batch size set to 1 for the testing.")
        # Overrides the config batch size: sequences are processed frame pair
        # by frame pair.
        self.batch_size = 1
        # Split up dataset by sequences, since maps are created sequence-wise
        self.computed_transformations_datasets = []
        for dataset in self.config["datasets"]:
            computed_transformations_sequences = [[] for element in self.config[dataset]["data_identifiers"]]
            self.computed_transformations_datasets.append(computed_transformations_sequences)

    def test_dataset(self, dataloader):
        """Iterate over the dataloader, run one step per batch and log a map
        every time the sequence (or dataset) index changes; returns the
        accumulated (not yet averaged) loss sums."""
        epoch_losses = {
            "loss_epoch": 0.0,
            "loss_point_cloud_epoch": 0.0,
            "loss_field_of_view_epoch": 0.0,
            "loss_po2po_epoch": 0.0,
            "loss_po2pl_epoch": 0.0,
            "loss_pl2pl_epoch": 0.0,
            "visible_pixels_epoch": 0.0,
        }
        index_of_dataset = 0
        index_of_sequence = 0
        # Start with the first configured dataset; updated at sequence changes.
        dataset = self.config["datasets"][0]
        for index, preprocessed_dicts in enumerate(dataloader):
            for preprocessed_dict in preprocessed_dicts:
                # Move data to device
                for key in preprocessed_dict:
                    if hasattr(preprocessed_dict[key], 'to'):
                        preprocessed_dict[key] = preprocessed_dict[key].to(self.device)
            if not self.config["inference_only"]:
                epoch_losses, computed_transformation = self.step(
                    preprocessed_dicts=preprocessed_dicts,
                    epoch_losses=epoch_losses,
                    log_images_bool=not index % 10)
            else:
                computed_transformation = self.step(
                    preprocessed_dicts=preprocessed_dicts,
                    epoch_losses=epoch_losses,
                    log_images_bool=not index % 10)
            for preprocessed_dict in preprocessed_dicts:
                # Case: we reached the next sequence or the next dataset --> log
                if preprocessed_dict["index_sequence"] != index_of_sequence or preprocessed_dict[
                    "index_dataset"] != index_of_dataset:
                    self.log_map(index_of_dataset=index_of_dataset,
                                 index_of_sequence=index_of_sequence,
                                 dataset=dataset,
                                 data_identifier=self.config[dataset]["data_identifiers"][index_of_sequence])
                    index_of_sequence = preprocessed_dict["index_sequence"]
                    index_of_dataset = preprocessed_dict["index_dataset"]
                    dataset = preprocessed_dict["dataset"]
                # Collect this frame pair's transform for the current sequence.
                self.computed_transformations_datasets[preprocessed_dict["index_dataset"]][
                    preprocessed_dict["index_sequence"]].append(
                    computed_transformation.detach().cpu().numpy())
            # Progress print (running averages) every 10th batch
            if not index % 10:
                if not self.config["inference_only"]:
                    print("Index: " + str(index) + " / " + str(len(dataloader)) + ", loss: " + str(
                        epoch_losses["loss_epoch"] / (index + 1)) + ", loss_pc: " + str(
                        epoch_losses["loss_point_cloud_epoch"] / (index + 1)) + ", loss_po2po: " + str(
                        epoch_losses["loss_po2po_epoch"] / (index + 1)) + ", loss_po2pl: " + str(
                        epoch_losses["loss_po2pl_epoch"] / (index + 1)) + ", loss_pl2pl: " + str(
                        epoch_losses["loss_pl2pl_epoch"] / (index + 1)) + ", visible pixels: " + str(
                        epoch_losses["visible_pixels_epoch"] / (index + 1)))
                    if self.config["visualize_images"]:
                        self.log_image(epoch=index, string="_" + dataset)
                else:
                    print("Index: " + str(index) + " / " + str(len(dataloader)) + ", dataset: " +
                          dataset + ", sequence: " + str(index_of_sequence))
        # The last sequence never triggers the change-detection above --> log it here.
        self.log_map(index_of_dataset=index_of_dataset,
                     index_of_sequence=index_of_sequence,
                     dataset=dataset,
                     data_identifier=self.config[dataset]["data_identifiers"][
                         index_of_sequence])
        return epoch_losses

    def test(self):
        """Entry point: build the dataloader, find/create the mlflow experiment,
        run test_dataset and log the averaged metrics."""
        dataloader = torch.utils.data.DataLoader(dataset=self.dataset,
                                                 batch_size=self.batch_size,
                                                 shuffle=False,
                                                 collate_fn=Tester.list_collate,
                                                 num_workers=self.config["num_dataloader_workers"],
                                                 pin_memory=True if self.config[
                                                     "device"] == torch.device(
                                                     "cuda") else False)
        # Check whether experiment already exists
        # NOTE(review): MlflowClient.list_experiments() was removed in newer
        # mlflow releases — confirm against the pinned mlflow version.
        client = mlflow.tracking.MlflowClient()
        experiment_list = client.list_experiments()
        id = None
        for experiment in experiment_list:
            if experiment.name == self.config["experiment"]:
                id = experiment.experiment_id
        if id is None:
            print("Creating new MLFlow experiment: " + self.config["experiment"])
            id = mlflow.create_experiment(self.config["experiment"])
        else:
            print("MLFlow experiment " + self.config["experiment"] + " already exists. Starting a new run within it.")
        print("----------------------------------")
        with mlflow.start_run(experiment_id=id, run_name="Test " + self.config["run_name"]):
            self.log_config()
            epoch_losses = self.test_dataset(dataloader=dataloader)
            dataset_index = 0
            if not self.config["inference_only"]:
                # Convert accumulated sums into per-step averages before logging.
                epoch_losses["loss_epoch"] /= self.steps_per_epoch
                epoch_losses["loss_point_cloud_epoch"] /= self.steps_per_epoch
                epoch_losses["loss_po2po_epoch"] /= self.steps_per_epoch
                epoch_losses["loss_po2pl_epoch"] /= self.steps_per_epoch
                epoch_losses["loss_pl2pl_epoch"] /= self.steps_per_epoch
                epoch_losses["visible_pixels_epoch"] /= self.steps_per_epoch
                print("Dataset: " + format(dataset_index, '05d') + ", loss: " + str(
                    epoch_losses["loss_epoch"]) + ", loss_point_cloud: " + str(
                    epoch_losses["loss_point_cloud_epoch"]))
                mlflow.log_metric("loss", float(epoch_losses["loss_epoch"]), step=dataset_index)
                mlflow.log_metric("loss point cloud", float(epoch_losses["loss_point_cloud_epoch"]),
                                  step=dataset_index)
                mlflow.log_metric("loss po2po", float(epoch_losses["loss_po2po_epoch"]),
                                  step=dataset_index)
                mlflow.log_metric("loss po2pl", float(epoch_losses["loss_po2pl_epoch"]),
                                  step=dataset_index)
                mlflow.log_metric("loss pl2pl", float(epoch_losses["loss_pl2pl_epoch"]),
                                  step=dataset_index)
                mlflow.log_metric("visible pixels", float(epoch_losses["visible_pixels_epoch"]),
                                  step=dataset_index)
| 8,233 | 49.515337 | 118 | py |
delora | delora-main/src/deploy/deployer.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import copy
import math
import torch
import mlflow
import numpy as np
import utility.plotting
import utility.poses
import utility.projection
import data.dataset
import models.model
import losses.icp_losses
class Deployer(object):
    def __init__(self, config):
        """Set up dataset, projection layer, model (optionally jit-traced),
        loss modules and the image-logging buffers from the config dict."""
        torch.cuda.empty_cache()
        # Parameters and data
        self.config = config
        self.device = config["device"]
        self.batch_size = config["batch_size"]
        self.dataset = data.dataset.PreprocessedPointCloudDataset(config=config)
        self.steps_per_epoch = int(len(self.dataset) / self.batch_size)
        if self.config["mode"] == "testing":
            # Ground truth poses are only needed for evaluation plots/maps.
            self.ground_truth_dataset = data.dataset.PoseDataset(config=self.config)
        # Projections and model
        self.img_projection = utility.projection.ImageProjectionLayer(config=config)
        if self.config["use_jit"]:
            datasets = self.config["datasets"]
            ## Need to provide example tensor for torch jit
            # Example input shaped after the FIRST configured dataset's image size.
            example_tensor = torch.zeros((1, 4,
                                          self.config[datasets[0]]["vertical_cells"],
                                          self.config[datasets[0]]["horizontal_cells"]),
                                         device=self.device)
            del datasets
            self.model = torch.jit.trace(
                models.model.OdometryModel(config=self.config).to(self.device),
                example_inputs=(example_tensor, example_tensor))
        else:
            self.model = models.model.OdometryModel(config=self.config).to(self.device)
        # Geometry handler
        self.geometry_handler = models.model_parts.GeometryHandler(config=config)
        # Loss and optimizer
        self.lossTransformation = torch.nn.MSELoss()
        self.lossPointCloud = losses.icp_losses.ICPLosses(config=self.config)
        self.lossBCE = torch.nn.BCELoss()
        self.training_bool = False
        # Permanent variables for internal use (filled during stepping, read by
        # log_image / create_images)
        self.log_img_1 = []
        self.log_img_2 = []
        self.log_img_2_transformed = []
        self.log_pointwise_loss = []
        self.log_normals_target = []
        self.log_normals_transformed_source = []
@staticmethod
def list_collate(batch_dicts):
data_dicts = [batch_dict for batch_dict in batch_dicts]
return data_dicts
    def create_images(self, preprocessed_data, losses, plotting):
        """Render the normal/pointwise-loss image buffers used by log_image.

        Projects the target scan with its normals and the transformed source
        scan with its normals and per-point po2pl loss, then stores the
        relevant channel slices on self for later plotting.
        """
        ## Create image of target normals
        image_1_at_normals, _, _, _, _ = self.img_projection(input=torch.cat((
            preprocessed_data["scan_1"],
            preprocessed_data["normal_list_1"]), dim=1), dataset=preprocessed_data["dataset"])
        ## Create image for points where normals exist (in source and target)
        image_2_transformed_and_normals_and_pointwise_loss, _, _, _, _ = \
            self.img_projection(input=torch.cat((plotting["scan_2_transformed"],
                                                 plotting["normals_2_transformed"],
                                                 losses["loss_po2pl_pointwise"]), dim=1),
                                dataset=preprocessed_data["dataset"])
        # Channel layout of the concatenated projection: 0:3 xyz, 3:6 normals,
        # 6:9 pointwise po2pl loss.
        self.log_pointwise_loss = image_2_transformed_and_normals_and_pointwise_loss[:, 6:9]
        self.log_normals_target = image_1_at_normals[:, 3:6]
        self.log_normals_transformed_source = image_2_transformed_and_normals_and_pointwise_loss[
                                              :, 3:6]
    def log_image(self, epoch, string):
        """Plot the cached LiDAR image buffers to /tmp and log the png to mlflow.

        ``string`` is a suffix (e.g. "_<dataset>") distinguishing the artifact.
        """
        utility.plotting.plot_lidar_image(
            input=[self.log_img_1, self.log_img_2, self.log_img_2_transformed,
                   self.log_pointwise_loss, self.log_normals_target,
                   self.log_normals_transformed_source],
            label="target",
            # During training the x-axis is global steps; during testing it is
            # just the batch index.
            iteration=(epoch + 1) * self.steps_per_epoch if self.training_bool else epoch,
            path="/tmp/" + self.config["run_name"] + "_" + format(epoch, '05d') + string + ".png",
            training=self.training_bool)
        mlflow.log_artifact("/tmp/" + self.config["run_name"] + "_" + format(epoch, '05d') + string + ".png")
def log_map(self, index_of_dataset, index_of_sequence, dataset, data_identifier):
gt_translations = self.ground_truth_dataset.return_translations(
index_of_dataset=index_of_dataset, index_of_sequence=index_of_sequence)
gt_poses = self.ground_truth_dataset.return_poses(
index_of_dataset=index_of_dataset, index_of_sequence=index_of_sequence)
# Extract transformations and absolute poses
computed_transformations = self.computed_transformations_datasets[index_of_dataset][
index_of_sequence]
computed_poses = utility.poses.compute_poses(
computed_transformations=computed_transformations)
# Log things to mlflow artifacts
utility.poses.write_poses_to_text_file(
file_name="/tmp/" + self.config["run_name"] + "_poses_text_file_" + dataset + "_" + format(data_identifier,
'02d') + ".txt",
poses=computed_poses)
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_poses_text_file_" + dataset + "_" + format(data_identifier,
'02d') + ".txt")
np.save(
"/tmp/" + self.config["run_name"] + "_transformations_" + dataset + "_" + format(data_identifier,
'02d') + ".npy",
computed_transformations)
np.save(
"/tmp/" + self.config["run_name"] + "_poses_" + dataset + "_" + format(data_identifier, '02d') + ".npy",
computed_poses)
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_transformations_" + dataset + "_" + format(data_identifier,
'02d') + ".npy")
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_poses_" + dataset + "_" + format(data_identifier, '02d') + ".npy")
utility.plotting.plot_map(computed_poses=computed_poses,
path_y="/tmp/" + self.config["run_name"] + "_map_" + dataset + "_" + format(
data_identifier, '02d') + "_y.png",
path_2d="/tmp/" + self.config["run_name"] + "_map_" + dataset + "_" + format(
data_identifier, '02d') + "_2d.png",
path_3d="/tmp/" + self.config["run_name"] + "_map_" + dataset + "_" + format(
data_identifier, '02d') + "_3d.png",
groundtruth=gt_translations,
dataset=dataset)
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_map_" + dataset + "_" + format(data_identifier, '02d') + "_y.png")
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_map_" + dataset + "_" + format(data_identifier, '02d') + "_2d.png")
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_map_" + dataset + "_" + format(data_identifier, '02d') + "_3d.png")
if gt_poses is not None:
utility.plotting.plot_translation_and_rotation(
computed_transformations=np.asarray(computed_transformations),
path="/tmp/" + self.config["run_name"] + "_plot_trans_rot_" + dataset + "_" + format(data_identifier,
'02d') + ".pdf",
groundtruth=gt_poses,
dataset=dataset)
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_plot_trans_rot_" + dataset + "_" + format(data_identifier,
'02d') + ".pdf")
def log_config(self):
    """Record every entry of the configuration dict as an MLFlow run parameter."""
    for parameter_name, parameter_value in self.config.items():
        mlflow.log_param(parameter_name, parameter_value)
def transform_image_to_point_cloud(self, transformation_matrix, image):
    """Rotate the xyz channels of a projected range image and return the
    translated non-zero points as a point cloud.

    Args:
        transformation_matrix: batched homogeneous transform, shape (B, 4, 4).
        image: projected image whose first 3 channels are x/y/z, shape (B, C, H, W).

    Returns:
        Transformed points of shape (1, 3, K) where K is the number of
        non-zero pixels (see NOTE below regarding batches).
    """
    # Flatten the image's xyz channels to (B, 3, H*W) and apply the rotation part.
    point_cloud_transformed = torch.matmul(transformation_matrix[:, :3, :3],
                                           image[:, :3, :, :].view(-1, 3, image.shape[2] *
                                                                   image.shape[3]))
    # Mask of pixels that hold an actual point (any coordinate non-zero);
    # empty image cells are all-zero after projection.
    index_array_not_zero = (point_cloud_transformed[:, 0] != torch.zeros(1).to(
        self.device)) | (point_cloud_transformed[:, 1] != torch.zeros(1).to(self.device)) | (
        point_cloud_transformed[:, 2] != torch.zeros(1).to(
            self.device))
    # NOTE(review): each iteration overwrites point_cloud_transformed, so only
    # the last batch element survives, and for batch sizes > 1 the second
    # iteration would index the already-reduced tensor -- this looks like it is
    # only correct for batch size 1; confirm before using with larger batches.
    for batch_index in range(len(point_cloud_transformed)):
        # Keep only valid points of this batch element, then add the translation.
        point_cloud_transformed_batch = point_cloud_transformed[batch_index, :,
                                                                index_array_not_zero[batch_index]] + \
                                        transformation_matrix[batch_index, :3, 3].view(1, 3, -1)
        point_cloud_transformed = point_cloud_transformed_batch
    return point_cloud_transformed
def rotate_point_cloud_transformation_matrix(self, transformation_matrix, point_cloud):
    """Apply only the rotational part of a batched homogeneous transform.

    Args:
        transformation_matrix: batched 4x4 homogeneous transform, shape (B, 4, 4);
            the translation column is ignored.
        point_cloud: batched points, shape (B, >=3, N); only the first three
            channels (x, y, z) are used.

    Returns:
        Rotated points of shape (B, 3, N).
    """
    rotation_part = transformation_matrix[:, :3, :3]
    xyz_channels = point_cloud[:, :3, :]
    return torch.matmul(rotation_part, xyz_channels)
def transform_point_cloud_transformation_matrix(self, transformation_matrix, point_cloud):
    """Apply a full rigid-body transform (rotation, then translation) to a point cloud.

    Args:
        transformation_matrix: batched 4x4 homogeneous transform, shape (B, 4, 4).
        point_cloud: batched points, shape (B, >=3, N).

    Returns:
        Transformed points of shape (B, 3, N).
    """
    rotated_points = self.rotate_point_cloud_transformation_matrix(
        transformation_matrix=transformation_matrix,
        point_cloud=point_cloud)
    translation_column = transformation_matrix[:, :3, 3].view(-1, 3, 1)
    rotated_points += translation_column
    return rotated_points
def rotate_point_cloud_euler_vector(self, euler, point_cloud):
    """Rotate a point cloud by an axis-angle/euler vector (zero translation).

    Builds a homogeneous transform with identity translation from the given
    euler vector and applies it via transform_point_cloud_transformation_matrix.

    Args:
        euler: rotation vector; moved onto self.device before use.
        point_cloud: batched points, shape (B, >=3, N).

    Returns:
        Rotated points of shape (B, 3, N).
    """
    zero_translation = torch.zeros(3, device=self.device)
    rotation_only_matrix = self.geometry_handler.get_transformation_matrix_angle_axis(
        translation=zero_translation,
        euler=euler.to(self.device),
        device=self.device)
    return self.transform_point_cloud_transformation_matrix(
        transformation_matrix=rotation_only_matrix,
        point_cloud=point_cloud)
def augment_input(self, preprocessed_data):
    """Optionally augment a training sample with a random rotation of scan_2.

    NOTE(review): with config["random_point_cloud_rotations"] enabled this
    method immediately raises, so every line after the raise is currently
    unreachable dead code (the augmentation is effectively disabled pending
    verification for larger batches). With the flag disabled the sample is
    returned unchanged.
    """
    # Random rotation
    if self.config["random_point_cloud_rotations"]:
        raise Exception("Needs to be verified for larger batches")
        # --- unreachable below (kept as the intended augmentation logic) ---
        if self.config["random_rotations_only_yaw"]:
            # Rotation axis fixed to z (yaw only).
            direction = torch.zeros((1, 3), device=self.device)
            direction[0, 2] = 1
        else:
            # Random unit rotation axis.
            direction = (torch.rand((1, 3), device=self.device))
            direction = direction / torch.norm(direction)
        # Uniform magnitude in [-magnitude_random_rot/2, +magnitude_random_rot/2] degrees, in radians.
        magnitude = (torch.rand(1, device=self.device) - 0.5) * (
            self.config["magnitude_random_rot"] / 180.0 * torch.Tensor([math.pi]).to(
                self.device))
        euler = direction * magnitude
        # Rotate the source scan and its normals by the same random rotation.
        preprocessed_data["scan_2"] = self.rotate_point_cloud_euler_vector(
            point_cloud=preprocessed_data["scan_2"], euler=euler)
        preprocessed_data["normal_list_2"] = self.rotate_point_cloud_euler_vector(
            point_cloud=preprocessed_data["normal_list_2"], euler=euler)
    return preprocessed_data
def normalize_input(self, preprocessed_data):
    """Scale both scans of a sample by their combined mean point range (in place).

    The normalization constant is the mean of the two per-scan mean ranges, so
    each scan contributes equally regardless of its number of points. The
    scans are divided in place and the constant is stored under
    "scaling_factor" so predicted translations can be rescaled later.

    Args:
        preprocessed_data: dict with "scan_1" and "scan_2" tensors of shape
            (B, 3, N); mutated in place.

    Returns:
        Tuple of (the same dict, the normalization mean tensor).
    """
    per_point_ranges_1 = torch.norm(preprocessed_data["scan_1"], dim=1)
    per_point_ranges_2 = torch.norm(preprocessed_data["scan_2"], dim=1)
    mean_range_1 = torch.mean(per_point_ranges_1, dim=1, keepdim=True)
    mean_range_2 = torch.mean(per_point_ranges_2, dim=1, keepdim=True)
    # Mean of both per-scan means -> independent of the point count of each scan.
    stacked_means = torch.cat((mean_range_1, mean_range_2), dim=1)
    normalization_mean = torch.mean(stacked_means, dim=1)
    preprocessed_data["scan_1"] /= normalization_mean
    preprocessed_data["scan_2"] /= normalization_mean
    preprocessed_data["scaling_factor"] = normalization_mean
    return preprocessed_data, normalization_mean
def step(self, preprocessed_dicts, epoch_losses=None, log_images_bool=False):
    """Run one forward (and, when training, backward) pass over a batch of scan pairs.

    Each entry of ``preprocessed_dicts`` holds one scan pair ("scan_1",
    "scan_2") with normals and the dataset name. Scans are projected to range
    images, the images are fed through the model as one batch, and the
    predicted relative transformations are used to compute the point-cloud
    losses unless config["inference_only"] is set.

    Args:
        preprocessed_dicts: list of per-sample dicts (one per batch element);
            their scan/normal tensors are filtered in place.
        epoch_losses: running-loss accumulator dict; updated and returned when
            losses are computed. May stay None for inference-only runs.
        log_images_bool: when True, create loss/image visualizations instead of
            recomputing the pixel-visibility statistics projection.

    Returns:
        (epoch_losses, computed_transformations) when losses are computed,
        otherwise only computed_transformations.
    """
    # Project every sample of the batch separately, then stack the images.
    images_model_1 = torch.zeros(self.batch_size, 4,
                                 self.config[preprocessed_dicts[0]["dataset"]]["vertical_cells"],
                                 self.config[preprocessed_dicts[0]["dataset"]]["horizontal_cells"],
                                 device=self.device)
    images_model_2 = torch.zeros_like(images_model_1)
    for index, preprocessed_dict in enumerate(preprocessed_dicts):
        if self.training_bool:
            # Augmentation (currently guarded, see augment_input).
            preprocessed_dict = self.augment_input(preprocessed_data=preprocessed_dict)
        if self.config["normalization_scaling"]:
            # Scales the scans in place and stores "scaling_factor" in the dict.
            # NOTE(review): the returned tuple is not used further; the in-place
            # mutation of preprocessed_dict is what matters here.
            preprocessed_data, scaling_factor = self.normalize_input(preprocessed_data=preprocessed_dict)
        # Training / Testing
        image_1, _, _, point_cloud_indices_1, _ = self.img_projection(
            input=preprocessed_dict["scan_1"], dataset=preprocessed_dict["dataset"])
        image_2, _, _, point_cloud_indices_2, image_to_pc_indices_2 = self.img_projection(
            input=preprocessed_dict["scan_2"], dataset=preprocessed_dict["dataset"])
        ## Only keep points that were projected to image
        preprocessed_dict["scan_1"] = preprocessed_dict["scan_1"][:, :, point_cloud_indices_1]
        preprocessed_dict["normal_list_1"] = preprocessed_dict["normal_list_1"][:, :, point_cloud_indices_1]
        preprocessed_dict["scan_2"] = preprocessed_dict["scan_2"][:, :, point_cloud_indices_2]
        preprocessed_dict["normal_list_2"] = preprocessed_dict["normal_list_2"][:, :, point_cloud_indices_2]
        image_model_1 = image_1[0]
        image_model_2 = image_2[0]
        # Write projected image to batch
        images_model_1[index] = image_model_1
        images_model_2[index] = image_model_2
    # NOTE(review): the three assignments below refer to the images of the
    # *last* batch element only (loop variables after the loop).
    images_to_pcs_indices_2 = [image_to_pc_indices_2]
    self.log_img_1 = image_1[:, :3]
    self.log_img_2 = image_2[:, :3]
    # Feed into model as batch
    (translations, rotation_representation) = self.model(image_1=images_model_1,
                                                         image_2=images_model_2)
    # Convert (translation, quaternion) predictions to 4x4 homogeneous matrices.
    computed_transformations = self.geometry_handler.get_transformation_matrix_quaternion(
        translation=translations, quaternion=rotation_representation, device=self.device)
    # Following part only done when loss needs to be computed
    if not self.config["inference_only"]:
        # Iterate through all transformations and compute loss
        losses = {
            "loss_pc": torch.zeros(1, device=self.device),
            "loss_po2po": torch.zeros(1, device=self.device),
            "loss_po2pl": torch.zeros(1, device=self.device),
            "loss_po2pl_pointwise": torch.zeros(1, device=self.device),
            "loss_pl2pl": torch.zeros(1, device=self.device),
        }
        for batch_index, computed_transformation in enumerate(computed_transformations):
            computed_transformation = torch.unsqueeze(computed_transformation, 0)
            preprocessed_dict = preprocessed_dicts[batch_index]
            # Move the source scan (and rotate its normals) by the prediction.
            scan_2_transformed = self.transform_point_cloud_transformation_matrix(
                transformation_matrix=computed_transformation,
                point_cloud=preprocessed_dict["scan_2"])
            normal_list_2_transformed = self.rotate_point_cloud_transformation_matrix(
                transformation_matrix=computed_transformation,
                point_cloud=preprocessed_dict["normal_list_2"])
            ## Losses
            losses_trafo, plotting_step = self.lossPointCloud(
                source_point_cloud_transformed=scan_2_transformed,
                source_normal_list_transformed=normal_list_2_transformed,
                target_point_cloud=preprocessed_dict["scan_1"],
                target_normal_list=preprocessed_dict["normal_list_1"],
                compute_pointwise_loss_bool=log_images_bool)
            losses["loss_po2po"] += losses_trafo["loss_po2po"]
            losses["loss_po2pl"] += self.config["lambda_po2pl"] * losses_trafo["loss_po2pl"]
            losses["loss_pl2pl"] += losses_trafo["loss_pl2pl"]
            losses["loss_pc"] += (losses["loss_po2po"] + losses["loss_po2pl"] + losses["loss_pl2pl"])
            ## Image of transformed source point cloud
            ## Sparser if loss is only taken on image points, only for first index
            if batch_index == 0:
                image_2_transformed, u_pixel, v_pixel, _, _ = \
                    self.img_projection(input=scan_2_transformed,
                                        dataset=preprocessed_dict["dataset"])
                self.log_img_2_transformed = image_2_transformed
                losses["loss_po2pl_pointwise"] = losses_trafo["loss_po2pl_pointwise"]
                plotting = plotting_step
            if not self.config["unsupervised_at_start"]:
                # Supervised warm-up: fit the identity transformation.
                # NOTE(review): loss_transformation is overwritten each batch
                # iteration -- presumably intended for batch size 1; verify.
                target_transformation = torch.eye(4, device=self.device).view(1, 4, 4)
                loss_transformation = self.lossTransformation(input=computed_transformation,
                                                              target=target_transformation)
        # Average the accumulated losses over the batch.
        losses["loss_pc"] /= self.batch_size
        losses["loss_po2po"] /= self.batch_size
        losses["loss_po2pl"] /= self.batch_size
        losses["loss_pl2pl"] /= self.batch_size
        if not self.config["unsupervised_at_start"]:
            loss_transformation /= self.batch_size
            loss = loss_transformation  # Overwrite loss for identity fitting
        else:
            loss = losses["loss_pc"]
        if self.training_bool:
            loss.backward()
            self.optimizer.step()
        if self.config["normalization_scaling"]:
            # Undo the range normalization on the predicted translations.
            for index, preprocessed_dict in enumerate(preprocessed_dicts):
                computed_transformations[index, :3, 3] *= preprocessed_dict["scaling_factor"]
        # Visualization
        if not log_images_bool:
            # Recompute pixel coordinates (of the last batch element's
            # transformed scan) for the visibility statistic below.
            _, u_pixel, v_pixel, _, _ = \
                self.img_projection(input=scan_2_transformed,
                                    dataset=preprocessed_dicts[0]["dataset"])
        elif not self.config["po2po_alone"]:
            self.create_images(preprocessed_data=preprocessed_dicts[0],
                               losses=losses,
                               plotting=plotting)
        # Accumulate scalar losses for epoch-level logging.
        epoch_losses["loss_epoch"] += loss.detach().cpu().numpy()
        epoch_losses["loss_point_cloud_epoch"] += (
            losses["loss_pc"].detach().cpu().numpy())
        epoch_losses["loss_po2po_epoch"] += losses["loss_po2po"].detach().cpu().numpy()
        epoch_losses["loss_po2pl_epoch"] += losses["loss_po2pl"].detach().cpu().numpy()
        epoch_losses["loss_pl2pl_epoch"] += losses["loss_pl2pl"].detach().cpu().numpy()
        # Count projected pixels that land inside the vertical image bounds.
        epoch_losses["visible_pixels_epoch"] += np.sum(
            ((torch.round(v_pixel.detach()) < self.config[preprocessed_dicts[0]["dataset"]][
                "vertical_cells"]) & (v_pixel.detach() > torch.zeros(1).to(self.device))).cpu().numpy())
        return epoch_losses, computed_transformations
    else:
        if self.config["normalization_scaling"]:
            # Undo the range normalization on the predicted translations.
            for index, preprocessed_dict in enumerate(preprocessed_dicts):
                computed_transformations[index, :3, 3] *= preprocessed_dict["scaling_factor"]
        return computed_transformations
| 20,390 | 53.231383 | 119 | py |
delora | delora-main/src/deploy/trainer.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import time
import mlflow
import mlflow.pytorch
import pickle
import torch
import numpy as np
import qqdm
import deploy.deployer
class Trainer(deploy.deployer.Deployer):
    """Training driver for the odometry model.

    Extends the base Deployer with an Adam optimizer, optional checkpoint
    restoring, a per-epoch training loop and MLFlow metric/artifact logging.
    """

    def __init__(self, config):
        """Set up the optimizer, optionally restore a checkpoint, sanitize the config.

        Args:
            config: configuration dict (learning rate, checkpoint path, flags);
                mutated here ("unsupervised_at_start", "inference_only").
        """
        super(Trainer, self).__init__(config=config)
        # Marks this deployer instance as training (enables augmentation/backprop in step()).
        self.training_bool = True
        self.optimizer = torch.optim.Adam(params=self.model.parameters(),
                                          lr=self.config["learning_rate"])
        # Load checkpoint
        if self.config["checkpoint"]:
            checkpoint = torch.load(self.config["checkpoint"], map_location=self.device)
            ## Model weights
            self.model.load_state_dict(checkpoint["model_state_dict"])
            print('\033[92m' + "Model weights loaded from " + self.config["checkpoint"] + "\033[0;0m")
            ## Optimizer parameters
            self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
            print('\033[92m' + "Optimizer parameters loaded from " + self.config["checkpoint"] + "\033[0;0m")
            ## Directly train unsupervised in that case, since model is pretrained
            self.config["unsupervised_at_start"] = True
        if self.config["inference_only"]:
            print(
                "Config error: Inference only does not make sense during training. Changing to inference_only=False.")
            self.config["inference_only"] = False

    def train_epoch(self, epoch, dataloader):
        """Run one pass over the dataloader, accumulating (unnormalized) loss sums.

        Args:
            epoch: current epoch index (used for the progress bar and the
                initial-state plot).
            dataloader: yields lists of per-sample dicts (list_collate).

        Returns:
            Dict of loss sums over the epoch; the caller divides by
            steps_per_epoch to obtain means.
        """
        epoch_losses = {
            "loss_epoch": 0.0,
            "loss_point_cloud_epoch": 0.0,
            "loss_field_of_view_epoch": 0.0,
            "loss_po2po_epoch": 0.0,
            "loss_po2pl_epoch": 0.0,
            "loss_pl2pl_epoch": 0.0,
            "visible_pixels_epoch": 0.0,
            "loss_yaw_pitch_roll_epoch": np.zeros(3),
            "loss_true_trafo_epoch": 0.0,
        }
        counter = 0
        qqdm_dataloader = qqdm.qqdm(dataloader, desc=qqdm.format_str('blue', 'Epoch ' + str(epoch)))
        for preprocessed_dicts in qqdm_dataloader:
            # Load corresponnding preprocessed kd_tree
            for preprocessed_dict in preprocessed_dicts:
                # Move data to devices:
                for key in preprocessed_dict:
                    if hasattr(preprocessed_dict[key], 'to'):
                        preprocessed_dict[key] = preprocessed_dict[key].to(self.device)
            self.optimizer.zero_grad()
            # Forward + backward pass; log images only on the first and the
            # last step of the epoch.
            epoch_losses, _ = (
                self.step(
                    preprocessed_dicts=preprocessed_dicts,
                    epoch_losses=epoch_losses,
                    log_images_bool=counter == self.steps_per_epoch - 1 or counter == 0))
            # Plotting and logging --> only first one in batch
            preprocessed_data = preprocessed_dicts[0]
            # Plot at very beginning to see initial state of the network
            if epoch == 0 and counter == 0 and not self.config["po2po_alone"]:
                self.log_image(epoch=epoch, string="_start" + "_" + preprocessed_data["dataset"])
            # Progress-bar info shows running means up to the current step.
            qqdm_dataloader.set_infos({'loss': f'{float(epoch_losses["loss_epoch"] / (counter + 1)):.6f}',
                                       'loss_point_cloud': f'{float(epoch_losses["loss_point_cloud_epoch"] / (counter + 1)):.6f}',
                                       'loss_po2po': f'{float(epoch_losses["loss_po2po_epoch"] / (counter + 1)):.6f}',
                                       'loss_po2pl': f'{float(epoch_losses["loss_po2pl_epoch"] / (counter + 1)):.6f}',
                                       'loss_pl2pl': f'{float(epoch_losses["loss_pl2pl_epoch"] / (counter + 1)):.6f}',
                                       'visible_pixels': f'{float(epoch_losses["visible_pixels_epoch"] / (counter + 1)):.6f}'})
            counter += 1
        return epoch_losses

    def train(self):
        """Full training loop: build the dataloader, set up MLFlow, train and log.

        Runs up to 10000 epochs. Checkpoints are written to /tmp and logged as
        MLFlow artifacts: the "latest" checkpoint every epoch (overwritten) and
        a permanent copy every 5 epochs.
        """
        dataloader = torch.utils.data.DataLoader(dataset=self.dataset,
                                                 batch_size=self.batch_size,
                                                 shuffle=True,
                                                 collate_fn=Trainer.list_collate,
                                                 num_workers=self.config["num_dataloader_workers"],
                                                 pin_memory=True if self.config[
                                                     "device"] == torch.device("cuda") else False)
        # Check whether experiment already exists
        # NOTE(review): MlflowClient.list_experiments() is deprecated/removed in
        # newer MLFlow releases (search_experiments() replaces it) -- pin the
        # mlflow version or migrate when upgrading.
        client = mlflow.tracking.MlflowClient()
        experiment_list = client.list_experiments()
        id = None
        for experiment in experiment_list:
            if experiment.name == self.config["experiment"]:
                id = experiment.experiment_id
        if id is None:
            print("Creating new MLFlow experiment: " + self.config["experiment"])
            id = mlflow.create_experiment(self.config["experiment"])
        else:
            print("MLFlow experiment " + self.config["experiment"] + " already exists. Starting a new run within it.")
        print("----------------------------------")
        with mlflow.start_run(experiment_id=id, run_name="Training: " + self.config["training_run_name"]):
            self.log_config()
            for epoch in range(10000):
                # Train for 1 epoch
                epoch_losses = self.train_epoch(epoch=epoch, dataloader=dataloader)
                # Compute metrics
                epoch_losses["loss_epoch"] /= self.steps_per_epoch
                epoch_losses["loss_point_cloud_epoch"] /= self.steps_per_epoch
                epoch_losses["loss_po2po_epoch"] /= self.steps_per_epoch
                epoch_losses["loss_po2pl_epoch"] /= self.steps_per_epoch
                epoch_losses["loss_pl2pl_epoch"] /= self.steps_per_epoch
                epoch_losses["visible_pixels_epoch"] /= self.steps_per_epoch
                # Print update
                print("--------------------------")
                print("Epoch Summary: " + format(epoch, '05d') + ", loss: " + str(
                    epoch_losses["loss_epoch"]) + ", unsupervised: " + str(
                    self.config["unsupervised_at_start"]))
                # Logging
                print("Logging metrics and artifacts...")
                # Log metrics
                mlflow.log_metric("loss", float(epoch_losses["loss_epoch"]), step=epoch)
                mlflow.log_metric("loss point cloud", float(epoch_losses["loss_point_cloud_epoch"]),
                                  step=epoch)
                mlflow.log_metric("loss po2po", float(epoch_losses["loss_po2po_epoch"]),
                                  step=epoch)
                mlflow.log_metric("loss po2pl", float(epoch_losses["loss_po2pl_epoch"]),
                                  step=epoch)
                mlflow.log_metric("loss pl2pl", float(epoch_losses["loss_pl2pl_epoch"]),
                                  step=epoch)
                mlflow.log_metric("visible pixels", float(epoch_losses["visible_pixels_epoch"]),
                                  step=epoch)
                # Save latest checkpoint, and create checkpoint backup all 5 epochs
                ## Every epoch --> will always be overwritten by latest version
                torch.save({
                    'epoch': epoch,
                    'model_state_dict': self.model.state_dict(),
                    'optimizer_state_dict': self.optimizer.state_dict(),
                    'loss': float(epoch_losses["loss_epoch"]),
                    'parameters': self.config
                }, "/tmp/" + self.config["training_run_name"] + "_latest_checkpoint.pth")
                mlflow.log_artifact("/tmp/" + self.config["training_run_name"] + "_latest_checkpoint.pth")
                ## All 5 epochs --> will be logged permanently in MLFlow
                if not epoch % 5:
                    torch.save({
                        'epoch': epoch,
                        'model_state_dict': self.model.state_dict(),
                        'optimizer_state_dict': self.optimizer.state_dict(),
                        'loss': float(epoch_losses["loss_epoch"]),
                        'parameters': self.config
                    }, "/tmp/" + self.config["training_run_name"] + "_checkpoint_epoch_" + str(epoch) + ".pth")
                    mlflow.log_artifact(
                        "/tmp/" + self.config["training_run_name"] + "_checkpoint_epoch_" + str(epoch) + ".pth")
                # Save latest pickled full model
                if not self.config["use_jit"]:
                    mlflow.pytorch.log_model(self.model, "latest_model_pickled")
                if self.config["visualize_images"] and not self.config["po2po_alone"]:
                    self.log_image(epoch=epoch, string="_image")
                print("...done.")
                # Once the supervised identity-fitting warm-up has converged,
                # switch to the unsupervised point-cloud losses.
                if not self.config["unsupervised_at_start"] and epoch_losses["loss_epoch"] < 1e-2:
                    self.config["unsupervised_at_start"] = True
                    print("Loss has decreased sufficiently. Switching to unsupervised mode.")
| 9,336 | 48.930481 | 130 | py |
delora | delora-main/src/ros_utils/rosbag_pcl_extractor.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import numpy as np
import rosbag
import yaml
import sensor_msgs.msg
import sensor_msgs.point_cloud2
import torch
class RosbagToPCLExtractor:
    """Reads LiDAR scans from a rosbag topic and feeds them to a preprocessing callback.

    The bag is opened and validated at construction time; preprocess_rosbag()
    then iterates over all messages of the topic, converts each
    sensor_msgs/PointCloud2 to a torch tensor of shape (1, 4, N), filters
    invalid points and calls the supplied preprocessing function.
    """

    def __init__(self, rosbag_file, topic, config, preprocessing_fct):
        """Open the bag, read its metadata and verify the topic exists.

        Args:
            rosbag_file: path to the rosbag file to read.
            topic: point-cloud topic name expected inside the bag.
            config: configuration dict (accepted for interface parity; not read here).
            preprocessing_fct: callable invoked as preprocessing_fct(scan=..., index=...)
                for every extracted scan.

        Raises:
            Exception: if the topic is not present in the given rosbag.
        """
        self.rosbag_file = rosbag_file
        self.topic = topic
        print("Loading rosbag " + self.rosbag_file + "...")
        self.bag = rosbag.Bag(self.rosbag_file)
        print("...done.")
        # Print information and check rosbag -----
        self.num_samples = 0
        # safe_load: the bag info string is plain data. yaml.load without an
        # explicit Loader is deprecated since PyYAML 5.1 and raises a TypeError
        # on PyYAML >= 6.
        info_dict = yaml.safe_load(self.bag._get_yaml_info())
        print("Duration of the bag: " + str(info_dict["duration"]))
        for topic_messages in info_dict["topics"]:
            if topic_messages["topic"] == self.topic:
                self.num_samples = topic_messages["messages"]
        if self.num_samples > 0:
            print("Number of messages for topic " + self.topic + ": " + str(self.num_samples))
        else:
            raise Exception("Topic " + self.topic + " is not present in the given rosbag (" + self.rosbag_file + ").")
        # -----------------------------------------
        self.preprocessing_fct = preprocessing_fct

    def ros_to_pcl(self, ros_cloud):
        """Convert a sensor_msgs/PointCloud2 message to an (N, 4) numpy array.

        The four columns are x, y, z and the fourth point field (intensity);
        NaN points are skipped.
        """
        points_list = []
        for data in sensor_msgs.point_cloud2.read_points(ros_cloud, skip_nans=True):
            points_list.append([data[0], data[1], data[2], data[3]])
        points_list = np.asarray(points_list)
        return points_list

    def preprocess_rosbag(self):
        """Iterate over all messages of the topic, filter the points and run the callback.

        Points at the exact origin and points closer than 0.3 m are dropped
        (sensor artifacts) before the scan is handed to preprocessing_fct as a
        (1, 4, N) CPU tensor. The bag is closed afterwards.
        """
        for index, (topic, msg, t) in enumerate(self.bag.read_messages(topics=[self.topic])):
            if not index % 10:
                print("Preprocessing scan " + str(
                    index) + "/" + str(self.num_samples) + " from the point cloud " + self.rosbag_file + ".")
            scan = self.ros_to_pcl(msg)
            # filter out noisy points
            scan = scan[(scan[:, 0] != 0.0) & (scan[:, 1] != 0.0) & (scan[:, 2] != 0.0)]
            scan_range = np.linalg.norm(scan[:, :3], axis=1)
            scan = scan[scan_range > 0.3]
            scan = torch.from_numpy(scan).to(torch.device("cpu")).transpose(0, 1).unsqueeze(0)
            # Apply preprocessing
            self.preprocessing_fct(scan=scan, index=index)
        self.bag.close()
| 2,500 | 38.698413 | 118 | py |
delora | delora-main/src/ros_utils/odometry_publisher.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
from __future__ import division
import time
import cv2
import geometry_msgs.msg
import nav_msgs.msg
import numpy as np
import ros_numpy
import rospy
import sensor_msgs.msg
import tf2_ros
import tf.transformations
import torch
import models.model
import models.model_parts
import utility.projection
import ros_utils.odometry_integrator
# Assumes the dataset in config["datasets"][0]
class OdometryPublisher:
    """ROS node that predicts and publishes frame-to-frame LiDAR odometry.

    Subscribes to a PointCloud2 topic, projects consecutive scans to range
    images, runs the trained odometry model on each pair and publishes the
    predicted relative transform as nav_msgs/Odometry (optionally integrating
    it into a TF tree). Projection parameters come from config["datasets"][0].
    """

    def __init__(self, config):
        # Variables
        self.config = config
        self.device = config["device"]
        # ROS Topics and Frames
        self.lidar_topic = config["lidar_topic"]
        self.lidar_frame = config["lidar_frame"]
        # Model
        self.img_projection = utility.projection.ImageProjectionLayer(config=config)
        if self.config["use_jit"]:
            # Trace the model with dummy inputs.
            # NOTE(review): the example shape (1, 4, 16, 720) must match the
            # projection size of the configured dataset -- confirm for new sensors.
            self.model = torch.jit.trace(
                models.model.OdometryModel(config=self.config).to(self.device),
                example_inputs=(torch.zeros((1, 4, 16, 720), device=self.device),
                                torch.zeros((1, 4, 16, 720), device=self.device)))
        else:
            self.model = models.model.OdometryModel(config=self.config).to(self.device)
        self.model.load_state_dict(torch.load(self.config["checkpoint"], map_location=self.device)["model_state_dict"])
        # ROS publisher and subscriber
        ## Publisher
        self.odometry_publisher = rospy.Publisher("/delora/odometry", nav_msgs.msg.Odometry, queue_size=10)
        ## Node
        rospy.init_node('LiDAR_odometry_publisher', anonymous=True)
        self.rate = rospy.Rate(10)
        # TF Integrator
        if self.config["integrate_odometry"]:
            self.odometry_integrator = ros_utils.odometry_integrator.OdometryIntegrator(config=self.config)
        ## Variables
        self.scaling_factor = 1.0  # range-normalization factor of the latest pair
        self.point_cloud_t_1 = None  # previous scan (rescaled back to metric units)
        self.point_cloud_t = None  # current scan
        self.image_t_1 = None
        self.image_t = None
        self.odometry_ros = nav_msgs.msg.Odometry()
        self.translation_ros = geometry_msgs.msg.Point()
        self.quaternion_ros = geometry_msgs.msg.Quaternion()
        # Geometry handler
        self.geometry_handler = models.model_parts.GeometryHandler(config=config)

    def visualize_image(self, input):
        """Debug helper: render the range of a projected image with OpenCV (blocking)."""
        print("Visualizing!")
        image = input
        image = np.asarray((image[0]))[:, ::-1]
        range_image = np.sqrt(image[0] ** 2 + image[1] ** 2 + image[2] ** 2)
        scaled_range_image = (255.0 / np.max(range_image) * range_image).astype(np.uint8)
        color_image = cv2.applyColorMap(scaled_range_image, cv2.COLORMAP_HSV)
        color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
        # Keep empty image cells black instead of colormapped.
        color_image[range_image == 0] = 0
        cv2.imshow("test", color_image)
        cv2.waitKey()

    def filter_scans(self, scan, inverse=False):
        """Drop all-zero points and points closer than 0.3 m from a (1, 3, N) scan.

        NOTE(review): the ``inverse`` flag is accepted but never used here.
        """
        scan = np.transpose(scan, (2, 1, 0))
        # Filter out close, noisy points
        scan = scan[(scan[:, 0, 0] != 0.0) & (scan[:, 1, 0] != 0.0) & (scan[:, 2, 0] != 0.0)]
        scan_range = np.linalg.norm(scan[:, :3, 0], axis=1)
        scan = scan[scan_range > 0.3]
        scan = np.transpose(scan, (2, 1, 0))
        return scan

    def normalize_input(self, input_1, input_2):
        """Divide both scans in place by their joint mean point range.

        Returns the mean range (as a numpy scalar) so the predicted translation
        can be rescaled back to metric units.
        """
        range_1 = torch.norm(input_1, dim=1)
        range_2 = torch.norm(input_2, dim=1)
        range_1_2 = torch.cat((range_1, range_2), dim=1)
        mean_range = torch.mean(range_1_2)
        input_1 /= mean_range
        input_2 /= mean_range
        return mean_range.cpu().numpy()

    def quat2mat(self, quat):
        """Convert a batch of (x, y, z, w) quaternions to (B, 3, 3) rotation matrices."""
        x, y, z, w = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]
        B = quat.size(0)
        w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
        wx, wy, wz = w * x, w * y, w * z
        xy, xz, yz = x * y, x * z, y * z
        rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
                              2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,
                              2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).reshape(
            B, 3, 3)
        return rotMat

    def predict_and_publish(self, header):
        """Predict the relative transform between the stored scan pair and publish it.

        Does nothing on the very first scan (no previous scan available yet);
        in that case only the current scan is stored for the next callback.
        """
        with torch.no_grad():
            if self.point_cloud_t_1 is not None:
                # Normalize
                if self.config["normalization_scaling"]:
                    self.scaling_factor = self.normalize_input(input_1=self.point_cloud_t,
                                                               input_2=self.point_cloud_t_1)
                # Project
                self.image_t_1, _, _, _, _ = self.img_projection(
                    input=self.point_cloud_t_1, dataset=self.config["datasets"][0])
                self.image_t, _, _, _, _ = self.img_projection(
                    input=self.point_cloud_t, dataset=self.config["datasets"][0])
                ##self.visualize_image(input=np.concatenate((self.image_t_1.cpu(), self.image_t.cpu()), axis=2))
                # Predict
                t = time.time()
                torch.cuda.synchronize()
                (translation_1, rot_repr_1) = self.model(image_1=self.image_t_1,
                                                         image_2=self.image_t)
                computed_transformation = self.geometry_handler.get_transformation_matrix_quaternion(
                    translation=translation_1, quaternion=rot_repr_1,
                    device=self.device)
                torch.cuda.synchronize()
                print("The prediction took: " + str((time.time() - t) * 1000) + "ms.")
                quaternion_1 = tf.transformations.quaternion_from_matrix(
                    computed_transformation[0].cpu().numpy())
                # Rescale the translation back to metric units.
                translation = translation_1[0].cpu().numpy() * self.scaling_factor
                quaternion = quaternion_1
                # Publish messages
                self.translation_ros.x = translation[0]
                self.translation_ros.y = translation[1]
                self.translation_ros.z = translation[2]
                self.quaternion_ros.x = quaternion[0]
                self.quaternion_ros.y = quaternion[1]
                self.quaternion_ros.z = quaternion[2]
                self.quaternion_ros.w = quaternion[3]
                self.odometry_ros.header = header
                self.odometry_ros.header.frame_id = self.lidar_frame
                self.odometry_ros.pose.pose.position = self.translation_ros
                self.odometry_ros.pose.pose.orientation = self.quaternion_ros
                self.odometry_publisher.publish(self.odometry_ros)
                # Update TF
                if self.config["integrate_odometry"]:
                    self.odometry_integrator.integrate(header=header, quaternion=quaternion, translation=translation)
            # Store the current scan (undo the in-place normalization) for the next pair.
            self.point_cloud_t_1 = self.point_cloud_t * self.scaling_factor

    def subscriber_callback(self, data):
        """Convert an incoming PointCloud2 to a tensor and trigger a prediction."""
        structured_array = ros_numpy.numpify(data)
        x = structured_array['x'].view(np.float32)
        y = structured_array['y'].view(np.float32)
        z = structured_array['z'].view(np.float32)
        point_cloud_t_numpy = np.expand_dims(np.concatenate(
            (np.expand_dims(x, axis=0), np.expand_dims(y, axis=0), np.expand_dims(z, axis=0)),
            axis=0), axis=0)
        # Align the point cloud with correct coordinate system
        self.point_cloud_t = torch.from_numpy(self.filter_scans(point_cloud_t_numpy))
        # Compute odometry estimate
        self.predict_and_publish(data.header)

    def publish_odometry(self):
        """Subscribe to the LiDAR topic and spin; predictions happen in the callback."""
        rospy.Subscriber(self.lidar_topic, sensor_msgs.msg.PointCloud2,
                         self.subscriber_callback)
        rospy.spin()
| 7,939 | 39.10101 | 119 | py |
delora | delora-main/src/ros_utils/odometry_integrator.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
from __future__ import division
import time
import cv2
import geometry_msgs.msg
import nav_msgs.msg
import numpy as np
import ros_numpy
import rospy
import sensor_msgs.msg
import tf2_ros
import tf.transformations
import torch
import models.model
import models.model_parts
import utility.projection
# Assumes the dataset in config["datasets"][0]
class OdometryIntegrator:
    """Accumulates frame-to-frame odometry into a global pose and broadcasts it as TF.

    Maintains T_0_t (pose of the LiDAR frame w.r.t. the start) and broadcasts
    world -> lidar plus a fixed lidar -> delora_odom child frame.
    """

    def __init__(self, config):
        # Variables
        self.config = config
        self.device = config["device"]
        # ROS Topics and Frames
        self.lidar_topic = config["lidar_topic"]
        self.lidar_frame = config["lidar_frame"]
        self.odom_frame = "delora_odom"
        self.world_frame = "world"
        # ROS publisher and subscriber
        ## Publisher
        self.tf_broadcaster = tf2_ros.TransformBroadcaster()
        # Transformations
        # Accumulated global pose (start frame -> current frame), shape (1, 4, 4).
        self.T_0_t = np.expand_dims(np.eye(4), axis=0)
        # Fixed rotation between point-cloud frame and odom child frame (identity here).
        self.R_pc_odom = np.zeros((1, 3, 3))
        self.R_pc_odom[0] = np.eye(3)
        self.T_pc_odom = geometry_msgs.msg.TransformStamped()
        self.init_pc_odom()
        self.T_world_pc = geometry_msgs.msg.TransformStamped()
        self.init_world_pc()
        # First TF broadcast
        self.tf_broadcaster.sendTransform(self.T_world_pc)
        self.tf_broadcaster.sendTransform(self.T_pc_odom)

    def init_pc_odom(self):
        """Initialize the static lidar -> delora_odom transform from R_pc_odom."""
        self.T_pc_odom.header.frame_id = self.lidar_frame
        self.T_pc_odom.child_frame_id = self.odom_frame
        self.T_pc_odom.transform.translation.x = 0.0
        self.T_pc_odom.transform.translation.y = 0.0
        self.T_pc_odom.transform.translation.z = 0.0
        T = np.eye(4)
        T[:3, :3] = self.R_pc_odom
        quaternion = tf.transformations.quaternion_from_matrix(T)
        self.T_pc_odom.transform.rotation.x = quaternion[0]
        self.T_pc_odom.transform.rotation.y = quaternion[1]
        self.T_pc_odom.transform.rotation.z = quaternion[2]
        self.T_pc_odom.transform.rotation.w = quaternion[3]

    def init_world_pc(self):
        """Initialize the world -> lidar transform to identity."""
        self.T_world_pc.header.frame_id = self.world_frame
        self.T_world_pc.child_frame_id = self.lidar_frame
        self.T_world_pc.transform.translation.x = 0.0
        self.T_world_pc.transform.translation.y = 0.0
        self.T_world_pc.transform.translation.z = 0.0
        self.T_world_pc.transform.rotation.x = 0.0
        self.T_world_pc.transform.rotation.y = 0.0
        self.T_world_pc.transform.rotation.z = 0.0
        self.T_world_pc.transform.rotation.w = 1.0

    def update_transformation(self, quaternion, translation):
        """Right-multiply the accumulated pose by the new relative transform.

        Args:
            quaternion: relative rotation as (x, y, z, w), tf convention.
            translation: relative translation vector of length 3.
        """
        T_t_1_t = np.zeros((1, 4, 4))
        # NOTE(review): this eye() initialization is immediately overwritten by
        # quaternion_matrix below (which already yields a full 4x4) -- redundant.
        T_t_1_t[0, :4, :4] = np.eye(4)
        T_t_1_t[0] = tf.transformations.quaternion_matrix(quaternion)
        T_t_1_t[0, :3, 3] = translation
        self.T_0_t = np.matmul(self.T_0_t, T_t_1_t)
        global_translation = self.T_0_t[0, :3, 3]
        global_quaternion = tf.transformations.quaternion_from_matrix(self.T_0_t[0])
        self.T_world_pc.transform.translation.x = global_translation[0]
        self.T_world_pc.transform.translation.y = global_translation[1]
        self.T_world_pc.transform.translation.z = global_translation[2]
        self.T_world_pc.transform.rotation.x = global_quaternion[0]
        self.T_world_pc.transform.rotation.y = global_quaternion[1]
        self.T_world_pc.transform.rotation.z = global_quaternion[2]
        self.T_world_pc.transform.rotation.w = global_quaternion[3]

    def integrate(self, header, quaternion, translation):
        """Fold one relative transform into the global pose and broadcast both TFs."""
        # Update TF
        self.update_transformation(quaternion=quaternion, translation=translation)
        # Publish TF
        self.T_world_pc.header.stamp = header.stamp
        self.tf_broadcaster.sendTransform(self.T_world_pc)
        self.T_pc_odom.header.stamp = header.stamp
        self.tf_broadcaster.sendTransform(self.T_pc_odom)
| 4,075 | 37.45283 | 84 | py |
delora | delora-main/src/ros_utils/publish_point_cloud_and_normals.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import copy
import numpy as np
import geometry_msgs.msg
import rospy
import sensor_msgs.msg
import sensor_msgs.point_cloud2
import std_msgs.msg
import torch
import visualization_msgs.msg
import data.dataset
class ROSPublisher:
def __init__(self, config):
# config
self.publish_normals_bool = True
# Dataset
self.dataset = data.dataset.PreprocessedPointCloudDataset(config=config)
# Ros
## Publishers
self.scan_publisher = rospy.Publisher("/lidar/points", sensor_msgs.msg.PointCloud2,
queue_size=10)
self.normals_publisher = rospy.Publisher("/lidar/normals",
visualization_msgs.msg.MarkerArray, queue_size=10)
rospy.init_node('pc2_publisher', anonymous=True)
self.rate = rospy.Rate(10)
## Messages
## Header
self.header = std_msgs.msg.Header()
self.header.stamp = rospy.Time.now()
self.header.frame_id = "lidar"
### Scan
self.scan_msg = sensor_msgs.msg.PointCloud2()
self.scan_msg.header = self.header
self.scan_msg.fields = [
sensor_msgs.msg.PointField('x', 0, sensor_msgs.msg.PointField.FLOAT32, 1),
sensor_msgs.msg.PointField('y', 4, sensor_msgs.msg.PointField.FLOAT32, 1),
sensor_msgs.msg.PointField('z', 8, sensor_msgs.msg.PointField.FLOAT32, 1),
]
### Normals
self.normals_msg = visualization_msgs.msg.MarkerArray()
self.normals_marker = visualization_msgs.msg.Marker()
self.normals_marker.type = self.normals_marker.ARROW
self.normals_marker.color.a = 1.0
self.normals_marker.color.r = 1.0
self.normals_marker.color.g = 0.0
self.normals_marker.color.b = 0.0
self.normals_marker.scale.x = 0.1
self.normals_marker.scale.y = 0.01
self.normals_marker.scale.z = 0.01
self.normals_marker.header = self.header
self.arrow_location = geometry_msgs.msg.Point()
self.arrow_orientation = geometry_msgs.msg.Quaternion()
def euler_to_quaternion(self, roll, pitch, yaw):
qx = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(
pitch / 2) * np.sin(yaw / 2)
qy = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(
pitch / 2) * np.sin(yaw / 2)
qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(
pitch / 2) * np.cos(yaw / 2)
qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(
pitch / 2) * np.sin(yaw / 2)
return np.concatenate((np.expand_dims(qx, axis=0), np.expand_dims(qy, axis=0),
np.expand_dims(qz, axis=0), np.expand_dims(qw, axis=0)), axis=0)
def publish_scan(self, scan):
scan = scan[:, :, 0]
self.scan_msg.data = scan
self.scan_msg.header.stamp = rospy.Time.now()
self.scan_publisher.publish(
sensor_msgs.point_cloud2.create_cloud(self.header, self.scan_msg.fields, scan))
def publish_normals(self, scan, normals):
self.normals_msg.markers = []
scan = scan[:, :, 0]
normals = normals[0].transpose(1, 0)
print("Normals shape: " + str(normals.shape))
for index, point in enumerate(scan):
normal = normals[index]
self.arrow_location.x = point[0]
self.arrow_location.y = point[1]
self.arrow_location.z = point[2]
self.normals_marker.pose.position = self.arrow_location
self.normals_marker.header.stamp = rospy.Time.now()
pitch = -np.arcsin(normal[2])
yaw = np.arctan2(normal[1], normal[0])
quaternion = self.euler_to_quaternion(roll=0, pitch=np.asarray(pitch),
yaw=np.asarray(yaw))
self.arrow_orientation.x = quaternion[0]
self.arrow_orientation.y = quaternion[1]
self.arrow_orientation.z = quaternion[2]
self.arrow_orientation.w = quaternion[3]
self.normals_marker.pose.orientation = self.arrow_orientation
self.normals_marker.id = index
self.normals_msg.markers.append(copy.deepcopy(self.normals_marker))
self.normals_publisher.publish(self.normals_msg)
    def publish_dataset(self):
        """Iterate over the preprocessed dataset, publishing each scan (and,
        optionally, its normals), then sleep via self.rate.

        Stops after the scan with index 100. `self.rate` is presumably a
        rospy.Rate — TODO confirm in the (not visible here) __init__.
        """
        dataloader = torch.utils.data.DataLoader(self.dataset, shuffle=False, num_workers=0)
        for preprocessed_data in dataloader:
            print("Index: " + str(int(preprocessed_data["index"])) + " / " + str(len(dataloader)))
            # Reorder so the point dimension leads; trailing singleton axis is
            # squeezed again inside publish_scan
            scan = preprocessed_data["scan_1"].numpy()[0].transpose(2, 1, 0)
            self.publish_scan(scan=scan)
            if self.publish_normals_bool:
                normal_list_1 = preprocessed_data["normal_list_1"][0]
                # A point carries a valid normal iff its stored normal is not
                # the all-zero vector
                normals_bool = ((normal_list_1[0, 0] != 0.0) | (normal_list_1[0, 1] != 0.0) | \
                                (normal_list_1[0, 2] != 0.0)).numpy()
                print("Scan shape: " + str(scan.shape))
                # Publish arrows only for the points that have a normal
                self.publish_normals(
                    scan=scan.transpose(2, 0, 1)[:, normals_bool].transpose(1, 2, 0),
                    normals=normal_list_1[:, :, normals_bool].numpy())
            if int(preprocessed_data["index"]) >= 100:
                break
            self.rate.sleep()
| 5,759 | 39.851064 | 99 | py |
delora | delora-main/src/ros_utils/convert_to_rosbag.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import os
import rospy
import rosbag
import torch
import sensor_msgs.msg
import sensor_msgs.point_cloud2
import std_msgs.msg
import data.kitti_scans
class RosbagConverter():
    """Converts point-cloud sequences (currently KITTI only) into rosbags.

    Each scan becomes one sensor_msgs/PointCloud2 message on /velodyne_points,
    with synthetic timestamps spaced 0.1 s apart (10 Hz).
    """

    def __init__(self, config):
        self.config = config
        # Nominal sensor rate in Hz; the actual message spacing is the fixed
        # 0.1 s duration used in convert_sequence
        self.rate = 10.0
        # Ros
        rospy.init_node('Kitti_converter', anonymous=True)
        self.topic = "/velodyne_points"
        ## Header
        self.header = std_msgs.msg.Header()
        # self.header.stamp = rospy.Time.now() #rospy.Time(0.0) # rospy.Time.now()
        self.header.frame_id = "velodyne"
        ## Scan
        self.scan_msg = sensor_msgs.msg.PointCloud2()
        self.scan_msg.header = self.header
        # Point layout: packed float32 x/y/z/intensity, 16 bytes per point
        self.scan_msg.fields = [
            sensor_msgs.msg.PointField('x', 0, sensor_msgs.msg.PointField.FLOAT32, 1),
            sensor_msgs.msg.PointField('y', 4, sensor_msgs.msg.PointField.FLOAT32, 1),
            sensor_msgs.msg.PointField('z', 8, sensor_msgs.msg.PointField.FLOAT32, 1),
            sensor_msgs.msg.PointField('intensity', 12, sensor_msgs.msg.PointField.FLOAT32, 1),
        ]

    def ensure_dir(self, file_path):
        # Create the parent directory of file_path if it does not exist yet
        directory = os.path.dirname(file_path)
        if not os.path.exists(directory):
            os.makedirs(directory)

    def convert_sequence(self, dataloader, sequence, dataset_name):
        """Write every scan of one sequence into <rosbag_path><NN>.bag."""
        time = rospy.Time.now()
        duration = rospy.rostime.Duration(secs=0.1)
        self.ensure_dir(self.config[dataset_name]["rosbag_path"] + format(sequence, '02d'))
        outbag = self.config[dataset_name]["rosbag_path"] + format(sequence, '02d') + ".bag"
        # NOTE(review): the `with` rebinds `outbag` from the path string to the
        # open Bag object — works, but the shadowing is easy to misread
        with rosbag.Bag(outbag, 'w') as outbag:
            for index, point_cloud_1 in enumerate(dataloader):
                if not index % 10:
                    print("Sequence " + str(sequence) + ", index: " + str(index) + " / " + str(
                        len(dataloader)) + ", time: " + str(time.secs))
                # (num_points, channels) numpy array as expected by create_cloud
                scan = point_cloud_1[0].permute(1, 0).numpy()
                self.scan_msg.header.stamp = time
                write_msg = sensor_msgs.point_cloud2.create_cloud(self.scan_msg.header,
                                                                  self.scan_msg.fields, scan)
                outbag.write(self.topic, write_msg, write_msg.header.stamp)
                # Advance the synthetic clock by 0.1 s per scan (10 Hz)
                time += duration

    def convert(self):
        """Convert all configured sequences of all configured datasets."""
        for index_of_dataset, dataset_name in enumerate(self.config["datasets"]):
            for index_of_sequence, data_identifier in enumerate(self.config[dataset_name]["data_identifiers"]):
                # Do it for each sequence separately
                self.config["data_identifier"] = data_identifier
                # Choose which dataset
                if dataset_name == "kitti":
                    dataset = data.kitti_scans.KITTIPointCloudDataset(base_dir=self.config[dataset_name]["data_path"],
                                                                      identifier=data_identifier,
                                                                      device=self.config["device"])
                else:
                    raise Exception("Currently only KITTI is supported")
                # Define dataloader
                dataloader = torch.utils.data.DataLoader(dataset, shuffle=False, num_workers=0,
                                                         pin_memory=True if self.config[
                                                                                "device"] == torch.device(
                                                             "cuda") else False)
                print("Start sequence " + str(index_of_sequence) + " of dataset " + dataset_name + ".")
                self.convert_sequence(dataloader=dataloader, sequence=data_identifier, dataset_name=dataset_name)
| 4,014 | 43.120879 | 118 | py |
delora | delora-main/src/data/kitti_scans.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import os
import glob
import numpy as np
import pykitti
import torch
class KITTIDatasetPreprocessor():
    """Runs a user-supplied preprocessing callback over every scan of one
    KITTI sequence, selected via config[dataset_name]["data_identifier"]."""

    def __init__(self, config, dataset_name, preprocessing_fct):
        self.config = config
        dataset_config = self.config[dataset_name]
        self.identifier = dataset_config["data_identifier"]
        # Raw KITTI velodyne scans of the selected sequence
        self.point_cloud_dataset = KITTIPointCloudDataset(
            base_dir=dataset_config["data_path"],
            identifier=self.identifier,
            device=self.config["device"])
        # Callback invoked once per scan
        self.preprocessing_fct = preprocessing_fct

    def preprocess(self):
        """Apply the preprocessing callback to every scan of the sequence."""
        num_scans = self.point_cloud_dataset.num_elements
        for index in range(num_scans):
            if index % 10 == 0:
                print("Preprocessing scan " + str(index) + "/" + str(num_scans)
                      + " from sequence " + format(self.identifier, '02d') + ".")
            # Add a leading batch dimension before handing the scan over
            scan = self.point_cloud_dataset.get_velo_torch(index).unsqueeze(0)
            self.preprocessing_fct(scan=scan, index=index)
class KITTIPointCloudDataset(torch.utils.data.dataset.Dataset):
    """Torch dataset over the raw velodyne .bin scans of one KITTI sequence.

    Each item is a CPU float tensor holding one scan, transposed so that the
    point dimension comes last (channels-first layout).
    """

    def __init__(self, base_dir, identifier="00", device=torch.device("cuda")):
        super(KITTIPointCloudDataset, self).__init__()
        self.base_dir = base_dir
        self.identifier = identifier
        self.device = device
        # All velodyne scan files of the sequence, in temporal order
        scan_pattern = os.path.join(self.base_dir, format(self.identifier, '02d'),
                                    "velodyne", '*.bin')
        self.velo_file_list = sorted(glob.glob(scan_pattern))
        self.velo_data_generator = pykitti.utils.yield_velo_scans(self.velo_file_list)
        self.num_elements = len(self.velo_file_list)

    def get_velo(self, idx):
        """Load the idx-th scan from disk as a numpy array."""
        return pykitti.utils.load_velo_scan(self.velo_file_list[idx])

    def get_velo_torch(self, idx):
        """Load the idx-th scan as a transposed CPU tensor."""
        raw_scan = torch.from_numpy(self.get_velo(idx))
        return raw_scan.to(torch.device("cpu")).transpose(0, 1)

    def __getitem__(self, index):
        return self.get_velo_torch(idx=index)

    def __len__(self):
        return self.num_elements
| 2,322 | 39.754386 | 118 | py |
delora | delora-main/src/data/dataset.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import csv
import glob
import os
import torch
import numpy as np
# Preprocessed point cloud dataset is invariant of source of data --> always same format
# Data structure: dataset --> sequence --> scan
# e.g. dataset=anymal, sequence=00, scan=000000
# Naming: e.g. num_scans_sequences_datasets means number of scans in the corresponding
# sequences in the corresponding datasets
class PreprocessedPointCloudDataset(torch.utils.data.dataset.Dataset):
    """Dataset of preprocessed (scan, normals) pairs across multiple datasets
    and sequences on disk.

    Each item returns two consecutive scans (t and t+1) of the same sequence
    together with their normal lists, plus the index bookkeeping needed to map
    a flat item index back to (dataset, sequence, scan). Data can optionally
    be cached entirely in RAM.
    """

    def __init__(self, config):
        super(PreprocessedPointCloudDataset, self).__init__()
        self.config = config
        # Store dataset in RAM
        self.store_dataset_in_RAM = self.config["store_dataset_in_RAM"]
        # Members
        self.normals_files_in_datasets = []
        self.scans_files_in_datasets = []
        num_sequences_in_datasets = np.zeros(len(self.config["datasets"]))
        num_scans_in_sequences_in_datasets = []
        num_scans_in_datasets = np.zeros(len(self.config["datasets"]))
        # Go through dataset(s) and create file lists
        for index_of_dataset, dataset in enumerate(self.config["datasets"]):
            ## Paths of sequences
            normals_files_in_sequences = []
            scans_files_in_sequences = []
            num_scans_in_sequences = np.zeros(len(self.config[dataset]["data_identifiers"]), dtype=int)
            ## Go through sequences
            for index_of_sequence, data_identifier in enumerate(self.config[dataset]["data_identifiers"]):
                if not os.path.exists(
                        os.path.join(self.config[dataset]["preprocessed_path"], format(data_identifier, '02d') + "/")):
                    raise Exception(
                        "The specified path and dataset " + os.path.join(self.config[dataset]["preprocessed_path"],
                                                                         format(data_identifier,
                                                                                '02d') + "/") + "does not exist.")
                name = os.path.join(self.config[dataset]["preprocessed_path"], format(data_identifier, '02d') + "/")
                normals_name = os.path.join(name, "normals/")
                scans_name = os.path.join(name, "scans/")
                # Files
                normals_files_in_sequences.append(sorted(glob.glob(os.path.join(normals_name, '*.npy'))))
                scans_files_in_sequences.append(sorted(glob.glob(os.path.join(scans_name, '*.npy'))))
                # -1 is important (because looking at consecutive scans at t and t+1)
                num_scans_in_sequences[index_of_sequence] = len(normals_files_in_sequences[index_of_sequence]) - 1
            num_sequences_in_datasets[index_of_dataset] = len(self.config[dataset]["data_identifiers"])
            num_scans_in_sequences_in_datasets.append(num_scans_in_sequences)
            num_scans_in_datasets[index_of_dataset] = np.sum(num_scans_in_sequences, dtype=int)
            self.normals_files_in_datasets.append(normals_files_in_sequences)
            self.scans_files_in_datasets.append(scans_files_in_sequences)
        self.num_scans_overall = np.sum(num_scans_in_datasets, dtype=int)
        # Dataset, sequence, scan indices (mapping overall_index --> dataset, sequence, scan-indices)
        self.indices_dataset = np.zeros(self.num_scans_overall, dtype=int)
        self.indices_sequence = np.zeros(self.num_scans_overall, dtype=int)
        self.indices_scan = np.zeros(self.num_scans_overall, dtype=int)
        overall_index = 0
        for index_dataset, num_scans_in_sequences in enumerate(num_scans_in_sequences_in_datasets):
            for index_sequence, num_scans in enumerate(num_scans_in_sequences):
                for index_scan in range(num_scans):
                    self.indices_dataset[overall_index] = index_dataset
                    self.indices_sequence[overall_index] = index_sequence
                    self.indices_scan[overall_index] = index_scan
                    overall_index += 1
        # In case of RAM loading --> keep data in memory
        if not self.store_dataset_in_RAM:
            print('\033[92m' + "Dataset will be kept on disk. For higher performance enable RAM loading."
                  + "\033[0;0m")
        else:
            print('\033[92m' + "Loading all scans and normals into the RAM / SWAP... "
                  + "Disable this if you do not have enough RAM." + "\033[0;0m")
            self.normals_RAM = []
            self.scans_RAM = []
            counter = 0
            for index_dataset, num_scans_in_sequences in enumerate(num_scans_in_sequences_in_datasets):
                normals_in_dataset_RAM = []
                scans_in_dataset_RAM = []
                for index_sequence, num_scans in enumerate(num_scans_in_sequences):
                    normals_in_sequence_RAM = []
                    scans_in_sequence_RAM = []
                    # + 1 is important here, since we need to store ALL scans (also last of each sequence)
                    for index_scan in range(num_scans + 1):
                        (normal_list, scan) = self.load_files_from_disk(index_dataset=index_dataset,
                                                                        index_sequence=index_sequence,
                                                                        index_scan=index_scan)
                        normals_in_sequence_RAM.append(normal_list)
                        scans_in_sequence_RAM.append(scan)
                        counter += 1
                    normals_in_dataset_RAM.append(normals_in_sequence_RAM)
                    scans_in_dataset_RAM.append(scans_in_sequence_RAM)
                self.normals_RAM.append(normals_in_dataset_RAM)
                self.scans_RAM.append(scans_in_dataset_RAM)
            print('\033[92m' + "Loaded " + str(counter) + " scans to RAM/swap." + "\033[0;0m")

    def load_files_from_disk(self, index_dataset, index_sequence, index_scan):
        """Load one scan and its normal list from .npy files as (1, 3, N) CPU tensors."""
        normal_list = torch.from_numpy(
            np.load(self.normals_files_in_datasets[index_dataset][index_sequence][index_scan])).to(
            torch.device("cpu")).permute(1, 0).view(1, 3, -1)
        scan = torch.from_numpy(
            np.load(self.scans_files_in_datasets[index_dataset][index_sequence][index_scan])).to(
            torch.device("cpu")).permute(1, 0).view(1, 3, -1)
        return (normal_list, scan)

    def __getitem__(self, index):
        """Return the consecutive pair (scan t, scan t+1) with their normals."""
        index_dataset = self.indices_dataset[index]
        index_sequence = self.indices_sequence[index]
        index_scan = self.indices_scan[index]
        if self.store_dataset_in_RAM:
            normal_list_1 = self.normals_RAM[index_dataset][index_sequence][index_scan]
            scan_1 = self.scans_RAM[index_dataset][index_sequence][index_scan]
            normal_list_2 = self.normals_RAM[index_dataset][index_sequence][index_scan + 1]
            scan_2 = self.scans_RAM[index_dataset][index_sequence][index_scan + 1]
        else:
            (normal_list_1, scan_1) = self.load_files_from_disk(index_dataset=index_dataset,
                                                                index_sequence=index_sequence,
                                                                index_scan=index_scan)
            (normal_list_2, scan_2) = self.load_files_from_disk(index_dataset=index_dataset,
                                                                index_sequence=index_sequence,
                                                                index_scan=index_scan + 1)
        # Encapsulate data
        preprocessed_data = {
            "index": index,
            "index_dataset": index_dataset,
            "index_sequence": index_sequence,
            "index_scan": index_scan,
            "dataset": self.config["datasets"][index_dataset],
            "normal_list_1": normal_list_1,
            "normal_list_2": normal_list_2,
            "scan_1": scan_1,
            "scan_2": scan_2,
        }
        return preprocessed_data

    def __len__(self):
        return self.num_scans_overall
# Groundtruth poses are also always saved in this format
class PoseDataset(torch.utils.data.dataset.Dataset):
    """Ground-truth poses per scan, in KITTI format (3x4 matrix flattened to
    12 values per row).

    For sequences without a ground-truth file, a single-NaN row is stored per
    scan so that lookups (`return_poses`, `return_translations`, `__getitem__`)
    can detect and signal the missing data by returning None.
    """

    def __init__(self, config):
        self.config = config
        self.device = config["device"]
        # Members
        self.poses_datasets = []
        num_sequences_datasets = np.zeros(len(self.config["datasets"]))
        num_scans_sequences_datasets = []
        num_scans_datasets = np.zeros(len(self.config["datasets"]))
        # Go through dataset(s)
        for index_of_dataset, dataset in enumerate(self.config["datasets"]):
            base_dir = config[dataset]["pose_data_path"]
            poses_sequences = []
            num_scans_sequences = np.zeros(len(self.config[dataset]["data_identifiers"]),
                                           dtype=int)
            for index_of_sequence, data_identifier in enumerate(
                    config[dataset]["data_identifiers"]):
                if base_dir:
                    pose_file_name = os.path.join(base_dir,
                                                  format(data_identifier, '02d') + '.txt')
                    # First pass: count poses to pre-allocate the array
                    with open(pose_file_name, newline="") as csvfile:
                        row_reader = csv.reader(csvfile, delimiter=" ")
                        num_poses = sum(1 for row in row_reader)
                    poses_sequences.append(np.zeros((num_poses, 12)))
                    # Second pass: parse the 12 pose values of each row
                    with open(pose_file_name, newline="") as csvfile:
                        row_reader = csv.reader(csvfile, delimiter=" ")
                        for index_of_scan, row in enumerate(row_reader):
                            poses_sequences[index_of_sequence][index_of_scan, :] = np.asarray(
                                [float(element) for element in row])
                    # -1 is important (because looking at consecutive scans at t and t+1)
                    num_scans_sequences[index_of_sequence] = num_poses - 1
                else:
                    print("Groundtruth file does not exist. Not using any ground truth for it.")
                    # Count scans via the preprocessed normals files and store
                    # one NaN entry per scan (assigning None to a float array
                    # yields NaN)
                    name = os.path.join(self.config[dataset]["preprocessed_path"], format(data_identifier, '02d') + "/")
                    normals_name = os.path.join(name, "normals/")
                    normals_files_in_sequence = sorted(glob.glob(os.path.join(normals_name, '*.npy')))
                    num_poses = len(normals_files_in_sequence)
                    poses_sequences.append(np.zeros((num_poses, 1)))
                    for index_of_scan in range(num_poses):
                        poses_sequences[index_of_sequence][index_of_scan, 0] = None
                    # NOTE(review): num_scans_sequences is left at 0 here, so
                    # GT-less sequences never appear in the flat index mapping
                    # below — presumably intentional; confirm.
            num_sequences_datasets[index_of_dataset] = len(
                self.config[dataset]["data_identifiers"])
            num_scans_sequences_datasets.append(num_scans_sequences)
            num_scans_datasets[index_of_dataset] = np.sum(num_scans_sequences, dtype=int)
            self.poses_datasets.append(poses_sequences)
        self.num_scans_overall = np.sum(num_scans_datasets, dtype=int)
        # Dataset, sequence, scan indices (mapping overall_index --> dataset, sequence, scan-indices)
        self.indices_dataset = np.zeros(self.num_scans_overall, dtype=int)
        self.indices_sequence = np.zeros(self.num_scans_overall, dtype=int)
        self.indices_scan = np.zeros(self.num_scans_overall, dtype=int)
        counter = 0
        for index_dataset, num_scans_sequences in enumerate(num_scans_sequences_datasets):
            for index_sequence, num_scans in enumerate(num_scans_sequences):
                for index_scan in range(num_scans):
                    self.indices_dataset[counter] = index_dataset
                    self.indices_sequence[counter] = index_sequence
                    self.indices_scan[counter] = index_scan
                    counter += 1

    def return_translations(self, index_of_dataset, index_of_sequence):
        """Return the (N, 3) translation columns of a sequence, or None if no GT."""
        # Removed a leftover debug print of the first pose entry
        if not np.isnan(self.poses_datasets[index_of_dataset][index_of_sequence][0, 0]):
            return self.poses_datasets[index_of_dataset][index_of_sequence][:, [3, 7, 11]]
        else:
            return None

    def return_poses(self, index_of_dataset, index_of_sequence):
        """Return the (N, 12) pose rows of a sequence, or None if no GT."""
        if not np.isnan(self.poses_datasets[index_of_dataset][index_of_sequence][0, 0]):
            return self.poses_datasets[index_of_dataset][index_of_sequence]
        else:
            return None

    def __getitem__(self, index):
        """Return the 12-value pose row of the scan at flat index, or None if no GT."""
        # Bug fix: was `self.indices_of_dataset`, which does not exist
        # (the attribute set in __init__ is `self.indices_dataset`) and
        # raised AttributeError on every item access.
        index_dataset = self.indices_dataset[index]
        index_sequence = self.indices_sequence[index]
        index_scan = self.indices_scan[index]
        if not np.isnan(self.poses_datasets[index_dataset][index_sequence][index_scan, 0]):
            return self.poses_datasets[index_dataset][index_sequence][index_scan]
        else:
            return None

    def __len__(self):
        return self.num_scans_overall
| 13,227 | 51.701195 | 120 | py |
delora | delora-main/src/data/rosbag_scans.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import os
import glob
import numpy as np
import torch
import ros_utils.rosbag_pcl_extractor
class RosbagDatasetPreprocessor():
    """Locates the rosbag belonging to one data identifier and drives the
    rosbag extraction / preprocessing helper over it."""

    def __init__(self, config, dataset_name, topic, preprocessing_fct):
        self.config = config
        self.rosbag_dir = self.config[dataset_name]["data_path"]
        self.identifier = self.config[dataset_name]["data_identifier"]
        sequence_tag = format(self.identifier, '02d')
        # The bag file name must start with the zero-padded identifier
        candidates = sorted(glob.glob(
            os.path.join(self.rosbag_dir, "" + sequence_tag + '*')))
        if len(candidates) > 1:
            raise Exception(
                "Identifier does not uniquely define a rosbag. There are multiple files containing "
                + sequence_tag + ".")
        if not candidates:
            raise Exception(
                "Rosbag corresponding to data identifier "
                + str(self.identifier) + " must include " + sequence_tag + ".")
        self.rosbag_file = candidates[0]
        # Delegate the actual extraction to the rosbag helper
        self.rosbag_extractor = ros_utils.rosbag_pcl_extractor.RosbagToPCLExtractor(
            rosbag_file=self.rosbag_file, topic=topic, config=self.config,
            preprocessing_fct=preprocessing_fct)

    def preprocess(self):
        """Run the preprocessing function over the selected messages in the bag."""
        self.rosbag_extractor.preprocess_rosbag()
| 1,574 | 37.414634 | 111 | py |
delora | delora-main/src/losses/icp_losses.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import scipy.spatial
import torch
class ICPLosses(torch.nn.Module):
    """Combined ICP loss: point-to-point, point-to-plane and plane-to-plane
    terms between a transformed source cloud and a target cloud.

    Correspondences are found by nearest neighbor in a KD-tree built over the
    target cloud. Points are split by whether a normal exists (a normal is
    encoded as all-zero when absent), and each loss term only sees the
    source/target pairs it applies to.
    """

    def __init__(self, config):
        super(ICPLosses, self).__init__()
        self.config = config
        # Define single loss components
        if self.config["point_to_point_loss"]:
            self.point_to_point_loss = KDPointToPointLoss(config=self.config)
        if self.config["point_to_plane_loss"]:
            self.point_to_plane_loss = KDPointToPlaneLoss(config=self.config)
        if self.config["plane_to_plane_loss"]:
            self.plane_to_plane_loss = KDPlaneToPlaneLoss(config=self.config)

    def find_target_correspondences(self, kd_tree_target, source_list_numpy):
        # Index of the nearest target point for each source point
        target_correspondence_indices = kd_tree_target[0].query(source_list_numpy[0])[1]
        return target_correspondence_indices

    def forward(self, source_point_cloud_transformed,
                source_normal_list_transformed,
                target_point_cloud,
                target_normal_list,
                compute_pointwise_loss_bool):
        """Compute the enabled loss terms; clouds and normal lists are (1, 3, N).

        NOTE(review): with po2po_alone=True and point_to_plane_loss or
        plane_to_plane_loss enabled, `source_points_where_normals` etc. are
        never defined and would raise NameError — presumably these configs are
        mutually exclusive; confirm against the config files.
        """
        # Build kd-tree
        target_kd_tree = [scipy.spatial.cKDTree(target_point_cloud[0].permute(1, 0).cpu())]
        if self.config["po2po_alone"]:
            # Find corresponding target points for all source points
            target_correspondences_of_source_points = \
                torch.from_numpy(self.find_target_correspondences(
                    kd_tree_target=target_kd_tree,
                    source_list_numpy=source_point_cloud_transformed.permute(0, 2,
                                                                             1).detach().cpu().numpy())).to(
                    self.config["device"])
            source_points_where_no_normals = source_point_cloud_transformed
            target_points_where_no_normals = target_point_cloud[:, :,
                                             target_correspondences_of_source_points]
        else:
            # An all-zero normal encodes "no normal available" for this point
            source_where_normals_bool = (source_normal_list_transformed[:, 0, :] != 0) | (
                    source_normal_list_transformed[:, 1, :] != 0) | (
                                                source_normal_list_transformed[:, 2, :] != 0)
            target_where_normals_bool = (target_normal_list[:, 0, :] != 0) | (
                    target_normal_list[:, 1, :] != 0) | (target_normal_list[:, 2, :] != 0)
            # Differentiate between source points where normals exist / do not exist
            source_point_cloud_transformed_where_normals = source_point_cloud_transformed[:, :,
                                                           source_where_normals_bool[0]]
            source_normal_list_transformed_where_normals = source_normal_list_transformed[:, :,
                                                           source_where_normals_bool[0]]
            source_point_cloud_transformed_where_no_normals = source_point_cloud_transformed[:, :,
                                                              ~source_where_normals_bool[0]]
            # Transform to numpy for KD-tree handling
            source_point_cloud_transformed_where_normals_numpy = \
                source_point_cloud_transformed_where_normals.permute(0, 2, 1).detach().cpu().numpy()
            source_point_cloud_transformed_where_no_normals_numpy = \
                source_point_cloud_transformed_where_no_normals.permute(0, 2,
                                                                        1).detach().cpu().numpy()
            # Find corresponding target points for source points which have normals,
            target_correspondences_of_source_points_where_source_normals_indices = \
                torch.from_numpy(self.find_target_correspondences(
                    kd_tree_target=target_kd_tree,
                    source_list_numpy=source_point_cloud_transformed_where_normals_numpy)).to(
                    self.config["device"])
            # Find corresponding target points for source points which have no normals
            target_correspondences_of_source_points_where_no_source_normals_indices = \
                torch.from_numpy(self.find_target_correspondences(
                    kd_tree_target=target_kd_tree,
                    source_list_numpy=source_point_cloud_transformed_where_no_normals_numpy)).to(
                    self.config["device"])
            # 2 cases we compute losses for:
            if self.config["point_to_point_loss"]:
                # 1) NO source normal and NO target normal,
                ## For EACH source point without normal, there is a corresponding target point
                target_point_cloud_where_no_source_normals = \
                    target_point_cloud[:, :,
                    target_correspondences_of_source_points_where_no_source_normals_indices]
                ## We only need to keep the source target pairs when there also exists NO target normal
                target_where_normals_where_no_source_normals_bool = \
                    target_where_normals_bool[:,
                    target_correspondences_of_source_points_where_no_source_normals_indices]
                target_points_where_no_normals = target_point_cloud_where_no_source_normals[:, :,
                                                 ~target_where_normals_where_no_source_normals_bool[
                                                     0]]
                source_points_where_no_normals = source_point_cloud_transformed_where_no_normals[:,
                                                 :,
                                                 ~target_where_normals_where_no_source_normals_bool[
                                                     0]]
            ## 2) We need corresponding target normals where source has normals
            target_normal_list_where_source_normals_with_holes = \
                target_normal_list[:, :,
                target_correspondences_of_source_points_where_source_normals_indices]
            target_point_cloud_where_source_normals = \
                target_point_cloud[:, :,
                target_correspondences_of_source_points_where_source_normals_indices]
            # These 4 arrays are now the ones / corresponding ones of source points that have normals
            # --> Now need to remove points where we have no target normal
            target_where_normals_where_source_normals_bool = \
                target_where_normals_bool[:,
                target_correspondences_of_source_points_where_source_normals_indices]
            source_points_where_normals = source_point_cloud_transformed_where_normals[:, :,
                                          target_where_normals_where_source_normals_bool[0]]
            source_normals_where_normals = source_normal_list_transformed_where_normals[:, :,
                                           target_where_normals_where_source_normals_bool[0]]
            target_points_where_normals = target_point_cloud_where_source_normals[:, :,
                                          target_where_normals_where_source_normals_bool[0]]
            target_normals_where_normals = target_normal_list_where_source_normals_with_holes[:, :,
                                           target_where_normals_where_source_normals_bool[0]]
        # Define losses (zero unless the corresponding term is enabled)
        loss_po2po = torch.zeros(1, device=self.config["device"])
        loss_po2pl = torch.zeros(1, device=self.config["device"])
        loss_pl2pl = torch.zeros(1, device=self.config["device"])
        po2pl_pointwise_loss = torch.zeros(1, device=self.config["device"])
        if self.config["point_to_point_loss"]:
            loss_po2po, po2po_pointwise_loss, po2po_source_list = self.point_to_point_loss.forward(
                source_list=source_points_where_no_normals,
                target_correspondences_list=target_points_where_no_normals,
                compute_pointwise_loss_bool=False)
        if self.config["point_to_plane_loss"]:
            loss_po2pl, po2pl_pointwise_loss = \
                self.point_to_plane_loss(
                    source_list=source_points_where_normals,
                    target_correspondences_list=target_points_where_normals,
                    target_correspondences_normal_vectors=target_normals_where_normals,
                    compute_pointwise_loss_bool=compute_pointwise_loss_bool)
        if self.config["plane_to_plane_loss"]:
            loss_pl2pl = self.plane_to_plane_loss(
                source_normals=source_normals_where_normals,
                target_correspondences_normals=target_normals_where_normals)
        losses = {
            "loss_po2po": loss_po2po,
            "loss_po2pl": loss_po2pl,
            "loss_po2pl_pointwise": po2pl_pointwise_loss,
            "loss_pl2pl": loss_pl2pl,
        }
        # Visualization data is only meaningful when the normal-based split was done
        plotting = {
            "scan_2_transformed": source_points_where_normals,
            "normals_2_transformed": source_normals_where_normals
        } if not self.config["po2po_alone"] else None
        return losses, plotting
class KDPointToPointLoss:
    """Mean-squared point-to-point ICP loss between corresponding point lists."""

    def __init__(self, config):
        self.config = config
        self.lossMeanMSE = torch.nn.MSELoss()
        self.lossPointMSE = torch.nn.MSELoss(reduction="none")

    def compute_loss(self, source_list, target_correspondences_list, compute_pointwise_loss_bool):
        """Return (scalar MSE loss, optional detached per-point squared errors
        reshaped to (1, 3, N), source reshaped to (1, 3, N))."""
        loss = self.lossMeanMSE(source_list, target_correspondences_list)
        if compute_pointwise_loss_bool:
            pointwise = self.lossPointMSE(source_list, target_correspondences_list).detach()
            pointwise = pointwise.transpose(0, 1).view(1, 3, -1)
        else:
            pointwise = None
        reshaped_source = source_list.transpose(0, 1).view(1, 3, -1)
        return loss, pointwise, reshaped_source

    def forward(self, source_list, target_correspondences_list, compute_pointwise_loss_bool):
        return self.compute_loss(source_list=source_list,
                                 target_correspondences_list=target_correspondences_list,
                                 compute_pointwise_loss_bool=compute_pointwise_loss_bool)
class KDPointToPlaneLoss(torch.nn.Module):
    """Point-to-plane ICP loss: penalizes the residual vector projected onto
    the target normal of each correspondence."""

    def __init__(self, config):
        super(KDPointToPlaneLoss, self).__init__()
        self.config = config
        self.lossMeanMSE = torch.nn.MSELoss()
        self.lossPointMSE = torch.nn.MSELoss(reduction="none")

    def compute_loss(self, source_list, target_correspondences_list, normal_vectors, compute_pointwise_loss):
        # Residual vector per correspondence
        residuals = source_list - target_correspondences_list
        # Dot product of each residual with its target normal (batched matmul)
        projected = residuals.permute(2, 0, 1).matmul(normal_vectors.permute(2, 1, 0))
        zeros = torch.zeros(projected.shape, device=self.config["device"])
        loss = self.lossMeanMSE(projected, zeros)
        pointwise = residuals if compute_pointwise_loss else None
        return loss, pointwise

    def forward(self, source_list, target_correspondences_list, target_correspondences_normal_vectors,
                compute_pointwise_loss_bool):
        return self.compute_loss(source_list=source_list,
                                 target_correspondences_list=target_correspondences_list,
                                 normal_vectors=target_correspondences_normal_vectors,
                                 compute_pointwise_loss=compute_pointwise_loss_bool)
class KDPlaneToPlaneLoss(torch.nn.Module):
    """Plane-to-plane ICP loss: penalizes misalignment between corresponding
    source and target normal vectors, either via 1 - dot product ("linear")
    or the Euclidean distance of the normals ("squared")."""

    def __init__(self, config):
        super(KDPlaneToPlaneLoss, self).__init__()
        self.config = config
        self.lossMeanMSE = torch.nn.MSELoss()
        # self.lossPointMSE = torch.nn.MSELoss(reduction="none")

    def forward(self, source_normals, target_correspondences_normals):
        source_normals = source_normals.permute(2, 0, 1)
        mode = self.config["normal_loss"]
        if mode == "linear":
            # 1 - <n_s, n_t> vanishes for perfectly aligned unit normals
            alignment = torch.matmul(source_normals,
                                     target_correspondences_normals.permute(2, 1, 0))
            zeros = torch.zeros(alignment.shape, device=self.config["device"])
            return self.lossMeanMSE(1 - alignment, zeros)
        if mode == "squared":
            # Euclidean distance between the two normal vectors
            diffs = source_normals - target_correspondences_normals.permute(2, 0, 1)
            distance = torch.norm(diffs, dim=2, keepdim=True)
            zeros = torch.zeros(distance.shape, device=self.config["device"])
            return self.lossMeanMSE(distance, zeros)
        raise Exception("The normal loss which is defined here is not admissible.")
| 13,291 | 54.153527 | 109 | py |
delora | delora-main/src/preprocessing/normal_computation.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import numpy as np
import torch
import utility.linalg
# Need an instance of this for each dataset
class NormalsComputer:
    """Estimates per-point surface normals on a range-image representation of a
    LiDAR scan via eigen-decomposition of local neighborhood covariances.

    Points whose neighborhood is too small get the all-zero vector as normal,
    which downstream code interprets as "no normal available".
    """

    def __init__(self, config, dataset_name):
        self.config = config
        # Preprocessing (arrays should not be build at each iteration)
        coordinate_meshgrid = np.meshgrid(range(self.config[dataset_name]["horizontal_cells"]),
                                          range(self.config[dataset_name]["vertical_cells"]))
        # Flattened pixel coordinate lookup tables for the full range image
        self.u_image_coords_list = torch.from_numpy(coordinate_meshgrid[0]).view(
            self.config[dataset_name]["vertical_cells"] * self.config[dataset_name][
                "horizontal_cells"]).to(self.config["device"])
        self.v_image_coords_list = torch.from_numpy(coordinate_meshgrid[1]).view(
            self.config[dataset_name]["vertical_cells"] * self.config[dataset_name][
                "horizontal_cells"]).to(self.config["device"])
        # Store dataset info
        self.dataset_name = dataset_name

    def get_image_coords(self, image):
        """Return the non-zero 3D points of the image plus their (u, v) pixel coords."""
        # Target list is extracted from target image (visible points)
        # NOTE: `list` shadows the builtin; kept as-is
        list = image[0, :3, ].view(
            3, self.config[self.dataset_name]["vertical_cells"] * self.config[self.dataset_name][
                "horizontal_cells"]).transpose(0, 1)
        # A point with all-zero coordinates marks an empty range-image cell
        indices_non_zero = (list[:, 0] != 0) & (list[:, 1] != 0) & (list[:, 2] != 0)
        list = list[indices_non_zero]
        u_image_coordinates_list = self.u_image_coords_list[indices_non_zero]
        v_image_coordinates_list = self.v_image_coords_list[indices_non_zero]
        return list, u_image_coordinates_list, v_image_coordinates_list

    def check_planarity(self, eigenvalues):
        """Boolean mask of neighborhoods whose eigenvalue spectrum indicates a plane."""
        epsilon_plane = self.config["epsilon_plane"]
        epsilon_line = self.config["epsilon_line"]
        # First (plane-) criterion also holds for line, therefor also check that line criterion
        # (only one large e-value) does NOT hold
        planar_indices = ((eigenvalues[:, 0] / torch.sum(eigenvalues, dim=1) < epsilon_plane) & (
                (eigenvalues[:, 0] + eigenvalues[:, 1]) / torch.sum(eigenvalues,
                                                                    dim=1) > epsilon_line))
        return planar_indices

    def covariance_eigen_decomposition(self, point_neighbors, point_locations):
        """Compute oriented normals from per-point neighborhood covariances.

        Returns (normals with zeros for invalid points, validity mask,
        point locations as (N, 3)).
        """
        # Filter by range
        epsilon_range = self.config["epsilon_range"]
        range_deviation_indices = torch.abs(torch.norm(point_neighbors, dim=1)
                                            - torch.norm(point_locations, dim=1)) > epsilon_range
        # Annulate neighbors that have a range which deviates too much from point location
        point_neighbors.permute(0, 2, 1)[range_deviation_indices] = 0.0
        point_neighbors_permuted = point_neighbors.permute(2, 1, 0)
        covariance_matrices, number_neighbours = utility.linalg.cov(
            point_neighbors=point_neighbors_permuted)
        # Filter by amount of neighbors
        # Dropping eigenvectors and values where min number of points is not met
        where_enough_neighbors_bool = number_neighbours >= self.config[
            "min_num_points_in_neighborhood_to_determine_point_class"]
        covariance_matrices = covariance_matrices[where_enough_neighbors_bool]
        # NOTE(review): torch.symeig is deprecated in newer torch versions —
        # torch.linalg.eigh is the replacement; kept as-is here
        eigenvalues, eigenvectors = torch.symeig(covariance_matrices.to(torch.device("cpu")),
                                                 eigenvectors=True)
        eigenvalues = eigenvalues.to(self.config["device"])
        eigenvectors = eigenvectors.to(self.config["device"])
        # e-vectors corresponding to smallest e-value
        all_normal_vectors = eigenvectors[:, :, 0]
        point_locations = point_locations[0].permute(1, 0)
        # Flip normals so they point towards the sensor (negative dot product
        # with the point's position vector)
        dot_products = all_normal_vectors.view(-1, 1, 3).matmul(
            point_locations[where_enough_neighbors_bool].view(-1, 3, 1)).squeeze()
        all_normal_vectors[dot_products > 0] *= -1
        # Now for all points which do not have a normal we simply store (0, 0, 0)^T
        all_normal_vectors_with_zeros = torch.zeros_like(point_locations)
        all_normal_vectors_with_zeros[where_enough_neighbors_bool] = all_normal_vectors
        return all_normal_vectors_with_zeros, where_enough_neighbors_bool, point_locations

    def compute_normal_vectors(self, image):
        """Gather the pixel neighborhood of every valid point and delegate to
        covariance_eigen_decomposition."""
        # Get coordinates and target points
        point_list, u_coordinates, v_coordinates = self.get_image_coords(image=image)
        image = image[0, :3]
        point_neighbors = []
        # Take patch around each point
        a = int(self.config[self.dataset_name]["neighborhood_side_length"][0] / 2)
        b = int(self.config[self.dataset_name]["neighborhood_side_length"][1] / 2)
        for v_neighbor in range(-a, a + 1):
            for u_neighbor in range(-b, b + 1):
                v_neighbor_coords = v_coordinates + v_neighbor
                u_neighbor_coords = u_coordinates + u_neighbor
                # These can be negative, set them to 0 --> will be biased neighborhood at edges
                v_neighbor_coords[v_neighbor_coords < 0] = 0
                v_neighbor_coords[
                    v_neighbor_coords > (self.config[self.dataset_name]["vertical_cells"] - 1)] = (
                        self.config[self.dataset_name]["vertical_cells"] - 1)
                u_neighbor_coords[u_neighbor_coords < 0] = 0
                u_neighbor_coords[
                    u_neighbor_coords > (self.config[self.dataset_name]["horizontal_cells"] - 1)] = (
                        self.config[self.dataset_name]["horizontal_cells"] - 1)
                if not len(point_neighbors):
                    point_neighbors = image[:, v_neighbor_coords, u_neighbor_coords].view(1, 3, -1)
                else:
                    point_neighbors = torch.cat((point_neighbors,
                                                 image[:, v_neighbor_coords, u_neighbor_coords
                                                 ].view(1, 3, -1)), dim=0)
        del a, b
        point_locations = image[:, v_coordinates, u_coordinates].view(1, 3, -1)
        return self.covariance_eigen_decomposition(point_neighbors=point_neighbors,
                                                   point_locations=point_locations)
| 6,487 | 51.747967 | 101 | py |
delora | delora-main/bin/run_training.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import click
import numpy as np
import torch
import yaml
import deploy.trainer
@click.command()
@click.option('--training_run_name', prompt='MLFlow name of the run',
              help='The name under which the run can be found afterwards.')
@click.option('--experiment_name', help='High-level training sequence name for clustering in MLFlow.',
              default="")
@click.option('--checkpoint', help='Path to the saved checkpoint. Leave empty if none.',
              default="")
def config(training_run_name, experiment_name, checkpoint):
    """Assemble the training configuration from the YAML files and CLI options.

    Parameters stored in an optional checkpoint take precedence over the YAML
    values. Returns the merged configuration dict.
    """
    # Merge the three YAML config files. Context managers close the file
    # handles (the original code leaked three open file objects).
    with open('config/config_datasets.yaml') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    with open('config/deployment_options.yaml') as f:
        config.update(yaml.load(f, Loader=yaml.FullLoader))
    with open('config/hyperparameters.yaml') as f:
        config.update(yaml.load(f, Loader=yaml.FullLoader))
    # Default: load parameters from yaml
    parameters_exist = False
    checkpoint_content = None
    # CLI Input
    ## Checkpoint for continuing training
    if checkpoint:
        # Load the checkpoint once and reuse it below (it was loaded twice before).
        checkpoint_content = torch.load(checkpoint)
        ### Parameters from previous run?
        if 'parameters' in checkpoint_content:
            print("\033[92m" +
                  "Found parameters in checkpoint of previous run! Setting part of parameters to those ones."
                  + "\033[0;0m")
            parameters_exist = True
        else:
            print("Checkpoint does not contain any parameters. Using those ones specified in the YAML files.")
    # Parameters that are set depending on whether provided in checkpoint
    if parameters_exist:
        loaded_config = checkpoint_content['parameters']
        ## Device to be used
        loaded_config["device"] = torch.device(config["device"])
        loaded_config["datasets"] = config["datasets"]
        for dataset in loaded_config["datasets"]:
            loaded_config[dataset]["training_identifiers"] = config[dataset]["training_identifiers"]
            loaded_config[dataset]["data_identifiers"] = loaded_config[dataset]["training_identifiers"]
        config = loaded_config
    # Some parameters are only initialized when not taken from checkpoint
    # (checkpoint parameters are assumed to be converted already).
    else:
        ## Device to be used
        config["device"] = torch.device(config["device"])
        for dataset in config["datasets"]:
            config[dataset]["data_identifiers"] = config[dataset]["training_identifiers"]
        ## Convert angles from degrees to radians
        for dataset in config["datasets"]:
            config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
            config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
        config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
        config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
    # Parameters that are always set
    config["checkpoint"] = str(checkpoint) if checkpoint else None
    ## Trainings run name --> mandatory
    config["training_run_name"] = str(training_run_name)
    config["run_name"] = config["training_run_name"]
    ## Experiment name, default specified in deployment_options.yaml
    if experiment_name:
        config["experiment"] = experiment_name
    ## Mode
    config["mode"] = "training"
    print("----------------------------------")
    print("Configuration for this run: ")
    print(config)
    print("----------------------------------")
    return config
if __name__ == "__main__":
    # Build the run configuration from the YAML files and CLI options;
    # standalone_mode=False makes click return the value instead of sys.exit-ing.
    config = config(standalone_mode=False)
    # Run training with the assembled configuration.
    trainer = deploy.trainer.Trainer(config=config)
    trainer.train()
| 3,837 | 39.4 | 110 | py |
delora | delora-main/bin/run_rosnode.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import click
import numpy as np
import torch
import yaml
import ros_utils.odometry_publisher
@click.command()
@click.option('--checkpoint', prompt='Path to the saved model you want to test')
@click.option('--dataset',
              prompt='On which dataset configuration do you want to get predictions? [kitti, darpa, ....]. Does not '
                     'need to be one of those, but the sensor paramaters are looked up in the config_datasets.yaml.')
@click.option('--lidar_topic', prompt='Topic of the published LiDAR pointcloud2 messages.')
@click.option('--lidar_frame', prompt='LiDAR frame in TF tree.')
@click.option('--integrate_odometry', help='Whether the published odometry should be integrated in the TF tree.',
              default=True)
def config(checkpoint, dataset, lidar_topic, lidar_frame, integrate_odometry):
    """Assemble the configuration for the ROS odometry-publisher node.

    Merges the YAML config files with the CLI options and returns the
    configuration dict.
    """
    # Merge the three YAML config files. Context managers close the file
    # handles (the original code leaked three open file objects).
    with open('config/config_datasets.yaml') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    with open('config/deployment_options.yaml') as f:
        config.update(yaml.load(f, Loader=yaml.FullLoader))
    with open('config/hyperparameters.yaml') as f:
        config.update(yaml.load(f, Loader=yaml.FullLoader))
    # Mode
    # NOTE(review): mode is set to "training" although this node performs
    # inference; downstream code may key on this value -- confirm it is intended.
    config["mode"] = "training"
    # No dropout during testing
    if config["use_dropout"]:
        config["use_dropout"] = False
        print("Deactivating dropout for this mode.")
    # CLI Input
    ## Checkpoint
    config["checkpoint"] = str(checkpoint)
    ## Dataset
    config["datasets"] = [str(dataset)]
    ## LiDAR Topic
    config["lidar_topic"] = str(lidar_topic)
    ## LiDAR Frame
    config["lidar_frame"] = str(lidar_frame)
    ## Integrate odometry
    config["integrate_odometry"] = integrate_odometry
    # Device to be used; anything other than "cuda" falls back to CPU.
    if config["device"] == "cuda":
        config["device"] = torch.device("cuda")
    else:
        config["device"] = torch.device("cpu")
    # Convert sensor field-of-view angles from degrees to radians
    for dataset in config["datasets"]:
        config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
        config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
    print("----------------------------------")
    print("Configuration for this run: ")
    print(config)
    print("----------------------------------")
    return config
if __name__ == "__main__":
    # Build the node configuration from the YAML files and CLI options;
    # standalone_mode=False makes click return the value instead of sys.exit-ing.
    config = config(standalone_mode=False)
    # Start the ROS node that publishes LiDAR odometry predictions.
    publisher = ros_utils.odometry_publisher.OdometryPublisher(config=config)
    publisher.publish_odometry()
| 2,881 | 35.948718 | 117 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.