repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
GrUMoDepth | GrUMoDepth-main/evaluate.py | #
# MIT License
#
# Copyright (c) 2020 Matteo Poggi m.poggi@unibo.it
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Source: modified from https://github.com/mattpoggi/mono-uncertainty
"""
from __future__ import absolute_import, division, print_function
import copy
import warnings
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pickle
import torch
import monodepth2
from monodepth2.options import MonodepthOptions
from monodepth2.layers import disp_to_depth
from monodepth2.utils import readlines
from extended_options import UncertaintyOptions
import progressbar
from eval_utils import compute_eigen_errors_visu, compute_eigen_errors, compute_aucs
cv2.setNumThreads(0)
splits_dir = os.path.join(os.path.dirname(__file__), "monodepth2/splits")
# Real-world scale factor (see Monodepth2)
STEREO_SCALE_FACTOR = 5.4
uncertainty_metrics = ["abs_rel", "rmse", "a1"]
def evaluate(opt):
    """Evaluate pre-computed 16-bit disparity (and optional uncertainty) maps
    against KITTI ground truth.

    Args:
        opt: parsed UncertaintyOptions namespace; exactly one of
            --eval_mono / --eval_stereo must be set. Predictions are read
            from ``opt.ext_disp_to_eval`` as written by generate_maps.py.
    """
    MIN_DEPTH = 1e-3
    MAX_DEPTH = opt.max_depth
    assert sum((opt.eval_mono, opt.eval_stereo)) == 1, \
        "Please choose mono or stereo evaluation by setting either --eval_mono or --eval_stereo"
    gt_path = os.path.join(splits_dir, opt.eval_split, "gt_depths.npz")
    gt_depths = np.load(gt_path, fix_imports=True, encoding='latin1', allow_pickle=True)["data"]
    print("-> Loading 16 bit predictions from {}".format(opt.ext_disp_to_eval))
    pred_disps = []
    pred_uncerts = []
    for i in range(len(gt_depths)):
        img = cv2.imread(opt.ext_disp_to_eval + '/disp/%06d_10.png' % i, -1)
        # Undo the 16-bit encoding used by generate_maps.py: values were
        # multiplied by 256 and by (0.58 * image_width) / 10 before saving.
        src = img / 256. / (0.58 * gt_depths[i].shape[1]) * 10
        pred_disps.append(src)
        if opt.eval_uncert:
            # The uncertainty sub-folder name encodes how the maps were
            # produced, mirroring the naming scheme in generate_maps.py.
            if opt.grad:
                folder_name = "uncert_" + opt.gref + "_" + opt.gloss
                if opt.w != 0.0:
                    folder_name = folder_name + "_weight" + str(opt.w)
                folder_name = folder_name + "_layer_" + "_".join(str(x) for x in opt.ext_layer)
            elif opt.infer_dropout:
                folder_name = "uncert_p_" + str(opt.infer_p)
            else:
                folder_name = "uncert"
            uncert = cv2.imread(opt.ext_disp_to_eval + '/' + folder_name + '/%06d_10.png' % i, -1) / 256.
            pred_uncerts.append(uncert)
    pred_disps = np.array(pred_disps)
    print("-> Evaluating")
    if opt.eval_stereo:
        print(" Stereo evaluation - "
              "disabling median scaling, scaling by {}".format(STEREO_SCALE_FACTOR))
        opt.disable_median_scaling = True
        opt.pred_depth_scale_factor = STEREO_SCALE_FACTOR
    else:
        print(" Mono evaluation - using median scaling")
    errors = []
    errors_abs_rel = []
    errors_rmse = []
    # dictionary with accumulators for each metric
    aucs = {"abs_rel": [], "rmse": [], "a1": []}
    curves = {"abs_rel": [], "rmse": [], "a1": []}
    # shape[0] is the height and shape[1] the width of the predictions
    # (the original code had the two names swapped).
    pred_height, pred_width = pred_disps[0].shape[0], pred_disps[0].shape[1]
    bar = progressbar.ProgressBar(max_value=len(gt_depths))
    for i in range(len(gt_depths)):
        gt_depth = gt_depths[i]
        gt_height, gt_width = gt_depth.shape[:2]
        bar.update(i)
        pred_disp = pred_disps[i]
        pred_disp = cv2.resize(pred_disp, (gt_width, gt_height))
        pred_depth = 1 / pred_disp
        # full-resolution error maps for qualitative visualisation; invalid
        # ground-truth pixels are clamped so they do not dominate the colormap
        gt_depth_visu = copy.deepcopy(gt_depth)
        pred_depth_visu = copy.deepcopy(pred_depth)
        mask_visu = gt_depth > 0
        gt_depth_visu[~mask_visu] = MIN_DEPTH
        pred_depth_visu[~mask_visu] = MIN_DEPTH
        # get error maps
        tmp_abs_rel, tmp_rmse, tmp_a1 = compute_eigen_errors_visu(gt_depth_visu, pred_depth_visu, mask_visu)
        errors_abs_rel.append(tmp_abs_rel)
        errors_rmse.append(tmp_rmse)
        if opt.eval_uncert:
            pred_uncert = pred_uncerts[i]
            pred_uncert = cv2.resize(pred_uncert, (gt_width, gt_height))
        if opt.eval_split == "eigen":
            # traditional eigen crop
            mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH)
            crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,
                             0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)
            crop_mask = np.zeros(mask.shape)
            crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1
            mask = np.logical_and(mask, crop_mask)
        else:
            # just mask out invalid depths
            mask = (gt_depth > 0)
        # apply masks
        pred_depth = pred_depth[mask]
        gt_depth = gt_depth[mask]
        if opt.eval_uncert:
            pred_uncert = pred_uncert[mask]
        # apply scale factor and depth cap
        pred_depth *= opt.pred_depth_scale_factor
        pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH
        pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH
        # get Eigen's metrics
        errors.append(compute_eigen_errors(gt_depth, pred_depth))
        if opt.eval_uncert:
            # get uncertainty metrics (AUSE and AURG) and sparsification curves
            scores, spars_plots = compute_aucs(gt_depth, pred_depth, pred_uncert)
            # append AUSE and AURG to accumulators (plain loop instead of a
            # side-effecting list comprehension)
            for m in uncertainty_metrics:
                aucs[m].append(scores[m])
                curves[m].append(spars_plots[m])
    # compute mean depth metrics and print
    mean_errors = np.array(errors).mean(0)
    print("\n " + ("{:>8} | " * 7).format("abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"))
    print(("&{: 8.3f} " * 7).format(*mean_errors.tolist()) + "\\\\")
    if opt.eval_uncert:
        # compute mean uncertainty metrics and print
        for m in uncertainty_metrics:
            aucs[m] = np.array(aucs[m]).mean(0)
        print("\n " + ("{:>8} | " * 6).format("abs_rel", "", "rmse", "", "a1", ""))
        print(" " + ("{:>8} | " * 6).format("AUSE", "AURG", "AUSE", "AURG", "AUSE", "AURG"))
        print(("&{:8.3f} " * 6).format(*aucs["abs_rel"].tolist() + aucs["rmse"].tolist() + aucs["a1"].tolist()) + "\\\\")
    errors_abs_rel = np.array(errors_abs_rel)
    errors_rmse = np.array(errors_rmse)
    # save sparsification plots (use a context manager so the file handle is
    # closed; the original left it dangling)
    os.makedirs(opt.output_dir, exist_ok=True)
    with open(os.path.join(opt.output_dir, "spars_plots.pkl"), "wb") as f:
        pickle.dump(curves, f)
    if opt.save_error_map:
        os.makedirs(os.path.join(opt.output_dir, "abs_rel"), exist_ok=True)
        os.makedirs(os.path.join(opt.output_dir, "rmse"), exist_ok=True)
        print("--> Saving qualitative error maps: abs rel")
        bar = progressbar.ProgressBar(max_value=len(errors_abs_rel))
        for i in range(len(errors_abs_rel)):
            bar.update(i)
            # save colored error maps; cv2.resize expects (width, height)
            plt.imsave(os.path.join(opt.output_dir, "abs_rel", '%06d_10.png' % i),
                       cv2.resize(errors_abs_rel[i], (pred_width, pred_height)), cmap='hot')
        print("--> Saving qualitative error maps: rmse")
        bar = progressbar.ProgressBar(max_value=len(errors_rmse))
        for i in range(len(errors_rmse)):
            bar.update(i)
            plt.imsave(os.path.join(opt.output_dir, "rmse", '%06d_10.png' % i),
                       cv2.resize(errors_rmse[i], (pred_width, pred_height)), cmap='hot')
    # see you next time!
    print("\n-> Done!")
if __name__ == "__main__":
    # Silence noisy UserWarnings (e.g. from torch internals) during evaluation.
    warnings.simplefilter("ignore", UserWarning)
    evaluate(UncertaintyOptions().parse())
| 8,739 | 37.502203 | 143 | py |
GrUMoDepth | GrUMoDepth-main/generate_maps.py | #
# MIT License
#
# Copyright (c) 2020 Matteo Poggi m.poggi@unibo.it
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Source: modified from https://github.com/mattpoggi/mono-uncertainty
-- added gradient-based uncertainty (grad)
-- added inference only dropout (infer_dropout)
-- added variance over different augmentations (var_aug)
"""
from __future__ import absolute_import, division, print_function
import time
import warnings
import os
import cv2
import numpy as np
import torch
from torch.utils.data import DataLoader
import monodepth2
import monodepth2.kitti_utils as kitti_utils
from monodepth2.layers import *
from monodepth2.utils import *
from extended_options import *
import monodepth2.datasets as datasets
import monodepth2.networks as legacy
import networks
import progressbar
import matplotlib.pyplot as plt
from gradients import *
from torchvision import transforms
import sys
splits_dir = os.path.join(os.path.dirname(__file__), "monodepth2/splits")
def batch_post_process_disparity(l_disp, r_disp):
    """Fuse a disparity map with its flipped-image counterpart (Monodepthv1).

    Near the left/right borders the prediction from the opposite view is
    favoured via smooth ramp masks; elsewhere the two maps are averaged.
    """
    _, height, width = l_disp.shape
    mean_disp = 0.5 * (l_disp + r_disp)
    grid_x, _ = np.meshgrid(np.linspace(0, 1, width), np.linspace(0, 1, height))
    # ramp from 1 to 0 over the first ~10% of the image width
    left_mask = (1.0 - np.clip(20 * (grid_x - 0.05), 0, 1))[None, ...]
    right_mask = left_mask[:, :, ::-1]
    return right_mask * l_disp + left_mask * r_disp + (1.0 - left_mask - right_mask) * mean_disp
def get_mono_ratio(disp, gt):
    """Return the median scaling factor between ground truth and prediction.

    The predicted disparity is inverted to depth, resized to the ground-truth
    resolution, and compared over valid (non-zero) ground-truth pixels.
    """
    valid = gt > 0
    pred_depth = cv2.resize(1 / disp, (gt.shape[1], gt.shape[0]))[valid]
    return np.median(gt[valid]) / np.median(pred_depth)
def evaluate(opt):
    """Run a trained model over a KITTI test split and save 16-bit disparity
    and (optionally) uncertainty maps.

    Supported uncertainty strategies: log-likelihood (--log), learned
    reprojection (--repr), MC dropout (--dropout), inference-only dropout
    (--infer_dropout), bootstrap/snapshot ensembles, gradient-based
    uncertainty (--grad), variance over test-time augmentations (--var_aug)
    and Monodepthv1 post-processing (--post_process).
    """

    def uncert_folder_name():
        # The uncertainty output folder encodes how the maps were produced so
        # evaluate.py can locate them again with the same options. This logic
        # was previously duplicated at two sites in this function.
        if opt.grad:
            name = "uncert_" + opt.gref + "_" + opt.gloss
            if opt.w != 0.0:
                name = name + "_weight" + str(opt.w)
            return name + "_layer_" + "_".join(str(x) for x in opt.ext_layer)
        if opt.infer_dropout:
            return "uncert_p_" + str(opt.infer_p)
        return "uncert"

    MIN_DEPTH = 1e-3
    MAX_DEPTH = 80
    opt.batch_size = 1
    assert sum((opt.eval_mono, opt.eval_stereo, opt.no_eval)) == 1, \
        "Please choose mono or stereo evaluation by setting either --eval_mono, --eval_stereo, --custom_run"
    assert sum((opt.log, opt.repr)) < 2, \
        "Please select only one between LR and LOG by setting --repr or --log"
    assert opt.bootstraps == 1 or opt.snapshots == 1, \
        "Please set only one of --bootstraps or --snapshots to be major than 1"
    # get the number of networks (1 unless an ensemble is requested)
    nets = max(opt.bootstraps, opt.snapshots)
    do_uncert = (opt.log or opt.repr or opt.dropout or opt.post_process or opt.bootstraps > 1 or opt.snapshots > 1
                 or opt.grad or opt.infer_dropout or opt.var_aug)
    print("-> Beginning inference...")
    opt.load_weights_folder = os.path.expanduser(opt.load_weights_folder)
    assert os.path.isdir(opt.load_weights_folder), "Cannot find a folder at {}".format(opt.load_weights_folder)
    print("-> Loading weights from {}".format(opt.load_weights_folder))
    filenames = readlines(os.path.join(splits_dir, opt.eval_split, "test_files.txt"))
    if opt.bootstraps > 1:
        # prepare multiple checkpoint paths from different trainings
        encoder_path = [os.path.join(opt.load_weights_folder, "boot_%d" % i, "weights_19", "encoder.pth")
                        for i in range(1, opt.bootstraps + 1)]
        decoder_path = [os.path.join(opt.load_weights_folder, "boot_%d" % i, "weights_19", "depth.pth")
                        for i in range(1, opt.bootstraps + 1)]
        encoder_dict = [torch.load(encoder_path[i]) for i in range(opt.bootstraps)]
        height = encoder_dict[0]['height']
        width = encoder_dict[0]['width']
    elif opt.snapshots > 1:
        # prepare multiple checkpoint paths from the same training
        encoder_path = [os.path.join(opt.load_weights_folder, "weights_%d" % i, "encoder.pth")
                        for i in range(opt.num_epochs - opt.snapshots, opt.num_epochs)]
        decoder_path = [os.path.join(opt.load_weights_folder, "weights_%d" % i, "depth.pth")
                        for i in range(opt.num_epochs - opt.snapshots, opt.num_epochs)]
        encoder_dict = [torch.load(encoder_path[i]) for i in range(opt.snapshots)]
        height = encoder_dict[0]['height']
        width = encoder_dict[0]['width']
    else:
        # prepare just a single path
        encoder_path = os.path.join(opt.load_weights_folder, "encoder.pth")
        decoder_path = os.path.join(opt.load_weights_folder, "depth.pth")
        encoder_dict = torch.load(encoder_path)
        height = encoder_dict['height']
        width = encoder_dict['width']
    img_ext = '.png' if opt.png else '.jpg'
    dataset = datasets.KITTIRAWDataset(opt.data_path, filenames,
                                       height, width,
                                       [0], 4, is_train=False, img_ext=img_ext)
    dataloader = DataLoader(dataset, opt.batch_size, shuffle=False, num_workers=opt.num_workers,
                            pin_memory=True, drop_last=False)
    if nets > 1:
        # load multiple encoders and decoders
        encoder = [legacy.ResnetEncoder(opt.num_layers, False) for _ in range(nets)]
        depth_decoder = [
            networks.DepthUncertaintyDecoder(encoder[i].num_ch_enc, num_output_channels=1,
                                             uncert=(opt.log or opt.repr),
                                             dropout=opt.dropout) for i in range(nets)]
        model_dict = [encoder[i].state_dict() for i in range(nets)]
        for i in range(nets):
            encoder[i].load_state_dict({k: v for k, v in encoder_dict[i].items() if k in model_dict[i]})
            depth_decoder[i].load_state_dict(torch.load(decoder_path[i]))
            encoder[i].cuda()
            encoder[i].eval()
            depth_decoder[i].cuda()
            depth_decoder[i].eval()
    else:
        # load a single encoder and decoder
        encoder = legacy.ResnetEncoder(opt.num_layers, False)
        depth_decoder = networks.DepthUncertaintyDecoder(encoder.num_ch_enc, num_output_channels=1,
                                                         uncert=(opt.log or opt.repr or opt.uncert),
                                                         dropout=opt.dropout)
        if opt.infer_dropout:
            # load a separate depth decoder when dropout is only applied
            # during inference
            depth_decoder_drop = networks.DepthUncertaintyDecoder(encoder.num_ch_enc, num_output_channels=1,
                                                                  uncert=(opt.log or opt.repr or opt.uncert),
                                                                  dropout=opt.dropout,
                                                                  infer_dropout=opt.infer_dropout,
                                                                  infer_p=opt.infer_p)
            depth_decoder_drop.load_state_dict(torch.load(decoder_path))
            depth_decoder_drop.cuda()
            depth_decoder_drop.eval()
        model_dict = encoder.state_dict()
        encoder.load_state_dict({k: v for k, v in encoder_dict.items() if k in model_dict})
        depth_decoder.load_state_dict(torch.load(decoder_path))
        encoder.cuda()
        encoder.eval()
        depth_decoder.cuda()
        depth_decoder.eval()
    # accumulators for depth and uncertainties
    pred_disps = []
    pred_uncerts = []
    if opt.grad:
        # gradient-based uncertainty: backpropagate a self-supervised loss and
        # collect gradient statistics from the selected decoder layers
        ext_layer = ['decoder.0.conv', 'decoder.1.conv', 'decoder.2.conv', 'decoder.3.conv', 'decoder.4.conv',
                     'decoder.5.conv', 'decoder.6.conv', 'decoder.7.conv', 'decoder.8.conv', 'decoder.9.conv',
                     'decoder.10.conv']
        layer_list = [ext_layer[layer_idx] for layer_idx in opt.ext_layer]
        gradient_extractor = Gradient_Analysis(depth_decoder, layer_list, height, width, opt.gred)
        print("-> Extract gradients from model for uncertainty estimation")
        bwd_time = 0
        n_samples = 0
        if opt.gloss not in ["sq", "none", "var"]:
            raise NotImplementedError
        for i, data in enumerate(dataloader):
            rgb_img = data[("color", 0, 0)].cuda()
            if opt.gref == "flip":
                # reference depth from the horizontally flipped image
                ref_img = torch.flip(rgb_img, [3])
                with torch.no_grad():
                    output = depth_decoder(encoder(ref_img))
                    ref_disp, _ = disp_to_depth(output[("disp", 0)], opt.min_depth, opt.max_depth)
                    ref_disp = ref_disp.squeeze(1)
                    ref_depth = 1 / ref_disp
                    # flip the prediction back to the original orientation
                    ref_depth = ref_depth.cpu().numpy()[:, :, ::-1]
                    ref_depth = torch.from_numpy(ref_depth.copy()).cuda()
            elif opt.gref == "var":
                # reference depths from several augmentations:
                # flip, grayscale, additive noise, rotation
                ref_imgs = [torch.flip(rgb_img, [3]), transforms.Grayscale(num_output_channels=3)(rgb_img),
                            rgb_img + torch.normal(0.0, 0.01, rgb_img.size()).cuda(),
                            transforms.functional.rotate(rgb_img, 10)]
                ref_depths = []
                with torch.no_grad():
                    # 'aug_img' instead of 'input' to avoid shadowing a builtin
                    for j, aug_img in enumerate(ref_imgs):
                        output = depth_decoder(encoder(aug_img))
                        ref_disp, _ = disp_to_depth(output[("disp", 0)], opt.min_depth, opt.max_depth)
                        if j == 3:
                            # undo the rotation
                            ref_disp = transforms.functional.rotate(ref_disp, -10)
                        ref_disp = ref_disp.squeeze(1)
                        ref_depth = 1 / ref_disp
                        if j == 0:
                            # undo the horizontal flip
                            ref_depth = ref_depth.cpu().numpy()[:, :, ::-1]
                            ref_depth = torch.from_numpy(ref_depth.copy()).cuda()
                        ref_depths.append(ref_depth)
            output = gradient_extractor(encoder(rgb_img))
            pred_disp, _ = disp_to_depth(output[("disp", 0)], opt.min_depth, opt.max_depth)
            pred_disp = pred_disp.squeeze(1)
            pred_depth = 1 / pred_disp
            n_samples += rgb_img.shape[0]
            loss = 0
            if opt.gloss == "var":
                loss = torch.var(torch.cat([pred_depth, ref_depths[0], ref_depths[1], ref_depths[2], ref_depths[3]], 0), dim=0)
                loss = torch.mean(loss)
            else:
                if opt.gloss == "sq":
                    # NOTE(review): this path requires --gref flip, otherwise
                    # ref_depth is undefined here — confirm option validation
                    depth_diff = squared_difference(pred_depth, ref_depth)
                    loss += torch.mean(depth_diff)
                if opt.uncert and opt.w != 0.0:
                    pred_uncert = output[("uncert", 0)].squeeze(1)
                    uncert = torch.exp(pred_uncert) ** 2
                    loss += (opt.w * torch.mean(uncert))
            start_time = time.time()
            loss.backward()
            stop_time = time.time()
            bwd_time += (stop_time - start_time)
        pred_uncerts = gradient_extractor.get_gradients()
        bwd_time = bwd_time / len(dataloader)
        print('\nAverage backward time: {:.2f} ms'.format(bwd_time * 1000))
    print("-> Computing predictions with size {}x{}".format(width, height))
    fwd_time = 0
    with torch.no_grad():
        bar = progressbar.ProgressBar(max_value=len(dataloader))
        for i, data in enumerate(dataloader):
            input_color = data[("color", 0, 0)].cuda()
            # updating progress bar
            bar.update(i)
            if opt.post_process:
                # post-processed results require each image to have two forward passes
                input_color = torch.cat((input_color, torch.flip(input_color, [3])), 0)
            if nets > 1:
                # infer multiple predictions from multiple networks
                # ('net_idx' so the enclosing loop index 'i' is not shadowed)
                disps_distribution = []
                uncerts_distribution = []
                for net_idx in range(nets):
                    start_time = time.time()
                    output = depth_decoder[net_idx](encoder[net_idx](input_color))
                    stop_time = time.time()
                    disps_distribution.append(torch.unsqueeze(output[("disp", 0)], 0))
                    if opt.log:
                        uncerts_distribution.append(torch.unsqueeze(torch.exp(output[("uncert", 0)]) ** 2, 0))
                disps_distribution = torch.cat(disps_distribution, 0)
                if opt.log:
                    # bayesian uncertainty
                    pred_uncert = torch.var(disps_distribution, dim=0, keepdim=False) + torch.sum(
                        torch.cat(uncerts_distribution, 0), dim=0, keepdim=False)
                else:
                    # uncertainty as variance of the predictions
                    pred_uncert = torch.var(disps_distribution, dim=0, keepdim=False)
                pred_uncert = pred_uncert.cpu()[0].numpy()
                output = torch.mean(disps_distribution, dim=0, keepdim=False)
                pred_disp, _ = disp_to_depth(output, opt.min_depth, opt.max_depth)
            elif opt.dropout:
                # infer multiple predictions from the same network with dropout
                disps_distribution = []
                # we infer 8 predictions, matching the bootstrap/snapshot count
                for _ in range(8):
                    start_time = time.time()
                    output = depth_decoder(encoder(input_color))
                    stop_time = time.time()
                    disps_distribution.append(torch.unsqueeze(output[("disp", 0)], 0))
                disps_distribution = torch.cat(disps_distribution, 0)
                # uncertainty as variance of the predictions
                pred_uncert = torch.var(disps_distribution, dim=0, keepdim=False).cpu()[0].numpy()
                # depth as mean of the predictions
                output = torch.mean(disps_distribution, dim=0, keepdim=False)
                pred_disp, _ = disp_to_depth(output, opt.min_depth, opt.max_depth)
            elif opt.infer_dropout:
                # depth from the deterministic decoder...
                start_time = time.time()
                output = depth_decoder(encoder(input_color))
                stop_time = time.time()
                pred_disp, _ = disp_to_depth(output[("disp", 0)], opt.min_depth, opt.max_depth)
                # ...uncertainty from 8 passes of the dropout-at-inference decoder
                disps_distribution = []
                for _ in range(8):
                    output = depth_decoder_drop(encoder(input_color))
                    disps_distribution.append(torch.unsqueeze(output[("disp", 0)], 0))
                disps_distribution = torch.cat(disps_distribution, 0)
                # uncertainty as variance of the predictions
                pred_uncert = torch.var(disps_distribution, dim=0, keepdim=False).cpu()[0].numpy()
            elif opt.var_aug:
                # uncertainty as variance over different augmentations
                start_time = time.time()
                disps_distribution = []
                # plain image: its disparity is the one that gets saved
                output = depth_decoder(encoder(input_color))
                disp_output = output[("disp", 0)]
                pred_disp, _ = disp_to_depth(disp_output, opt.min_depth, opt.max_depth)
                disps_distribution.append(torch.unsqueeze(disp_output, 0))
                # first augmentation: flipping (flipped back before comparing)
                rgb_input = torch.flip(input_color, [3])
                output = depth_decoder(encoder(rgb_input))
                disp_output = output[("disp", 0)]
                disps_distribution.append(torch.unsqueeze(torch.flip(disp_output, [3]), 0))
                # second augmentation: gray-scale
                rgb_input = transforms.Grayscale(num_output_channels=3)(input_color)
                output = depth_decoder(encoder(rgb_input))
                disp_output = output[("disp", 0)]
                disps_distribution.append(torch.unsqueeze(disp_output, 0))
                # third augmentation: additive noise
                rgb_input = input_color + torch.normal(0.0, 0.01, input_color.size()).cuda()
                output = depth_decoder(encoder(rgb_input))
                disp_output = output[("disp", 0)]
                disps_distribution.append(torch.unsqueeze(disp_output, 0))
                # last augmentation: rotation (rotated back before comparing)
                rgb_input = transforms.functional.rotate(input_color, 10)
                output = depth_decoder(encoder(rgb_input))
                disp_output = output[("disp", 0)]
                disps_distribution.append(torch.unsqueeze(transforms.functional.rotate(disp_output, -10), 0))
                disps_distribution = torch.cat(disps_distribution, 0)
                pred_uncert = torch.var(disps_distribution, dim=0, keepdim=False).cpu()[:, 0].numpy()
                # min-max normalization happens here because the generic
                # normalization branch below does not cover --var_aug
                pred_uncert = (pred_uncert - np.min(pred_uncert)) / (np.max(pred_uncert) - np.min(pred_uncert))
                pred_uncerts.append(pred_uncert)
                stop_time = time.time()
            else:
                start_time = time.time()
                output = depth_decoder(encoder(input_color))
                stop_time = time.time()
                pred_disp, _ = disp_to_depth(output[("disp", 0)], opt.min_depth, opt.max_depth)
                if opt.log:
                    # log-likelihood maximization
                    pred_uncert = torch.exp(output[("uncert", 0)]).cpu()[:, 0].numpy()
                elif opt.repr:
                    # learned reprojection
                    pred_uncert = (output[("uncert", 0)]).cpu()[:, 0].numpy()
            fwd_time += (stop_time - start_time)
            pred_disp = pred_disp.cpu()[:, 0].numpy()
            if opt.post_process:
                # applying Monodepthv1 post-processing to improve depth and get uncertainty
                N = pred_disp.shape[0] // 2
                pred_uncert = np.abs(pred_disp[:N] - pred_disp[N:, :, ::-1])
                pred_disp = batch_post_process_disparity(pred_disp[:N], pred_disp[N:, :, ::-1])
                pred_uncerts.append(pred_uncert)
            pred_disps.append(pred_disp)
            # min-max uncertainty normalization
            if opt.log or opt.repr or opt.dropout or opt.infer_dropout or nets > 1:
                pred_uncert = (pred_uncert - np.min(pred_uncert)) / (np.max(pred_uncert) - np.min(pred_uncert))
                pred_uncerts.append(pred_uncert)
    pred_disps = np.concatenate(pred_disps)
    fwd_time = fwd_time / len(dataset)
    print('\nAverage inference: {:.2f} ms'.format(fwd_time * 1000))
    if do_uncert and not opt.grad:
        pred_uncerts = np.concatenate(pred_uncerts)
    # saving 16 bit depth and uncertainties
    print("-> Saving 16 bit maps")
    gt_path = os.path.join(splits_dir, opt.eval_split, "gt_depths.npz")
    gt_depths = np.load(gt_path, fix_imports=True, encoding='latin1', allow_pickle=True)["data"]
    os.makedirs(os.path.join(opt.output_dir, "raw", "disp"), exist_ok=True)
    folder_name = uncert_folder_name()
    os.makedirs(os.path.join(opt.output_dir, "raw", folder_name), exist_ok=True)
    if opt.qual:
        os.makedirs(os.path.join(opt.output_dir, "qual", "disp"), exist_ok=True)
        if do_uncert:
            os.makedirs(os.path.join(opt.output_dir, "qual", folder_name), exist_ok=True)
    bar = progressbar.ProgressBar(max_value=len(pred_disps))
    for i in range(len(pred_disps)):
        bar.update(i)
        if opt.eval_stereo:
            # save images scaling with KITTI baseline
            cv2.imwrite(os.path.join(opt.output_dir, "raw", "disp", '%06d_10.png' % i),
                        (pred_disps[i] * (dataset.K[0][0] * gt_depths[i].shape[1]) * 256. / 10).astype(np.uint16))
        elif opt.eval_mono:
            # save images scaling with ground truth median
            ratio = get_mono_ratio(pred_disps[i], gt_depths[i])
            cv2.imwrite(os.path.join(opt.output_dir, "raw", "disp", '%06d_10.png' % i),
                        (pred_disps[i] * (dataset.K[0][0] * gt_depths[i].shape[1]) * 256. / ratio / 10.).astype(
                            np.uint16))
        else:
            # save images scaling with custom factor
            cv2.imwrite(os.path.join(opt.output_dir, "raw", "disp", '%06d_10.png' % i),
                        (pred_disps[i] * (opt.custom_scale) * 256. / 10).astype(np.uint16))
        if do_uncert:
            # save uncertainties (the original had two byte-identical branches
            # for the grad/infer_dropout and generic cases; collapsed to one)
            cv2.imwrite(os.path.join(opt.output_dir, "raw", folder_name, '%06d_10.png' % i),
                        (pred_uncerts[i] * (256 * 256 - 1)).astype(np.uint16))
        if opt.qual:
            # save colored depth maps
            plt.imsave(os.path.join(opt.output_dir, "qual", "disp", '%06d_10.png' % i), pred_disps[i], cmap='magma')
            if do_uncert:
                # save colored uncertainty maps
                plt.imsave(os.path.join(opt.output_dir, "qual", folder_name, '%06d_10.png' % i), pred_uncerts[i],
                           cmap='hot')
    # see you next time!
    print("\n-> Done!")
if __name__ == "__main__":
    # Silence noisy UserWarnings (e.g. from torch internals) during inference.
    warnings.simplefilter("ignore", UserWarning)
    evaluate(UncertaintyOptions().parse())
| 23,210 | 45.890909 | 127 | py |
GrUMoDepth | GrUMoDepth-main/trainer_supervised.py | # Monodepth2 to train in a supervised manner on NYU Depth V2
# Copyright Niantic 2019. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the Monodepth2 licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
"""
Source: modified from https://github.com/nianticlabs/monodepth2
"""
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from monodepth2.layers import compute_depth_errors
from monodepth2.trainer import Trainer as base_trainer
import monodepth2.networks as legacy
import networks as networks
from datasets.nyu_dataset import NYUDataset
def uncertainty_loss(pred, gt, uct):
    """Uncertainty-weighted L1 loss.

    The absolute error is attenuated by the predicted log-uncertainty
    (divided by exp(uct)) and the uncertainty itself is added as a
    regulariser so the network cannot grow it without bound. Averaged per
    sample first, then over the batch.
    """
    weighted = torch.abs(pred - gt) / torch.exp(uct) + uct
    per_sample = torch.mean(weighted, dim=[1, 2, 3])
    return torch.mean(per_sample)
class Trainer(base_trainer):
def __init__(self, options):
    """Set up models, optimizer, dataloaders and logging for supervised
    training on NYU Depth V2.
    """
    self.opt = options
    self.log_path = os.path.join(self.opt.log_dir, self.opt.model_name)
    # checking height and width are multiples of 32
    assert self.opt.height % 32 == 0, "'height' must be a multiple of 32"
    assert self.opt.width % 32 == 0, "'width' must be a multiple of 32"
    self.models = {}
    self.parameters_to_train = []
    self.device = torch.device("cpu" if self.opt.no_cuda else "cuda")
    self.num_scales = len(self.opt.scales)
    # ResNet encoder plus an uncertainty-aware depth decoder; both trained
    self.models["encoder"] = legacy.ResnetEncoder(self.opt.num_layers, self.opt.weights_init == "pretrained")
    self.models["encoder"].to(self.device)
    self.parameters_to_train += list(self.models["encoder"].parameters())
    self.models["depth"] = networks.DepthUncertaintyDecoder_Supervised(self.models["encoder"].num_ch_enc,
                                                                       self.opt.scales,
                                                                       dropout=self.opt.dropout,
                                                                       uncert=self.opt.uncert)
    self.models["depth"].to(self.device)
    self.parameters_to_train += list(self.models["depth"].parameters())
    self.model_optimizer = optim.Adam(self.parameters_to_train, self.opt.learning_rate)
    self.model_lr_scheduler = optim.lr_scheduler.StepLR(
        self.model_optimizer, self.opt.scheduler_step_size, 0.1)
    if self.opt.load_weights_folder is not None:
        self.load_model()
    print("Training model named:\n ", self.opt.model_name)
    print("Models and tensorboard events files are saved to:\n ", self.opt.log_dir)
    print("Training is using:\n ", self.device)
    # NYU datasets: 'train' split for optimization, 'holdout' for validation
    train_dataset = NYUDataset(self.opt.data_path + '/train/', split='train', height=self.opt.height,
                               width=self.opt.width)
    num_train_samples = len(train_dataset)
    val_dataset = NYUDataset(self.opt.data_path + '/train/', split='holdout', height=self.opt.height,
                             width=self.opt.width)
    self.num_total_steps = num_train_samples // self.opt.batch_size * self.opt.num_epochs
    self.train_loader = DataLoader(
        train_dataset, self.opt.batch_size, True,
        num_workers=self.opt.num_workers, pin_memory=True, drop_last=True)
    self.val_loader = DataLoader(
        val_dataset, self.opt.batch_size, True,
        num_workers=self.opt.num_workers, pin_memory=True, drop_last=True)
    self.val_iter = iter(self.val_loader)
    # one tensorboard writer per phase
    self.writers = {mode: SummaryWriter(os.path.join(self.log_path, mode)) for mode in ["train", "val"]}
    self.depth_metric_names = [
        "de/abs_rel", "de/sq_rel", "de/rms", "de/log_rms", "da/a1", "da/a2", "da/a3"]
    print("Using split:\n ", self.opt.split)
    print("There are {:d} training items and {:d} validation items\n".format(
        len(train_dataset), len(val_dataset)))
    self.save_opts()
def train(self):
    """Run the training pipeline in a supervised manner.

    For each epoch: iterate the training set, compute the (optionally
    uncertainty-weighted) multi-scale loss against ground-truth depth and
    back-propagate. Logs periodically and checkpoints every
    ``save_frequency`` epochs.
    """
    self.epoch = 0
    self.step = 0
    self.start_time = time.time()
    # pick the loss once instead of re-checking per batch
    if self.opt.uncert:
        criterion = uncertainty_loss
    else:
        criterion = nn.L1Loss()
    for self.epoch in range(self.opt.num_epochs):
        print("Training")
        self.set_train()
        for batch_idx, inputs in enumerate(self.train_loader):
            rgb_img = inputs[("color", 0, 0)].cuda()
            gt_depth = inputs["depth_gt"].cuda()
            before_op_time = time.time()
            # Otherwise, we only feed the image with frame_id 0 through the depth encoder
            features = self.models["encoder"](rgb_img)
            outputs = self.models["depth"](features)
            total_loss = 0
            losses = {}
            for scale in self.opt.scales:
                # upsample every scale to full resolution before the loss
                depth = outputs[("depth", scale)]
                depth = F.interpolate(depth, [self.opt.height, self.opt.width], mode="bilinear",
                                      align_corners=False)
                if self.opt.uncert:
                    uncert = outputs[("uncert", scale)]
                    uncert = F.interpolate(uncert, [self.opt.height, self.opt.width], mode="bilinear",
                                           align_corners=False)
                    loss = criterion(depth, gt_depth, uncert)
                else:
                    loss = criterion(depth, gt_depth)
                total_loss += loss
                losses["loss/{}".format(scale)] = loss
            total_loss /= self.num_scales
            losses["loss"] = total_loss
            self.model_optimizer.zero_grad()
            total_loss.backward()
            self.model_optimizer.step()
            duration = time.time() - before_op_time
            # log less frequently after the first 2000 steps to save time & disk space
            early_phase = batch_idx % self.opt.log_frequency == 0 and self.step < 2000
            late_phase = self.step % 2000 == 0
            if early_phase or late_phase:
                self.log_time(batch_idx, duration, losses["loss"].cpu().data)
                if "depth_gt" in inputs:
                    self.compute_depth_losses(inputs, outputs, losses)
                self.log("train", inputs, outputs, losses)
                self.val()
            self.step += 1
        # Step the LR scheduler AFTER the epoch's optimizer updates.
        # The original called it at the start of the epoch, which skips the
        # initial learning rate and triggers a PyTorch >= 1.1 ordering warning.
        self.model_lr_scheduler.step()
        if (self.epoch + 1) % self.opt.save_frequency == 0:
            self.save_model()
def val(self):
"""Validate the model on a single minibatch
"""
if self.opt.uncert:
criterion = uncertainty_loss
else:
criterion = nn.L1Loss()
self.set_eval()
try:
inputs = self.val_iter.next()
except StopIteration:
self.val_iter = iter(self.val_loader)
inputs = self.val_iter.next()
with torch.no_grad():
rgb_img = inputs[("color", 0, 0)].to(self.device)
gt_depth = inputs["depth_gt"].to(self.device)
# Otherwise, we only feed the image with frame_id 0 through the depth encoder
features = self.models["encoder"](rgb_img)
outputs = self.models["depth"](features)
total_loss = 0
losses = {}
for scale in self.opt.scales:
depth = outputs[("depth", scale)]
depth = F.interpolate(depth, [self.opt.height, self.opt.width], mode="bilinear",
align_corners=False)
if self.opt.uncert:
uncert = outputs[("uncert", scale)]
uncert = F.interpolate(uncert, [self.opt.height, self.opt.width], mode="bilinear",
align_corners=False)
loss = criterion(depth, gt_depth, uncert)
else:
loss = criterion(depth, gt_depth)
total_loss += loss
losses["loss/{}".format(scale)] = loss
total_loss /= self.num_scales
losses["loss"] = total_loss
if "depth_gt" in inputs:
self.compute_depth_losses(inputs, outputs, losses)
self.log("val", inputs, outputs, losses)
del inputs, outputs, losses
self.set_train()
def compute_depth_losses(self, inputs, outputs, losses):
"""Compute depth metrics, to allow monitoring during training
This isn't particularly accurate as it averages over the entire batch,
so is only used to give an indication of validation performance
"""
depth_pred = outputs[("depth", 0)].detach()
depth_gt = inputs["depth_gt"].to(self.device)
mask = depth_gt > 0
depth_gt = depth_gt[mask]
depth_pred = depth_pred[mask]
depth_pred = torch.clamp(depth_pred, min=1e-3, max=10)
depth_errors = compute_depth_errors(depth_gt, depth_pred)
for i, metric in enumerate(self.depth_metric_names):
losses[metric] = np.array(depth_errors[i].cpu())
def log(self, mode, inputs, outputs, losses):
"""Write an event to the tensorboard events file
"""
writer = self.writers[mode]
for l, v in losses.items():
writer.add_scalar("{}".format(l), v, self.step)
for j in range(min(4, self.opt.batch_size)): # write a maxmimum of four images
writer.add_image("color/{}".format(j), inputs[("color", 0, 0)][j].data, self.step)
for s in self.opt.scales:
writer.add_image("depth_{}/{}".format(s, j), outputs[("depth", s)][j].data, self.step)
| 10,208 | 38.569767 | 117 | py |
GrUMoDepth | GrUMoDepth-main/evaluate_supervised.py | from __future__ import absolute_import, division, print_function
import warnings
import pickle
from torch.utils.data import DataLoader
from extended_options import *
import datasets as datasets
import monodepth2.networks as legacy
import networks as networks
import progressbar
import matplotlib.pyplot as plt
from gradients import *
from torchvision import transforms
from eval_utils import compute_eigen_errors, compute_eigen_errors_visu, compute_aucs
import sys
uncertainty_metrics = ["abs_rel", "rmse", "a1"]
splits_dir = os.path.join(os.path.dirname(__file__), "monodepth2/splits")
def batch_post_process_depth(l_depth, r_depth):
    """Apply the disparity post-processing method as introduced in Monodepthv1

    Blends each depth map predicted from an input image with the one predicted
    from its horizontally flipped counterpart: near the left border the flipped
    prediction dominates, near the right border the direct prediction does, and
    the central region uses their per-pixel mean.
    """
    _, height, width = l_depth.shape
    mean_depth = 0.5 * (l_depth + r_depth)
    # Horizontal coordinate grid in [0, 1], broadcast over the batch axis.
    grid_x, _ = np.meshgrid(np.linspace(0, 1, width), np.linspace(0, 1, height))
    left_weight = (1.0 - np.clip(20 * (grid_x - 0.05), 0, 1))[None, ...]
    right_weight = left_weight[:, :, ::-1]
    blended = right_weight * l_depth + left_weight * r_depth
    return blended + (1.0 - left_weight - right_weight) * mean_depth
def evaluate(opt):
    """Evaluates a pretrained model using a specified test set

    Loads the encoder/decoder weights from opt.load_weights_folder, runs
    inference on the NYU validation split, and (depending on flags) derives a
    per-pixel uncertainty map via gradients, MC dropout, inference-only
    dropout, test-time augmentation variance, post-processing, or a learned
    log-uncertainty head. Prints Eigen depth metrics and AUSE/AURG uncertainty
    metrics, and optionally saves qualitative depth/error/uncertainty maps.
    """
    MIN_DEPTH = 1e-3
    MAX_DEPTH = opt.max_depth
    # Batch size is forced to 1 so per-image normalisation and map saving work.
    opt.batch_size = 1
    print("-> Beginning inference...")
    opt.load_weights_folder = os.path.expanduser(opt.load_weights_folder)
    assert os.path.isdir(opt.load_weights_folder), "Cannot find a folder at {}".format(opt.load_weights_folder)
    print("-> Loading weights from {}".format(opt.load_weights_folder))
    # prepare just a single path
    encoder_path = os.path.join(opt.load_weights_folder, "encoder.pth")
    decoder_path = os.path.join(opt.load_weights_folder, "depth.pth")
    encoder_dict = torch.load(encoder_path)
    # Input resolution is stored alongside the encoder weights.
    height = encoder_dict['height']
    width = encoder_dict['width']
    dataset = datasets.NYUDataset(opt.data_path + '/val/', split='val', height=height, width=width)
    dataloader = DataLoader(dataset, opt.batch_size, shuffle=False, num_workers=opt.num_workers, pin_memory=True,
                            drop_last=False)
    # load a single encoder and decoder
    encoder = legacy.ResnetEncoder(opt.num_layers, False)
    depth_decoder = networks.DepthUncertaintyDecoder_Supervised(encoder.num_ch_enc, scales=opt.scales,
                                                                dropout=opt.dropout, uncert=opt.uncert)
    # With --infer_dropout a second decoder sharing the same weights applies
    # dropout only at inference time (uncertainty via prediction variance).
    if opt.infer_dropout:
        depth_decoder_drop = networks.DepthUncertaintyDecoder_Supervised(encoder.num_ch_enc, scales=opt.scales,
                                                                         dropout=opt.dropout, uncert=opt.uncert,
                                                                         infer_dropout=opt.infer_dropout,
                                                                         infer_p=opt.infer_p)
        depth_decoder_drop.load_state_dict(torch.load(decoder_path))
        depth_decoder_drop.cuda()
        depth_decoder_drop.eval()
    model_dict = encoder.state_dict()
    encoder.load_state_dict({k: v for k, v in encoder_dict.items() if k in model_dict})
    depth_decoder.load_state_dict(torch.load(decoder_path))
    encoder.cuda()
    encoder.eval()
    depth_decoder.cuda()
    depth_decoder.eval()
    # accumulators for depth and uncertainties
    pred_depths = []
    pred_uncerts = []
    # Gradient-based uncertainty: hook selected decoder layers, backprop a
    # reference loss per image, and use the recorded gradients as uncertainty.
    if opt.grad:
        ext_layer = ['decoder.0.conv', 'decoder.1.conv', 'decoder.2.conv', 'decoder.3.conv', 'decoder.4.conv',
                     'decoder.5.conv', 'decoder.6.conv', 'decoder.7.conv', 'decoder.8.conv', 'decoder.9.conv',
                     'decoder.10.conv']
        layer_list = [ext_layer[layer_idx] for layer_idx in opt.ext_layer]
        gradient_extractor = Gradient_Analysis(depth_decoder, layer_list, encoder_dict['height'],
                                               encoder_dict['width'], opt.gred)
        print("-> Extract gradients from model for uncertainty estimation")
        bwd_time = 0
        n_samples = 0
        # check loss function type
        if opt.gloss not in ["sq", "none", "var"]:
            raise NotImplementedError
        for i, inputs in enumerate(dataloader):
            rgb_img = inputs[("color", 0, 0)].cuda()
            gt_depth = inputs["depth_gt"]
            # Build the reference image(s) for the chosen gradient reference.
            if opt.gref == "flip":
                ref_img = torch.flip(rgb_img, [3])
            elif opt.gref == "gray":
                ref_img = transforms.Grayscale(num_output_channels=3)(rgb_img)
            elif opt.gref == "noise":
                ref_img = rgb_img + torch.normal(0.0, 0.01, rgb_img.size()).cuda()
            elif opt.gref == "rot":
                ref_img = transforms.functional.rotate(rgb_img, angle=opt.angle)
            elif opt.gref == "var":
                ref_imgs = [torch.flip(rgb_img, [3]), transforms.Grayscale(num_output_channels=3)(rgb_img),
                            rgb_img + torch.normal(0.0, 0.01, rgb_img.size()).cuda(),
                            transforms.functional.rotate(rgb_img, 10)]
            elif opt.gref in ["none", "gt"]:
                pass
            else:
                raise NotImplementedError
            # Predict the reference depth (no gradients needed here).
            if opt.gref in ["flip", "gray", "noise", "rot"]:
                with torch.no_grad():
                    output = depth_decoder(encoder(ref_img))
                    ref_depth = output[("depth", 0)]
                    if opt.uncert:
                        ref_uncert = output[("uncert", 0)]
            # Undo the augmentation so the reference aligns with the input.
            if opt.gref == "flip":
                ref_depth = torch.from_numpy(ref_depth.cpu().numpy()[:, :, :, ::-1].copy()).cuda()
                if opt.uncert:
                    ref_uncert = torch.from_numpy(ref_uncert.cpu().numpy()[:, :, :, ::-1].copy()).cuda()
            elif opt.gref == "rot":
                ref_depth = transforms.functional.rotate(ref_depth, -opt.angle)
                if opt.uncert:
                    ref_uncert = transforms.functional.rotate(ref_uncert, -opt.angle)
            elif opt.gref == "var":
                ref_depths = []
                with torch.no_grad():
                    for i, input in enumerate(ref_imgs):
                        output = depth_decoder(encoder(input))
                        if i == 0:
                            ref_depths.append(torch.flip(output[("depth", 0)], [3]))
                        elif i == 3:
                            ref_depths.append(transforms.functional.rotate(output[("depth", 0)], -10))
                        else:
                            ref_depths.append(output[("depth", 0)])
            elif opt.gref == "gt":
                ref_depth = gt_depth.cuda()
            elif opt.gref == "none":
                if opt.gloss != "none":
                    print("Gradient reference required for loss calculation.")
                    raise NotImplementedError
            else:
                raise NotImplementedError
            # Forward through the hooked model so backward() records gradients.
            output = gradient_extractor(encoder(rgb_img))
            pred_depth = output[("depth", 0)]
            n_samples += rgb_img.shape[0]
            loss = 0
            if opt.gloss == "var":
                loss = torch.var(torch.cat([pred_depth, ref_depths[0], ref_depths[1], ref_depths[2], ref_depths[3]], 0),
                                 dim=0)
                loss = torch.mean(loss)
            else:
                if opt.gloss == "sq":
                    depth_diff = squared_difference(pred_depth, ref_depth)
                    loss += torch.mean(depth_diff)
                if opt.uncert and opt.w != 0.0:
                    pred_uncert = output[("uncert", 0)]
                    uncert = torch.exp(pred_uncert) ** 2
                    loss += (opt.w * torch.mean(uncert))
            start_time = time.time()
            loss.backward()
            stop_time = time.time()
            bwd_time += (stop_time - start_time)
        # Collected (reduced, normalised, resized) gradients act as uncertainty.
        pred_uncerts = gradient_extractor.get_gradients()
        bwd_time = bwd_time / len(dataloader)
        print('\nAverage backward time: {:.2f} ms'.format(bwd_time * 1000))
    print("-> Computing predictions with size {}x{}".format(width, height))
    fwd_time = 0
    errors = []
    errors_abs_rel = []
    errors_rmse = []
    # dictionary with accumulators for each metric
    aucs = {"abs_rel": [], "rmse": [], "a1": []}
    curves = {"abs_rel": [], "rmse": [], "a1":[]}
    with torch.no_grad():
        bar = progressbar.ProgressBar(max_value=len(dataloader))
        for i, inputs in enumerate(dataloader):
            rgb_img = inputs[("color", 0, 0)]
            gt_depth = inputs["depth_gt"]
            gt_depth = gt_depth[:, 0].cpu().numpy()
            rgb_img = rgb_img.cuda()
            # updating progress bar
            bar.update(i)
            if opt.post_process:
                # post-processed results require each image to have two forward passes
                rgb_img = torch.cat((rgb_img, torch.flip(rgb_img, [3])), 0)
            if opt.dropout:
                # infer multiple predictions from multiple networks with dropout
                depth_distribution = []
                # we infer 8 predictions as the number of bootstraps and snaphots
                for j in range(8):
                    start_time = time.time()
                    output = depth_decoder(encoder(rgb_img))
                    stop_time = time.time()
                    depth_distribution.append(torch.unsqueeze(output[("depth", 0)], 0))
                depth_distribution = torch.cat(depth_distribution, 0)
                # uncertainty as variance of the predictions
                pred_uncert = torch.var(depth_distribution, dim=0, keepdim=False).cpu()[:, 0].numpy()
                pred_uncert = (pred_uncert - np.min(pred_uncert)) / (np.max(pred_uncert) - np.min(pred_uncert))
                pred_uncerts.append(pred_uncert)
                # depth as mean of the predictions
                pred_depth = torch.mean(depth_distribution, dim=0, keepdim=False).cpu()[:, 0].numpy()
            elif opt.infer_dropout:
                start_time = time.time()
                output = depth_decoder(encoder(rgb_img))
                stop_time = time.time()
                pred_depth = output[("depth", 0)][:, 0].cpu().numpy()
                # infer multiple predictions from multiple networks with dropout
                depth_distribution = []
                # we infer 8 predictions as the number of bootstraps and snaphots
                for j in range(8):
                    start_time = time.time()
                    output = depth_decoder_drop(encoder(rgb_img))
                    stop_time = time.time()
                    depth_distribution.append(torch.unsqueeze(output[("depth", 0)], 0))
                depth_distribution = torch.cat(depth_distribution, 0)
                # uncertainty as variance of the predictions
                pred_uncert = torch.var(depth_distribution, dim=0, keepdim=False).cpu()[:, 0].numpy()
                pred_uncert = (pred_uncert - np.min(pred_uncert)) / (np.max(pred_uncert) - np.min(pred_uncert))
                pred_uncerts.append(pred_uncert)
            elif opt.var_aug:
                # Test-time-augmentation ensemble: variance over augmented views.
                start_time = time.time()
                depth_distribution = []
                # normal depth
                output = depth_decoder(encoder(rgb_img))
                depth_output = output[("depth", 0)]
                pred_depth = depth_output[:, 0].cpu().numpy()
                depth_distribution.append(torch.unsqueeze(depth_output, 0))
                # first augmentation: flipping
                rgb_input = torch.flip(rgb_img, [3])
                output = depth_decoder(encoder(rgb_input))
                depth_output = output[("depth", 0)]
                depth_distribution.append(torch.unsqueeze(torch.flip(depth_output, [3]), 0))
                # second augmentation: gray-scale
                rgb_input = transforms.Grayscale(num_output_channels=3)(rgb_img)
                output = depth_decoder(encoder(rgb_input))
                depth_output = output[("depth", 0)]
                depth_distribution.append(torch.unsqueeze(depth_output, 0))
                # third augmentation: additive noise
                rgb_input = rgb_img + torch.normal(0.0, 0.01, rgb_img.size()).cuda()
                output = depth_decoder(encoder(rgb_input))
                depth_output = output[("depth", 0)]
                depth_distribution.append(torch.unsqueeze(depth_output, 0))
                # last augmentation: rotation
                rgb_input = transforms.functional.rotate(rgb_img, 10)
                output = depth_decoder(encoder(rgb_input))
                depth_output = output[("depth", 0)]
                depth_distribution.append(torch.unsqueeze(transforms.functional.rotate(depth_output, -10), 0))
                depth_distribution = torch.cat(depth_distribution, 0)
                pred_uncert = torch.var(depth_distribution, dim=0, keepdim=False).cpu()[:, 0].numpy()
                pred_uncert = (pred_uncert - np.min(pred_uncert)) / (np.max(pred_uncert) - np.min(pred_uncert))
                pred_uncerts.append(pred_uncert)
                stop_time = time.time()
            else:
                start_time = time.time()
                output = depth_decoder(encoder(rgb_img))
                stop_time = time.time()
                pred_depth = output[("depth", 0)][:, 0]
                pred_depth = pred_depth.cpu().numpy()
            fwd_time += (stop_time - start_time)
            if opt.post_process:
                # applying Monodepthv1 post-processing to improve depth and get uncertainty
                N = pred_depth.shape[0] // 2
                pred_uncert = np.abs(pred_depth[:N] - pred_depth[N:, :, ::-1])
                pred_depth = batch_post_process_depth(pred_depth[:N], pred_depth[N:, :, ::-1])
                pred_uncerts.append(pred_uncert)
            # only needed is maps are saved
            pred_depths.append(pred_depth)
            if opt.log:
                # Learned log-uncertainty head: sigma = exp(log-variance output).
                pred_uncert = torch.exp(output[("uncert", 0)])[:,0].cpu().numpy()
                pred_uncert = (pred_uncert - np.min(pred_uncert)) / (np.max(pred_uncert) - np.min(pred_uncert))
                pred_uncerts.append(pred_uncert)
            if opt.grad:
                # pred_uncerts is a (num_images, H, W) array here (from get_gradients).
                pred_uncert = pred_uncerts[i].reshape(1, pred_uncerts.shape[1], pred_uncerts.shape[2])
            # traditional eigen crop
            mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH)
            # get error maps
            tmp_abs_rel, tmp_rmse, tmp_a1 = compute_eigen_errors_visu(gt_depth, pred_depth)
            errors_abs_rel.append(tmp_abs_rel)
            errors_rmse.append(tmp_rmse)
            # apply masks
            pred_depth = pred_depth[mask]
            gt_depth = gt_depth[mask]
            if opt.eval_uncert:
                pred_uncert = pred_uncert[mask]
            # apply depth cap
            pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH
            pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH
            # get Eigen's metrics
            errors.append(compute_eigen_errors(gt_depth, pred_depth))
            if opt.eval_uncert:
                # get uncertainty metrics (AUSE and AURG)
                scores, spars_plots = compute_aucs(gt_depth, pred_depth, pred_uncert)
                # append AUSE and AURG to accumulators
                [aucs[m].append(scores[m]) for m in uncertainty_metrics]
                [curves[m].append(spars_plots[m]) for m in uncertainty_metrics]
    fwd_time = fwd_time / len(dataset)
    print('\nAverage inference: {:.2f} ms'.format(fwd_time * 1000))
    if type(pred_uncerts) == list:
        pred_uncerts = np.concatenate(pred_uncerts)
    pred_depths = np.concatenate(pred_depths)
    if opt.save_error_map:
        errors_abs_rel = np.concatenate(errors_abs_rel)
        errors_rmse = np.concatenate(errors_rmse)
    # compute mean depth metrics and print
    mean_errors = np.array(errors).mean(0)
    print("\n  " + ("{:>8} | " * 7).format("abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"))
    print(("&{: 8.3f}  " * 7).format(*mean_errors.tolist()) + "\\\\")
    # pred_depths = np.concatenate(pred_depths)
    if opt.eval_uncert:
        # compute mean uncertainty metrics and print
        for m in uncertainty_metrics:
            aucs[m] = np.array(aucs[m]).mean(0)
        print("\n  " + ("{:>8} | " * 6).format("abs_rel", "", "rmse", "", "a1", ""))
        print("  " + ("{:>8} | " * 6).format("AUSE", "AURG", "AUSE", "AURG", "AUSE", "AURG"))
        print(
            ("&{:8.3f} " * 6).format(*aucs["abs_rel"].tolist() + aucs["rmse"].tolist() + aucs["a1"].tolist()) + "\\\\")
        # save sparsification plots
        if not os.path.exists(opt.output_dir):
            os.mkdir(opt.output_dir)
        pickle.dump(curves, open(os.path.join(opt.output_dir, "spars_plots.pkl"), "wb"))
    if opt.save_depth_map:
        # check if output directory exists
        if not os.path.exists(opt.output_dir):
            os.mkdir(opt.output_dir)
        # only save qualitative results
        if not os.path.exists(os.path.join(opt.output_dir, "depth")):
            os.makedirs(os.path.join(opt.output_dir, "depth"))
        print("--> Saving qualitative depth maps")
        bar = progressbar.ProgressBar(max_value=len(pred_depths))
        for i in range(len(pred_depths)):
            bar.update(i)
            # save colored depth maps
            plt.imsave(os.path.join(opt.output_dir, "depth", '%06d_10.png' % i), pred_depths[i],
                       cmap='magma_r')
    if opt.save_error_map:
        if not os.path.exists(opt.output_dir):
            os.mkdir(opt.output_dir)
        if not os.path.exists(os.path.join(opt.output_dir, "abs_rel")):
            os.makedirs(os.path.join(opt.output_dir, "abs_rel"))
        if not os.path.exists(os.path.join(opt.output_dir, "rmse")):
            os.makedirs(os.path.join(opt.output_dir, "rmse"))
        if not os.path.exists(os.path.join(opt.output_dir, "a1")):
            os.makedirs(os.path.join(opt.output_dir, "a1"))
        print("--> Saving qualitative error maps: abs rel")
        bar = progressbar.ProgressBar(max_value=len(errors_abs_rel))
        for i in range(len(errors_abs_rel)):
            bar.update(i)
            # save colored depth maps
            plt.imsave(os.path.join(opt.output_dir, "abs_rel", '%06d_10.png' % i), errors_abs_rel[i], cmap='hot')
        print("--> Saving qualitative error maps: rmse")
        bar = progressbar.ProgressBar(max_value=len(errors_rmse))
        for i in range(len(errors_rmse)):
            bar.update(i)
            # save colored depth maps
            plt.imsave(os.path.join(opt.output_dir, "rmse", '%06d_10.png' % i), errors_rmse[i], cmap='hot')
    if opt.save_uncert_map:
        # check if output directory exists
        if not os.path.exists(opt.output_dir):
            os.mkdir(opt.output_dir)
        # Encode the uncertainty configuration in the output folder name.
        if opt.grad:
            folder_name = "uncert_" + opt.gref + "_" + opt.gloss + "_" + opt.gred
            if opt.w != 0.0:
                folder_name = folder_name + "_weight" + str(opt.w)
            folder_name = folder_name + "_layer_" + "_".join(str(x) for x in opt.ext_layer)
        elif opt.infer_dropout:
            folder_name = "uncert_p_" + str(opt.infer_p)
        else:
            folder_name = "uncert"
        if not os.path.exists(os.path.join(opt.output_dir, folder_name)):
            os.makedirs(os.path.join(opt.output_dir, folder_name))
        print("--> Saving qualitative uncertainty maps")
        bar = progressbar.ProgressBar(max_value=len(pred_uncerts))
        for i in range(len(pred_uncerts)):
            bar.update(i)
            # save colored uncertainty maps
            plt.imsave(os.path.join(opt.output_dir, folder_name, '%06d_10.png' % i), pred_uncerts[i], cmap='hot')
    # see you next time!
    print("\n-> Done!")
if __name__ == "__main__":
    # Silence noisy user warnings (e.g. from deprecated torch/torchvision APIs).
    warnings.simplefilter("ignore", UserWarning)
    evaluate(UncertaintyOptions().parse())
| 19,995 | 44.036036 | 120 | py |
GrUMoDepth | GrUMoDepth-main/gradients.py | import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import time
def squared_difference(prediction, ground_truth):
    """Return the element-wise squared difference between two tensors.

    Args:
        prediction: predicted tensor.
        ground_truth: reference tensor, broadcastable against ``prediction``.

    Returns:
        Tensor of the broadcast shape holding ``(prediction - ground_truth) ** 2``.
    """
    # For real-valued tensors |a - b|**2 == (a - b)**2, so the former
    # torch.abs() call was redundant and has been dropped.
    difference = (prediction - ground_truth) ** 2
    return difference
class Gradient_Analysis(nn.Module):
    # Wraps a depth model and records the gradients flowing into selected
    # decoder layers during backward(). Each recorded gradient map is reduced
    # over channels, min-max normalised, and resized to (height, width) so it
    # can serve as a per-pixel uncertainty map.
    def __init__(self, model: nn.Module, layer_list: list, height: int, width: int, reduction: str):
        """Register backward hooks on every module whose name is in layer_list.

        Args:
            model: the network to analyse (called unchanged in forward()).
            layer_list: fully-qualified module names to hook.
            height, width: target resolution for the gradient maps.
            reduction: channel reduction — 'sum', 'mean', 'max' or 'norm'.
        """
        super().__init__()
        self.model = model
        self.grad_dict = {}
        self.height = height
        self.width = width
        self.stop_time = 0
        self.reduction = reduction
        for layer in layer_list:
            self.grad_dict[layer] = []
        for name, layer in self.model.named_modules():
            # Tag each module with its name so the hook can index grad_dict.
            layer.__name__ = name
            if name in layer_list:
                # NOTE(review): register_backward_hook is deprecated in newer
                # PyTorch in favour of register_full_backward_hook — confirm
                # behaviour if upgrading.
                layer.register_backward_hook(self.get_feature_map_gradient())
    def get_feature_map_gradient(self):
        # Returns the hook closure that captures and post-processes gradients.
        def bn(layer, _, grad_output):
            gradients = grad_output[0].cpu().numpy()
            self.stop_time = time.time()
            if gradients.shape[1] == 1:
                # Single-channel gradients: just drop the channel axis.
                gradients = gradients.squeeze()
            else:
                # Reduce the channel axis with the configured operator.
                if self.reduction == 'sum':
                    gradients = np.sum(gradients, axis=1)
                elif self.reduction == 'mean':
                    gradients = np.mean(gradients, axis=1)
                elif self.reduction == 'max':
                    gradients = np.max(gradients, axis=1)
                elif self.reduction == 'norm':
                    gradients = np.linalg.norm(gradients, axis=1)
            # Min-max normalise over the whole (batch, H, W) block.
            gradients = (gradients - np.min(gradients)) / (np.max(gradients) - np.min(gradients))
            if gradients.shape[1] != self.height or gradients.shape[2] != self.width:
                # Resize each map to the full input resolution.
                resized_grads = []
                for i in range(gradients.shape[0]):
                    temp = cv2.resize(gradients[i], dsize=(self.width, self.height))
                    temp = np.expand_dims(temp, axis=0)
                    resized_grads.append(temp)
                gradients = np.concatenate(resized_grads)
            self.grad_dict[layer.__name__].append(gradients)
        return bn
    def get_gradients(self):
        """Return the accumulated gradient maps as one (N, H, W) array.

        With a single hooked layer the raw maps are returned; with several
        layers, the per-pixel standard deviation across layers (a VOG-style
        variance-of-gradients score) is returned instead.
        """
        grad_list = []
        for key in self.grad_dict.keys():
            grad_list.append(np.concatenate(self.grad_dict[key]))
        grad_array = np.array(grad_list)
        if len(grad_list) == 1:
            return grad_array.squeeze(0)
        else:
            mean_grad = np.sum(grad_array, axis=0) / len(grad_array)
            layer_vog = np.sqrt(sum([(mm-mean_grad)**2 for mm in grad_array]) / len(grad_array))
            return layer_vog
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Transparent pass-through; gradients are captured by the hooks.
        return self.model(x)
| 2,728 | 36.902778 | 100 | py |
GrUMoDepth | GrUMoDepth-main/networks/decoder_supervised.py | # # Monodepth2 extended to estimate depth and uncertainty
# #
# # This software is licensed under the terms of the Monodepth2 licence
# # which allows for non-commercial use only, the full terms of which are
# # available at https://github.com/nianticlabs/monodepth2/blob/master/LICENSE
# """
# Source: modified from https://github.com/mattpoggi/mono-uncertainty
# """
"""
Source: https://github.com/mattpoggi/mono-uncertainty/blob/master/networks/decoder.py
-- further modified for supervised training: last sigmoid is removed to directly estimate the depth instead of the scaled disparity
"""
import torch
import torch.nn as nn
import numpy as np
from collections import OrderedDict
from monodepth2.layers import *
class DepthUncertaintyDecoder_Supervised(nn.Module):
    # Monodepth2-style depth decoder for supervised training: outputs raw
    # (unbounded) depth per scale instead of a sigmoid-scaled disparity, plus
    # an optional uncertainty map per scale. Dropout can be enabled at train
    # time (`dropout`) or at inference only (`infer_dropout`, for MC sampling).
    def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True, dropout=False, uncert=False,
                 infer_dropout=False, infer_p=0.01):
        """Build the upconv pyramid and per-scale output heads.

        Args:
            num_ch_enc: channel counts of the encoder feature maps (skips).
            scales: decoder scales at which depth (and uncertainty) is emitted.
            num_output_channels: channels per output head (1 for depth).
            use_skips: concatenate encoder skip connections when True.
            dropout: apply 2D dropout (p=0.2) after every upconv.
            uncert: add a parallel uncertainty head per scale.
            infer_dropout / infer_p: inference-only dropout and its rate.
        """
        super(DepthUncertaintyDecoder_Supervised, self).__init__()
        self.dropout = dropout
        self.p = 0.2
        self.uncert = uncert
        self.infer_dropout = infer_dropout
        self.infer_p = infer_p
        self.output_channels = num_output_channels
        self.use_skips = use_skips
        self.scales = scales
        self.encoder_channels = num_ch_enc
        self.decoder_channels = np.array([16, 32, 64, 128, 256])
        # decoder
        # NOTE: the insertion order of self.convs determines the ModuleList
        # indices and therefore the checkpoint keys — do not reorder.
        self.convs = OrderedDict()
        for i in range(4, -1, -1):
            # upconv_0
            num_in = self.encoder_channels[-1] if i == 4 else self.decoder_channels[i + 1]
            num_out = self.decoder_channels[i]
            self.convs[("upconv", i, 0)] = ConvBlock(num_in, num_out)
            # upconv_1
            num_in = self.decoder_channels[i]
            if self.use_skips and i > 0:
                num_in += self.encoder_channels[i - 1]
            num_out = self.decoder_channels[i]
            self.convs[("upconv", i, 1)] = ConvBlock(num_in, num_out)
        for s in self.scales:
            self.convs[("depthconv", s)] = Conv3x3(self.decoder_channels[s], self.output_channels)
            if self.uncert:
                self.convs[("uncertconv", s)] = Conv3x3(self.decoder_channels[s], self.output_channels)
        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.sigmoid = nn.Sigmoid()
    def forward(self, input_features):
        """Decode encoder features into {("depth", s)} / {("uncert", s)} maps."""
        self.outputs = {}
        # decoder
        x = input_features[-1]
        for i in range(4, -1, -1):
            x = self.convs[("upconv", i, 0)](x)
            # training=True keeps dropout active even in eval() mode, which is
            # what Monte-Carlo dropout sampling relies on.
            if self.dropout:
                x = F.dropout2d(x, p=self.p, training=True)
            if self.infer_dropout:
                x = F.dropout2d(x, p=self.infer_p, training=True)
            x = [upsample(x)]
            if self.use_skips and i > 0:
                x += [input_features[i - 1]]
            x = torch.cat(x, 1)
            x = self.convs[("upconv", i, 1)](x)
            if self.dropout:
                x = F.dropout2d(x, p=self.p, training=True)
            if self.infer_dropout:
                x = F.dropout2d(x, p=self.infer_p, training=True)
            if i in self.scales:
                # No sigmoid here: the head regresses metric depth directly.
                self.outputs[("depth", i)] = self.convs[("depthconv", i)](x)
                if self.uncert:
                    uncerts = self.convs[("uncertconv", i)](x)
                    self.outputs[("uncert", i)] = uncerts
        return self.outputs | 3,431 | 35.903226 | 131 | py |
GrUMoDepth | GrUMoDepth-main/networks/decoder.py | # # Monodepth2 extended to estimate depth and uncertainty
# #
# # This software is licensed under the terms of the Monodepth2 licence
# # which allows for non-commercial use only, the full terms of which are
# # available at https://github.com/nianticlabs/monodepth2/blob/master/LICENSE
# """
# Source: modified from https://github.com/mattpoggi/mono-uncertainty
# """
#
# MIT License
#
# Copyright (c) 2020 Matteo Poggi m.poggi@unibo.it
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Source: https://github.com/mattpoggi/mono-uncertainty/blob/master/networks/decoder.py
--> Further extended to use inference only dropout
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
from monodepth2.layers import *
class MyDataParallel(nn.DataParallel):
    """DataParallel wrapper that transparently exposes the wrapped module's
    attributes (custom methods, buffers, config fields) on the wrapper itself.
    """
    def __getattr__(self, name):
        # First defer to the normal nn.Module/nn.DataParallel resolution so
        # the wrapper's own attributes (including ``module`` itself) remain
        # reachable and we avoid infinite recursion before ``module`` exists.
        try:
            return super(MyDataParallel, self).__getattr__(name)
        except AttributeError:
            # Fall back to the wrapped module for everything else.
            return getattr(self.module, name)
class DepthUncertaintyDecoder(nn.Module):
    # Monodepth2 depth decoder extended with an optional per-scale uncertainty
    # head and with dropout that can be active at train time (`dropout`) or at
    # inference only (`infer_dropout`, for Monte-Carlo sampling).
    def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True, uncert=False, dropout=False,
                 infer_dropout=False, infer_p=0.2):
        """Build the upconv pyramid and per-scale disparity/uncertainty heads.

        Args:
            num_ch_enc: channel counts of the encoder feature maps (skips).
            scales: decoder scales at which outputs are emitted.
            num_output_channels: channels per output head (1 for disparity).
            use_skips: concatenate encoder skip connections when True.
            uncert: add a parallel uncertainty head per scale.
            dropout: apply 2D dropout (p=0.2) after every upconv.
            infer_dropout / infer_p: inference-only dropout and its rate.
        """
        super(DepthUncertaintyDecoder, self).__init__()
        self.num_output_channels = num_output_channels
        self.use_skips = use_skips
        self.upsample_mode = 'nearest'
        self.scales = scales
        self.p = 0.2
        self.uncert = uncert
        self.dropout = dropout
        self.infer_dropout = infer_dropout
        self.infer_p = infer_p
        self.num_ch_enc = num_ch_enc
        self.num_ch_dec = np.array([16, 32, 64, 128, 256])
        # decoder
        # NOTE: the insertion order of self.convs determines the ModuleList
        # indices and therefore the checkpoint keys — do not reorder.
        self.convs = OrderedDict()
        for i in range(4, -1, -1):
            # upconv_0
            num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]
            num_ch_out = self.num_ch_dec[i]
            self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out)
            # upconv_1
            num_ch_in = self.num_ch_dec[i]
            if self.use_skips and i > 0:
                num_ch_in += self.num_ch_enc[i - 1]
            num_ch_out = self.num_ch_dec[i]
            self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out)
        for s in self.scales:
            self.convs[("dispconv", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels)
            if self.uncert:
                self.convs[("uncertconv", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels)
        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.sigmoid = nn.Sigmoid()
    def forward(self, input_features):
        """Decode encoder features into {("disp", s)} / {("uncert", s)} maps."""
        self.outputs = {}
        # decoder
        x = input_features[-1]
        for i in range(4, -1, -1):
            x = self.convs[("upconv", i, 0)](x)
            # training=True keeps dropout active even in eval() mode, which is
            # what Monte-Carlo dropout sampling relies on.
            if self.dropout:
                x = F.dropout2d(x, p=self.p, training=True)
            if self.infer_dropout:
                x = F.dropout2d(x, p=self.infer_p, training=True)
            x = [upsample(x)]
            if self.use_skips and i > 0:
                x += [input_features[i - 1]]
            x = torch.cat(x, 1)
            x = self.convs[("upconv", i, 1)](x)
            if self.dropout:
                x = F.dropout2d(x, p=self.p, training=True)
            if self.infer_dropout:
                x = F.dropout2d(x, p=self.infer_p, training=True)
            if i in self.scales:
                # NOTE(review): stores the conv *module* itself under
                # ("dispconv", i) — presumably so callers can inspect its
                # weights/gradients; confirm against downstream use.
                self.outputs[("dispconv", i)] = self.convs[("dispconv", i)]
                disps = self.convs[("dispconv", i)](x)
                self.outputs[("disp", i)] = self.sigmoid(disps)
                if self.uncert:
                    uncerts = self.convs[("uncertconv", i)](x)
                    self.outputs[("uncert", i)] = uncerts
        return self.outputs
| 4,777 | 34.656716 | 119 | py |
GrUMoDepth | GrUMoDepth-main/datasets/nyu_dataset.py | # MIT License
#
# Copyright (c) 2019 Diana Wofk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Source: modified from https://github.com/dwofk/fast-depth
"""
import os
import os.path
import numpy as np
import torch.utils.data as data
import h5py
from datasets import transforms
iheight, iwidth = 480, 640 # raw image size
def h5_loader(path):
    """Load one NYU sample from an ``.h5`` file.

    Args:
        path: filesystem path to the ``.h5`` sample.

    Returns:
        Tuple ``(rgb, depth)``: ``rgb`` as an H x W x 3 array (channels moved
        last from the stored 3 x H x W layout) and ``depth`` as an H x W array.
    """
    # FIX: open via a context manager so the HDF5 handle is always closed
    # (the previous version leaked one file handle per sample). np.array()
    # copies the data, so the arrays remain valid after closing.
    with h5py.File(path, "r") as h5f:
        rgb = np.transpose(np.array(h5f['rgb']), (1, 2, 0))
        depth = np.array(h5f['depth'])
    return rgb, depth
class MyDataloader(data.Dataset):
    # Generic folder-per-class dataset of .h5 (rgb, depth) samples. Subclasses
    # must implement train_transform/val_transform; __getitem__ returns a dict
    # keyed like monodepth2 inputs: ("color", 0, 0) and "depth_gt".
    modality_names = ['rgb']  # input modalities supported by this loader
    def is_image_file(self, filename):
        """Return True if *filename* looks like a sample file (.h5)."""
        IMG_EXTENSIONS = ['.h5']
        return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
    def find_classes(self, dir):
        """Return (sorted subdirectory names, name -> index mapping) for *dir*."""
        classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
        classes.sort()
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        return classes, class_to_idx
    def make_dataset(self, dir, class_to_idx):
        """Walk *dir* and collect (path, class index) pairs, sorted for determinism."""
        images = []
        dir = os.path.expanduser(dir)
        for target in sorted(os.listdir(dir)):
            d = os.path.join(dir, target)
            if not os.path.isdir(d):
                continue
            for root, _, fnames in sorted(os.walk(d)):
                for fname in sorted(fnames):
                    if self.is_image_file(fname):
                        path = os.path.join(root, fname)
                        item = (path, class_to_idx[target])
                        images.append(item)
        return images
    # Shared (class-level) color jitter used by subclasses' train transforms.
    color_jitter = transforms.ColorJitter(0.4, 0.4, 0.4)
    def __init__(self, root, split, modality='rgb', loader=h5_loader):
        """Index the dataset under *root* and select the transform for *split*.

        Args:
            root: dataset directory, relative to the current working directory.
            split: 'train', 'holdout' or 'val' (holdout/val share val_transform).
            modality: input modality; only 'rgb' is supported.
            loader: callable mapping a sample path to (rgb, depth) arrays.
        """
        root_path = os.getcwd()
        self.root = os.path.join(root_path, root)
        classes, class_to_idx = self.find_classes(self.root)
        imgs = self.make_dataset(self.root, class_to_idx)
        assert len(imgs)>0, "Found 0 images in subfolders of: " + self.root + "\n"
        self.imgs = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx
        if split == 'train':
            self.transform = self.train_transform
        elif split == 'holdout':
            self.transform = self.val_transform
        elif split == 'val':
            self.transform = self.val_transform
        else:
            raise (RuntimeError("Invalid dataset split: " + split + "\n"
                                "Supported dataset splits are: train, val"))
        self.loader = loader
        assert (modality in self.modality_names), "Invalid modality split: " + modality + "\n" + \
                                "Supported dataset splits are: " + ''.join(self.modality_names)
        self.modality = modality
    def train_transform(self, rgb, depth):
        # Abstract: subclasses provide the training-time augmentation pipeline.
        raise (RuntimeError("train_transform() is not implemented. "))
    def val_transform(self, rgb, depth):
        # Abstract: subclasses provide the evaluation-time preprocessing.
        raise (RuntimeError("val_transform() is not implemented."))
    def __getraw__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (rgb, depth) the raw data.
        """
        path, target = self.imgs[index]
        rgb, depth = self.loader(path)
        return rgb, depth
    def __getitem__(self, index):
        """Load, transform and tensorize sample *index* into an inputs dict."""
        rgb, depth = self.__getraw__(index)
        if self.transform is not None:
            rgb_np, depth_np = self.transform(rgb, depth)
        else:
            raise(RuntimeError("transform not defined"))
        if self.modality == 'rgb':
            input_np = rgb_np
        to_tensor = transforms.ToTensor()
        input_tensor = to_tensor(input_np)
        # Guarantee a (C, H, W) layout even for single-channel inputs.
        while input_tensor.dim() < 3:
            input_tensor = input_tensor.unsqueeze(0)
        depth_tensor = to_tensor(depth_np)
        depth_tensor = depth_tensor.unsqueeze(0)
        inputs = {}
        inputs[("color", 0, 0)] = input_tensor
        inputs["depth_gt"] = depth_tensor
        return inputs
    def __len__(self):
        return len(self.imgs)
class NYUDataset(MyDataloader):
    # NYU Depth v2 dataset. The train split excludes two hard-coded files
    # (00001.h5, 00201.h5) that form the 'holdout' split; 'val' uses all .h5
    # files under the given root.
    def __init__(self, root, split, modality='rgb', height=224, width=288):
        """Index the NYU samples and set the (height, width) output crop size."""
        self.split = split
        super(NYUDataset, self).__init__(root, split, modality)
        self.output_size = (height, width)
    def is_image_file(self, filename):
        # IMG_EXTENSIONS = ['.h5']
        # Split-aware filter: train excludes the holdout files, holdout keeps
        # exactly those, val keeps every .h5 file.
        if self.split == 'train':
            return (filename.endswith('.h5') and \
                    '00001.h5' not in filename and '00201.h5' not in filename)
        elif self.split == 'holdout':
            return ('00001.h5' in filename or '00201.h5' in filename)
        elif self.split == 'val':
            return (filename.endswith('.h5'))
        else:
            raise (RuntimeError("Invalid dataset split: " + self.split + "\n"
                                "Supported dataset splits are: train, val"))
    def train_transform(self, rgb, depth):
        """Randomly scale/rotate/flip/crop and color-jitter a training sample.

        Depth is divided by the scale factor so that scaled images keep a
        geometrically consistent depth.
        """
        s = np.random.uniform(1.0, 1.5)  # random scaling
        depth_np = depth / s
        angle = np.random.uniform(-5.0, 5.0)  # random rotation degrees
        do_flip = np.random.uniform(0.0, 1.0) < 0.5  # random horizontal flip
        # perform 1st step of data augmentation
        transform = transforms.Compose([
            transforms.Resize(240.0 / iheight),  # this is for computational efficiency, since rotation can be slow
            transforms.Rotate(angle),
            transforms.Resize(s),
            transforms.HorizontalFlip(do_flip),
            transforms.CenterCrop(self.output_size),
        ])
        rgb_np = transform(rgb)
        rgb_np = self.color_jitter(rgb_np)  # random color jittering
        # NOTE(review): np.asfarray is removed in NumPy 2.0 — consider
        # np.asarray(..., dtype=float) when upgrading.
        rgb_np = np.asfarray(rgb_np, dtype='float') / 255
        depth_np = transform(depth_np)
        return rgb_np, depth_np
    def val_transform(self, rgb, depth):
        """Deterministically resize and center-crop an evaluation sample."""
        depth_np = depth
        transform = transforms.Compose([
            transforms.Resize(240.0 / iheight),
            transforms.CenterCrop(self.output_size),
        ])
        rgb_np = transform(rgb)
        rgb_np = np.asfarray(rgb_np, dtype='float') / 255
        depth_np = transform(depth_np)
        return rgb_np, depth_np | 7,124 | 36.109375 | 114 | py |
GrUMoDepth | GrUMoDepth-main/datasets/transforms.py | # MIT License
#
# Copyright (c) 2019 Diana Wofk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Source: modified from https://github.com/dwofk/fast-depth
"""
from __future__ import division
import torch
import math
import random
from PIL import Image, ImageOps, ImageEnhance
try:
import accimage
except ImportError:
accimage = None
import numpy as np
import numbers
import types
import collections
import warnings
import scipy.ndimage.interpolation as itpl
import scipy.misc as misc
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
def _is_pil_image(img):
    """True for a PIL image (or an accimage image when accimage is installed)."""
    pil_types = (Image.Image,) if accimage is None else (Image.Image, accimage.Image)
    return isinstance(img, pil_types)
def _is_tensor_image(img):
return torch.is_tensor(img) and img.ndimension() == 3
def adjust_brightness(img, brightness_factor):
    """Scale the brightness of a PIL image.

    Args:
        img (PIL Image): image to adjust.
        brightness_factor (float): non-negative factor; 0 gives a black image,
            1 the original, 2 doubles the brightness.
    Returns:
        PIL Image: brightness-adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return ImageEnhance.Brightness(img).enhance(brightness_factor)
def adjust_contrast(img, contrast_factor):
    """Scale the contrast of a PIL image.

    Args:
        img (PIL Image): image to adjust.
        contrast_factor (float): non-negative factor; 0 gives solid gray,
            1 the original, 2 doubles the contrast.
    Returns:
        PIL Image: contrast-adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return ImageEnhance.Contrast(img).enhance(contrast_factor)
def adjust_saturation(img, saturation_factor):
    """Scale the color saturation of a PIL image.

    Args:
        img (PIL Image): image to adjust.
        saturation_factor (float): 0 gives black-and-white, 1 the original,
            2 doubles the saturation.
    Returns:
        PIL Image: saturation-adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return ImageEnhance.Color(img).enhance(saturation_factor)
def adjust_hue(img, hue_factor):
    """Shift the hue channel of an image.

    The image is converted to HSV, the H channel is cyclically shifted by
    ``hue_factor * 255`` (uint8 wrap-around implements the cyclic shift),
    and the result is converted back to the original mode.

    Args:
        img (PIL Image): image to adjust.
        hue_factor (float): shift in [-0.5, 0.5]; -0.5 and 0.5 give a complete
            (complementary) hue reversal, 0 returns the image unchanged.
    Returns:
        PIL Image: hue-adjusted image.
    Raises:
        ValueError: if hue_factor is outside [-0.5, 0.5].
        TypeError: if img is not a PIL image.
    """
    if not(-0.5 <= hue_factor <= 0.5):
        # FIX: the original message had no '{}' placeholder, so .format()
        # silently dropped the offending value from the error text.
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    input_mode = img.mode
    # single-channel / integer modes carry no hue information
    if input_mode in {'L', '1', 'I', 'F'}:
        return img
    h, s, v = img.convert('HSV').split()
    np_h = np.array(h, dtype=np.uint8)
    # uint8 addition takes care of rotation across boundaries
    with np.errstate(over='ignore'):
        np_h += np.uint8(hue_factor * 255)
    h = Image.fromarray(np_h, 'L')
    img = Image.merge('HSV', (h, s, v)).convert(input_mode)
    return img
def adjust_gamma(img, gamma, gain=1):
    """Apply gamma (power-law) correction: I_out = 255 * gain * ((I_in / 255) ** gamma).

    Args:
        img (PIL Image): image to adjust.
        gamma (float): non-negative exponent; > 1 darkens shadows, < 1 lightens
            dark regions.
        gain (float): constant multiplier.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    if gamma < 0:
        raise ValueError('Gamma should be a non-negative real number')
    input_mode = img.mode
    # work in RGB float space, then clip back to uint8
    rgb = np.array(img.convert('RGB'), dtype=np.float32)
    corrected = np.uint8(np.clip(255 * gain * ((rgb / 255) ** gamma), 0, 255))
    return Image.fromarray(corrected, 'RGB').convert(input_mode)
class Compose(object):
    """Chain several transforms: each callable is applied to the previous output.

    Args:
        transforms (list of ``Transform`` objects): transforms to compose.
    Example:
        >>> Compose([CenterCrop(10), ToTensor()])
    """
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, img):
        out = img
        for transform in self.transforms:
            out = transform(out)
        return out
class ToTensor(object):
    """Convert a ``numpy.ndarray`` (H x W x C or H x W) to a torch.FloatTensor.

    3-D input is permuted to (C x H x W). Unlike torchvision's ToTensor,
    values are NOT divided by 255 (the old behavior is kept commented out
    upstream for backward compatibility).
    """
    def __call__(self, img):
        """
        Args:
            img (numpy.ndarray): image to be converted to tensor.
        Returns:
            Tensor: converted image.
        """
        if not _is_numpy_image(img):
            raise TypeError('img should be ndarray. Got {}'.format(type(img)))
        if img.ndim == 3:
            tensor = torch.from_numpy(img.transpose((2, 0, 1)).copy())
        elif img.ndim == 2:
            tensor = torch.from_numpy(img.copy())
        else:
            raise RuntimeError(
                'img should be ndarray with 2 or 3 dimensions. Got {}'.format(img.ndim))
        return tensor.float()
class NormalizeNumpyArray(object):
    """Normalize a ``numpy.ndarray`` channel-wise: x[..., c] = (x[..., c] - mean[c]) / std[c].

    Args:
        mean (sequence): per-channel means.
        std (sequence): per-channel standard deviations.
    """
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std
    def __call__(self, img):
        """
        Args:
            img (numpy.ndarray): image of size (H, W, C) to be normalized.
        Returns:
            numpy.ndarray: normalized image (modified in place).
        """
        if not(_is_numpy_image(img)):
            raise TypeError('img should be ndarray. Got {}'.format(type(img)))
        # FIX: removed leftover debug `print(img.shape)`; generalized the loop
        # from a hard-coded 3 channels to len(self.mean) (identical for RGB).
        for i in range(len(self.mean)):
            img[:, :, i] = (img[:, :, i] - self.mean[i]) / self.std[i]
        return img
class NormalizeTensor(object):
    """Normalize a (C, H, W) tensor in place: t[c] = (t[c] - mean[c]) / std[c].

    Args:
        mean (sequence): per-channel means.
        std (sequence): per-channel standard deviations.
    """
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std
    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): (C, H, W) image tensor to be normalized.
        Returns:
            Tensor: the same tensor, normalized in place.
        """
        if not _is_tensor_image(tensor):
            raise TypeError('tensor is not a torch image.')
        for channel, channel_mean, channel_std in zip(tensor, self.mean, self.std):
            channel.sub_(channel_mean).div_(channel_std)
        return tensor
class Rotate(object):
    """Rotates the given ``numpy.ndarray``.
    Args:
        angle (float): The rotation angle in degrees.
    """
    def __init__(self, angle):
        self.angle = angle
    def __call__(self, img):
        """
        Args:
            img (numpy.ndarray (C x H x W)): Image to be rotated.
        Returns:
            img (numpy.ndarray (C x H x W)): Rotated image.
        """
        # order=0 means nearest-neighbor type interpolation
        # reshape=False keeps the output the same shape as the input.
        # NOTE(review): scipy.ndimage.interpolation is a long-deprecated
        # namespace (removed in recent SciPy); migrate to scipy.ndimage.rotate
        # when the pinned SciPy version allows.
        return itpl.rotate(img, self.angle, reshape=False, prefilter=False, order=0)
class Resize(object):
    """Resize the given ``numpy.ndarray``.

    Args:
        size (int, float or sequence): scale fraction, or target (h, w) pair.
            An int matches the smaller image edge (see scipy.misc.imresize).
        interpolation (str, optional): interpolation mode understood by
            ``scipy.misc.imresize``. Default: ``'nearest'``.
    """
    def __init__(self, size, interpolation='nearest'):
        # FIX: collections.Iterable was removed in Python 3.10; the abc
        # module is the supported location.
        from collections.abc import Iterable
        assert isinstance(size, (int, float)) or \
            (isinstance(size, Iterable) and len(size) == 2)
        self.size = size
        self.interpolation = interpolation
    def __call__(self, img):
        """
        Args:
            img (numpy.ndarray): 2-D or 3-D image to be scaled.
        Returns:
            numpy.ndarray: rescaled image.
        Raises:
            RuntimeError: if img is not a 2-D or 3-D array.
        """
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; this call
        # requires the old pinned SciPy (or a PIL-based replacement).
        if img.ndim == 3:
            return misc.imresize(img, self.size, self.interpolation)
        elif img.ndim == 2:
            return misc.imresize(img, self.size, self.interpolation, 'F')
        else:
            # FIX: the original constructed this exception but never raised it,
            # silently returning None for bad input.
            raise RuntimeError(
                'img should be ndarray with 2 or 3 dimensions. Got {}'.format(img.ndim))
class CenterCrop(object):
    """Crop the given ``numpy.ndarray`` at its center.

    Args:
        size (sequence or int): desired (h, w) of the crop; an int produces a
            square (size, size) crop.
    """
    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
    @staticmethod
    def get_params(img, output_size):
        """Return (i, j, th, tw): top-left corner and extent of the center crop.

        Args:
            img (numpy.ndarray (C x H x W)): image to be cropped.
            output_size (tuple): expected (h, w) of the crop.
        """
        height, width = img.shape[0], img.shape[1]
        th, tw = output_size
        top = int(round((height - th) / 2.))
        left = int(round((width - tw) / 2.))
        return top, left, th, tw
    def __call__(self, img):
        """Crop *img* (2-D or 3-D ndarray) to ``self.size`` around the center."""
        top, left, th, tw = self.get_params(img, self.size)
        if not _is_numpy_image(img):
            raise TypeError('img should be ndarray. Got {}'.format(type(img)))
        if img.ndim == 3:
            return img[top:top + th, left:left + tw, :]
        if img.ndim == 2:
            return img[top:top + th, left:left + tw]
        raise RuntimeError(
            'img should be ndarray with 2 or 3 dimensions. Got {}'.format(img.ndim))
class BottomCrop(object):
    """Crop the given ``numpy.ndarray`` at the bottom edge, centered horizontally.

    Args:
        size (sequence or int): desired (h, w) of the crop; an int produces a
            square (size, size) crop.
    """
    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
    @staticmethod
    def get_params(img, output_size):
        """Return (i, j, th, tw) anchored at the image bottom, centered in width.

        Args:
            img (numpy.ndarray (C x H x W)): image to be cropped.
            output_size (tuple): expected (h, w) of the crop.
        """
        height, width = img.shape[0], img.shape[1]
        th, tw = output_size
        top = height - th
        left = int(round((width - tw) / 2.))
        return top, left, th, tw
    def __call__(self, img):
        """Crop *img* (2-D or 3-D ndarray) to ``self.size`` at the bottom."""
        top, left, th, tw = self.get_params(img, self.size)
        if not _is_numpy_image(img):
            raise TypeError('img should be ndarray. Got {}'.format(type(img)))
        if img.ndim == 3:
            return img[top:top + th, left:left + tw, :]
        if img.ndim == 2:
            return img[top:top + th, left:left + tw]
        raise RuntimeError(
            'img should be ndarray with 2 or 3 dimensions. Got {}'.format(img.ndim))
class Lambda(object):
    """Wrap a user-supplied function as a transform.

    Args:
        lambd (function): callable applied to the input on each use.
    """
    def __init__(self, lambd):
        # LambdaType covers both lambdas and plain functions
        assert isinstance(lambd, types.LambdaType)
        self.lambd = lambd
    def __call__(self, img):
        return self.lambd(img)
class HorizontalFlip(object):
    """Horizontally flip a numpy image when the flag is set.

    Args:
        do_flip (bool): whether to perform the flip.
    """
    def __init__(self, do_flip):
        self.do_flip = do_flip
    def __call__(self, img):
        """
        Args:
            img (numpy.ndarray (C x H x W)): image to be flipped.
        Returns:
            numpy.ndarray: flipped (or unchanged) image.
        """
        if not _is_numpy_image(img):
            raise TypeError('img should be ndarray. Got {}'.format(type(img)))
        return np.fliplr(img) if self.do_flip else img
class ColorJitter(object):
    """Randomly change the brightness, contrast and saturation of an image.
    Args:
        brightness (float): How much to jitter brightness. brightness_factor
            is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
        contrast (float): How much to jitter contrast. contrast_factor
            is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
        saturation (float): How much to jitter saturation. saturation_factor
            is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
        hue(float): How much to jitter hue. hue_factor is chosen uniformly from
            [-hue, hue]. Should be >=0 and <= 0.5.
    """
    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue
    @staticmethod
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.
        Arguments are same as that of __init__.
        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        # Each factor is drawn first (fixed order), then the sub-transforms
        # are shuffled so the adjustments are applied in a random order.
        # Note the factors are bound via Lambda closures, so each call to
        # get_params yields one fixed, reusable jitter.
        transforms = []
        if brightness > 0:
            brightness_factor = np.random.uniform(max(0, 1 - brightness), 1 + brightness)
            transforms.append(Lambda(lambda img: adjust_brightness(img, brightness_factor)))
        if contrast > 0:
            contrast_factor = np.random.uniform(max(0, 1 - contrast), 1 + contrast)
            transforms.append(Lambda(lambda img: adjust_contrast(img, contrast_factor)))
        if saturation > 0:
            saturation_factor = np.random.uniform(max(0, 1 - saturation), 1 + saturation)
            transforms.append(Lambda(lambda img: adjust_saturation(img, saturation_factor)))
        if hue > 0:
            hue_factor = np.random.uniform(-hue, hue)
            transforms.append(Lambda(lambda img: adjust_hue(img, hue_factor)))
        np.random.shuffle(transforms)
        transform = Compose(transforms)
        return transform
    def __call__(self, img):
        """
        Args:
            img (numpy.ndarray (C x H x W)): Input image.
        Returns:
            img (numpy.ndarray (C x H x W)): Color jittered image.
        """
        if not(_is_numpy_image(img)):
            raise TypeError('img should be ndarray. Got {}'.format(type(img)))
        # The adjust_* helpers operate on PIL images, so round-trip through PIL.
        pil = Image.fromarray(img)
        transform = self.get_params(self.brightness, self.contrast,
                                    self.saturation, self.hue)
        return np.array(transform(pil))
class Crop(object):
    """Crop a numpy image to a fixed rectangle.

    Args:
        i (int): upper pixel coordinate of the rectangle.
        j (int): left pixel coordinate of the rectangle.
        h (int): height of the cropped image.
        w (int): width of the cropped image.
    """
    def __init__(self, i, j, h, w):
        self.i = i
        self.j = j
        self.h = h
        self.w = w
    def __call__(self, img):
        """Return ``img[i:i+h, j:j+w(, :)]``.

        Raises:
            TypeError: if img is not an ndarray.
            RuntimeError: if img is not 2-D or 3-D.
        """
        top, left, height, width = self.i, self.j, self.h, self.w
        if not _is_numpy_image(img):
            raise TypeError('img should be ndarray. Got {}'.format(type(img)))
        if img.ndim == 3:
            return img[top:top + height, left:left + width, :]
        if img.ndim == 2:
            return img[top:top + height, left:left + width]
        raise RuntimeError(
            'img should be ndarray with 2 or 3 dimensions. Got {}'.format(img.ndim))
    def __repr__(self):
        return self.__class__.__name__ + '(i={0},j={1},h={2},w={3})'.format(
            self.i, self.j, self.h, self.w)
ensembleKW | ensembleKW-main/examples/voting.py | import numpy as np
import argparse
import csv
import tensorflow as tf
from scriptify import scriptify
def readFile(count=2,
             dir="evalData/l_inf/mnist_small_0_1/mnist_small_0_1",
             data="test"):
    """Load one model's pre-computed evaluation dump.

    The file ``<dir>_<count>_<data>`` is a whitespace-separated table whose
    columns are [index, prediction, label, certified-flag].

    Returns:
        (y_pred, y_true, certified): columns 1-3 as 1-D float arrays.
    """
    table = np.loadtxt("{}_{}_{}".format(dir, count, data))
    return table[:, 1], table[:, 2], table[:, 3]
def matrix_op_robust_voting(y_pred,
                            y_true,
                            certified,
                            weights=None,
                            num_classes=10,
                            solve_for_weights=False,
                            eps=1e-6):
    ''' Compute the (weighted) voting ensemble of robust models.
    Args:
        y_pred: list of np.array. Predictions for each model.
        y_true: np.array of shape (n_samples,). True labels.
        certified: list of np.array. Boolean array indicating whether each prediction is certified.
        weights: np.array of per-model weights. If None, either solved for
            (solve_for_weights=True) or every model is weighted evenly. Default: None
        num_classes: int. Number of real classes (excluding the abstain class \\bot).
        solve_for_weights: bool. If True and weights is None, optimize the weights.
        eps: float. Tie-breaking slack added when redistributing \\bot votes.
    Returns:
        y_ensemble_clean: np.array of shape (n_samples,). Clean predictions of the ensemble.
        y_ensemble_certificate: np.array of shape (n_samples,). 1 where the ensemble prediction is certified.
        acc: float. Accuracy of the ensemble.
        vra: float. Verified-robust accuracy of the ensemble.
        weights: np.array. The weights that were actually used.
    '''
    # Construct a Y matrix of shape (n_models, n_samples, n_classes)
    # as the one-hot encoding of the predictions, and a C matrix of shape
    # (n_models, n_samples, n_classes+1) where uncertified predictions are
    # replaced by the extra abstain class \bot (index num_classes).
    Y = []
    C = []
    for c, y in zip(certified, y_pred):
        y = np.array(y).astype(np.int32)
        Y.append(np_onehot(y, num_classes=num_classes)[None])
        # append \bot if the prediction is not certified
        C.append(
            np_onehot(np.where(c == np.ones_like(c), y,
                               (np.zeros_like(c) + num_classes).astype(
                                   np.int32)),
                      num_classes=num_classes + 1)[None])
    Y = np.vstack(Y)
    C = np.vstack(C)
    # Construct a groundtruth C_hat matrix with an extra column for the bottom class
    C_hat = np_onehot(y_true.astype(np.int32), num_classes=num_classes + 1)
    if weights is None:
        if solve_for_weights:
            weights = find_weights(C, C_hat)
        else:
            weights = np.ones((C.shape[0], ))
    # Do the voting to find the clean prediction
    # index with 0 to remove the redundant axis.
    votes = np.einsum('ij,jlk->ilk', weights[None],
                      Y)[0]  # shape (n_samples, n_classes)
    y_ensemble_clean = np.argmax(votes, axis=1)
    # Do the voting to find the robust prediction
    # index with 0 to remove the redundant axis.
    votes = np.einsum('ij,jlk->ilk', weights[None],
                      C)[0]  # shape (n_samples, n_classes+1)
    # replace the votes for the top class to -1 to
    # differentiate the votes for the top class and
    # other classes that have the same votes
    votes_bot = votes[:, :-1].copy()
    np.put_along_axis(votes_bot, y_ensemble_clean[:, None], -1, axis=1)
    # Add the votes for the bottom class to all classes except the top.
    # Also add eps to the votes for the bottom class in case it is 0
    votes_bot = np.where(votes_bot == -1, votes[:, :-1],
                         votes[:, :-1] + votes[:, -1:] + eps)
    # Concatenate the votes of all classes and the votes of the bottom class
    votes_bot = np.concatenate([votes_bot, votes[:, -1:]], axis=1)
    # find the top class with votes for the bot added to all other classes.
    robust_j = np.argmax(votes_bot, axis=1)
    # the prediction is certified if the top class is unchanged even when all
    # abstain (\bot) votes are pessimistically given to every rival class.
    y_ensemble_certificate = (robust_j == y_ensemble_clean).astype(np.int32)
    acc = np.mean(y_ensemble_clean == y_true)
    vra = np.mean((y_ensemble_clean == y_true) * y_ensemble_certificate)
    return y_ensemble_clean, y_ensemble_certificate, acc, vra, weights
def find_weights(C, C_hat):
    # Thin indirection so the weight-finding strategy can be swapped without
    # touching callers; currently always the gradient-based optimizer.
    return optimize_find_weights(C, C_hat)
def normalize(x):
    """Divide x by its total sum; the small epsilon guards against a zero sum."""
    total = tf.reduce_sum(x) + 1e-16
    return x / total
def half_temp_sigmoid(x, temp):
    # Piecewise sigmoid: inputs <= 0 are first scaled by 1/temp, positive
    # inputs pass through a plain sigmoid (asymmetric shaping of the margin).
    return tf.where(x<=0, tf.nn.sigmoid(x/temp), tf.nn.sigmoid(x))
def optimize_find_weights(Y_candidates,
                          Y_hat,
                          steps=1000,
                          lr=1e-2,
                          t1=1e5,
                          t2=1e-3):
    '''
    Gradient-based search for ensemble weights that maximize the certified
    voting margin (groundtruth votes minus abstain votes minus runner-up votes).

    Y_candidates: shape: KxNx(C+1). The one-hot encoding of the predictions, including \\bot, of K models for N points.
    Y_hat: shape: Nx(C+1). The one-hot encoding of the labels.
    t1: temperature passed to half_temp_sigmoid when smoothing the margin.
    t2: currently unused (kept for the commented-out soft-max variant below).
    Returns: shape-(K,) numpy array of softmax-normalized weights.
    '''
    Y_candidates = tf.cast(Y_candidates, tf.float32)
    Y_hat = tf.cast(Y_hat, tf.float32)
    K = Y_candidates.shape[0]
    # raw (unconstrained) weights; softmax keeps the effective weights on the simplex
    w = tf.Variable(initial_value=tf.ones((K, )))
    vars = [w]
    # B masks the abstain (\bot) column of the one-hot encoding
    B = np.zeros_like(Y_hat)
    B[:, -1] = 1
    B = tf.constant(B)
    opt = tf.keras.optimizers.Adam(learning_rate=lr)
    pbar = tf.keras.utils.Progbar(steps)
    softmax = tf.nn.softmax
    for _ in range(steps):
        with tf.GradientTape() as tape:
            # weighted votes (N, C+1)
            tape.watch(w)
            valid_w = softmax(w)
            Y = tf.squeeze(
                tf.einsum('ij,jlk->ilk', valid_w[None], Y_candidates))
            # the votes for the groundtruth class
            y_j = tf.reduce_sum(Y * Y_hat, axis=1)
            # the votes for the bottom class
            y_bot = tf.reduce_sum(Y * B, axis=1)
            # the votes for the highest class except the groundtruth and the bottom classes
            # y_second = tf.reduce_sum(Y * softmax(Y * (1 - Y_hat - B) / t2))
            # y_second = tf.reduce_max(Y * (1 - Y_hat - B))
            y_second = tf.reduce_max(Y * (1 - Y_hat - B), axis=1)
            margin = y_j - y_bot - y_second
            # maximize the smoothed margin (minimize its negative mean)
            loss = -tf.reduce_mean(half_temp_sigmoid(margin, t1))
            # loss = tf.reduce_mean(relu(-(y_j - y_bot - y_second)))
        grads = tape.gradient(loss, vars)
        pbar.add(1, [("loss", loss),
                     ('margin', tf.reduce_mean(margin))] + [(f"w{i}", w[i])
                                                            for i in range(K)])
        opt.apply_gradients(zip(grads, vars))
    valid_w = softmax(w)
    return valid_w.numpy()
def cascade(y_pred, y_true, certified):
    """Evaluate the cascade ensemble on pre-computed per-model outputs.

    For each point, models are consulted in order. The cascade commits to the
    first model whose prediction is certified AND correct (counting toward
    both accuracy and VRA). A certified-but-wrong model does not stop the
    cascade; if no model commits, the last model's clean prediction is used
    for accuracy only.

    Args:
        y_pred: list of np.array. Predictions for each model.
        y_true: list of np.array. True labels (one array per model).
        certified: list of np.array. 1 where a model's prediction is certified.
    Returns:
        (acc, vra): ensemble accuracy and verified-robust accuracy.
    """
    n_models = np.shape(y_pred)[0]
    n_points = np.shape(y_pred)[1]
    correct = 0
    vra = 0
    for i in range(n_points):
        for j in range(n_models):
            if certified[j][i] == 1:
                if y_pred[j][i] == y_true[j][i]:
                    correct += 1
                    vra += 1
                    break
            elif j == n_models - 1 and y_pred[j][i] == y_true[j][i]:
                correct += 1
    return correct / n_points, vra / n_points
def np_onehot(vector, num_classes=None):
    """One-hot encode an integer vector.

    Args:
        vector (np.ndarray): integer class indices.
        num_classes (int, optional): number of classes. Defaults to
            ``vector.max() + 1``.
    Returns:
        np.ndarray: shape ``vector.shape + (num_classes,)``.
    """
    if num_classes is None:
        # FIX: the original used np.max(vector), which is off by one --
        # indexing np.eye(max) with the max value itself raised IndexError.
        num_classes = np.max(vector) + 1
    return np.eye(num_classes)[vector]
if __name__ == "__main__":
    # Entry point: loads pre-computed per-model evaluation dumps and reports
    # per-model, cascade, and (weighted) voting ensemble accuracy/VRA.
    @scriptify
    def script(
            model_type="mnist_large_0_1",  # mnist_large_0_1, cifar_small_2px
            norm="l_inf",
            count=3,
            weights=None,
            solve_for_weights=False,
            seq=False):
        # NOTE(review): `dir` shadows the builtin of the same name.
        if seq: dir = "./evalData/seq_trained/" + norm + "/" + model_type + "/" + model_type
        else: dir = "./evalData/non_seq_trained/" + norm + "/" + model_type + "/more_" + model_type
        y_pred_all = []
        y_true_all = []
        certified_all = []
        results = {}
        for i in range(count):
            # read all pre-evaluated files
            y_pred, y_true, certified = readFile(count=i, dir=dir, data="test")
            y_pred_all.append(y_pred)
            y_true_all.append(y_true)
            certified_all.append(certified)
            acc = np.mean(y_pred_all[-1] == y_true)
            vra = np.mean((y_pred_all[-1] == y_true) * certified_all[-1])
            results.update({
                f"model_{i}_acc": float(acc),
                f"model_{i}_vra": float(vra)
            })
        cas_acc, cas_vra = cascade(y_pred_all, y_true_all, certified_all)
        results.update({
            f"cas_acc": float(cas_acc),
            f"cas_vra": float(cas_vra)
        })
        if weights is not None and not solve_for_weights:
            # voting with given weights (passed as a comma-separated string)
            weights = np.array(list(map(float, weights.split(','))))
        elif solve_for_weights:
            # use evaluation on train dataset to find optimized weights
            train_y_pred_all = []
            train_certified_all = []
            for i in range(count):
                train_y_pred, train_y_true, train_certified = readFile(
                    count=i, dir=dir, data="train")
                train_y_pred_all.append(train_y_pred)
                train_certified_all.append(train_certified)
            _, _, _, _, weights = matrix_op_robust_voting(
                train_y_pred_all,
                train_y_true,
                train_certified_all,
                solve_for_weights=True,
                weights=None)
        # NOTE(review): `y_true` here is the test-label array from the last
        # loop iteration; all per-model files are assumed to share labels.
        _, _, vote_acc, vote_vra, weights = matrix_op_robust_voting(
            y_pred_all,
            y_true,
            certified_all,
            solve_for_weights=False,
            weights=weights)
        results.update({
            f"vote_acc": float(vote_acc),
            f"vote_vra": float(vote_vra)
        })
        weights = str(list(weights))
        results.update({'ensemble_weights': weights})
        print(results)
        return results
| 10,245 | 33.267559 | 125 | py |
ensembleKW | ensembleKW-main/examples/primal.py | import setGPU
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from convex_adversarial import DualNetBounds, Affine, full_bias
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import setproctitle
import argparse
import problems as pblm
import cvxpy as cp
import numpy as np
# Convert a cvxpy expression's value to a transposed numpy array.
cp2np = lambda x : np.asarray(x.value).T
if __name__ == "__main__":
    # Sanity check: solve the primal LP relaxation per example/class with cvxpy
    # and compare against the dual bound computed by DualNetBounds.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=50)
    parser.add_argument('--niters', type=int, default=100)
    parser.add_argument('--epsilon', type=float, default=0.1)
    parser.add_argument('--alpha', type=float, default=1)
    parser.add_argument('--prefix', default='temp')
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--mnist', action='store_true')
    parser.add_argument('--svhn', action='store_true')
    parser.add_argument('--har', action='store_true')
    parser.add_argument('--fashion', action='store_true')
    args = parser.parse_args()
    if args.mnist:
        train_loader, test_loader = pblm.mnist_loaders(args.batch_size)
        model = pblm.mnist_model().cuda()
        # model.load_state_dict(torch.load('icml/mnist_epochs_100_baseline_model.pth'))
        model.load_state_dict(torch.load('icml/mnist_epochs100_model.pth'))
    elif args.svhn:
        train_loader, test_loader = pblm.svhn_loaders(args.batch_size)
        model = pblm.svhn_model().cuda()
        model.load_state_dict(torch.load('svhn_new/svhn_epsilon_0_01_schedule_0_001'))
    elif args.har:
        # NOTE(review): har/fashion branches are unimplemented; `model` is
        # left undefined and the script crashes below if either is selected.
        pass
    elif args.fashion:
        pass
    else:
        raise ValueError("Need to specify which problem.")
    # inference/verification only: freeze all parameters
    for p in model.parameters():
        p.requires_grad = False
    # NOTE(review): epsilon is hard-coded here, silently overriding --epsilon.
    epsilon = 0.1
    num_classes = model[-1].out_features
    log = open(args.prefix + "_primal.log", "w")
    loader = train_loader if args.train else test_loader
    for j,(X,y) in enumerate(loader):
        print('*** Batch {} ***'.format(j))
        dual = DualNetBounds(model, Variable(X.cuda()), epsilon, True, True)
        # C[i] holds, for example i, the rows e_{y_i} - e_k for every class k.
        C = torch.eye(num_classes).type_as(X)[y].unsqueeze(1) - torch.eye(num_classes).type_as(X).unsqueeze(0)
        upper_bound = -dual.g(Variable(C.cuda())).data
        layers = dual.layers
        affine = dual.affine
        k = len(layers)
        # Materialize each affine layer's weight matrix by applying it to the identity.
        W = [l(Variable(torch.eye(l.in_features).cuda())).t().cpu().data.numpy() for l in affine]
        b = [bias.view(-1).cpu().data.numpy() for bias in dual.biases]
        for i0,x in enumerate(X.numpy()):
            # if i0 == 0:
            #     continue
            print('*** Example {} ***'.format(i0))
            x = x.reshape(-1)
            # per-layer pre-activation lower/upper bounds for this example
            zl = [l.data[i0,:].cpu().numpy() for l in dual.zl]
            zu = [u.data[i0,:].cpu().numpy() for u in dual.zu]
            # NOTE(review): I_minus / I_plus / I / primal_values are computed
            # but unused below.
            I_minus = [u < 0 for u in zu]
            I_plus = [l > 0 for l in zl]
            I = [(u >= 0) * (l <= 0) for u,l in zip(zu,zl)]
            primal_values = []
            for j0,c in enumerate(C[i0].numpy()):
                # LP relaxation of the network: affine equalities, input ball,
                # ReLU lower (z >= 0, z >= zhat) and upper (convex hull) constraints.
                z = [cp.Variable(l.in_features) for l in affine]
                zhat = [cp.Variable(l.out_features) for l in affine]
                cons_eq = [zhat[i] == W[i]*z[i] + b[i] for i in range(k)]
                cons_ball = [z[0] >= x - epsilon, z[0] <= x + epsilon]
                cons_zero = [z[i] >= 0 for i in range(1,k)]
                cons_linear = [z[i+1] >= zhat[i] for i in range(k-1)]
                cons_upper = [(cp.mul_elemwise(-(np.maximum(zu[i],0) - np.maximum(zl[i], 0)), zhat[i]) +
                               cp.mul_elemwise((zu[i] - zl[i]), z[i+1]) <=
                               zu[i]*np.maximum(zl[i],0) - zl[i]*np.maximum(zu[i],0)) for i in range(k-1)]
                cons = cons_eq + cons_ball + cons_zero + cons_linear + cons_upper
                fobj = cp.Problem(cp.Minimize(c*zhat[-1]), cons).solve(verbose=False)
                # dual upper bound should dominate the (negated) primal optimum
                print(i0, j0, upper_bound[i0][j0], -fobj)
                print(i0, j0, upper_bound[i0][j0], -fobj, file=log)
ensembleKW | ensembleKW-main/examples/evaluate.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import random
import setproctitle
import problems as pblm
from trainer import *
from convex_adversarial import robust_loss, robust_loss_parallel
import math
import numpy
def select_mnist_model(m):
    """Instantiate the requested MNIST architecture on the GPU ('large' or default small)."""
    if m == 'large':
        # _, test_loader = pblm.mnist_loaders(8)
        return pblm.mnist_model_large().cuda()
    return pblm.mnist_model().cuda()
def select_cifar_model(m):
    """Instantiate the requested CIFAR architecture on the GPU ('large', 'resnet', or default small)."""
    if m == 'large':
        return pblm.cifar_model_large().cuda()
    if m == 'resnet':
        return pblm.cifar_model_resnet(N=1, factor=1).cuda()
    return pblm.cifar_model().cuda()
def robust_verify(models, epsilon, X, **kwargs):
    """Predict on X and check whether each prediction is certified robust.

    Args:
        models: the model under evaluation (a single nn.Module; the plural
            parameter name is kept for call compatibility).
        epsilon: perturbation radius for the robustness certificate.
        X: input batch (Variable).
    Returns:
        (predictions, certified): predicted labels and a boolean mask that is
        True where the prediction is certified robust.
    """
    # FIX: the body previously referenced a global `model` instead of the
    # parameter, silently evaluating whatever the caller's loop last bound.
    model = models
    # the parallel variant only handles a single example at a time
    if X.size(0) == 1:
        rl = robust_loss_parallel
    else:
        rl = robust_loss
    out = model(X)
    _, uncertified = rl(model,
                        epsilon,
                        X,
                        out.max(1)[1],
                        size_average=False,
                        **kwargs)
    certified = ~uncertified
    return out.max(1)[1], certified
def evaluate_robustness(loader, model, epsilon, epoch, log, verbose, **kwargs):
    """Run robust verification over a data loader, logging per-batch results.

    For every batch, writes ``(index, predictions, labels, certified)`` to
    ``log`` and echoes every ``verbose``-th batch to stdout.  ``epoch`` is
    accepted for interface compatibility but unused.  Always returns True.
    """
    for batch_idx, (data, target) in enumerate(loader):
        data = data.cuda()
        target = target.cuda().long()
        if target.dim() == 2:
            target = target.squeeze(1)
        pred, certified = robust_verify(model, epsilon, Variable(data),
                                        **kwargs)
        print(batch_idx, pred, target, certified, file=log)
        if verbose and batch_idx % verbose == 0:
            print(batch_idx, pred, target, certified)
        # Free cached CUDA memory between batches.
        torch.cuda.empty_cache()
    return True
# Evaluation is inference-only; gradients are globally disabled and all RNGs
# are seeded for reproducibility.
torch.set_grad_enabled(False)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
random.seed(0)
numpy.random.seed(0)
if __name__ == "__main__":
    # Evaluate every model in a saved cascade/ensemble checkpoint for
    # certified robustness on both the train and test splits.
    args = pblm.argparser_evaluate(epsilon=0.1, norm='l1')
    print("saving file to {}".format(args.output))
    setproctitle.setproctitle(args.output)
    kwargs = pblm.args2kwargs(args)
    if args.dataset == "mnist":
        train_loader, test_loader = pblm.mnist_loaders(args.batch_size)
        select_model = select_mnist_model
    elif args.dataset == "cifar":
        train_loader, test_loader = pblm.cifar_loaders(args.batch_size)
        select_model = select_cifar_model
    # The checkpoint stores one state_dict per ensemble member.
    d = torch.load(args.load)
    models = []
    for sd in d['state_dict']:
        m = select_model(args.model)
        m.load_state_dict(sd)
        models.append(m)
    num_models = len(models)
    print("number of models: ", num_models)
    for model in models:
        model.eval()
    for j, model in enumerate(models):
        if num_models == 1: #implies that we are evaluating non-sequentially trained models one-by-one
            train_log = open(args.output + "_train", "w")
            test_log = open(args.output + "_test", "w")
        else:
            # Multi-model checkpoint: one log file pair per ensemble member.
            train_log = open(args.output + str(j) + "_train", "w")
            test_log = open(args.output + str(j) + "_test", "w")
        err = evaluate_robustness(train_loader,
                                  model,
                                  args.epsilon,
                                  0,
                                  train_log,
                                  args.verbose,
                                  norm_type=args.norm,
                                  bounded_input=False,
                                  **kwargs)
        err = evaluate_robustness(test_loader,
                                  model,
                                  args.epsilon,
                                  0,
                                  test_log,
                                  args.verbose,
                                  norm_type=args.norm,
                                  bounded_input=False,
                                  **kwargs)
| 4,081 | 27.545455 | 103 | py |
ensembleKW | ensembleKW-main/examples/mnist_epsilon.py | import waitGPU
waitGPU.wait(utilization=20, available_memory=10000, interval=10)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from convex_adversarial import DualNetBounds
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import setproctitle
import argparse
import problems as pblm
if __name__ == "__main__":
    # Per-example estimation of the largest certifiable epsilon: for each
    # input, iterate Newton-style updates on epsilon until the worst-case
    # dual objective f(epsilon) is (approximately) zero.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=20)
    parser.add_argument('--niters', type=int, default=20)
    parser.add_argument('--epsilon', type=float, default=0.1)
    parser.add_argument('--alpha', type=float, default=1)
    parser.add_argument('--threshold', type=float, default=1e-4)
    parser.add_argument('--prefix', default='temp')
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--mnist', action='store_true')
    parser.add_argument('--svhn', action='store_true')
    parser.add_argument('--har', action='store_true')
    parser.add_argument('--fashion', action='store_true')
    parser.add_argument('--model')
    args = parser.parse_args()
    # Load the pretrained network for the selected problem.
    if args.mnist:
        train_loader, test_loader = pblm.mnist_loaders(args.batch_size)
        model = pblm.mnist_model().cuda()
        model.load_state_dict(torch.load('icml/mnist_epochs_100_baseline_model.pth'))
    elif args.svhn:
        train_loader, test_loader = pblm.svhn_loaders(args.batch_size)
        model = pblm.svhn_model().cuda()
        model.load_state_dict(torch.load('pixel2/svhn_small_batch_size_50_epochs_100_epsilon_0.0078_l1_proj_50_l1_test_median_l1_train_median_lr_0.001_opt_adam_schedule_length_20_seed_0_starting_epsilon_0.001_checkpoint.pth')['state_dict'])
    # NOTE(review): unlike the other datasets, CIFAR is selected via
    # `--model cifar` rather than a `--cifar` flag — confirm this is intended.
    elif args.model == 'cifar':
        train_loader, test_loader = pblm.cifar_loaders(args.batch_size)
        model = pblm.cifar_model().cuda()
        model.load_state_dict(torch.load('pixel2/cifar_small_batch_size_50_epochs_100_epsilon_0.0347_l1_proj_50_l1_test_median_l1_train_median_lr_0.05_momentum_0.9_opt_sgd_schedule_length_20_seed_0_starting_epsilon_0.001_weight_decay_0.0005_checkpoint.pth')['state_dict'])
    # NOTE(review): --har and --fashion load no model, so the code below
    # would fail with NameError on `model` for those flags.
    elif args.har:
        pass
    elif args.fashion:
        pass
    else:
        raise ValueError("Need to specify which problem.")
    # The network is fixed; only epsilon is optimized.
    for p in model.parameters():
        p.requires_grad = False
    num_classes = model[-1].out_features
    correct = []
    incorrect = []
    l = []
    loader = train_loader if args.train else test_loader
    for j,(X,y) in enumerate(loader):
        print('*** Batch {} ***'.format(j))
        # One epsilon variable per example, initialized at the CLI value.
        epsilon = Variable(args.epsilon*torch.ones(X.size(0)).cuda(), requires_grad=True)
        X, y = Variable(X).cuda(), Variable(y).cuda()
        out = Variable(model(X).data.max(1)[1])
        # form c without the 0 row
        c = Variable(torch.eye(num_classes).type_as(X.data)[out.data].unsqueeze(1) - torch.eye(num_classes).type_as(X.data).unsqueeze(0))
        I = (~(out.data.unsqueeze(1) == torch.arange(num_classes).type_as(out.data).unsqueeze(0)).unsqueeze(2))
        c = (c[I].view(X.size(0),num_classes-1,num_classes))
        if X.is_cuda:
            c = c.cuda()
        alpha = args.alpha
        def f(eps):
            # Worst (max over target classes) negative dual bound at radius
            # eps; its zero crossing is used as the certified radius.
            dual = DualNetBounds(model, X, eps.unsqueeze(1), True, True)
            f = -dual.g(c)
            return (f.max(1)[0])
        for i in range(args.niters):
            f_max = f(epsilon)
            # if done, stop
            if (f_max.data.abs() <= args.threshold).all():
                break
            # otherwise, compute gradient and update
            (f_max).sum().backward()
            alpha = args.alpha
            # Newton-style step epsilon - alpha * f/f', with a backtracking
            # line search that halves alpha until |f| decreases.
            epsilon0 = Variable((epsilon - alpha*(f_max/(epsilon.grad))).data,
                                requires_grad=True)
            while (f(epsilon0).data.abs().sum() >= f_max.data.abs().sum()):
                alpha *= 0.5
                epsilon0 = Variable((epsilon - alpha*(f_max/(epsilon.grad))).data,
                                    requires_grad=True)
                if alpha <= 1e-3:
                    break
            epsilon = epsilon0
            del f_max
        # Record batches that hit the iteration limit without converging.
        if i == args.niters - 1:
            l.append(j)
        # Split the estimated radii by whether the clean prediction was right.
        if (y==out).data.sum() > 0:
            correct.append(epsilon[y==out])
        if (y!=out).data.sum() > 0:
            incorrect.append(epsilon[y!=out])
        del X, y
    print(l)
    torch.save(torch.cat(correct, 0), '{}_correct_epsilons.pth'.format(args.prefix))
torch.save(torch.cat(incorrect, 0), '{}_incorrect_epsilons.pth'.format(args.prefix)) | 4,604 | 37.057851 | 272 | py |
ensembleKW | ensembleKW-main/examples/har.py | # import waitGPU
# waitGPU.wait(utilization=20, interval=60)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torch.utils.data as td
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import setproctitle
import argparse
import problems as pblm
from trainer import *
if __name__ == "__main__":
    # Train a (robust or baseline) classifier on the HAR dataset.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=50)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--epsilon", type=float, default=0.05)
    parser.add_argument("--starting_epsilon", type=float, default=None)
    parser.add_argument('--prefix')
    parser.add_argument('--baseline', action='store_true')
    parser.add_argument('--verbose', type=int, default=1)
    parser.add_argument('--alpha_grad', action='store_true')
    parser.add_argument('--scatter_grad', action='store_true')
    parser.add_argument('--old_weights', action='store_true')
    parser.add_argument('--l1_proj', type=int, default=None)
    args = parser.parse_args()
    # Default run prefix encodes epsilon and learning rate ('.' -> '_').
    args.prefix = args.prefix or 'har_conv_{:.4f}_{:.4f}_0'.format(args.epsilon, args.lr).replace(".","_")
    setproctitle.setproctitle(args.prefix)
    train_log = open(args.prefix + "_train.log", "w")
    test_log = open(args.prefix + "_test.log", "w")
    train_loader, test_loader = pblm.har_loaders(args.batch_size)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    model = pblm.har_500_model().cuda()
    # model = pblm.har_resnet_model().cuda()
    opt = optim.Adam(model.parameters(), lr=args.lr)
    for t in range(args.epochs):
        if args.baseline:
            train_baseline(train_loader, model, opt, t, train_log, args.verbose)
            evaluate_baseline(test_loader, model, t, test_log, args.verbose)
        else:
            # Linearly warm up epsilon over the first half of training, then
            # hold it at the target value.
            if t <= args.epochs//2 and args.starting_epsilon is not None:
                epsilon = args.starting_epsilon + (t/(args.epochs//2))*(args.epsilon - args.starting_epsilon)
            else:
                epsilon = args.epsilon
            train_robust(train_loader, model, opt, epsilon, t, train_log,
                    args.verbose,
                    args.alpha_grad, args.scatter_grad, l1_proj=args.l1_proj)
            # Evaluation always uses the full target epsilon.
            evaluate_robust(test_loader, model, args.epsilon, t, test_log, args.verbose)
torch.save(model.state_dict(), args.prefix + "_model.pth") | 2,605 | 39.092308 | 109 | py |
ensembleKW | ensembleKW-main/examples/runtime.py | import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import gpustat
import numpy as np
from problems import Flatten
def gpu_mem():
    """Return the memory currently used on the first GPU gpustat reports."""
    query = gpustat.GPUStatCollection.new_query()
    for device in query:
        # Only the first device is of interest.
        used = device.entry['memory.used']
        break
    return used
# random points at least 2r apart
m = 10
np.random.seed(3)
x = [np.random.uniform(size=(1,28,28))]
r = 0.16
# Rejection-sample MNIST-shaped points: keep a candidate only when its L1
# distance to every accepted point exceeds 2r, so the epsilon-balls built
# below (epsilon = r/2) cannot overlap.
while(len(x) < m):
    p = np.random.uniform(size=(1,28,28))
    if min(np.abs(p-a).sum() for a in x) > 2*r:
        x.append(p)
# r = 0.145
epsilon = r/2
X = torch.Tensor(np.array(x)).cuda()
torch.manual_seed(1)
# Random binary labels (0/1) for the m synthetic points.
y = (torch.rand(m)+0.5).long().cuda()
import sys
sys.path.append("../")  # make the local convex_adversarial package importable
from convex_adversarial import robust_loss
import time
class Meter:
    """Accumulates measurements in groups.

    ``l`` is a list of groups: :meth:`add` appends to the current (last)
    group, :meth:`next` closes it and starts a fresh one, and :meth:`save`
    writes all *completed* groups (everything except the in-progress last
    group) to a text file via numpy.
    """

    def __init__(self):
        # Start with a single empty, in-progress group.
        self.l = [[]]

    def add(self, x):
        """Record a value in the current group."""
        self.l[-1].append(x)

    def next(self):
        """Close the current group and begin a new empty one."""
        self.l.append([])

    def save(self, fname):
        """Write the completed groups to ``fname``, one row per group."""
        completed = self.l[:-1]
        np.savetxt(fname, np.array(completed))
# Benchmark loop: grow the network width j until something fails (typically
# GPU OOM), recording per-iteration time and memory use for either the
# projected (PROJ=True) or the full dual bound.
xs, ys = Meter(), Meter()
mems = Meter()
PROJ = True
for j in range(1,1001):
    try:
        for _ in range(10):
            torch.cuda.empty_cache()
            start_mem = gpu_mem()
            # torch.manual_seed(1)
            robust_net = nn.Sequential(
                nn.Conv2d(1, j, 3, stride=1, padding=1),
                nn.ReLU(),
                Flatten(),
                nn.Linear(j*28*28,2)
            ).cuda()
            data = []
            opt = optim.Adam(robust_net.parameters(), lr=1e-3)
            ts = []
            for i in range(10):
                start_time = time.time()
                if PROJ:
                    robust_ce, robust_err = robust_loss(robust_net, epsilon, X, y,
                                                   parallel=False, l1_proj=50, l1_type='median')
                else:
                    robust_ce, robust_err = robust_loss(robust_net, epsilon, X, y,
                                                   parallel=False)
                out = robust_net(X)
                l2 = nn.CrossEntropyLoss()(out, y).item()
                err = (out.max(1)[1] != y).float().mean().item()
                data.append([l2, robust_ce.item(), err, robust_err])
                # if i % 100 == 0:
                #     print(robust_ce.item(), robust_err)
                opt.zero_grad()
                (robust_ce).backward()
                opt.step()
                end_time = time.time()
                ts.append(end_time-start_time)
            end_mem = gpu_mem()
            mems.add(end_mem)
            # print(start_mem, end_mem)
            # Free everything before the next (larger) network is built.
            del robust_net, robust_ce, l2, robust_err, err, out, opt
            # print(globals().keys())
            # assert False
        # print(l2, robust_ce.item(), robust_err)
        # Drop the first (warm-up) timing before averaging.
        ts = np.array(ts[1:])
        xs.add(j*100*4)
        ys.add(ts.mean())
        print(j*28*28, ts.mean(), end_mem)
        mems.next()
        xs.next()
        ys.next()
    # NOTE(review): bare `except:` is used to stop growing the network on any
    # failure (presumably CUDA OOM) — it also swallows KeyboardInterrupt.
    except:
        break
if PROJ:
    xs.save('sizes_conv_proj.txt')
    ys.save('epoch_conv_times_proj.txt')
    mems.save('memory_conv_proj.txt')
else:
    xs.save('sizes_conv_full.txt')
    ys.save('epoch_conv_times_full.txt')
mems.save('memory_conv_full.txt') | 3,227 | 25.459016 | 82 | py |
ensembleKW | ensembleKW-main/examples/attack_ensemble.py | import random
import numpy as np
import setproctitle
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import tqdm
from absl import app
from ml_collections import config_flags
from torch.autograd import Variable
import problems as pblm
from convex_adversarial import RobustBounds, robust_loss, robust_loss_parallel
from trainer import * # pylint: disable=import-error
from time import time # pylint: disable=import-error
from dbify import dbify
from flatten_dict import flatten as tree_flatten
cudnn.benchmark = True
device = 'cuda:0'
cifar_mean = [0.485, 0.456, 0.406]
cifar_std = [0.225, 0.225, 0.225]
@dbify('ensemblekw', 'cifar')
def store_experiement(**kwargs):
    """Persist one experiment record.

    The ``@dbify('ensemblekw', 'cifar')`` decorator handles storing the
    keyword arguments; the body contributes no extra fields.  (The name
    keeps the repo's historical misspelling of "experiment".)
    """
    return {}
def select_mnist_model(m):
    """Instantiate an MNIST classifier on the configured device.

    ``m == 'large'`` selects the large architecture; any other value falls
    back to the default small model.
    """
    if m == 'large':
        return pblm.mnist_model_large().to(device)
    return pblm.mnist_model().to(device)
def select_cifar_model(m):
    """Instantiate a CIFAR classifier on the configured device.

    ``m == 'large'`` selects the large architecture, ``m == 'resnet'`` the
    small ResNet; any other value falls back to the default small model.
    """
    if m == 'large':
        return pblm.cifar_model_large().to(device)
    if m == 'resnet':
        return pblm.cifar_model_resnet(N=1, factor=1).to(device)
    return pblm.cifar_model().to(device)
def cross_entropy(*, p_logits, q_probits, reduction='mean'):
    """Soft-label cross-entropy H(q, softmax(p)) over the batch.

    Equivalent (up to an additive constant) to the KL divergence when
    gradients are required only on ``p_logits``.

    :param p_logits: (batch, *) raw, unnormalized scores.
    :param q_probits: (batch, *) same shape as ``p_logits``; each row must
        be a valid probability distribution (``q_probits[i, :].sum() == 1``).
    :param reduction: ``'none'`` (per-example losses), ``'mean'``, or
        ``'sum'`` over the batch dimension.
    :raises NotImplementedError: for any other ``reduction`` mode.

    Fixed the docstring, which documented nonexistent ``input``/``target``
    parameters; behavior is unchanged.
    """
    # Flatten all non-batch dimensions so arbitrary trailing shapes work.
    logprobs = torch.nn.functional.log_softmax(p_logits.view(
        p_logits.shape[0], -1),
                                               dim=1)
    batchloss = -torch.sum(q_probits.view(q_probits.shape[0], -1) * logprobs,
                           dim=1)
    if reduction == 'none':
        return batchloss
    elif reduction == 'mean':
        return torch.mean(batchloss)
    elif reduction == 'sum':
        return torch.sum(batchloss)
    else:
        raise NotImplementedError('Unsupported reduction mode.')
def sparse_cross_entropy(*, p_logits, q_sparse, reduction='mean'):
    """Cross-entropy of logits against integer class labels.

    ``q_sparse`` holds class indices; they are one-hot encoded and
    forwarded to :func:`cross_entropy`.
    """
    n_classes = p_logits.shape[1]
    one_hot_targets = torch.nn.functional.one_hot(q_sparse,
                                                  num_classes=n_classes)
    return cross_entropy(p_logits=p_logits,
                         q_probits=one_hot_targets,
                         reduction=reduction)
# Evaluates the casacade given data X and labels y.
# match_y=True if the predictions of cascade count as accurate when they match label y,
# while match_y=False if the predictions of cascade count as accurate when they do not match label y
def eval_cascade(config, models, X, y, match_y=True):
    """Run the certification cascade over a batch.

    Each model in turn certifies what it can; only the examples it could not
    certify are passed on to the next model.  Returns three structures of
    *original-batch* indices:
      - CR_modelid_idx_map: modelid -> indices whose prediction (by that
        stage) is certified robust;
      - CRA_modelid_idx_map: modelid -> indices certified robust AND whose
        prediction matches y (match_y=True) or differs from y (match_y=False);
      - A_idxs: indices the last stage predicts "accurately" (same match_y
        convention) but cannot certify.
    Gradients are disabled for the duration of the call and re-enabled on
    every return path.
    """
    # Scale the attack radius into the (normalized) input space of the data.
    if config.data.normalization == '01':
        eps = config.attack.eps
    elif config.data.normalization == '-11':
        eps = 2 * config.attack.eps
    elif config.data.normalization == 'meanstd':
        eps = config.attack.eps / cifar_std[0]
    else:
        raise ValueError(
            f"The range of the data `{config.data.normalization}` is not understood."
        )
    torch.set_grad_enabled(False)
    # I tracks each surviving example's index in the original batch.
    I = torch.arange(X.size(0)).type_as(y.data)
    # Map from modelid to indices of elements in X where model <modelid>
    # is used to make the ensemble prediction and the predictions are certified robust.
    CR_modelid_idx_map = {}
    # Map from modelid to indices of elements in X where model <modelid>
    # is used to make the ensemble prediction and the predictions are certified robust & accurate.
    CRA_modelid_idx_map = {}
    # List of indices of elements in X where ensemble predictions are accurate but not certified robust.
    # Since all such predictions are made by the last model in the ensemble, we do not need to record the modelid.
    A_idxs = []
    for j, model in enumerate(models):
        # print("attack_ensemble:56: ", float(torch.cuda.memory_allocated())/(1000*1000*1000))
        # print("attack_ensemble:56: ", float(torch.cuda.max_memory_allocated())/(1000*1000*1000))
        out = model(X)
        _, uncertified = robust_loss(
            model,
            eps,
            X,
            out.max(1)[1],
            size_average=False,
            device_ids=[0, 1],
            parallel=True,
            norm_type='l1' if config.attack.norm == 'linf' else 'l2')
        certified = ~uncertified
        if j == len(models) - 1:
            # Last stage: uncertified-but-"accurate" examples are recorded.
            if match_y:
                uncertified_acc = torch.logical_and(uncertified,
                                                    out.max(1)[1] == y)
            else:
                uncertified_acc = torch.logical_and(uncertified,
                                                    out.max(1)[1] != y)
            A_idxs += I[uncertified_acc.nonzero()[:, 0]].tolist()
        if certified.sum() == 0:
            pass
            # print("Warning: Cascade stage {} has no certified values.".format(j+1))
        else:
            CR_idxs = I[certified.nonzero()[:, 0]].tolist()
            if len(CR_idxs) > 0:
                CR_modelid_idx_map[j] = CR_idxs
            if match_y:
                certified_acc = torch.logical_and(certified,
                                                  out.max(1)[1] == y)
            else:
                certified_acc = torch.logical_and(certified,
                                                  out.max(1)[1] != y)
            CRA_idxs = I[certified_acc.nonzero()[:, 0]].tolist()
            if len(CRA_idxs) > 0:
                CRA_modelid_idx_map[j] = CRA_idxs
        # reduce data set to uncertified examples
        if uncertified.sum() > 0:
            X = X[uncertified.nonzero()[:, 0]]
            y = y[uncertified.nonzero()[:, 0]]
            I = I[uncertified.nonzero()[:, 0]]
        else:
            # Everything certified: early exit with gradients restored.
            torch.cuda.empty_cache()
            torch.set_grad_enabled(True)
            return CR_modelid_idx_map, CRA_modelid_idx_map, A_idxs
    ####################################################################
    torch.cuda.empty_cache()
    torch.set_grad_enabled(True)
    return CR_modelid_idx_map, CRA_modelid_idx_map, A_idxs
def make_objective_fn(config, cert_needed=True):
    """Build the per-batch attack objective ``objective_fn(j, model, all_models, eps, X_pgd, y_pred)``.

    Three variants are returned, selected by ``cert_needed`` and
    ``config.attack.do_surrogate``:
      - ``cert_needed=False``: for the last cascade stage — encourage model j
        to *change* its prediction (no certificate required) while pushing
        all earlier models toward uncertifiable (near-uniform) outputs.
      - exact objective (``do_surrogate=False``): encourage model j to
        certify via the true robust loss / worst-case logits.
      - surrogate objective: use plain cross-entropy on model j's clean
        output as a cheaper stand-in for certification.
    All variants return a per-example loss (reduction='none') to be
    minimized by PGD.
    """
    if not cert_needed:
        def objective_fn(j, model, all_models, eps, X_pgd, y_pred):
            # for model j, we encourage it to have a different prediction at the current input.
            loss_cert = sparse_cross_entropy(p_logits=model(X_pgd),
                                             q_sparse=y_pred,
                                             reduction='none')
            # for all model i < j, they should fail to certify the current input.
            loss_nocert = torch.zeros(loss_cert.size()).type_as(loss_cert.data)
            for k in range(j):
                output_k = all_models[k](X_pgd)
                # To make model i, i<j, fail to certify the input,
                # we push the adversarial point closer to the decision boundary
                # by minimizing the KL divergence between its class probability
                # distribution and a uniform distribution.
                unif_dist = torch.ones(output_k.size()).type_as(
                    output_k.data) / float(output_k.size(1))
                loss_nocert += cross_entropy(p_logits=output_k,
                                             q_probits=unif_dist,
                                             reduction='none')
            return loss_cert - loss_nocert
    elif not config.attack.do_surrogate:
        def objective_fn(j, model, all_models, eps, X_pgd, y_pred):
            # for model j, we encourage it to certify the current input.
            loss_cert, _ = robust_loss(
                model,
                eps,
                X_pgd,
                y_pred,
                size_average=False,
                device_ids=[0, 1],
                parallel=True,
                norm_type='l1' if config.attack.norm == 'linf' else 'l2')
            # for all model i < j, they should fail to certify the current input.
            loss_nocert = torch.zeros(loss_cert.size()).type_as(loss_cert.data)
            for k in range(j):
                worse_case_logit_k = RobustBounds(all_models[k], eps)(X_pgd,
                                                                      y_pred)
                # To make model i, i<j, fail to certify the input,
                # we push the adversarial point closer to the decision boundary
                # by minimizing the KL divergence between its class probability
                # distribution and a uniform distribution.
                unif_dist = torch.ones(worse_case_logit_k.size()).type_as(
                    worse_case_logit_k.data) / float(
                        worse_case_logit_k.size(1))
                loss_nocert += cross_entropy(p_logits=worse_case_logit_k,
                                             q_probits=unif_dist,
                                             reduction='none')
            return -loss_cert - loss_nocert
    else:
        def objective_fn(j, model, all_models, eps, X_pgd, y_pred):
            # for model j, we encourage it to certify the current input.
            loss_cert = sparse_cross_entropy(p_logits=model(X_pgd),
                                             q_sparse=y_pred,
                                             reduction='none')
            # for all model i < j, they should fail to certify the current input.
            loss_nocert = torch.zeros(loss_cert.size()).type_as(loss_cert.data)
            for k in range(j):
                output_k = all_models[k](X_pgd)
                # To make model i, i<j, fail to certify the input,
                # we push the adversarial point closer to the decision boundary
                # by minimizing the KL divergence between its class probability
                # distribution and a uniform distribution.
                unif_dist = torch.ones(output_k.size()).type_as(
                    output_k.data) / float(output_k.size(1))
                loss_nocert += cross_entropy(p_logits=output_k,
                                             q_probits=unif_dist,
                                             reduction='none')
            return -loss_cert - loss_nocert
    return objective_fn
def attack_step(config, models, data, labels, modelid):
    """Run the cascade-aware PGD attack on one batch.

    ``modelid`` is the cascade stage that made the (certified) clean
    prediction for this batch.  For every other stage j, PGD searches the
    eps-ball for a point where stage j produces a *different*, certified
    prediction (objective from :func:`make_objective_fn`); success is
    confirmed by re-running :func:`eval_cascade` with ``match_y=False``.

    Returns ``(noisy_data, success_mask)`` where ``success_mask[i] == 1``
    iff an attack on ``data[i]`` succeeded.  NOTE (existing TODO below):
    ``noisy_data`` currently holds the *clean* inputs that were successfully
    attacked, not the perturbed versions.
    """
    last_modelid = len(models) - 1
    data_clone = torch.clone(data)
    labels_clone = torch.clone(labels)
    noisy_data = []
    idx_for_all_data = torch.arange(data_clone.size(0)).type_as(
        labels_clone.data)
    # Translate attack radius, step size and valid data range into the
    # normalized input space of the data.
    if config.data.normalization == '01':
        eps = config.attack.eps
        data_min = 0.
        data_max = 1.
        step_size = config.attack.step_size
    elif config.data.normalization == '-11':
        eps = 2 * config.attack.eps
        data_min = -1.
        data_max = 1.
        step_size = 2 * config.attack.step_size
    elif config.data.normalization == 'meanstd':
        # Per-channel valid range after mean/std normalization (CIFAR).
        data_shape = data.shape[2:]
        r_channel_min = (torch.zeros(data_shape) -
                         cifar_mean[0]) / cifar_std[0]
        r_channel_max = (torch.zeros(data_shape) +
                         cifar_mean[0]) / cifar_std[0]
        g_channel_min = (torch.zeros(data_shape) -
                         cifar_mean[1]) / cifar_std[1]
        g_channel_max = (torch.zeros(data_shape) +
                         cifar_mean[1]) / cifar_std[1]
        b_channel_min = (torch.zeros(data_shape) -
                         cifar_mean[2]) / cifar_std[2]
        b_channel_max = (torch.zeros(data_shape) +
                         cifar_mean[2]) / cifar_std[2]
        data_min = torch.stack([r_channel_min, g_channel_min, b_channel_min],
                               dim=0).to(device)
        data_max = torch.stack([r_channel_max, g_channel_max, b_channel_max],
                               dim=0).to(device)
        eps = config.attack.eps / cifar_std[0]
        step_size = config.attack.step_size / cifar_std[0]
    else:
        raise ValueError(
            f"The range of the data `{config.data.normalization}` is not understood."
        )
    def smart_clamp(data_to_clip):
        # Scalar bounds use torch.clamp; tensor (per-channel) bounds need
        # an elementwise min/max.
        if isinstance(data_max, float):
            return torch.clamp(data_to_clip, data_min, data_max)
        else:
            return torch.max(torch.min(data_to_clip, data_max[None]),
                             data_min[None])
    attack_objective_fn = make_objective_fn(config)
    # This array stores where we have already found an adversarial example for
    # each input. If an adversarial example is found, we stop the attack.
    keep_attack = torch.ones(data.size(0)).type_as(data.data)
    for j, model in enumerate(models):
        if j == last_modelid:
            attack_objective_fn = make_objective_fn(config, cert_needed=False)
        else:
            # if model j is the one that makes the clean prediction,
            # we skip it
            if j == modelid:
                continue
        # The predicted label of model j
        y_pred = model(data_clone).max(1)[1]
        if j == last_modelid:
            candidates = keep_attack.nonzero().squeeze(1)
        else:
            # We are only interested in models that already make different predicitons
            # from the previous certifier, i.e. the first model that cerfifies its prediciton
            # for the clean input, because if model j makes the same prediciton as the
            # certifier (model modelid), it is impossible to find an adversarial point within the eps-ball
            # that outputs a different label with certificate.
            candidates = torch.logical_and((y_pred != labels_clone),
                                           keep_attack)
            candidates = candidates.nonzero().squeeze(1)
        # If there is no candidate, we skip this model j.
        if len(candidates) < 1:
            continue
        candidate_idx = idx_for_all_data[candidates]
        data_candidates = data_clone[candidates]
        candidate_pred = y_pred[candidates]
        candidate_labels = labels_clone[candidates]
        candidate_keep_attack = torch.clone(keep_attack[candidates])
        # TODO: implement random start
        data_pgd = Variable(data_candidates, requires_grad=True)
        for _ in range(config.attack.steps):
            # TODO add Adam optimizer
            # opt_pgd = optim.Adam([X_pgd], lr=config.attack.step_size)
            # if j == last_modelid:
            #     loss = attack_objective_fn(j, model, models, eps, data_pgd,
            #                                candidate_labels)
            # else:
            #     loss = attack_objective_fn(j, model, models, eps, data_pgd,
            #                                candidate_pred)
            loss = attack_objective_fn(j, model, models, eps, data_pgd,
                                       candidate_pred)
            loss.mean().backward()
            if config.attack.norm == 'linf':
                # l_inf PGD
                eta = step_size * data_pgd.grad.data.sign()
                data_pgd = Variable(data_pgd.data + eta, requires_grad=True)
                eta = torch.clamp(data_pgd.data - data_candidates, -eps, eps)
                # data_pgd.data = data_candidates + eta * candidate_keep_attack.view(-1, 1, 1, 1)
                data_pgd.data = data_candidates + eta
            elif config.attack.norm == 'l2':
                # l_2 PGD
                # Assumes X_candidates and X_pgd are batched tensors where the first dimension is
                # a batch dimension, i.e., .view() assumes batched images as a 4D Tensor
                grad_norms = torch.linalg.norm(data_pgd.grad.view(
                    data_pgd.shape[0], -1),
                                               dim=1)
                eta = step_size * \
                    data_pgd.grad / grad_norms.view(-1, 1, 1, 1)
                data_pgd = Variable(data_pgd.data + eta, requires_grad=True)
                delta = data_pgd.data - data_candidates
                mask = torch.linalg.norm(delta.view(delta.shape[0], -1),
                                         dim=1) <= eps
                scaling_factor = torch.linalg.norm(delta.view(
                    delta.shape[0], -1),
                                                   dim=1)
                scaling_factor[mask] = eps
                delta *= eps / scaling_factor.view(-1, 1, 1, 1)
                # data_pgd.data = data_candidates + delta * candidate_keep_attack.view(-1, 1, 1, 1)
                data_pgd.data = data_candidates + delta
            # Clip the input to a valid data range.
            data_pgd.data = smart_clamp(data_pgd.data)
        # Check whether the model is certifiably robust on a different label after the attack.
        _, CRA_modelid_idx_map, A_idxs = eval_cascade(config,
                                                      models,
                                                      data_pgd,
                                                      candidate_labels,
                                                      match_y=False)
        CRA_idxs = []
        for _, idxs in CRA_modelid_idx_map.items():
            CRA_idxs += idxs
        if CRA_idxs:
            # If we have found an adversarial example for an input, we stop the attack.
            candidate_keep_attack[torch.tensor(CRA_idxs)] = 0
        if A_idxs:
            # If we have found an adversarial example for an input, we stop the attack.
            candidate_keep_attack[torch.tensor(A_idxs)] = 0
        keep_attack[candidates] = torch.minimum(candidate_keep_attack,
                                                keep_attack[candidates])
    #TODO: fix such that noisy data contains the perturbed inputs
    noisy_data = data[keep_attack == 0]
    return noisy_data, 1 - keep_attack
def attack(config, loader, models, log):
    """Attack every batch in ``loader`` and aggregate cascade statistics.

    For each batch: classify + certify with :func:`eval_cascade`, then run
    :func:`attack_step` on (a) the certified-robust-and-accurate (CRA)
    subset and (b) the accurate-but-uncertified subset.  Accumulates
    dataset-level accuracy / CRA / ERA counters, prints them, and stores
    the final metrics together with the flattened config via
    ``store_experiement``.  ``log`` is currently unused by the body.
    """
    # data_attackable = []
    dataset_size = 0
    # Number of samples in the dataset where the ensemble is certified robust and accurate
    total_num_CRA = 0
    # Number of samples in the dataset where the ensemble is certified robust and accurate
    # but our attack was successful
    total_num_attackable_CRA = 0
    # Number of samples in the dataset where the ensemble is certified robust and accurate
    # and our attack was not successful
    total_num_not_attackable_CRA = 0
    # Number of samples in the dataset where the ensemble is accurate but not certified robust
    total_num_A = 0
    # Number of samples in the dataset where the ensemble is accurate but not certified robust
    # and our attack was successful
    total_num_attackable_A = 0
    # Number of samples in the dataset where the ensemble is accurate and our attack was unsuccessful
    total_num_ERA = 0
    duration = 0
    num_batches = config.data.n_examples // config.data.batch_size
    for batch_id, (data, label) in enumerate(loader):
        start = time()
        dataset_size += data.size(0)
        data, label = data.to(device), label.to(device).long()
        if label.dim() == 2:
            label = label.squeeze(1)
        # CRA_modelid_idx_map is a dictionary mapping from modelid to
        # the list of batch-level indices of points where the ensemble uses
        # model modelid for prediction and the predictions are certified robust & accurate.
        #
        # A_idx is a list of batch-level indices of points that the ensemble
        # predicts accurately but cannot certify robustness. Since all such points are predicted using
        # the last model in the ensemble, we don't need to record the modelid.
        _, CRA_modelid_idx_map, A_idx = eval_cascade(config, models, data,
                                                     label)
        if len(CRA_modelid_idx_map.keys()) == 0 and len(A_idx) == 0:
            # CRA_modelid_idx_map is a empty dictionary, which means no point is both certified robust & accurate.
            # Also, A_idx is an empty list, which means no point is both not certified robust & accurate.
            continue
        num_CRA = 0
        num_attackable_CRA = 0
        if len(CRA_modelid_idx_map.keys()) != 0:
            for modelid, idxs in CRA_modelid_idx_map.items():
                # CRA = certified robust and accurate
                # We take the subset of batch where ensemble certified robust and accurate.
                CRA_data = data[torch.tensor(idxs)]
                CRA_label = label[torch.tensor(idxs)]
                per_mapping_num_CRA = len(idxs)
                num_CRA += per_mapping_num_CRA
                data_attackable_CRA, is_attackable_CRA = attack_step(
                    config, models, Variable(CRA_data), Variable(CRA_label),
                    modelid)
                # data_attackable += data_attackable_CRA
                num_attackable_CRA += len(data_attackable_CRA)
        num_A = 0
        num_attackable_A = 0
        if len(A_idx) != 0:
            # A = accurate (but not certified robust)
            # We take the subset of batch where ensemble is accurate but not certified robust.
            A_data = data[torch.tensor(A_idx)]
            A_label = label[torch.tensor(A_idx)]
            num_A += len(A_idx)
            data_attackable_A, is_attackable_A = attack_step(
                config, models, Variable(A_data), Variable(A_label),
                len(models) - 1)
            # data_attackable += data_attackable_A
            num_attackable_A += len(data_attackable_A)
        num_not_attackable_CRA = num_CRA - num_attackable_CRA
        total_num_CRA += num_CRA
        total_num_not_attackable_CRA += num_not_attackable_CRA
        total_num_attackable_CRA += num_attackable_CRA
        # ERA = accurate predictions that survived the attack.
        num_ERA = (num_CRA + num_A) \
            - (num_attackable_CRA + num_attackable_A)
        total_num_A += num_A
        total_num_attackable_A += num_attackable_A
        total_num_ERA += num_ERA
        # Exponential moving average of per-batch runtime, for the ETA print.
        if duration > 0:
            duration = (time() - start) * 0.05 + duration * 0.95
        else:
            duration = time() - start
        # avoid divide-by-zero error.
        num_CRA += 1e-16
        if config.verbose:
            print(
                f"Batch {batch_id}/{num_batches}" +
                f" | Clean Accuracy: {(num_CRA+num_A)/config.data.batch_size:.3f} "
                +
                f" | Unsound CRA: {num_CRA/config.data.batch_size:.3f} " +
                f" | Post-Attack Unsound CRA: {num_not_attackable_CRA/config.data.batch_size:.3f}"
                +
                f" | Attackable Certificate Ratio: {num_attackable_CRA/num_CRA:.3f}"
                + f" | ERA: {num_ERA/config.data.batch_size:.3f}" +
                f" | ETA: {0.0167 * duration * (num_batches - batch_id - 1):.1f} min"
            )
    # avoid divide-by-zero error.
    total_num_CRA += 1e-16
    metrics = {
        "acc": float((total_num_CRA + total_num_A) / dataset_size),
        "pre_unsound_cra": float(total_num_CRA / dataset_size),
        "post_unsound_cra": float(total_num_not_attackable_CRA / dataset_size),
        "attackable_cra": float(total_num_attackable_CRA / total_num_CRA),
        "era": float(total_num_ERA / dataset_size),
    }
    print("Num inputs: ", dataset_size)
    print("Clean Accuracy: ", (total_num_CRA + total_num_A) / dataset_size)
    print("Unsound CRA: ", total_num_CRA / dataset_size)
    print("Post-Attack Unsound CRA: ",
          total_num_not_attackable_CRA / dataset_size)
    print("Attackable Certificate Ratio: ",
          total_num_attackable_CRA / total_num_CRA)
    print("ERA: ", total_num_ERA / dataset_size)
    # Persist config + metrics to the experiments database.
    flat_config = tree_flatten(config.to_dict(), reducer='underscore')
    flat_config.update(metrics)
    store_experiement(**flat_config)
    return
_CONFIG = config_flags.DEFINE_config_file('config')


def main(_):
    """Entry point: load a cascade checkpoint and attack it on the test set.

    Behavior is driven entirely by the ml_collections config supplied via
    the ``--config`` flag.
    """
    config = _CONFIG.value
    config.lock()
    print(config)
    # Seed every RNG in play for reproducibility.
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)
    random.seed(config.seed)
    np.random.seed(config.seed)
    setproctitle.setproctitle(config.io.output_file)
    batch_size = config.data.batch_size
    if config.data.dataset == "mnist":
        train_loader, test_loader = pblm.mnist_loaders(batch_size)
        select_model = select_mnist_model
    elif config.data.dataset == "cifar":
        train_loader, test_loader = pblm.cifar_loaders(batch_size)
        select_model = select_cifar_model
    else:
        raise ValueError(
            f'{config.data.dataset} is not a valid dataset. Use "mnist" or "cifar".'
        )
    # The checkpoint stores one state_dict per cascade stage.
    d = torch.load(config.model.directory)
    models = []
    for sd in d['state_dict']:
        m = select_model(config.model.architecture)
        m.load_state_dict(sd)
        models.append(m)
    num_models = len(models)
    print("number of models: ", num_models)
    for model in models:
        model.eval()
    train_log = open(config.io.output_file + "/" + "train_attack", "w")
    # BUG FIX: the test-log path previously used "/ " (slash + space),
    # creating a file literally named " test_attack" with a leading space.
    test_log = open(config.io.output_file + "/" + "test_attack", "w")
    # attack(config, train_loader, models, train_log)
    attack(config, test_loader, models, test_log)


if __name__ == '__main__':
    app.run(main)
| 25,200 | 38.071318 | 114 | py |
ensembleKW | ensembleKW-main/examples/fashion_mnist.py | import waitGPU
waitGPU.wait(utilization=20)#, available_memory=11000, interval=10)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cvxpy as cp
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import setproctitle
import argparse
import problems as pblm
from trainer import *
if __name__ == "__main__":
    # Robust training on Fashion-MNIST with a linear epsilon warm-up.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=50)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--epsilon", type=float, default=0.1)
    parser.add_argument("--starting_epsilon", type=float, default=None)
    parser.add_argument('--prefix')
    parser.add_argument('--baseline', action='store_true')
    # NOTE(review): default is the string '1'; argparse converts string
    # defaults through type=int, so this behaves like default=1.
    parser.add_argument('--verbose', type=int, default='1')
    parser.add_argument('--alpha_grad', action='store_true')
    parser.add_argument('--scatter_grad', action='store_true')
    parser.add_argument('--l1_proj', type=int, default=None)
    parser.add_argument('--large', action='store_true')
    parser.add_argument('--vgg', action='store_true')
    args = parser.parse_args()
    # Default run prefix encodes epsilon and learning rate ('.' -> '_').
    args.prefix = args.prefix or 'fashion_mnist_conv_{:.4f}_{:.4f}_0'.format(args.epsilon, args.lr).replace(".","_")
    setproctitle.setproctitle(args.prefix)
    train_log = open(args.prefix + "_train.log", "w")
    test_log = open(args.prefix + "_test.log", "w")
    # Training uses the requested batch size; evaluation uses batch size 2.
    train_loader, _ = pblm.fashion_mnist_loaders(args.batch_size)
    _, test_loader = pblm.fashion_mnist_loaders(2)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    if args.large:
        model = pblm.mnist_model_large().cuda()
    elif args.vgg:
        model = pblm.mnist_model_vgg().cuda()
    else:
        model = pblm.mnist_model().cuda()
    opt = optim.Adam(model.parameters(), lr=args.lr)
    for t in range(args.epochs):
        # Linearly warm up epsilon over the first half of training, then
        # hold it at the target value.
        if t <= args.epochs//2 and args.starting_epsilon is not None:
            epsilon = args.starting_epsilon + (t/(args.epochs//2))*(args.epsilon - args.starting_epsilon)
        else:
            epsilon = args.epsilon
        train_robust(train_loader, model, opt, epsilon, t, train_log,
                args.verbose,
                args.alpha_grad, args.scatter_grad, l1_proj=args.l1_proj)
        # Evaluation always uses the full target epsilon.
        evaluate_robust(test_loader, model, args.epsilon, t, test_log, args.verbose)
torch.save(model.state_dict(), args.prefix + "_model.pth") | 2,618 | 36.956522 | 116 | py |
ensembleKW | ensembleKW-main/examples/problems.py | # a hack to ensure scripts search cwd
import sys
sys.path.append('.')
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn.functional as F
import numpy as np
import torch.utils.data as td
import argparse
from convex_adversarial import epsilon_from_model, DualNetBounds
from convex_adversarial import Dense, DenseSequential
import math
import os
def model_wide(in_ch, out_width, k):
    """Width-scaled convnet: two stride-2 convs plus a two-layer head.

    `k` multiplies every channel/feature count; `out_width` is the spatial
    size after the two downsampling convs.
    """
    layers = [
        nn.Conv2d(in_ch, 4 * k, 4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(4 * k, 8 * k, 4, stride=2, padding=1),
        nn.ReLU(),
        Flatten(),
        nn.Linear(8 * k * out_width * out_width, k * 128),
        nn.ReLU(),
        nn.Linear(k * 128, 10),
    ]
    return nn.Sequential(*layers)
def model_deep(in_ch, out_width, k, n1=8, n2=16, linear_size=100):
    """Depth-scaled convnet: two groups of k conv layers, each group ending in
    a stride-2 downsample, followed by a `linear_size`-unit classifier head.

    Bug fix: the final layer was hard-coded as ``nn.Linear(100, 10)``, which
    breaks with a shape mismatch whenever ``linear_size != 100``; it now uses
    ``linear_size``. Default behavior (linear_size=100) is unchanged.
    """
    def group(inf, outf, N):
        # One group: for N == 1 a single stride-2 conv; otherwise N-1
        # stride-1 convs followed by one stride-2 downsampling conv.
        if N == 1:
            conv = [nn.Conv2d(inf, outf, 4, stride=2, padding=1),
                    nn.ReLU()]
        else:
            conv = [nn.Conv2d(inf, outf, 3, stride=1, padding=1),
                    nn.ReLU()]
            for _ in range(1, N-1):
                conv.append(nn.Conv2d(outf, outf, 3, stride=1, padding=1))
                conv.append(nn.ReLU())
            conv.append(nn.Conv2d(outf, outf, 4, stride=2, padding=1))
            conv.append(nn.ReLU())
        return conv
    conv1 = group(in_ch, n1, k)
    conv2 = group(n1, n2, k)
    model = nn.Sequential(
        *conv1,
        *conv2,
        Flatten(),
        nn.Linear(n2*out_width*out_width, linear_size),
        nn.ReLU(),
        nn.Linear(linear_size, 10)
    )
    return model
class Flatten(nn.Module):
    """Reshape a batched tensor (N, *) to (N, prod(*)) so conv feature maps
    can feed into nn.Linear layers."""

    def forward(self, x):
        # Keep the batch dimension, merge everything else into one axis.
        batch = x.size(0)
        return x.view(batch, -1)
def mnist_loaders(batch_size, shuffle_test=False):
    """Return (train_loader, test_loader) over torchvision MNIST as [0,1] tensors.

    NOTE(review): the *train* loader is built with shuffle=False, unlike
    fashion_mnist_loaders/svhn_loaders below — presumably to keep example
    order (and hence cascade subset indices) deterministic; confirm before
    changing.
    """
    mnist_train = datasets.MNIST("./data", train=True, download=True, transform=transforms.ToTensor())
    mnist_test = datasets.MNIST("./data", train=False, download=True, transform=transforms.ToTensor())
    train_loader = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=False, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=shuffle_test, pin_memory=True)
    return train_loader, test_loader
def fashion_mnist_loaders(batch_size):
    """Return (train_loader, test_loader) for Fashion-MNIST.

    Bug fix: this previously instantiated ``datasets.MNIST``, so it silently
    downloaded and served regular MNIST digits out of the ./fashion_mnist
    directory; it now uses torchvision's ``FashionMNIST`` dataset.
    """
    mnist_train = datasets.FashionMNIST("./fashion_mnist", train=True,
                                        download=True, transform=transforms.ToTensor())
    mnist_test = datasets.FashionMNIST("./fashion_mnist", train=False,
                                       download=True, transform=transforms.ToTensor())
    train_loader = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, pin_memory=True)
    return train_loader, test_loader
def mnist_500():
    """Fully-connected MNIST classifier with a single 500-unit hidden layer."""
    hidden = 500
    return nn.Sequential(
        Flatten(),
        nn.Linear(28 * 28, hidden),
        nn.ReLU(),
        nn.Linear(hidden, 10),
    )
def mnist_model():
    """Small MNIST convnet: two stride-2 convs, then a 100-unit head.

    Linear(32*7*7, ...) implies the 28x28 input is downsampled to 7x7.
    """
    feature_extractor = [
        nn.Conv2d(1, 16, 4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(16, 32, 4, stride=2, padding=1),
        nn.ReLU(),
    ]
    classifier = [
        Flatten(),
        nn.Linear(32 * 7 * 7, 100),
        nn.ReLU(),
        nn.Linear(100, 10),
    ]
    return nn.Sequential(*feature_extractor, *classifier)
def mnist_model_wide(k):
    """Width-scaled MNIST model: 1 input channel, 7x7 spatial size after the
    two downsampling convs in model_wide, width multiplier `k`."""
    return model_wide(1, 7, k)
def mnist_model_deep(k):
    """Depth-scaled MNIST model: 1 input channel, 7x7 final spatial size,
    `k` conv layers per group in model_deep."""
    return model_deep(1, 7, k)
def mnist_model_large():
    """The 'large' MNIST convnet (--large flag): four conv layers followed by
    a three-layer 512-unit classifier head."""
    conv_layers = [
        nn.Conv2d(1, 32, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(32, 32, 4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(32, 64, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(64, 64, 4, stride=2, padding=1),
        nn.ReLU(),
    ]
    head_layers = [
        Flatten(),
        nn.Linear(64 * 7 * 7, 512),
        nn.ReLU(),
        nn.Linear(512, 512),
        nn.ReLU(),
        nn.Linear(512, 10),
    ]
    return nn.Sequential(*conv_layers, *head_layers)
def replace_10_with_0(y):
    """SVHN target transform: the raw dataset labels digit zero as class 10;
    fold it back to 0 while leaving classes 1-9 untouched."""
    _, remainder = divmod(y, 10)
    return remainder
def svhn_loaders(batch_size):
    """Return (train_loader, test_loader) for SVHN.

    The target_transform remaps the raw label 10 (digit zero) to class 0 so
    labels lie in 0..9 as CrossEntropyLoss expects.
    """
    train = datasets.SVHN("./data", split='train', download=True, transform=transforms.ToTensor(), target_transform=replace_10_with_0)
    test = datasets.SVHN("./data", split='test', download=True, transform=transforms.ToTensor(), target_transform=replace_10_with_0)
    train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False, pin_memory=True)
    return train_loader, test_loader
def svhn_model():
    """SVHN convnet (3-channel 32x32 input, 10 classes).

    Unlike the sibling model constructors, this one moves the model to the
    GPU itself via .cuda() before returning.
    """
    layers = [
        nn.Conv2d(3, 16, 4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(16, 32, 4, stride=2, padding=1),
        nn.ReLU(),
        Flatten(),
        nn.Linear(32 * 8 * 8, 100),
        nn.ReLU(),
        nn.Linear(100, 10),
    ]
    return nn.Sequential(*layers).cuda()
def har_loaders(batch_size):
    """Return (train_loader, test_loader) for the UCI HAR dataset.

    Reads the pre-extracted 561-feature text files from ./data; labels in the
    files are 1..6, so 1 is subtracted to get 0-based classes.
    """
    X_te = torch.from_numpy(np.loadtxt('./data/UCI HAR Dataset/test/X_test.txt')).float()
    X_tr = torch.from_numpy(np.loadtxt('./data/UCI HAR Dataset/train/X_train.txt')).float()
    y_te = torch.from_numpy(np.loadtxt('./data/UCI HAR Dataset/test/y_test.txt')-1).long()
    y_tr = torch.from_numpy(np.loadtxt('./data/UCI HAR Dataset/train/y_train.txt')-1).long()
    har_train = td.TensorDataset(X_tr, y_tr)
    har_test = td.TensorDataset(X_te, y_te)
    train_loader = torch.utils.data.DataLoader(har_train, batch_size=batch_size, shuffle=True, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(har_test, batch_size=batch_size, shuffle=False, pin_memory=True)
    return train_loader, test_loader
def har_500_model():
    """One-hidden-layer (500 unit) MLP for the 561-feature UCI HAR data,
    6 activity classes."""
    hidden = 500
    return nn.Sequential(
        nn.Linear(561, hidden),
        nn.ReLU(),
        nn.Linear(hidden, 6),
    )
def har_500_250_model():
    """MLP 561 -> 500 -> 250 -> 6 for UCI HAR."""
    widths = [561, 500, 250]
    layers = []
    for w_in, w_out in zip(widths, widths[1:]):
        layers += [nn.Linear(w_in, w_out), nn.ReLU()]
    layers.append(nn.Linear(widths[-1], 6))
    return nn.Sequential(*layers)
def har_500_250_100_model():
    """MLP 561 -> 500 -> 250 -> 100 -> 6 for UCI HAR."""
    widths = [561, 500, 250, 100]
    layers = []
    for w_in, w_out in zip(widths, widths[1:]):
        layers += [nn.Linear(w_in, w_out), nn.ReLU()]
    layers.append(nn.Linear(widths[-1], 6))
    return nn.Sequential(*layers)
def har_resnet_model():
    """Skip-connection ('resnet'-style) MLP for UCI HAR built from the
    convex_adversarial Dense/DenseSequential containers.

    NOTE(review): Dense(...) wraps layers so DenseSequential can route earlier
    activations forward as skip connections; the exact wiring is defined by
    the external convex_adversarial package — confirm against its source
    before modifying this architecture.
    """
    model = DenseSequential(
        Dense(nn.Linear(561, 561)),
        nn.ReLU(),
        Dense(nn.Sequential(), None, nn.Linear(561,561)),
        nn.ReLU(),
        nn.Linear(561,6)
    )
    return model
def cifar_loaders(batch_size, shuffle_test=False):
    """Return (train_loader, test_loader) for CIFAR-10, normalized per channel.

    Random flip/crop augmentation is present but commented out, and the train
    loader uses shuffle=False — NOTE(review): presumably to keep example order
    deterministic for the cascade's recorded sampler indices; confirm before
    enabling shuffling.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.225, 0.225, 0.225])
    train = datasets.CIFAR10('./data', train=True, download=True,
                             transform=transforms.Compose([
                                 # transforms.RandomHorizontalFlip(),
                                 # transforms.RandomCrop(32, 4),
                                 transforms.ToTensor(),
                                 normalize,
                             ]))
    test = datasets.CIFAR10('./data', train=False,
                            transform=transforms.Compose([transforms.ToTensor(), normalize]), download=True)
    train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size,
                                               shuffle=False, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size,
                                              shuffle=shuffle_test, pin_memory=True)
    return train_loader, test_loader
def cifar_model():
    """Small CIFAR-10 convnet; conv weights get He-style normal init
    (std = sqrt(2 / fan_out)) and conv biases are zeroed."""
    net = nn.Sequential(
        nn.Conv2d(3, 16, 4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(16, 32, 4, stride=2, padding=1),
        nn.ReLU(),
        Flatten(),
        nn.Linear(32 * 8 * 8, 100),
        nn.ReLU(),
        nn.Linear(100, 10),
    )
    for module in net.modules():
        if isinstance(module, nn.Conv2d):
            fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
            module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            module.bias.data.zero_()
    return net
def cifar_model_large():
    """Large CIFAR-10 convnet: four conv layers plus a three-layer 512-unit
    classifier head, with He-style conv initialization.

    Bug fix: the weight-initialization loop sat after an early ``return`` and
    was unreachable dead code, so the He init (used by cifar_model and
    cifar_model_resnet) never ran. The init now executes before returning and
    the duplicated return is removed.
    """
    model = nn.Sequential(
        nn.Conv2d(3, 32, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(32, 32, 4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(32, 64, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(64, 64, 4, stride=2, padding=1),
        nn.ReLU(),
        Flatten(),
        nn.Linear(64*8*8,512),
        nn.ReLU(),
        nn.Linear(512,512),
        nn.ReLU(),
        nn.Linear(512,10)
    )
    # He-style init for conv layers, matching cifar_model.
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
            m.bias.data.zero_()
    return model
def cifar_model_resnet(N = 5, factor=10):
    """Residual-style CIFAR-10 network built from convex_adversarial's
    Dense/DenseSequential containers.

    N controls the number of residual blocks per stage and `factor` scales
    channel widths. Conv layers get He-style init; biases are zeroed where
    present.
    """
    def block(in_filters, out_filters, k, downsample):
        # A residual block: a main conv path plus a skip path. Downsampling
        # blocks use stride 2 and matching kernel sizes on both paths.
        if not downsample:
            k_first = 3
            skip_stride = 1
            k_skip = 1
        else:
            k_first = 4
            skip_stride = 2
            k_skip = 2
        # NOTE(review): the Dense(..., None, ...) wiring of the skip path is
        # defined by the external convex_adversarial package; confirm its
        # semantics before changing this structure.
        return [
            Dense(nn.Conv2d(in_filters, out_filters, k_first, stride=skip_stride, padding=1)),
            nn.ReLU(),
            Dense(nn.Conv2d(in_filters, out_filters, k_skip, stride=skip_stride, padding=0),
                  None,
                  nn.Conv2d(out_filters, out_filters, k, stride=1, padding=1)),
            nn.ReLU()
        ]
    # Stem plus three stages; stages 3 and 4 downsample on entry.
    conv1 = [nn.Conv2d(3,16,3,stride=1,padding=1), nn.ReLU()]
    conv2 = block(16,16*factor,3, False)
    for _ in range(N):
        conv2.extend(block(16*factor,16*factor,3, False))
    conv3 = block(16*factor,32*factor,3, True)
    for _ in range(N-1):
        conv3.extend(block(32*factor,32*factor,3, False))
    conv4 = block(32*factor,64*factor,3, True)
    for _ in range(N-1):
        conv4.extend(block(64*factor,64*factor,3, False))
    layers = (
        conv1 +
        conv2 +
        conv3 +
        conv4 +
        [Flatten(),
         nn.Linear(64*factor*8*8,1000),
         nn.ReLU(),
         nn.Linear(1000, 10)]
    )
    model = DenseSequential(
        *layers
    )
    # He-style init for all conv layers (skip-path convs may lack a bias).
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
            if m.bias is not None:
                m.bias.data.zero_()
    return model
def argparser(batch_size=50, epochs=20, seed=0, verbose=1, lr=1e-3,
              epsilon=0.1, starting_epsilon=None,
              proj=None,
              norm_train='l1', norm_test='l1',
              opt='sgd', momentum=0.9, weight_decay=5e-4):
    """Build and parse the shared training CLI; returns the parsed args.

    The keyword arguments are the per-script defaults. Side effects: when
    --prefix is given, it is extended with every non-banned argument value to
    form a unique experiment/file name; when --cuda_ids is given,
    CUDA_VISIBLE_DEVICES is set in the environment.
    """
    parser = argparse.ArgumentParser()
    # optimizer settings
    parser.add_argument('--opt', default=opt)
    parser.add_argument('--momentum', type=float, default=momentum)
    parser.add_argument('--weight_decay', type=float, default=weight_decay)
    parser.add_argument('--batch_size', type=int, default=batch_size)
    parser.add_argument('--test_batch_size', type=int, default=batch_size)
    parser.add_argument('--epochs', type=int, default=epochs)
    parser.add_argument("--lr", type=float, default=lr)
    # epsilon settings
    parser.add_argument("--epsilon", type=float, default=epsilon)
    parser.add_argument("--starting_epsilon", type=float, default=starting_epsilon)
    parser.add_argument('--schedule_length', type=int, default=10)
    # projection settings
    parser.add_argument('--proj', type=int, default=proj)
    parser.add_argument('--norm_train', default=norm_train)
    parser.add_argument('--norm_test', default=norm_test)
    # model arguments
    parser.add_argument('--model', default=None)
    parser.add_argument('--model_factor', type=int, default=8)
    parser.add_argument('--cascade', type=int, default=1)
    parser.add_argument('--method', default=None)
    parser.add_argument('--resnet_N', type=int, default=1)
    parser.add_argument('--resnet_factor', type=int, default=1)
    # other arguments
    parser.add_argument('--prefix')
    parser.add_argument('--load')
    parser.add_argument('--real_time', action='store_true')
    parser.add_argument('--seed', type=int, default=seed)
    parser.add_argument('--verbose', type=int, default=verbose)
    parser.add_argument('--cuda_ids', default=None)
    parser.add_argument('--print_log', type=bool, default=True)
    args = parser.parse_args()
    # No explicit schedule start => start at the target epsilon (no warmup).
    if args.starting_epsilon is None:
        args.starting_epsilon = args.epsilon
    if args.prefix:
        if args.model is not None:
            args.prefix += '_'+args.model
        if args.method is not None:
            args.prefix += '_'+args.method
        # Arguments that never go into the generated filename.
        banned = ['verbose', 'prefix',
                  'resume', 'baseline', 'eval',
                  'method', 'model', 'cuda_ids', 'load', 'real_time',
                  'test_batch_size']
        if args.method == 'baseline':
            banned += ['epsilon', 'starting_epsilon', 'schedule_length',
                       'l1_test', 'l1_train', 'm', 'l1_proj']
        # Ignore these parameters for filename since we never change them
        banned += ['momentum', 'weight_decay']
        if args.cascade == 1:
            banned += ['cascade']
        # if not using a model that uses model_factor,
        # ignore model_factor
        if args.model not in ['wide', 'deep']:
            banned += ['model_factor']
        # if args.model != 'resnet':
        banned += ['resnet_N', 'resnet_factor']
        # Append every remaining argument as _name_value for reproducibility.
        for arg in sorted(vars(args)):
            if arg not in banned and getattr(args,arg) is not None:
                args.prefix += '_' + arg + '_' +str(getattr(args, arg))
        if args.schedule_length > args.epochs:
            raise ValueError('Schedule length for epsilon ({}) is greater than '
                             'number of epochs ({})'.format(args.schedule_length, args.epochs))
    else:
        args.prefix = 'temporary'
    if args.cuda_ids is not None:
        print('Setting CUDA_VISIBLE_DEVICES to {}'.format(args.cuda_ids))
        os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_ids
    return args
def args2kwargs(args, X=None):
    """Translate parsed CLI args into keyword args for the robust loss.

    Adds 'proj' when a projection dimension was requested, and enables
    'parallel' with the parsed 'device_ids' list when more than one CUDA id
    was supplied. `X` is accepted for interface compatibility but unused.
    """
    kwargs = {}
    if args.proj is not None:
        kwargs['proj'] = args.proj
    if args.cuda_ids is not None:
        device_ids = []
        for token in args.cuda_ids.split(","):
            print('id_str: ', token, int(token))
            device_ids.append(int(token))
        # A single id means plain single-GPU execution: no parallel kwargs.
        if len(device_ids) > 1:
            kwargs['parallel'] = True
            kwargs['device_ids'] = device_ids
    return kwargs
def argparser_evaluate(epsilon=0.1, norm='l1'):
    """Build and parse the CLI for the standalone evaluation script.

    Side effect: when --cuda_ids is given, CUDA_VISIBLE_DEVICES is exported
    before returning the parsed args.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--epsilon", type=float, default=epsilon)
    parser.add_argument('--proj', type=int, default=None)
    parser.add_argument('--norm', default=norm)
    parser.add_argument('--model', default=None)
    parser.add_argument('--dataset', default='mnist')
    parser.add_argument('--load')
    parser.add_argument('--output')
    parser.add_argument('--real_time', action='store_true')
    parser.add_argument('--verbose', type=int, default=True)
    parser.add_argument('--cuda_ids', default=None)
    parser.add_argument('--batch_size', type=int, default=1)
    args = parser.parse_args()
    if args.cuda_ids is None:
        return args
    print('Setting CUDA_VISIBLE_DEVICES to {}'.format(args.cuda_ids))
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_ids
    return args
ensembleKW | ensembleKW-main/examples/svhn.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import setproctitle
import argparse
import problems as pblm
from trainer import *
if __name__ == "__main__":
    # CLI for (robust) training on SVHN.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=20)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--epsilon", type=float, default=0.1)
    parser.add_argument("--starting_epsilon", type=float, default=None)
    parser.add_argument('--prefix')
    parser.add_argument('--baseline', action='store_true')
    parser.add_argument('--verbose', type=int, default='1')
    parser.add_argument('--alpha_grad', action='store_true')
    parser.add_argument('--scatter_grad', action='store_true')
    parser.add_argument('--l1_proj', type=int, default=None)
    args = parser.parse_args()
    # Derived experiment name; dots replaced so it is filesystem-friendly.
    args.prefix = args.prefix or 'svhn_conv_{:.4f}_{:.4f}_0'.format(args.epsilon, args.lr).replace(".","_")
    setproctitle.setproctitle(args.prefix)
    train_log = open(args.prefix + "_train.log", "w")
    test_log = open(args.prefix + "_test.log", "w")
    train_loader, test_loader = pblm.svhn_loaders(args.batch_size)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # new svhn
    model = pblm.svhn_model().cuda()
    opt = optim.Adam(model.parameters(), lr=args.lr)
    for t in range(args.epochs):
        if args.baseline:
            train_baseline(train_loader, model, opt, t, train_log, args.verbose)
            evaluate_baseline(test_loader, model, t, test_log, args.verbose)
        else:
            # Linearly anneal epsilon over the first half of training.
            if t <= args.epochs//2 and args.starting_epsilon is not None:
                epsilon = args.starting_epsilon + (t/(args.epochs//2))*(args.epsilon - args.starting_epsilon)
            else:
                epsilon = args.epsilon
            # NOTE(review): the positional args after `args.verbose`
            # (alpha_grad, scatter_grad) do not line up with
            # trainer.train_robust's current signature (real_time, clip_grad)
            # — looks like a stale call against an older API; confirm.
            train_robust(train_loader, model, opt, epsilon, t, train_log,
                         args.verbose,
                         args.alpha_grad, args.scatter_grad, l1_proj=args.l1_proj)
            evaluate_robust(test_loader, model, args.epsilon, t, test_log, args.verbose)
        # Checkpoint every epoch (overwrites the same file).
        torch.save(model.state_dict(), args.prefix + "_model.pth")
ensembleKW | ensembleKW-main/examples/ensemble.py | import torch
from evaluate import select_mnist_model, select_cifar_model
# --- which ensemble to assemble ---------------------------------------------
model_type = "small"
eps = "8px"
data = "cifar"
l = "inf"

# Pick the architecture constructor matching the dataset.
if data == "mnist":
    select_model = select_mnist_model
elif data == "cifar":
    select_model = select_cifar_model


def _load_member(path):
    # Each checkpoint stores a list of state dicts; entry 0 is the model.
    state = torch.load(path)['state_dict'][0]
    net = select_model(model_type)
    net.load_state_dict(state)
    return net


name_seq = "{}_{}_{}.pth".format(data, model_type, eps)
# name_seq = "{}_{}.pth".format(data, model_type)

# First member: the sequentially-trained model; then two independently
# trained ("more_*") members.
models = [_load_member("./../models/seq_trained/l_" + str(l) + "/" + name_seq)]
for member in range(2):
    fname = "more_{}_{}_{}_{}.pth".format(data, model_type, eps, member + 1)
    models.append(_load_member("./../models/non_seq_trained/l_" + str(l) + "/" + fname))

print("number of models: ", len(models))
# Save the combined ensemble under the sequential model's name.
torch.save({
    'state_dict': [m.state_dict() for m in models],
    'epoch': 60,
}, "./../models/non_seq_trained/" + name_seq)
| 1,006 | 26.972222 | 82 | py |
ensembleKW | ensembleKW-main/examples/cifar.py | # import waitGPU
# import setGPU
# waitGPU.wait(utilization=20, available_memory=10000, interval=60)
# waitGPU.wait(gpu_ids=[1,3], utilization=20, available_memory=10000, interval=60)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import random
import setproctitle
import problems as pblm
from trainer import *
import math
import numpy
def select_model(m):
    """Instantiate the CIFAR architecture named by `m` on the GPU.

    Uses the module-level `args` (set in __main__) for the resnet
    hyper-parameters; any unrecognized name falls back to the small model.
    """
    if m == 'large':
        return pblm.cifar_model_large().cuda()
    if m == 'resnet':
        return pblm.cifar_model_resnet(N=args.resnet_N, factor=args.resnet_factor).cuda()
    return pblm.cifar_model().cuda()
if __name__ == "__main__":
    # Certified-robust training on CIFAR-10, optionally as a cascade of
    # models; --method also selects baseline or Madry-PGD training.
    args = pblm.argparser(epsilon = 0.0347, starting_epsilon=0.001, batch_size = 50,
                          opt='sgd', lr=0.05)
    print("saving file to {}".format(args.prefix))
    setproctitle.setproctitle(args.prefix)
    train_log = open(args.prefix + "_train.log", "w")
    test_log = open(args.prefix + "_test.log", "w")
    train_loader, _ = pblm.cifar_loaders(args.batch_size)
    _, test_loader = pblm.cifar_loaders(args.test_batch_size)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    random.seed(0)
    numpy.random.seed(0)
    # Indices of the examples each cascade stage was trained on (for replay).
    sampler_indices = []
    model = [select_model(args.model)]
    # Extra kwargs forwarded to robust_loss (projection / multi-GPU settings).
    kwargs = pblm.args2kwargs(args)
    best_err = 1
    for _ in range(0,args.cascade):
        # Each cascade stage after the first trains a fresh model on only the
        # examples the previous stages failed to certify.
        if _ > 0:
            # reduce dataset to just uncertified examples
            print("Reducing dataset...")
            train_loader = sampler_robust_cascade(train_loader, model, args.epsilon,
                                                  args.test_batch_size,
                                                  norm_type=args.norm_test, **kwargs)
            if train_loader is None:
                print('No more examples, terminating')
                break
            sampler_indices.append(train_loader.sampler.indices)
            print("Adding a new model")
            model.append(select_model(args.model))
        if args.opt == 'adam':
            opt = optim.Adam(model[-1].parameters(), lr=args.lr)
        elif args.opt == 'sgd':
            opt = optim.SGD(model[-1].parameters(), lr=args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)
        else:
            raise ValueError("Unknown optimizer")
        # Halve the LR every 10 epochs, counting from the end of the epsilon
        # warmup schedule.
        lr_scheduler = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.5)
        # `np` is in scope via `from trainer import *` (trainer imports numpy as np).
        eps_schedule = np.linspace(args.starting_epsilon,
                                   args.epsilon,
                                   args.schedule_length)
        for t in range(args.epochs):
            lr_scheduler.step(epoch=max(t-len(eps_schedule), 0))
            if t < len(eps_schedule) and args.starting_epsilon is not None:
                epsilon = float(eps_schedule[t])
            else:
                epsilon = args.epsilon
            # standard training
            if args.method == 'baseline':
                train_baseline(train_loader, model[0], opt, t, train_log,
                               args.verbose)
                err = evaluate_baseline(test_loader, model[0], t, test_log,
                                        args.verbose)
            # madry training
            elif args.method=='madry':
                train_madry(train_loader, model[0], args.epsilon,
                            opt, t, train_log, args.verbose)
                err = evaluate_madry(test_loader, model[0], args.epsilon,
                                     t, test_log, args.verbose)
            # robust cascade training
            elif args.cascade > 1:
                train_robust(train_loader, model[-1], opt, epsilon, t,
                             train_log, args.verbose, args.real_time,
                             norm_type=args.norm_train, bounded_input=False,
                             clip_grad=1, **kwargs)
                err = evaluate_robust_cascade(test_loader, model,
                                              args.epsilon, t, test_log, args.verbose,
                                              norm_type=args.norm_test, bounded_input=False,
                                              **kwargs)
            # robust training
            else:
                train_robust(train_loader, model[0], opt, epsilon, t,
                             train_log, args.verbose, args.real_time,
                             norm_type=args.norm_train, bounded_input=False, clip_grad=1,
                             **kwargs)
                err = evaluate_robust(test_loader, model[0], args.epsilon, t,
                                      test_log, args.verbose, args.real_time,
                                      norm_type=args.norm_test, bounded_input=False,
                                      **kwargs)
            # Keep both a best-so-far snapshot and a rolling checkpoint.
            if err < best_err:
                best_err = err
                torch.save({
                    'state_dict' : [m.state_dict() for m in model],
                    'err' : best_err,
                    'epoch' : t,
                    'sampler_indices' : sampler_indices
                }, args.prefix + "_best.pth")
            torch.save({
                'state_dict': [m.state_dict() for m in model],
                'err' : err,
                'epoch' : t,
                'sampler_indices' : sampler_indices
            }, args.prefix + "_checkpoint.pth")
| 5,550 | 36.506757 | 90 | py |
ensembleKW | ensembleKW-main/examples/trainer.py |
import torch
import torch.nn as nn
from torch.autograd import Variable
from convex_adversarial import robust_loss, robust_loss_parallel
import torch.optim as optim
import numpy as np
import time
import gc
from attacks import _pgd
# Debug switch: when True, the train/eval loops in this module bail out
# after ~10 minibatches so a full pass can be smoke-tested quickly.
DEBUG = False
def train_robust(loader, model, opt, epsilon, epoch, log, verbose,
                 real_time=False, clip_grad=None, print_log=True, **kwargs):
    """One epoch of certified-robust training: optimizes robust_loss at the
    given epsilon, logging both robust and clean loss/error.

    Extra **kwargs are forwarded to robust_loss (norm type, projection, etc.).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    robust_losses = AverageMeter()
    robust_errors = AverageMeter()
    model.train()
    end = time.time()
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda().long()
        if y.dim() == 2:
            y = y.squeeze(1)
        data_time.update(time.time() - end)
        # Clean loss/error are for monitoring only, so no grad is needed.
        with torch.no_grad():
            out = model(Variable(X))
            ce = nn.CrossEntropyLoss()(out, Variable(y))
            err = (out.max(1)[1] != y).float().sum() / X.size(0)
        robust_ce, robust_err = robust_loss(model, epsilon,
                                            Variable(X), Variable(y),
                                            **kwargs)
        # The optimization step uses the robust loss, not the clean one.
        opt.zero_grad()
        robust_ce.backward()
        if clip_grad:
            nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
        opt.step()
        # measure accuracy and record loss
        losses.update(ce.item(), X.size(0))
        errors.update(err.item(), X.size(0))
        robust_losses.update(robust_ce.detach().item(), X.size(0))
        robust_errors.update(robust_err, X.size(0))
        # measure elapsed time
        batch_time.update(time.time()-end)
        end = time.time()
        if print_log:
            print(epoch, i, robust_ce.detach().item(),
                  robust_err, ce.item(), err.item(), file=log)
        # real_time redraws the status line in place ('\r') between verbose ticks.
        if verbose and (i % verbose == 0 or real_time):
            endline = '\n' if i % verbose == 0 else '\r'
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Robust loss {rloss.val:.4f} ({rloss.avg:.4f})\t'
                  'Robust error {rerrors.val:.3f} ({rerrors.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {errors.val:.3f} ({errors.avg:.3f})'.format(
                      epoch, i, len(loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, errors=errors,
                      rloss = robust_losses, rerrors = robust_errors), end=endline)
        log.flush()
        # Free per-batch tensors promptly; robust_loss is memory-hungry.
        del X, y, robust_ce, out, ce, err, robust_err
        if DEBUG and i ==10:
            break
    print('')
    torch.cuda.empty_cache()
def evaluate_robust(loader, model, epsilon, epoch, log, verbose,
                    real_time=False, parallel=False, print_log=True, **kwargs):
    """Evaluate certified-robust and clean loss/error over `loader`.

    Runs with gradients globally disabled; returns the average robust error.
    Extra **kwargs are forwarded to robust_loss.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    robust_losses = AverageMeter()
    robust_errors = AverageMeter()
    model.eval()
    end = time.time()
    torch.set_grad_enabled(False)
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda().long()
        if y.dim() == 2:
            y = y.squeeze(1)
        robust_ce, robust_err = robust_loss(model, epsilon, X, y, **kwargs)
        out = model(Variable(X))
        ce = nn.CrossEntropyLoss()(out, Variable(y))
        err = (out.max(1)[1] != y).float().sum() / X.size(0)
        # _,pgd_err = _pgd(model, Variable(X), Variable(y), epsilon)
        # measure accuracy and record loss
        losses.update(ce.item(), X.size(0))
        errors.update(err, X.size(0))
        robust_losses.update(robust_ce.item(), X.size(0))
        robust_errors.update(robust_err, X.size(0))
        # measure elapsed time
        batch_time.update(time.time()-end)
        end = time.time()
        if print_log:
            print(epoch, i, robust_ce.item(), robust_err, ce.item(), err.item(),
                  file=log)
        if verbose and (i % verbose == 0 or real_time):
            # print(epoch, i, robust_ce.data[0], robust_err, ce.data[0], err)
            endline = '\n' if i % verbose == 0 else '\r'
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Robust loss {rloss.val:.3f} ({rloss.avg:.3f})\t'
                  'Robust error {rerrors.val:.3f} ({rerrors.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {error.val:.3f} ({error.avg:.3f})'.format(
                      i, len(loader), batch_time=batch_time,
                      loss=losses, error=errors, rloss = robust_losses,
                      rerrors = robust_errors), end=endline)
        log.flush()
        del X, y, robust_ce, out, ce
        if DEBUG and i ==10:
            break
    # Re-enable autograd before returning to training code.
    torch.set_grad_enabled(True)
    torch.cuda.empty_cache()
    print('')
    print(' * Robust error {rerror.avg:.3f}\t'
          'Error {error.avg:.3f}'
          .format(rerror=robust_errors, error=errors))
    return robust_errors.avg
def train_baseline(loader, model, opt, epoch, log, verbose):
    """One epoch of standard (non-robust) cross-entropy training.

    Fix: replaced the long-deprecated 0-dim indexing ``ce.data[0]`` with
    ``ce.item()`` (the former raises IndexError on modern PyTorch), and the
    error rate is recorded as a Python float via ``err.item()``, matching
    the style of train_robust/train_madry in this module.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    model.train()
    end = time.time()
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda()
        data_time.update(time.time() - end)
        out = model(Variable(X))
        ce = nn.CrossEntropyLoss()(out, Variable(y))
        err = (out.data.max(1)[1] != y).float().sum() / X.size(0)
        opt.zero_grad()
        ce.backward()
        opt.step()
        # measure elapsed time
        batch_time.update(time.time()-end)
        end = time.time()
        # measure accuracy and record loss
        losses.update(ce.item(), X.size(0))
        errors.update(err.item(), X.size(0))
        print(epoch, i, ce.item(), err.item(), file=log)
        if verbose and i % verbose == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {errors.val:.3f} ({errors.avg:.3f})'.format(
                      epoch, i, len(loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, errors=errors))
        log.flush()
def evaluate_baseline(loader, model, epoch, log, verbose):
    """Evaluate clean cross-entropy loss/error over `loader`; returns the
    average error rate.

    Fix: replaced the long-deprecated 0-dim indexing ``ce.data[0]`` with
    ``ce.item()`` (the former raises IndexError on modern PyTorch), and the
    error rate is recorded as a Python float via ``err.item()``, matching
    evaluate_robust/evaluate_madry.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    model.eval()
    end = time.time()
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda()
        out = model(Variable(X))
        ce = nn.CrossEntropyLoss()(out, Variable(y))
        err = (out.data.max(1)[1] != y).float().sum() / X.size(0)
        # print to logfile
        print(epoch, i, ce.item(), err.item(), file=log)
        # measure accuracy and record loss
        losses.update(ce.item(), X.size(0))
        errors.update(err.item(), X.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if verbose and i % verbose == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {error.val:.3f} ({error.avg:.3f})'.format(
                      i, len(loader), batch_time=batch_time, loss=losses,
                      error=errors))
        log.flush()
    print(' * Error {error.avg:.3f}'
          .format(error=errors))
    return errors.avg
def train_madry(loader, model, epsilon, opt, epoch, log, verbose):
    """One epoch of adversarial (Madry-style) training: build an L-inf PGD
    adversarial example per batch and optimize the loss on it.

    Fix: removed the ``opt_pgd = optim.Adam([X_pgd], lr=1e-3)`` that was
    created — and never used — on every one of the 50 PGD iterations; the
    attack takes explicit signed-gradient steps, so the optimizer was dead
    weight allocated inside the hot loop.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    plosses = AverageMeter()
    perrors = AverageMeter()
    model.train()
    end = time.time()
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda()
        data_time.update(time.time() - end)
        # Build the adversarial example with 50 signed-gradient PGD steps.
        X_pgd = Variable(X, requires_grad=True)
        for _ in range(50):
            # Clear model grads so attack backprops don't pollute the update.
            opt.zero_grad()
            loss = nn.CrossEntropyLoss()(model(X_pgd), Variable(y))
            loss.backward()
            eta = 0.01*X_pgd.grad.data.sign()
            X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
            # adjust to be within [-epsilon, epsilon] and valid pixel range
            eta = torch.clamp(X_pgd.data - X, -epsilon, epsilon)
            X_pgd.data = X + eta
            X_pgd.data = torch.clamp(X_pgd.data, 0, 1)
        # Clean statistics (monitoring only).
        out = model(Variable(X))
        ce = nn.CrossEntropyLoss()(out, Variable(y))
        err = (out.data.max(1)[1] != y).float().sum() / X.size(0)
        # Adversarial loss drives the actual parameter update.
        pout = model(Variable(X_pgd.data))
        pce = nn.CrossEntropyLoss()(pout, Variable(y))
        perr = (pout.data.max(1)[1] != y).float().sum() / X.size(0)
        opt.zero_grad()
        pce.backward()
        opt.step()
        batch_time.update(time.time()-end)
        end = time.time()
        losses.update(ce.item(), X.size(0))
        errors.update(err, X.size(0))
        plosses.update(pce.item(), X.size(0))
        perrors.update(perr, X.size(0))
        print(epoch, i, ce.item(), err, file=log)
        if verbose and i % verbose == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'PGD Loss {ploss.val:.4f} ({ploss.avg:.4f})\t'
                  'PGD Error {perrors.val:.3f} ({perrors.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {errors.val:.3f} ({errors.avg:.3f})'.format(
                      epoch, i, len(loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, errors=errors,
                      ploss=plosses, perrors=perrors))
        log.flush()
def evaluate_madry(loader, model, epsilon, epoch, log, verbose):
    """Evaluate clean and PGD-adversarial error over `loader`; returns the
    average *clean* error (PGD error is only logged/printed).

    The attack itself is delegated to attacks._pgd.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    perrors = AverageMeter()
    model.eval()
    end = time.time()
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda()
        out = model(Variable(X))
        ce = nn.CrossEntropyLoss()(out, Variable(y))
        err = (out.data.max(1)[1] != y).float().sum() / X.size(0)
        # # perturb
        _, pgd_err = _pgd(model, Variable(X), Variable(y), epsilon)
        # print to logfile
        print(epoch, i, ce.item(), err, file=log)
        # measure accuracy and record loss
        losses.update(ce.item(), X.size(0))
        errors.update(err, X.size(0))
        perrors.update(pgd_err, X.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if verbose and i % verbose == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'PGD Error {perror.val:.3f} ({perror.avg:.3f})\t'
                  'Error {error.val:.3f} ({error.avg:.3f})'.format(
                      i, len(loader), batch_time=batch_time, loss=losses,
                      error=errors, perror=perrors))
        log.flush()
    print(' * PGD error {perror.avg:.3f}\t'
          'Error {error.avg:.3f}'
          .format(error=errors, perror=perrors))
    return errors.avg
def robust_loss_cascade(models, epsilon, X, y, **kwargs):
    """Cascade robust loss: each model handles the examples it can certify;
    uncertified examples fall through to the next model in `models`.

    Returns (robust_ce, robust_err, ce, err, I) where the first four are
    batch averages over all stages and I holds the original batch indices of
    examples still uncertified after the last model (None if all certified).
    """
    total_robust_ce = 0.
    total_ce = 0.
    total_robust_err = 0.
    total_err = 0.
    batch_size = float(X.size(0))
    # I tracks each surviving example's index in the original batch.
    I = torch.arange(X.size(0)).type_as(y.data)
    # Single examples use the parallelized bound computation.
    if X.size(0) == 1:
        rl = robust_loss_parallel
    else:
        rl = robust_loss
    for j,model in enumerate(models[:-1]):
        out = model(X)
        # Per-example (unreduced) clean loss; `reduce=False` is the old-API
        # spelling of reduction='none'.
        ce = nn.CrossEntropyLoss(reduce=False)(out, y)
        # Certification is checked against the model's own prediction.
        _, uncertified = rl(model, epsilon, X,
                            out.max(1)[1],
                            size_average=False, **kwargs)
        certified = ~uncertified
        # NOTE(review): `l` is reset each stage and only ever appended to —
        # it is effectively unused beyond this loop body.
        l = []
        if certified.sum() == 0:
            pass
            # print("Warning: Cascade stage {} has no certified values.".format(j+1))
        else:
            # Restrict stats to the examples this stage certifies.
            X_cert = X[Variable(certified.nonzero()[:,0])]
            y_cert = y[Variable(certified.nonzero()[:,0])]
            ce = ce[Variable(certified.nonzero()[:,0])]
            out = out[Variable(certified.nonzero()[:,0])]
            err = (out.data.max(1)[1] != y_cert.data).float()
            robust_ce, robust_err = rl(model, epsilon,
                                       X_cert,
                                       y_cert,
                                       size_average=False,
                                       **kwargs)
            # add statistics for certified examples
            total_robust_ce += robust_ce.sum()
            total_ce += ce.data.sum()
            total_robust_err += robust_err.sum()
            total_err += err.sum()
            l.append(certified.sum())
        # reduce data set to uncertified examples
        if uncertified.sum() > 0:
            X = X[Variable(uncertified.nonzero()[:,0])]
            y = y[Variable(uncertified.nonzero()[:,0])]
            I = I[uncertified.nonzero()[:,0]]
        else:
            # Everything certified before reaching the last model: return
            # batch averages with no uncertified indices.
            robust_ce = total_robust_ce/batch_size
            ce = total_ce/batch_size
            robust_err = total_robust_err.item()/batch_size
            err = total_err.item()/batch_size
            return robust_ce, robust_err, ce, err, None
    ####################################################################
    # compute normal ce and robust ce for the last model
    out = models[-1](X)
    ce = nn.CrossEntropyLoss(reduce=False)(out, y)
    err = (out.data.max(1)[1] != y.data).float()
    robust_ce, robust_err = rl(models[-1], epsilon, X, y,
                               size_average=False, **kwargs)
    # update statistics with the remaining model and take the average
    total_robust_ce += robust_ce.sum()
    total_ce += ce.data.sum()
    total_robust_err += robust_err.sum()
    total_err += err.sum()
    robust_ce = total_robust_ce/batch_size
    ce = total_ce/batch_size
    robust_err = total_robust_err.item()/batch_size
    err = total_err.item()/batch_size
    # Which examples remain uncertified even by the final model?
    _, uncertified = rl(models[-1], epsilon,
                        X,
                        out.max(1)[1],
                        size_average=False,
                        **kwargs)
    if uncertified.sum() > 0:
        I = I[uncertified.nonzero()[:,0]]
    else:
        I = None
    return robust_ce, robust_err, ce, err, I
def sampler_robust_cascade(loader, models, epsilon, batch_size, **kwargs):
    """Scan `loader`'s dataset and build a new DataLoader restricted to the
    examples the current cascade fails to certify; returns None when every
    example is certified.

    Gradients are disabled for the whole scan.
    """
    torch.set_grad_enabled(False)
    dataset = loader.dataset
    # Re-wrap the dataset unshuffled so indices refer to dataset positions.
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, pin_memory=True)
    l = []
    start = 0
    total = 0
    for i, (X,y) in enumerate(loader):
        print('Certifying minibatch {}/{} [current total: {}/{}]'.format(i, len(loader), total, len(dataset)), end='\r')
        X = X.cuda()
        y = y.cuda()
        _, _, _, _, uncertified = robust_loss_cascade(models, epsilon,
                                                      Variable(X),
                                                      Variable(y),
                                                      **kwargs)
        if uncertified is not None:
            # Shift batch-local indices by the batch's dataset offset.
            l.append(uncertified+start)
            total += len(uncertified)
        start += X.size(0)
        if DEBUG and i ==10:
            break
    print('')
    torch.set_grad_enabled(True)
    if len(l) > 0:
        # Note: `total` is rebound here from a count to the index tensor.
        total = torch.cat(l)
        sampler = torch.utils.data.sampler.SubsetRandomSampler(total)
        return torch.utils.data.DataLoader(dataset, batch_size=loader.batch_size, shuffle=False, pin_memory=True, sampler=sampler)
    else:
        return None
def evaluate_robust_cascade(loader, models, epsilon, epoch, log, verbose, **kwargs):
    """Evaluate a cascade of models on ``loader`` with the certified robust loss.

    Logs per-batch robust/clean loss and error to ``log`` and returns the
    average robust error over the loader.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    robust_losses = AverageMeter()
    robust_errors = AverageMeter()

    for model in models:
        model.eval()

    torch.set_grad_enabled(False)
    end = time.time()
    for i, (X,y) in enumerate(loader):
        X,y = X.cuda(), y.cuda().long()
        if y.dim() == 2:
            y = y.squeeze(1)

        robust_ce, robust_err, ce, err, _ = robust_loss_cascade(models,
                                                                epsilon,
                                                                Variable(X),
                                                                Variable(y),
                                                                **kwargs)
        # measure accuracy and record loss
        losses.update(ce, X.size(0))
        errors.update(err, X.size(0))
        robust_losses.update(robust_ce.item(), X.size(0))
        robust_errors.update(robust_err, X.size(0))

        # measure elapsed time
        batch_time.update(time.time()-end)
        end = time.time()

        print(epoch, i, robust_ce.item(), robust_err, ce.item(), err,
              file=log)

        if verbose and i % verbose == 0:
            # BUG FIX (dead code): the old expression
            # `'\n' if i % verbose == 0 else '\r'` could never yield '\r'
            # because this branch already requires i % verbose == 0, so the
            # line ending is always a newline.
            endline = '\n'
            # print(epoch, i, robust_ce.data[0], robust_err, ce.data[0], err)
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Robust loss {rloss.val:.3f} ({rloss.avg:.3f})\t'
                  'Robust error {rerrors.val:.3f} ({rerrors.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error {error.val:.3f} ({error.avg:.3f})'.format(
                      i, len(loader), batch_time=batch_time,
                      loss=losses, error=errors, rloss = robust_losses,
                      rerrors = robust_errors), end=endline)
        log.flush()

        del X, y, robust_ce, ce
        if DEBUG and i == 10:
            break
    torch.cuda.empty_cache()
    print('')
    print(' * Robust error {rerror.avg:.3f}\t'
          'Error {error.avg:.3f}'
          .format(rerror=robust_errors, error=errors))
    torch.set_grad_enabled(True)
    return robust_errors.avg
class AverageMeter(object):
    """Tracks the most recent value and a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += n * val
        self.avg = self.sum / self.count
| 18,991 | 34.235622 | 130 | py |
ensembleKW | ensembleKW-main/examples/attacks.py | import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from convex_adversarial import robust_loss
class Flatten(nn.Module):
    """Collapses every dimension after the batch dimension into one."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
def mean(l):
    """Arithmetic mean of a non-empty sequence."""
    total = sum(l)
    return total / len(l)
def _fgs(model, X, y, epsilon):
    """Fast gradient sign attack: one signed-gradient step of size ``epsilon``.

    Returns the clean error rate and the error rate under the FGSM input.
    """
    optimizer = optim.Adam([X], lr=1e-3)
    logits = model(X)
    loss = nn.CrossEntropyLoss()(logits, y)
    err = (logits.data.max(1)[1] != y.data).float().sum() / X.size(0)

    optimizer.zero_grad()
    loss.backward()
    # move each coordinate by epsilon in the direction of increasing loss
    perturbation = epsilon * X.grad.data.sign()
    X_adv = Variable(X.data + perturbation)
    err_fgs = (model(X_adv).data.max(1)[1] != y.data).float().sum() / X.size(0)
    return err, err_fgs
def fgs(loader, model, epsilon, verbose=False, robust=False):
    """Run the FGSM attack (``_fgs``) over every minibatch via the generic driver."""
    return attack(loader, model, epsilon,
                  verbose=verbose, atk=_fgs, robust=robust)
def _pgd(model, X, y, epsilon, niters=100, alpha=0.01):
    """Projected gradient descent attack within an l-inf ball of radius ``epsilon``.

    Takes ``niters`` signed-gradient steps of size ``alpha``, projecting the
    accumulated perturbation back into [-epsilon, epsilon] after each step.
    Returns the clean error rate and the error rate under the PGD input.
    """
    out = model(X)
    err = (out.data.max(1)[1] != y.data).float().sum() / X.size(0)
    # NOTE: the original code also computed an unused cross-entropy on the
    # clean logits and built an Adam optimizer over X_pgd on every iteration.
    # The optimizer was never stepped and X_pgd is recreated each iteration
    # (so its .grad is always None before backward) -- both were dead code
    # and have been removed; behavior is unchanged.
    X_pgd = Variable(X.data, requires_grad=True)
    for i in range(niters):
        loss = nn.CrossEntropyLoss()(model(X_pgd), y)
        loss.backward()
        eta = alpha*X_pgd.grad.data.sign()
        X_pgd = Variable(X_pgd.data + eta, requires_grad=True)

        # adjust to be within [-epsilon, epsilon]
        eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
        X_pgd = Variable(X.data + eta, requires_grad=True)

    err_pgd = (model(X_pgd).data.max(1)[1] != y.data).float().sum() / X.size(0)
    return err, err_pgd
def pgd(loader, model, epsilon, niters=100, alpha=0.01, verbose=False,
        robust=False):
    """Run the PGD attack over every minibatch in ``loader``.

    BUG FIX: ``niters`` and ``alpha`` were previously accepted but silently
    ignored -- the generic driver always invoked ``_pgd`` with its defaults.
    They are now forwarded, which is backward-compatible since the defaults
    here match ``_pgd``'s defaults.
    """
    def atk(model, X, y, epsilon):
        # adapter matching the driver's atk(model, X, y, epsilon) signature
        return _pgd(model, X, y, epsilon, niters=niters, alpha=alpha)
    return attack(loader, model, epsilon, verbose=verbose, atk=atk,
                  robust=robust)
def attack(loader, model, epsilon, verbose=False, atk=None,
           robust=False):
    """Run the attack callable ``atk`` over every minibatch in ``loader``.

    Returns three lists (clean errors, attacked errors, robust errors -- the
    last empty unless ``robust``), one entry per minibatch.  Model parameters
    are frozen so gradients flow only to the inputs.
    """
    total_err, total_fgs, total_robust = [],[],[]
    if verbose:
        print("Requiring no gradients for parameters.")
    # gradients are only needed w.r.t. the inputs, not the weights
    for p in model.parameters():
        p.requires_grad = False
    for i, (X,y) in enumerate(loader):
        X,y = Variable(X.cuda(), requires_grad=True), Variable(y.cuda().long())
        if y.dim() == 2:
            y = y.squeeze(1)
        if robust:
            # NOTE(review): `robust_loss_batch` is not defined or imported in
            # this module (only `robust_loss` is imported at the top), so
            # robust=True raises a NameError as written -- confirm the
            # intended helper before relying on this path.
            robust_ce, robust_err = robust_loss_batch(model, epsilon, X, y, False, False)
        err, err_fgs = atk(model, X, y, epsilon)
        total_err.append(err)
        total_fgs.append(err_fgs)
        if robust:
            total_robust.append(robust_err)
        if verbose:
            if robust:
                print('err: {} | attack: {} | robust: {}'.format(err, err_fgs, robust_err))
            else:
                print('err: {} | attack: {}'.format(err, err_fgs))
    if robust:
        print('[TOTAL] err: {} | attack: {} | robust: {}'.format(mean(total_err), mean(total_fgs), mean(total_robust)))
    else:
        print('[TOTAL] err: {} | attack: {}'.format(mean(total_err), mean(total_fgs)))
    return total_err, total_fgs, total_robust
| 3,229 | 32.298969 | 119 | py |
ensembleKW | ensembleKW-main/examples/mnist.py | # import waitGPU
# import setGPU
# waitGPU.wait(utilization=50, available_memory=10000, interval=60)
# waitGPU.wait(gpu_ids=[1,3], utilization=20, available_memory=10000, interval=60)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
# cudnn.benchmark = True
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import setproctitle
import problems as pblm
from trainer import *
import math
import numpy as np
def select_model(m):
    """Instantiate the MNIST architecture named by ``m`` on the GPU.

    Note: the ``test_loader`` built in some branches is a local that is never
    used or returned; the loader calls are kept only to preserve the original
    side effects of those branches.
    """
    if m == 'large':
        model = pblm.mnist_model_large().cuda()
        _, test_loader = pblm.mnist_loaders(8)
        return model
    if m == 'wide':
        print("Using wide model with model_factor={}".format(args.model_factor))
        _, test_loader = pblm.mnist_loaders(64//args.model_factor)
        return pblm.mnist_model_wide(args.model_factor).cuda()
    if m == 'deep':
        print("Using deep model with model_factor={}".format(args.model_factor))
        _, test_loader = pblm.mnist_loaders(64//(2**args.model_factor))
        return pblm.mnist_model_deep(args.model_factor).cuda()
    if m == '500':
        return pblm.mnist_500().cuda()
    # default architecture
    return pblm.mnist_model().cuda()
if __name__ == "__main__":
    # Parse CLI options; defaults here are tuned for MNIST provable training.
    args = pblm.argparser(opt='adam', verbose=200, starting_epsilon=0.01)
    print("saving file to {}".format(args.prefix))
    setproctitle.setproctitle(args.prefix)
    # Separate plain-text logs for train and test metrics.
    train_log = open(args.prefix + "_train.log", "w")
    test_log = open(args.prefix + "_test.log", "w")

    train_loader, _ = pblm.mnist_loaders(args.batch_size)
    _, test_loader = pblm.mnist_loaders(args.test_batch_size)

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Grab a single batch so args2kwargs can infer the input geometry.
    for X,y in train_loader:
        break
    kwargs = pblm.args2kwargs(args, X=Variable(X.cuda()))
    best_err = 1

    sampler_indices = []
    model = [select_model(args.model)]

    # Cascade loop: each stage after the first trains a fresh model on only
    # the examples the previous stages failed to certify.
    for _ in range(0,args.cascade):
        if _ > 0:
            # reduce dataset to just uncertified examples
            print("Reducing dataset...")
            train_loader = sampler_robust_cascade(train_loader, model, args.epsilon,
                                                  args.test_batch_size,
                                                  norm_type=args.norm_test, bounded_input=True, **kwargs)
            if train_loader is None:
                print('No more examples, terminating')
                break
            sampler_indices.append(train_loader.sampler.indices)

            print("Adding a new model")
            model.append(select_model(args.model))

        # Optimizer acts on the newest cascade member only.
        if args.opt == 'adam':
            opt = optim.Adam(model[-1].parameters(), lr=args.lr)
        elif args.opt == 'sgd':
            opt = optim.SGD(model[-1].parameters(), lr=args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)
        else:
            raise ValueError("Unknown optimizer")
        # Halve the LR every 10 epochs, counted after the epsilon schedule ends.
        lr_scheduler = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.5)
        # Linearly anneal epsilon from starting_epsilon up to the target value.
        eps_schedule = np.linspace(args.starting_epsilon,
                                   args.epsilon,
                                   args.schedule_length)

        for t in range(args.epochs):
            lr_scheduler.step(epoch=max(t-len(eps_schedule), 0))
            if t < len(eps_schedule) and args.starting_epsilon is not None:
                epsilon = float(eps_schedule[t])
            else:
                epsilon = args.epsilon

            # standard training
            if args.method == 'baseline':
                train_baseline(train_loader, model[0], opt, t, train_log,
                                args.verbose)
                err = evaluate_baseline(test_loader, model[0], t, test_log,
                                args.verbose)

            # madry training
            elif args.method=='madry':
                train_madry(train_loader, model[0], args.epsilon,
                            opt, t, train_log, args.verbose)
                err = evaluate_madry(test_loader, model[0], args.epsilon,
                                     t, test_log, args.verbose)

            # robust cascade training
            elif args.cascade > 1:
                train_robust(train_loader, model[-1], opt, epsilon, t,
                             train_log, args.verbose, args.real_time,
                             norm_type=args.norm_train, bounded_input=True,
                             **kwargs)
                err = evaluate_robust_cascade(test_loader, model,
                                              args.epsilon, t, test_log, args.verbose,
                                              norm_type=args.norm_test, bounded_input=True, **kwargs)

            # robust training
            else:
                train_robust(train_loader, model[0], opt, epsilon, t,
                             train_log, args.verbose, args.real_time,
                             norm_type=args.norm_train, bounded_input=True, **kwargs)
                err = evaluate_robust(test_loader, model[0], args.epsilon,
                                      t, test_log, args.verbose, args.real_time,
                                      norm_type=args.norm_test, bounded_input=True, **kwargs)

            # Checkpoint the best-so-far model (by test robust error) and the
            # latest state after every epoch.
            if err < best_err:
                best_err = err
                torch.save({
                    'state_dict' : [m.state_dict() for m in model],
                    'err' : best_err,
                    'epoch' : t,
                    'sampler_indices' : sampler_indices
                    }, args.prefix + "_best.pth")
            torch.save({
                'state_dict': [m.state_dict() for m in model],
                'err' : err,
                'epoch' : t,
                'sampler_indices' : sampler_indices
                }, args.prefix + "_checkpoint.pth") | 5,878 | 38.993197 | 105 | py |
ensembleKW | ensembleKW-main/convex_adversarial/dual_network.py | import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from .utils import Dense, DenseSequential
from .dual_inputs import select_input
from .dual_layers import select_layer
import warnings
class DualNetwork(nn.Module):
    """Builds the dual of a feed-forward ReLU network for robust bounds.

    Each primal layer is mapped to a dual layer (via ``select_layer``); the
    dual layers are applied to one another so that activation bounds are
    available when each ReLU's dual is constructed.
    """

    def __init__(self,
                 net,
                 X,
                 epsilon,
                 proj=None,
                 norm_type='l1',
                 bounded_input=False,
                 input_l=0,
                 input_u=1,
                 data_parallel=True):
        """
        This class creates the dual network.

        net : ReLU network
        X : minibatch of examples
        epsilon : size of l1 norm ball to be robust against adversarial examples
        alpha_grad : flag to propagate gradient through alpha
        scatter_grad : flag to propagate gradient through scatter operation
        l1 : size of l1 projection
        l1_eps : the bound is correct up to a 1/(1-l1_eps) factor
        m : number of probabilistic bounds to take the max over
        """
        super(DualNetwork, self).__init__()
        # need to change that if no batchnorm, can pass just a single example
        if not isinstance(net, (nn.Sequential, DenseSequential)):
            raise ValueError(
                "Network must be a nn.Sequential or DenseSequential module")
        # Forward pass (no gradients) used only to record the activation
        # shapes nf[i] at every layer; with batchnorm the full batch is
        # needed for the statistics, otherwise one example suffices.
        with torch.no_grad():
            if any('BatchNorm2d' in str(l.__class__.__name__) for l in net):
                zs = [X]
            else:
                zs = [X[:1]]
            nf = [zs[0].size()]
            for l in net:
                if isinstance(l, Dense):
                    # Dense layers consume the full history of activations
                    zs.append(l(*zs))
                else:
                    zs.append(l(zs[-1]))
                nf.append(zs[-1].size())

        # Use the bounded boxes
        dual_net = [
            select_input(X,
                         epsilon,
                         proj,
                         norm_type,
                         bounded_input,
                         l=input_l,
                         u=input_u)
        ]

        for i, (in_f, out_f, layer) in enumerate(zip(nf[:-1], nf[1:], net)):
            dual_layer = select_layer(layer, dual_net, X, proj, norm_type,
                                      in_f, out_f, zs[i])
            # skip last layer
            if i < len(net) - 1:
                # let every existing dual layer propagate through the new one
                for l in dual_net:
                    l.apply(dual_layer)
                dual_net.append(dual_layer)
            else:
                self.last_layer = dual_layer

        self.dual_net = dual_net
        return

    def forward(self, c):
        """ For the constructed given dual network, compute the objective for
        some given vector c """
        # back-substitute c through the dual layers (transposed maps)
        nu = [-c]
        nu.append(self.last_layer.T(*nu))
        for l in reversed(self.dual_net[1:]):
            nu.append(l.T(*nu))
        dual_net = self.dual_net + [self.last_layer]

        # total dual objective is the sum of each layer's contribution
        return sum(
            l.objective(*nu[:min(len(dual_net) - i + 1, len(dual_net))])
            for i, l in enumerate(dual_net))
class DualNetBounds(DualNetwork):
    """Deprecated alias for :class:`DualNetwork`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # keep the historical constructor but steer users to DualNetwork
        warnings.warn("DualNetBounds is deprecated. Use the proper "
                      "PyTorch module DualNetwork instead. ")
        super(DualNetBounds, self).__init__(*args, **kwargs)

    def g(self, c):
        """Legacy name for evaluating the dual objective on ``c``."""
        return self(c)
class RobustBounds(nn.Module):
    """nn.Module wrapper that computes certified margin bounds f(X, y)."""

    def __init__(self, net, epsilon, parallel=False, **kwargs):
        super(RobustBounds, self).__init__()
        self.net = net
        self.epsilon = epsilon
        self.kwargs = kwargs
        # choose the single-example, memory-friendly variant when requested
        if parallel:
            self.DualNetworkClass = ParallelDualNetwork
        else:
            self.DualNetworkClass = DualNetwork

    def forward(self, X, y):
        num_classes = self.net[-1].out_features
        dual = self.DualNetworkClass(self.net, X, self.epsilon, **self.kwargs)
        eye = torch.eye(num_classes).type_as(X)
        # c[b, j] = e_{y_b} - e_j : margin objective between the true class
        # of example b and every other class j
        c = Variable(eye[y].unsqueeze(1) - eye.unsqueeze(0))
        if X.is_cuda:
            c = c.cuda()
        return -dual(c)
def robust_loss(net,
                epsilon,
                X,
                y,
                size_average=True,
                device_ids=None,
                parallel=False,
                return_certificate=False,
                **kwargs):
    """Robust cross-entropy loss and robust error rate from the dual bounds.

    When ``size_average`` is False the loss and error are returned
    per-example; ``return_certificate`` additionally returns the per-example
    certification mask.
    """
    bounds = RobustBounds(net, epsilon, **kwargs)
    if parallel:
        f = nn.DataParallel(bounds, device_ids=device_ids)(X, y)
    else:
        f = bounds(X, y)
    worst_pred = f.max(1)[1]
    err = worst_pred != y
    # certified iff even the worst-case margin still picks the true class
    certificate = worst_pred == y
    if size_average:
        err = err.sum().item() / X.size(0)
    reduction = 'mean' if size_average else 'none'
    ce_loss = nn.CrossEntropyLoss(reduction=reduction)(f, y)
    if return_certificate:
        return ce_loss, err, certificate
    return ce_loss, err
class InputSequential(nn.Sequential):
    """Sequential container that can start applying modules at index ``i``."""

    def __init__(self, *args, **kwargs):
        self.i = 0  # index of the first module to apply
        super(InputSequential, self).__init__(*args, **kwargs)

    def set_start(self, i):
        """Skip all modules before index ``i`` on subsequent forward calls."""
        self.i = i

    def forward(self, input):
        """Apply the sub-model consisting of modules ``i`` onward to ``input``."""
        outputs = [input]
        for idx, module in enumerate(self._modules.values()):
            if idx < self.i:
                continue
            if 'Dense' in type(module).__name__:
                # Dense layers consume the full history of activations
                outputs.append(module(*outputs))
            else:
                outputs.append(module(outputs[-1]))
        return outputs[-1]
class ParallelDualNetwork(DualNetwork):
    """DualNetwork variant for a single example, computing ReLU bounds with
    nn.DataParallel so that one large example can be split across GPUs."""

    def __init__(self,
                 net,
                 X,
                 epsilon,
                 proj=None,
                 norm_type='l1',
                 bounded_input=False,
                 input_l=0,
                 input_u=1):
        # Deliberately skip DualNetwork.__init__ and build the duals here.
        super(DualNetwork, self).__init__()

        if any('BatchNorm2d' in str(l.__class__.__name__) for l in net):
            raise NotImplementedError
        if X.size(0) != 1:
            raise ValueError(
                'Only use this function for a single example. This is '
                'intended for the use case when a single example does not fit in '
                'memory.')
        # forward pass to record activation shapes nf[i] at every layer
        zs = [X[:1]]
        nf = [zs[0].size()]
        for l in net:
            if 'Dense' in type(l).__name__:
                zs.append(l(*zs))
            else:
                zs.append(l(zs[-1]))
            nf.append(zs[-1].size())

        dual_net = [
            select_input(X,
                         epsilon,
                         proj,
                         norm_type,
                         bounded_input,
                         l=input_l,
                         u=input_u)
        ]

        for i, (in_f, out_f, layer) in enumerate(zip(nf[:-1], nf[1:], net)):
            if isinstance(layer, nn.ReLU):
                # compute bounds
                # Wrap the dual layers built so far so each one's contribution
                # can be evaluated through DataParallel, starting at layer j.
                D = (InputSequential(*dual_net[1:]))
                Dp = nn.DataParallel(D)
                zl, zu = 0, 0
                for j, dual_layer in enumerate(dual_net):
                    D.set_start(j)
                    out = dual_layer.bounds(network=Dp)
                    zl += out[0]
                    zu += out[1]

                dual_layer = select_layer(layer,
                                          dual_net,
                                          X,
                                          proj,
                                          norm_type,
                                          in_f,
                                          out_f,
                                          zs[i],
                                          zl=zl,
                                          zu=zu)
            else:
                dual_layer = select_layer(layer, dual_net, X, proj, norm_type,
                                          in_f, out_f, zs[i])
            dual_net.append(dual_layer)
        # last dual layer is kept separate, matching DualNetwork's layout
        self.dual_net = dual_net[:-1]
        self.last_layer = dual_net[-1]
# Data parallel versions of the loss calculation
def robust_loss_parallel(net,
                         epsilon,
                         X,
                         y,
                         proj=None,
                         norm_type='l1',
                         bounded_input=False,
                         size_average=True,
                         device_ids=None):
    """Robust loss for a single example, with ReLU bounds computed through
    nn.DataParallel (for examples too large for one device)."""
    if any('BatchNorm2d' in str(l.__class__.__name__) for l in net):
        raise NotImplementedError
    if bounded_input:
        raise NotImplementedError(
            'parallel loss for bounded input spaces not implemented')
    if X.size(0) != 1:
        raise ValueError(
            'Only use this function for a single example. This is '
            'intended for the use case when a single example does not fit in '
            'memory.')
    # forward pass to record activation shapes nf[i] at every layer
    zs = [X[:1]]
    nf = [zs[0].size()]
    for l in net:
        if isinstance(l, Dense):
            zs.append(l(*zs))
        else:
            zs.append(l(zs[-1]))
        nf.append(zs[-1].size())

    dual_net = [select_input(X, epsilon, proj, norm_type, bounded_input)]

    for i, (in_f, out_f, layer) in enumerate(zip(nf[:-1], nf[1:], net)):
        if isinstance(layer, nn.ReLU):
            # compute bounds
            # evaluate each earlier dual layer's contribution via DataParallel
            D = (InputSequential(*dual_net[1:]))
            Dp = nn.DataParallel(D, device_ids=device_ids)
            zl, zu = 0, 0
            for j, dual_layer in enumerate(dual_net):
                D.set_start(j)
                out = dual_layer.bounds(network=Dp)
                zl += out[0]
                zu += out[1]

            dual_layer = select_layer(layer,
                                      dual_net,
                                      X,
                                      proj,
                                      norm_type,
                                      in_f,
                                      out_f,
                                      zs[i],
                                      zl=zl,
                                      zu=zu)
        else:
            dual_layer = select_layer(layer, dual_net, X, proj, norm_type,
                                      in_f, out_f, zs[i])

        dual_net.append(dual_layer)

    num_classes = net[-1].out_features
    # c[b, j] = e_{y_b} - e_j : margin between the true class and class j
    c = Variable(
        torch.eye(num_classes).type_as(X)[y].unsqueeze(1) -
        torch.eye(num_classes).type_as(X).unsqueeze(0))
    if X.is_cuda:
        c = c.cuda()

    # same as f = -dual.g(c)
    nu = [-c]
    for l in reversed(dual_net[1:]):
        nu.append(l.T(*nu))
    f = -sum(
        l.objective(*nu[:min(len(dual_net) - i + 1, len(dual_net))])
        for i, l in enumerate(dual_net))

    err = (f.max(1)[1] != y)
    if size_average:
        err = err.sum().item() / X.size(0)
    ce_loss = nn.CrossEntropyLoss(reduce=size_average)(f, y)
    return ce_loss, err | 10,789 | 32.2 | 82 | py |
ensembleKW | ensembleKW-main/convex_adversarial/dual_layers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .dual import DualLayer
from .utils import full_bias, Dense
def select_layer(layer, dual_net, X, proj, norm_type, in_f, out_f, zsi,
                 zl=None, zu=None):
    """Map a primal layer onto its dual counterpart.

    For ReLU layers, pre-activation bounds (zl, zu) are either supplied or
    accumulated from every dual layer built so far; a projected (random
    estimation) variant is used when the crossing set exceeds ``proj``.
    """
    if isinstance(layer, nn.Linear):
        return DualLinear(layer, out_f)
    if isinstance(layer, nn.Conv2d):
        return DualConv2d(layer, out_f)
    if isinstance(layer, nn.ReLU):
        if zl is None and zu is None:
            # accumulate bound contributions from every existing dual layer
            lowers, uppers = zip(*[l.bounds() for l in dual_net])
            zl, zu = sum(lowers), sum(uppers)
        if zl is None or zu is None:
            raise ValueError("Must either provide both l,u bounds or neither.")
        I = ((zu > 0).detach() * (zl < 0).detach())
        use_proj = (proj is not None
                    and (norm_type == 'l1_median' or norm_type == 'l2_normal')
                    and I.sum().item() > proj)
        return DualReLUProj(zl, zu, proj) if use_proj else DualReLU(zl, zu)
    if 'Flatten' in (str(layer.__class__.__name__)):
        return DualReshape(in_f, out_f)
    if isinstance(layer, Dense):
        return DualDense(layer, dual_net, out_f)
    if isinstance(layer, nn.BatchNorm2d):
        return DualBatchNorm2d(layer, zsi, out_f)
    print(layer)
    raise ValueError("No module for layer {}".format(str(layer.__class__.__name__)))
def batch(A, n):
    """Split A's leading dimension into ``n`` minibatches: (n*m, ...) -> (n, m, ...)."""
    trailing = A.size()[1:]
    return A.view(n, -1, *trailing)
def unbatch(A):
    """Merge A's two leading dimensions back into one: (n, m, ...) -> (n*m, ...)."""
    trailing = A.size()[2:]
    return A.view(-1, *trailing)
class DualLinear(DualLayer):
    """Dual of nn.Linear: forward/T apply the (transposed) weight without
    bias; the bias is tracked separately and propagated through later dual
    layers to contribute to bounds and the objective."""

    def __init__(self, layer, out_features):
        super(DualLinear, self).__init__()
        if not isinstance(layer, nn.Linear):
            raise ValueError("Expected nn.Linear input.")
        self.layer = layer
        if layer.bias is None:
            self.bias = None
        else:
            # bias expanded to the full output shape (minus batch dim)
            self.bias = [full_bias(layer, out_features[1:])]

    def apply(self, dual_layer):
        # propagate the bias term through the next dual layer
        if self.bias is not None:
            self.bias.append(dual_layer(*self.bias))

    def bounds(self, network=None):
        if self.bias is None:
            return 0,0
        else:
            if network is None:
                b = self.bias[-1]
            else:
                b = network(self.bias[0])
            if b is None:
                return 0,0
            # a bias shifts lower and upper bounds by the same amount
            return b,b

    def objective(self, *nus):
        if self.bias is None:
            return 0
        else:
            # pair the bias with the dual variable of the following layer
            nu = nus[-2]
            nu = nu.view(nu.size(0), nu.size(1), -1)
            return -nu.matmul(self.bias[0].view(-1))

    def forward(self, *xs):
        x = xs[-1]
        if x is None:
            return None
        return F.linear(x, self.layer.weight)

    def T(self, *xs):
        # transposed map, used when back-substituting dual variables
        x = xs[-1]
        if x is None:
            return None
        return F.linear(x, self.layer.weight.t())
# Convolutional helper functions to minibatch large inputs for CuDNN
def conv2d(x, *args, **kwargs):
    """Apply F.conv2d in chunks of at most 10000 inputs; result is identical
    to calling F.conv2d on the whole batch at once."""
    chunk = 10000
    pieces = [F.conv2d(x[start:start + chunk], *args, **kwargs)
              for start in range(0, x.size(0), chunk)]
    return torch.cat(pieces, 0)
def conv_transpose2d(x, *args, **kwargs):
    """Apply F.conv_transpose2d in chunks of at most 10000 inputs; result is
    identical to calling F.conv_transpose2d on the whole batch at once."""
    chunk = 10000
    pieces = [F.conv_transpose2d(x[start:start + chunk], *args, **kwargs)
              for start in range(0, x.size(0), chunk)]
    return torch.cat(pieces, 0)
class DualConv2d(DualLinear):
    """Dual of nn.Conv2d; reuses DualLinear's bias bookkeeping and applies
    the convolution (or its transpose) without bias in forward/T."""

    def __init__(self, layer, out_features):
        # Skip DualLinear.__init__ (it asserts nn.Linear); initialize DualLayer.
        super(DualLinear, self).__init__()
        if not isinstance(layer, nn.Conv2d):
            raise ValueError("Expected nn.Conv2d input.")
        self.layer = layer
        if layer.bias is None:
            self.bias = None
        else:
            self.bias = [full_bias(layer, out_features[1:]).contiguous()]

    def forward(self, *xs):
        x = xs[-1]
        if x is None:
            return None
        # 5-d inputs carry an extra dual dimension: flatten it into the batch
        # before convolving and restore it afterwards
        if xs[-1].dim() == 5:
            n = x.size(0)
            x = unbatch(x)
        out = conv2d(x, self.layer.weight,
                     stride=self.layer.stride,
                     padding=self.layer.padding)
        if xs[-1].dim() == 5:
            out = batch(out, n)
        return out

    def T(self, *xs):
        # transposed map (conv_transpose), used during back-substitution
        x = xs[-1]
        if x is None:
            return None
        if xs[-1].dim() == 5:
            n = x.size(0)
            x = unbatch(x)
        out = conv_transpose2d(x, self.layer.weight,
                               stride=self.layer.stride,
                               padding=self.layer.padding)
        if xs[-1].dim() == 5:
            out = batch(out, n)
        return out
class DualReshape(DualLayer):
    """Dual of a Flatten/reshape layer: pure shape bookkeeping, no bias terms."""

    def __init__(self, in_f, out_f):
        super(DualReshape, self).__init__()
        # keep only the feature shapes; the batch dimension is dropped
        self.in_f = in_f[1:]
        self.out_f = out_f[1:]

    def forward(self, *xs):
        x = xs[-1]
        if x is None:
            return None
        leading = x.size()[:-len(self.in_f)]
        return x.view(leading + self.out_f)

    def T(self, *xs):
        x = xs[-1]
        if x is None:
            return None
        leading = x.size()[:-len(self.out_f)]
        return x.view(leading + self.in_f)

    def apply(self, dual_layer):
        # reshaping contributes no terms that need propagating
        pass

    def bounds(self, network=None):
        return 0, 0

    def objective(self, *nus):
        return 0
class DualReLU(DualLayer):
    """Dual of nn.ReLU built from pre-activation bounds (zl, zu).

    d is the per-unit linear relaxation slope: 1 where zl >= 0, 0 where
    zu <= 0, and zu/(zu-zl) on the crossing set I (zl < 0 < zu).  One dual
    vector per crossing unit is materialized and propagated.
    """

    def __init__(self, zl, zu):
        super(DualReLU, self).__init__()
        d = (zl >= 0).detach().type_as(zl)
        I = ((zu > 0).detach() * (zl < 0).detach())
        if I.sum().item() > 0:
            d[I] += zu[I]/(zu[I] - zl[I])

        n = d[0].numel()
        if I.sum().item() > 0:
            self.I_empty = False
            self.I_ind = I.view(-1,n).nonzero()
            # one row per crossing unit, scattered with its slope d
            self.nus = [zl.new(I.sum().item(), n).zero_()]
            self.nus[-1].scatter_(1, self.I_ind[:,1,None], d[I][:,None])
            self.nus[-1] = self.nus[-1].view(-1, *(d.size()[1:]))
            # maps rows of nus back to their originating batch element
            self.I_collapse = zl.new(self.I_ind.size(0),zl.size(0)).zero_()
            self.I_collapse.scatter_(1, self.I_ind[:,0][:,None], 1)
        else:
            self.I_empty = True

        self.d = d
        self.I = I
        self.zl = zl
        self.zu = zu

    def apply(self, dual_layer):
        # propagate the per-crossing-unit dual vectors through the next layer
        if self.I_empty:
            return
        if isinstance(dual_layer, DualReLU):
            self.nus.append(dual_layer(*self.nus, I_ind=self.I_ind))
        else:
            self.nus.append(dual_layer(*self.nus))

    def bounds(self, network=None):
        if self.I_empty:
            return 0,0
        if network is None:
            nu = self.nus[-1]
        else:
            nu = network(self.nus[0])
        if nu is None:
            return 0,0
        size = nu.size()
        nu = nu.view(nu.size(0), -1)
        zlI = self.zl[self.I]
        # collapse per-unit contributions back to per-example bound shifts
        zl = (zlI * (-nu.t()).clamp(min=0)).mm(self.I_collapse).t().contiguous()
        zu = -(zlI * nu.t().clamp(min=0)).mm(self.I_collapse).t().contiguous()

        zl = zl.view(-1, *(size[1:]))
        zu = zu.view(-1, *(size[1:]))
        return zl,zu

    def objective(self, *nus):
        nu_prev = nus[-1]
        if self.I_empty:
            return 0
        n = nu_prev.size(0)
        nu = nu_prev.view(n, nu_prev.size(1), -1)
        zl = self.zl.view(n, -1)
        I = self.I.view(n, -1)
        # sum_i zl_i * [nu_i]_+ over the crossing set
        return (nu.clamp(min=0)*zl.unsqueeze(1)).matmul(I.type_as(nu).unsqueeze(2)).squeeze(2)

    def forward(self, *xs, I_ind=None):
        x = xs[-1]
        if x is None:
            return None

        if self.d.is_cuda:
            d = self.d.cuda(device=x.get_device())
        else:
            d = self.d
        if x.dim() > d.dim():
            d = d.unsqueeze(1)

        if I_ind is not None:
            # apply only the slopes for the caller's crossing rows
            I_ind = I_ind.to(dtype=torch.long, device=x.device)
            return d[I_ind[:,0]]*x
        else:
            return d*x

    def T(self, *xs):
        # the relaxation is a diagonal scaling, so it is its own transpose
        return self(*xs)
class DualReLUProj(DualReLU):
    """DualReLU variant that estimates the l1 terms with ``k`` random Cauchy
    projections instead of materializing one dual vector per crossing unit."""

    def __init__(self, zl, zu, k):
        # BUG FIX: `warnings` is used below but never imported at this
        # module's top level; import it locally so the empty-crossing-set
        # warning cannot raise a NameError.
        import warnings
        DualLayer.__init__(self)
        # same linear-relaxation slope d and crossing set I as DualReLU
        d = (zl >= 0).detach().type_as(zl)
        I = ((zu > 0).detach() * (zl < 0).detach())
        if I.sum().item() > 0:
            d[I] += zu[I]/(zu[I] - zl[I])

        n = I.size(0)

        self.d = d
        self.I = I
        self.zl = zl
        self.zu = zu

        if I.sum().item() == 0:
            warnings.warn('ReLU projection has no origin crossing activations')
            self.I_empty = True
            return
        else:
            self.I_empty = False

        nu = zl.new(n, k, *(d.size()[1:])).zero_()
        nu_one = zl.new(n, *(d.size()[1:])).zero_()
        if I.sum() > 0:
            # seed only the crossing coordinates with Cauchy noise
            nu[I.unsqueeze(1).expand_as(nu)] = nu.new(I.sum().item()*k).cauchy_()
            nu_one[I] = 1
        nu = zl.unsqueeze(1)*nu
        nu_one = zl*nu_one

        self.nus = [d.unsqueeze(1)*nu]
        self.nu_ones = [d*nu_one]

    def apply(self, dual_layer):
        # propagate both the random projections and the indicator vector
        if self.I_empty:
            return
        self.nus.append(dual_layer(*self.nus))
        self.nu_ones.append(dual_layer(*self.nu_ones))

    def bounds(self, network=None):
        if self.I_empty:
            return 0,0
        if network is None:
            nu = self.nus[-1]
            no = self.nu_ones[-1]
        else:
            nu = network(self.nus[0])
            no = network(self.nu_ones[0])
        # median of |Cauchy projections| estimates the l1 norm
        n = torch.median(nu.abs(), 1)[0]

        # From notes:
        # \sum_i l_i[nu_i]_+ \approx (-n + no)/2
        # which is the negative of the term for the upper bound
        # for the lower bound, use -nu and negate the output, so
        # (n - no)/2 since the no term flips twice and the l1 term
        # flips only once.
        zl = (-n - no)/2
        zu = (n - no)/2

        return zl,zu
class DualDense(DualLayer):
    """Dual of a Dense (skip-connection) layer: one sub-dual per constituent
    weight W, plus transposed hooks (dual_ts) registered on earlier layers."""

    def __init__(self, dense, net, out_features):
        super(DualDense, self).__init__()
        self.duals = nn.ModuleList([])
        for i,W in enumerate(dense.Ws):
            if isinstance(W, nn.Conv2d):
                dual_layer = DualConv2d(W, out_features)
            elif isinstance(W, nn.Linear):
                dual_layer = DualLinear(W, out_features)
            elif isinstance(W, nn.Sequential) and len(W) == 0:
                # empty Sequential denotes an identity skip connection
                dual_layer = Identity()
            elif W is None:
                dual_layer = None
            else:
                print(W)
                raise ValueError("Don't know how to parse dense structure")
            self.duals.append(dual_layer)

            if i < len(dense.Ws)-1 and W is not None:
                idx = i-len(dense.Ws)+1
                # dual_ts needs to be len(dense.Ws)-i long
                net[idx].dual_ts = nn.ModuleList([dual_layer] + [None]*(len(dense.Ws)-i-len(net[idx].dual_ts)-1) + list(net[idx].dual_ts))

        self.dual_ts = nn.ModuleList([self.duals[-1]])

    def forward(self, *xs):
        duals = list(self.duals)[-min(len(xs),len(self.duals)):]
        if all(W is None for W in duals):
            return None
        # recursively apply the dense sub-layers
        out = [W(*xs[:i+1])
               for i,W in zip(range(-len(duals) + len(xs), len(xs)),
                              duals) if W is not None]

        # remove the non applicable outputs
        out = [o for o in out if o is not None]

        # if no applicable outputs, return None
        if len(out) == 0:
            return None

        # otherwise, return the sum of the outputs
        return sum(o for o in out if o is not None)

    def T(self, *xs):
        dual_ts = list(self.dual_ts)[-min(len(xs),len(self.dual_ts)):]
        if all(W is None for W in dual_ts):
            return None

        # recursively apply the dense sub-layers
        out = [W.T(*xs[:i+1])
               for i,W in zip(range(-len(dual_ts) + len(xs), len(xs)),
                              dual_ts) if W is not None]
        # remove the non applicable outputs
        out = [o for o in out if o is not None]

        # if no applicable outputs, return None
        if len(out) == 0:
            return None

        # otherwise, return the sum of the outputs
        return sum(o for o in out if o is not None)

    def apply(self, dual_layer):
        # delegate propagation to every non-empty sub-dual
        for W in self.duals:
            if W is not None:
                W.apply(dual_layer)

    def bounds(self, network=None):
        # aggregate bound contributions across the sub-duals
        fvals = list(W.bounds(network=network) for W in self.duals
                     if W is not None)
        l,u = zip(*fvals)
        return sum(l), sum(u)

    def objective(self, *nus):
        fvals = list(W.objective(*nus) for W in self.duals if W is not None)
        return sum(fvals)
class DualBatchNorm2d(DualLayer):
    """Dual of nn.BatchNorm2d, treated as the fixed affine map x -> D*x + ds
    (statistics come from the given minibatch in training mode, otherwise
    from the layer's running statistics)."""

    def __init__(self, layer, minibatch, out_features):
        # BUG FIX: super().__init__() was missing, so the nn.Module machinery
        # (_parameters/_modules/...) was never initialized for this dual layer.
        super(DualBatchNorm2d, self).__init__()
        if layer.training:
            # per-channel statistics of the supplied minibatch
            minibatch = minibatch.data.transpose(0,1).contiguous()
            minibatch = minibatch.view(minibatch.size(0), -1)
            mu = minibatch.mean(1)
            var = minibatch.var(1)
        else:
            mu = layer.running_mean
            var = layer.running_var

        eps = layer.eps

        weight = layer.weight
        bias = layer.bias
        denom = torch.sqrt(var + eps)

        # per-channel scale and offset of the equivalent affine transform
        self.D = (weight/denom).unsqueeze(1).unsqueeze(2)
        self.ds = [((bias - weight*mu/denom).unsqueeze(1).unsqueeze
                    (2)).expand(out_features[1:]).contiguous()]

    def forward(self, *xs):
        x = xs[-1]
        if x is None:
            return None
        return self.D*x

    def T(self, *xs):
        # BUG FIX: `x` was referenced before assignment here (NameError on
        # any call); take it from xs like every other dual layer.
        x = xs[-1]
        if x is None:
            return None
        # diagonal scaling is its own transpose
        return self(*xs)

    def apply(self, dual_layer):
        # propagate the offset term through the next dual layer
        self.ds.append(dual_layer(*self.ds))

    def bounds(self, network=None):
        if network is None:
            d = self.ds[-1]
        else:
            d = network(self.ds[0])
        # a constant offset shifts both bounds equally
        return d, d

    def objective(self, *nus):
        nu = nus[-2]
        d = self.ds[0].view(-1)
        nu = nu.view(nu.size(0), nu.size(1), -1)
        return -nu.matmul(d)
class Identity(DualLayer):
    """Dual layer that passes activations through untouched and contributes
    nothing to bounds or the objective."""

    def forward(self, *xs):
        return xs[-1]

    def T(self, *xs):
        return xs[-1]

    def apply(self, dual_layer):
        # nothing to propagate
        pass

    def bounds(self, network=None):
        return 0, 0

    def objective(self, *nus):
        return 0
ensembleKW | ensembleKW-main/convex_adversarial/dual_inputs.py | import torch
import torch.nn as nn
from .dual import DualObject
def select_input(X, epsilon, proj, norm, bounded_input, l=0, u=1):
    """Choose the dual representation of the input set for the given norm.

    Random-projection variants are used when a projection dimension ``proj``
    is given and the example is larger than it; bounded variants additionally
    clip the ball to [l, u].
    """
    big_enough = proj is not None and X[0].numel() > proj
    if big_enough and norm == 'l1_median':
        if bounded_input:
            return InfBallProjBounded(X, epsilon, proj, l=l, u=u)
        return InfBallProj(X, epsilon, proj)
    if norm == 'l1':
        if bounded_input:
            return InfBallBounded(X, epsilon, l=l, u=u)
        return InfBall(X, epsilon)
    if big_enough and norm == 'l2_normal':
        return L2BallProj(X, epsilon, proj)
    if norm == 'l2':
        return L2Ball(X, epsilon)
    raise ValueError("Unknown estimation type: {}".format(norm))
class InfBall(DualObject):
    """l-inf ball of radius epsilon around X as the dual input set.

    Tracks the center (nu_x) and the identity basis (nu_1) through the dual
    layers; the l1 norm of the propagated basis rows gives the bound width.
    """

    def __init__(self, X, epsilon):
        super(InfBall, self).__init__()
        self.epsilon = epsilon

        n = X[0].numel()
        self.nu_x = [X]
        # identity over the flattened input, reshaped to the input geometry
        self.nu_1 = [X.new(n,n)]
        torch.eye(n, out=self.nu_1[0])
        self.nu_1[0] = self.nu_1[0].view(-1,*X.size()[1:]).unsqueeze(0)

    def apply(self, dual_layer):
        # propagate center and basis through the next dual layer
        self.nu_x.append(dual_layer(*self.nu_x))
        self.nu_1.append(dual_layer(*self.nu_1))

    def bounds(self, network=None):
        if network is None:
            nu_1 = self.nu_1[-1]
            nu_x = self.nu_x[-1]
        else:
            nu_1 = network(self.nu_1[0])
            nu_x = network(self.nu_x[0])
        epsilon = self.epsilon
        # per-example epsilon tensors are broadcast to the basis shape
        if isinstance(epsilon, torch.Tensor):
            epsilon = epsilon.view(epsilon.size(0),-1)
            while epsilon.dim() < nu_1.dim():
                epsilon = epsilon.unsqueeze(-1)
        l1 = (epsilon*nu_1.abs()).sum(1)
        return (nu_x - l1,
                nu_x + l1)

    def objective(self, *nus):
        epsilon = self.epsilon
        nu = nus[-1]
        nu = nu.view(nu.size(0), nu.size(1), -1)
        nu_x = nu.matmul(self.nu_x[0].view(self.nu_x[0].size(0),-1).unsqueeze(2)).squeeze(2)
        if isinstance(self.epsilon, torch.Tensor):
            epsilon = epsilon.view(epsilon.size(0),1,-1)
        l1 = (epsilon*nu.abs()).sum(2)
        return -nu_x - l1
class InfBallBounded(DualObject):
    """l-inf ball around X, additionally clipped to the box [l, u]
    (e.g. valid pixel ranges)."""

    def __init__(self, X, epsilon, l=0, u=1):
        super(InfBallBounded, self).__init__()
        self.epsilon = epsilon

        # elementwise box endpoints: clip X +/- epsilon into [l, u]
        if torch.is_tensor(l):
            l_ = torch.max(X-epsilon,l)
        else:
            l_ = (X-epsilon).clamp(min=l)
        if torch.is_tensor(u):
            u_ = torch.min(X+epsilon,u)
        else:
            u_ = (X+epsilon).clamp(max=u)
        self.l = l_.view(X.size(0), 1, -1)
        self.u = u_.view(X.size(0), 1, -1)

        n = X[0].numel()
        self.nu_x = [X]
        # identity basis over the flattened input
        self.nu_1 = [X.new(n,n)]
        torch.eye(n, out=self.nu_1[0])
        self.nu_1[0] = self.nu_1[0].view(-1,*X.size()[1:]).unsqueeze(0)

    def apply(self, dual_layer):
        self.nu_x.append(dual_layer(*self.nu_x))
        self.nu_1.append(dual_layer(*self.nu_1))

    def bounds(self, network=None):
        if network is None:
            nu = self.nu_1[-1]
        else:
            nu = network(self.nu_1[0])
        # split positive/negative parts and pair them with u/l respectively
        nu_pos = nu.clamp(min=0).view(nu.size(0), nu.size(1), -1)
        nu_neg = nu.clamp(max=0).view(nu.size(0), nu.size(1), -1)
        zu = (self.u.matmul(nu_pos) + self.l.matmul(nu_neg)).squeeze(1)
        zl = (self.u.matmul(nu_neg) + self.l.matmul(nu_pos)).squeeze(1)
        return (zl.view(zl.size(0), *nu.size()[2:]),
                zu.view(zu.size(0), *nu.size()[2:]))

    def objective(self, *nus):
        nu = nus[-1]
        nu_pos = nu.clamp(min=0).view(nu.size(0), nu.size(1), -1)
        nu_neg = nu.clamp(max=0).view(nu.size(0), nu.size(1), -1)
        u, l = self.u.unsqueeze(3).squeeze(1), self.l.unsqueeze(3).squeeze(1)
        return (-nu_neg.matmul(l) - nu_pos.matmul(u)).squeeze(2)
class InfBallProj(InfBall):
    """InfBall with the l1 term estimated via ``k`` random Cauchy projections
    (median of |projections|) instead of the exact identity basis."""

    def __init__(self, X, epsilon, k):
        DualObject.__init__(self)
        self.epsilon = epsilon

        self.nu_x = [X]
        # k Cauchy-distributed random directions; the median of their
        # propagated magnitudes estimates the row-wise l1 norm.
        # (The original also computed an unused local `n`; removed.)
        self.nu = [X.new(1,k,*X.size()[1:]).cauchy_()]

    def apply(self, dual_layer):
        self.nu_x.append(dual_layer(*self.nu_x))
        self.nu.append(dual_layer(*self.nu))

    def bounds(self, network=None):
        if network is None:
            nu = self.nu[-1]
            nu_x = self.nu_x[-1]
        else:
            nu = network(self.nu[0])
            nu_x = network(self.nu_x[0])

        # BUG FIX: the l1 estimate previously read self.nu[-1] unconditionally,
        # silently ignoring the freshly propagated `nu` whenever a `network`
        # was supplied (the sibling L2BallProj.bounds uses `nu` here).
        l1 = torch.median(nu.abs(), 1)[0]
        return (nu_x - self.epsilon*l1,
                nu_x + self.epsilon*l1)
class InfBallProjBounded(InfBallProj):
    """Random-projection estimate of the box-clipped l-inf ball: propagates
    Cauchy projections seeded at both box endpoints plus the endpoints
    themselves."""

    def __init__(self, X, epsilon, k, l=0, u=1):
        self.epsilon = epsilon

        # box endpoints: X +/- epsilon clipped into [l, u]
        self.nu_one_l = [(X-epsilon).clamp(min=l)]
        self.nu_one_u = [(X+epsilon).clamp(max=u)]
        self.nu_x = [X]

        self.l = self.nu_one_l[-1].view(X.size(0), 1, -1)
        self.u = self.nu_one_u[-1].view(X.size(0), 1, -1)

        n = X[0].numel()
        # the same k Cauchy directions scale both endpoint seeds
        R = X.new(1,k,*X.size()[1:]).cauchy_()
        self.nu_l = [R * self.nu_one_l[-1].unsqueeze(1)]

        self.nu_u = [R * self.nu_one_u[-1].unsqueeze(1)]

    def apply(self, dual_layer):
        self.nu_l.append(dual_layer(*self.nu_l))
        self.nu_one_l.append(dual_layer(*self.nu_one_l))
        self.nu_u.append(dual_layer(*self.nu_u))
        self.nu_one_u.append(dual_layer(*self.nu_one_u))

    def bounds(self, network=None):
        if network is None:
            nu_u = self.nu_u[-1]
            nu_one_u = self.nu_one_u[-1]
            nu_l = self.nu_l[-1]
            nu_one_l = self.nu_one_l[-1]
        else:
            nu_u = network(self.nu_u[0])
            nu_one_u = network(self.nu_one_u[0])
            nu_l = network(self.nu_l[0])
            nu_one_l = network(self.nu_one_l[0])

        # median of |Cauchy projections| estimates the l1 norm; combined with
        # the propagated endpoint it splits into positive/negative parts
        nu_l1_u = torch.median(nu_u.abs(),1)[0]
        nu_pos_u = (nu_l1_u + nu_one_u)/2
        nu_neg_u = (-nu_l1_u + nu_one_u)/2

        nu_l1_l = torch.median(nu_l.abs(),1)[0]
        nu_pos_l = (nu_l1_l + nu_one_l)/2
        nu_neg_l = (-nu_l1_l + nu_one_l)/2

        zu = nu_pos_u + nu_neg_l
        zl = nu_neg_u + nu_pos_l
        return zl,zu
# L2 balls
class L2Ball(DualObject):
    """Dual input object for an exact L2-ball perturbation around X.

    Propagates the input together with a full identity basis so the L2
    norm of the backward map can be computed exactly.
    """

    def __init__(self, X, epsilon):
        super(L2Ball, self).__init__()
        self.epsilon = epsilon
        num_inputs = X[0].numel()
        self.nu_x = [X]
        # Identity basis in the input's dtype/device, reshaped to the
        # input's spatial layout with a leading batch axis of 1.
        basis = X.new(num_inputs, num_inputs)
        torch.eye(num_inputs, out=basis)
        self.nu_1 = [basis.view(-1, *X.size()[1:]).unsqueeze(0)]

    def apply(self, dual_layer):
        self.nu_x.append(dual_layer(*self.nu_x))
        self.nu_1.append(dual_layer(*self.nu_1))

    def bounds(self, network=None):
        if network is not None:
            nu_1 = network(self.nu_1[0])
            nu_x = network(self.nu_x[0])
        else:
            nu_1 = self.nu_1[-1]
            nu_x = self.nu_x[-1]
        l2 = nu_1.norm(2, 1)
        eps = self.epsilon
        if isinstance(eps, torch.Tensor):
            # Broadcast a per-example epsilon across the output dims.
            while eps.dim() < nu_x.dim():
                eps = eps.unsqueeze(1)
        return (nu_x - eps * l2,
                nu_x + eps * l2)

    def objective(self, *nus):
        nu = nus[-1]
        nu = nu.view(nu.size(0), nu.size(1), -1)
        # <nu, x> for each output, on the flattened input.
        x_flat = self.nu_x[0].view(self.nu_x[0].size(0), -1).unsqueeze(2)
        nu_x = nu.matmul(x_flat).squeeze(2)
        eps = self.epsilon
        if isinstance(self.epsilon, torch.Tensor):
            while eps.dim() < nu.dim() - 1:
                eps = eps.unsqueeze(1)
        l2 = nu.norm(2, 2)
        return -nu_x - eps * l2
class L2BallProj(L2Ball):
    """L2 ball whose bounds use k random Gaussian projections instead of
    the exact identity basis (O(k) memory instead of O(n)).

    The L2 norm of the backward map is estimated as ||nu||_2 / sqrt(k):
    the squared norm of k i.i.d. standard-normal projections concentrates
    around k times the true squared norm.
    """

    def __init__(self, X, epsilon, k):
        DualObject.__init__(self)
        self.epsilon = epsilon
        # (Removed a dead local `n = X[0].numel()` that was never used.)
        self.nu_x = [X]
        self.nu = [X.new(1, k, *X.size()[1:]).normal_()]

    def apply(self, dual_layer):
        self.nu_x.append(dual_layer(*self.nu_x))
        self.nu.append(dual_layer(*self.nu))

    def bounds(self, network=None):
        if network is None:
            nu = self.nu[-1]
            nu_x = self.nu_x[-1]
        else:
            nu = network(self.nu[0])
            nu_x = network(self.nu_x[0])
        k = nu.size(1)
        l2 = nu.norm(2, 1) / (k ** 0.5)
        return (nu_x - self.epsilon * l2,
                nu_x + self.epsilon * l2)
ensembleKW | ensembleKW-main/convex_adversarial/dual.py | import torch.nn as nn
from abc import ABCMeta, abstractmethod
class DualObject(nn.Module, metaclass=ABCMeta):
    """Abstract base for dual-network objects.

    A DualObject caches, at construction, the quantities needed to
    evaluate its contribution to the upper/lower bounds (in the paper:
    `h` with nu[i] = I and nu[i] = -I), and can advance that cache
    through subsequent dual layers.
    """

    def __init__(self):
        """Initialize the cached variables used for this layer's bound
        contribution (the paper's `h` at nu[i] = I and nu[i] = -I)."""
        super(DualObject, self).__init__()

    @abstractmethod
    def apply(self, dual_layer):
        """Push the cached variables through one more dual layer."""
        raise NotImplementedError

    @abstractmethod
    def bounds(self):
        """Return this layer's contribution to the upper and lower
        bounds (nu implicitly given by c = I and c = -I)."""
        raise NotImplementedError

    @abstractmethod
    def objective(self, *nus):
        """Return this layer's contribution to the objective for a given
        backward pass. For layer i the inputs are nu[k] through nu[i],
        so non-residual layers only need nus[-1] and nus[-2]."""
        raise NotImplementedError
class DualLayer(DualObject):
    """A DualObject that is also an affine map: in addition to the
    cached-bound interface it supports a forward application and its
    transpose (the backward pass)."""

    @abstractmethod
    def forward(self, *xs):
        """Apply the affine layer to the previous inputs (forward pass)."""
        raise NotImplementedError

    @abstractmethod
    def T(self, *xs):
        """Apply the transposed affine layer to the previous inputs
        (backward pass)."""
        raise NotImplementedError
| 1,722 | 34.163265 | 80 | py |
ensembleKW | ensembleKW-main/convex_adversarial/utils.py | import torch.nn as nn
###########################################
# Helper function to extract fully #
# shaped bias terms #
###########################################
def full_bias(l, n=None):
    """Return the bias of layer `l` expanded to the full output size.

    Linear biases come back as shape (1, -1). For convolutional layers
    the total output dimension `n` must be given (an int for a flattened
    (1, -1) result, or the full output shape as a tuple). A Dense layer's
    bias is the sum of its constituents'; an empty Sequential has bias 0.
    """
    if isinstance(l, nn.Linear):
        return l.bias.view(1, -1)
    if isinstance(l, nn.Conv2d):
        if n is None:
            raise ValueError("Need to pass n=<output dimension>")
        bias = l.bias.unsqueeze(1).unsqueeze(2)
        if not isinstance(n, int):
            # n is the full output shape.
            return bias.expand(1, *n)
        # n is the flattened size; recover the (square) spatial extent.
        side = int((n / (bias.numel())) ** 0.5)
        return bias.expand(1, bias.numel(), side, side).contiguous().view(1, -1)
    if isinstance(l, Dense):
        return sum(full_bias(layer, n=n) for layer in l.Ws if layer is not None)
    if isinstance(l, nn.Sequential) and len(l) == 0:
        return 0
    raise ValueError("Full bias can't be formed for given layer.")
###########################################
# Sequential models with skip connections #
###########################################
class DenseSequential(nn.Sequential):
    """Sequential container with skip connections: members whose type
    name contains 'Dense' receive *all* previous intermediate outputs,
    while ordinary modules only see the most recent one."""

    def forward(self, x):
        outputs = [x]
        for module in self._modules.values():
            if 'Dense' in type(module).__name__:
                outputs.append(module(*outputs))
            else:
                outputs.append(module(outputs[-1]))
        return outputs[-1]
class Dense(nn.Module):
    """Skip-connection layer: applies each W_i to the corresponding most
    recent intermediate output and sums the results. None entries act as
    placeholders (their input is skipped)."""

    def __init__(self, *Ws):
        super(Dense, self).__init__()
        self.Ws = nn.ModuleList(list(Ws))
        # Mirror the first submodule's out_features so this layer can be
        # inspected like a Linear layer.
        if len(Ws) > 0 and hasattr(Ws[0], 'out_features'):
            self.out_features = Ws[0].out_features

    def forward(self, *xs):
        recent = xs[-len(self.Ws):]
        return sum(W(x) for x, W in zip(recent, self.Ws) if W is not None)
#######################################
# Epsilon for high probability bounds #
#######################################
import numpy as np
import time
def GR(epsilon):
    """Variance-like factor for the upper tail of the Cauchy-median
    l1 estimate, used to bound the overshoot probability."""
    t = 2 / np.pi * np.log(1 + epsilon)
    denom = -0.5 * np.log(1 + t ** 2) + np.arctan(t) * t
    return (epsilon ** 2) / denom


def GL(epsilon):
    """Variance-like factor for the lower tail of the Cauchy-median
    l1 estimate, used to bound the undershoot probability."""
    t = 2 / np.pi * np.log(1 - epsilon)
    denom = -0.5 * np.log(1 + t ** 2) + np.arctan(t) * t
    return (epsilon ** 2) / denom


def p_upper(epsilon, k):
    """Probability that the k-sample estimate overshoots by (1+epsilon)."""
    return np.exp(-k * (epsilon ** 2) / GR(epsilon))


def p_lower(epsilon, k):
    """Probability that the k-sample estimate undershoots by (1-epsilon)."""
    return np.exp(-k * (epsilon ** 2) / GL(epsilon))
def epsilon_from_model(model, X, k, delta, m):
    """Compute the high-probability epsilon for the k-dimensional random
    projections used throughout `model`.

    `delta` is the total allowed failure probability and `m` the number
    of minibatches it is split across. Returns 0 when `delta` is None or
    when no layer is large enough to need an estimate; raises ValueError
    when `k`/`m` is None or when no epsilon <= 1 achieves `delta`.
    """
    if k is None or m is None:
        raise ValueError("k and m must not be None. ")
    if delta is None:
        print('No delta specified, not using probabilistic bounds.')
        return 0
    # Trace a single example through the network to record the output
    # size of every affine layer.
    X = X[0].unsqueeze(0)
    out_features = []
    for l in model:
        X = l(X)
        if isinstance(l, (nn.Linear, nn.Conv2d)):
            out_features.append(X.numel())
    # Number of probabilistic estimates: only layers larger than k*m are
    # estimated (the final layer's bound is exact, hence [:-1]).
    num_est = sum(n for n in out_features[:-1] if k*m < n)
    num_est += sum(n*i for i, n in enumerate(out_features[:-1]) if k*m < n)
    # BUG FIX: this early return previously came *after* the division by
    # num_est below, raising ZeroDivisionError whenever no layer exceeded
    # k*m. (A stray debug `print(num_est)` was also removed.)
    if num_est == 0:
        return 0
    # Union-bound the failure probability across all estimates and
    # minibatches, then invert to the per-estimate epsilon.
    sub_delta = (delta/num_est)**(1/m)
    l1_eps = get_epsilon(sub_delta, k)
    if l1_eps > 1:
        raise ValueError('Delta too large / k too small to get probabilistic bound')
    return l1_eps
def get_epsilon(delta, k, alpha=1e-2):
    """Determine the epsilon for which the estimate is accurate
    with probability >(1-delta) and k projection dimensions.

    Grows epsilon geometrically by a factor (1+alpha) from 0.001 until
    both tail probabilities fall below delta. Raises ValueError if that
    would require epsilon > 1.
    """
    # (Removed a dead `start_time = time.time()` and the commented-out
    # timing print that went with it.)
    epsilon = 0.001
    # probability of incorrect bound
    p_max = max(p_upper(epsilon, k), p_lower(epsilon, k))
    while p_max > delta:
        epsilon *= (1 + alpha)
        p_max = max(p_upper(epsilon, k), p_lower(epsilon, k))
        if epsilon > 1:
            raise ValueError('Delta too large / k too small to get probabilistic bound (epsilon > 1)')
    return epsilon
PySR | PySR-master/pysr/sr.py | """Define the PySRRegressor scikit-learn interface."""
import copy
from io import StringIO
import os
import sys
import numpy as np
import pandas as pd
import sympy
from sympy import sympify
import re
import tempfile
import shutil
from pathlib import Path
import pickle as pkl
from datetime import datetime
import warnings
from multiprocessing import cpu_count
from sklearn.base import BaseEstimator, RegressorMixin, MultiOutputMixin
from sklearn.utils import check_array, check_consistent_length, check_random_state
from sklearn.utils.validation import (
_check_feature_names_in,
check_is_fitted,
)
from .julia_helpers import (
init_julia,
_process_julia_project,
is_julia_version_greater_eq,
_escape_filename,
_load_cluster_manager,
_update_julia_project,
_load_backend,
)
from .export_numpy import CallableEquation
from .export_latex import generate_single_table, generate_multiple_tables, to_latex
from .deprecated import make_deprecated_kwargs_for_pysr_regressor
Main = None # TODO: Rename to more descriptive name like "julia_runtime"
already_ran = False
# Mapping from the Julia-side operator names used by SymbolicRegression.jl
# to equivalent SymPy callables, used when exporting discovered equations
# to SymPy. The *_abs / *_clip variants mirror the backend's domain-safe
# operators (e.g. sqrt_abs(x) = sqrt(|x|)); atanh variants wrap the input
# into (-1, 1) via a modulus — presumably to match the backend's clipped
# atanh, TODO confirm against SymbolicRegression.jl.
sympy_mappings = {
    "div": lambda x, y: x / y,
    "mult": lambda x, y: x * y,
    "sqrt": lambda x: sympy.sqrt(x),
    "sqrt_abs": lambda x: sympy.sqrt(abs(x)),
    "square": lambda x: x**2,
    "cube": lambda x: x**3,
    "plus": lambda x, y: x + y,
    "sub": lambda x, y: x - y,
    "neg": lambda x: -x,
    "pow": lambda x, y: x**y,
    "pow_abs": lambda x, y: abs(x) ** y,
    "cos": sympy.cos,
    "sin": sympy.sin,
    "tan": sympy.tan,
    "cosh": sympy.cosh,
    "sinh": sympy.sinh,
    "tanh": sympy.tanh,
    "exp": sympy.exp,
    "acos": sympy.acos,
    "asin": sympy.asin,
    "atan": sympy.atan,
    "acosh": lambda x: sympy.acosh(x),
    "acosh_abs": lambda x: sympy.acosh(abs(x) + 1),
    "asinh": sympy.asinh,
    "atanh": lambda x: sympy.atanh(sympy.Mod(x + 1, 2) - 1),
    "atanh_clip": lambda x: sympy.atanh(sympy.Mod(x + 1, 2) - 1),
    "abs": abs,
    "mod": sympy.Mod,
    "erf": sympy.erf,
    "erfc": sympy.erfc,
    "log": lambda x: sympy.log(x),
    "log10": lambda x: sympy.log(x, 10),
    "log2": lambda x: sympy.log(x, 2),
    "log1p": lambda x: sympy.log(x + 1),
    "log_abs": lambda x: sympy.log(abs(x)),
    "log10_abs": lambda x: sympy.log(abs(x), 10),
    "log2_abs": lambda x: sympy.log(abs(x), 2),
    "log1p_abs": lambda x: sympy.log(abs(x) + 1),
    "floor": sympy.floor,
    "ceil": sympy.ceiling,
    "sign": sympy.sign,
    "gamma": sympy.gamma,
}
def pysr(X, y, weights=None, **kwargs):  # pragma: no cover
    """Deprecated functional entry point; use PySRRegressor directly."""
    warnings.warn(
        "Calling `pysr` is deprecated. "
        "Please use `model = PySRRegressor(**params); model.fit(X, y)` going forward.",
        FutureWarning,
    )
    estimator = PySRRegressor(**kwargs)
    estimator.fit(X, y, weights=weights)
    return estimator.equations_
def _process_constraints(binary_operators, unary_operators, constraints):
constraints = constraints.copy()
for op in unary_operators:
if op not in constraints:
constraints[op] = -1
for op in binary_operators:
if op not in constraints:
constraints[op] = (-1, -1)
if op in ["plus", "sub", "+", "-"]:
if constraints[op][0] != constraints[op][1]:
raise NotImplementedError(
"You need equal constraints on both sides for - and +, "
"due to simplification strategies."
)
elif op in ["mult", "*"]:
# Make sure the complex expression is in the left side.
if constraints[op][0] == -1:
continue
if constraints[op][1] == -1 or constraints[op][0] < constraints[op][1]:
constraints[op][0], constraints[op][1] = (
constraints[op][1],
constraints[op][0],
)
return constraints
def _maybe_create_inline_operators(
binary_operators, unary_operators, extra_sympy_mappings
):
global Main
binary_operators = binary_operators.copy()
unary_operators = unary_operators.copy()
for op_list in [binary_operators, unary_operators]:
for i, op in enumerate(op_list):
is_user_defined_operator = "(" in op
if is_user_defined_operator:
Main.eval(op)
# Cut off from the first non-alphanumeric char:
first_non_char = [j for j, char in enumerate(op) if char == "("][0]
function_name = op[:first_non_char]
# Assert that function_name only contains
# alphabetical characters, numbers,
# and underscores:
if not re.match(r"^[a-zA-Z0-9_]+$", function_name):
raise ValueError(
f"Invalid function name {function_name}. "
"Only alphanumeric characters, numbers, "
"and underscores are allowed."
)
if (extra_sympy_mappings is None) or (
not function_name in extra_sympy_mappings
):
raise ValueError(
f"Custom function {function_name} is not defined in `extra_sympy_mappings`. "
"You can define it with, "
"e.g., `model.set_params(extra_sympy_mappings={'inv': lambda x: 1/x})`, where "
"`lambda x: 1/x` is a valid SymPy function defining the operator. "
"You can also define these at initialization time."
)
op_list[i] = function_name
return binary_operators, unary_operators
def _check_assertions(
X,
use_custom_variable_names,
variable_names,
weights,
y,
):
# Check for potential errors before they happen
assert len(X.shape) == 2
assert len(y.shape) in [1, 2]
assert X.shape[0] == y.shape[0]
if weights is not None:
assert weights.shape == y.shape
assert X.shape[0] == weights.shape[0]
if use_custom_variable_names:
assert len(variable_names) == X.shape[1]
# Check none of the variable names are function names:
for var_name in variable_names:
if var_name in sympy_mappings or var_name in sympy.__dict__.keys():
raise ValueError(
f"Variable name {var_name} is already a function name."
)
# Check if alphanumeric only:
if not re.match(r"^[a-zA-Z0-9_]+$", var_name):
raise ValueError(
f"Invalid variable name {var_name}. "
"Only alphanumeric characters, numbers, "
"and underscores are allowed."
)
def best(*args, **kwargs):  # pragma: no cover
    """Removed API; always raises with migration instructions."""
    msg = (
        "`best` has been deprecated. Please use the `PySRRegressor` interface. "
        "After fitting, you can return `.sympy()` to get the sympy representation "
        "of the best equation."
    )
    raise NotImplementedError(msg)


def best_row(*args, **kwargs):  # pragma: no cover
    """Removed API; always raises with migration instructions."""
    msg = (
        "`best_row` has been deprecated. Please use the `PySRRegressor` interface. "
        "After fitting, you can run `print(model)` to view the best equation, or "
        "`model.get_best()` to return the best equation's row in `model.equations_`."
    )
    raise NotImplementedError(msg)


def best_tex(*args, **kwargs):  # pragma: no cover
    """Removed API; always raises with migration instructions."""
    msg = (
        "`best_tex` has been deprecated. Please use the `PySRRegressor` interface. "
        "After fitting, you can return `.latex()` to get the sympy representation "
        "of the best equation."
    )
    raise NotImplementedError(msg)


def best_callable(*args, **kwargs):  # pragma: no cover
    """Removed API; always raises with migration instructions."""
    msg = (
        "`best_callable` has been deprecated. Please use the `PySRRegressor` "
        "interface. After fitting, you can use `.predict(X)` to use the best callable."
    )
    raise NotImplementedError(msg)
# Class validation constants
# Optimizer backends accepted for the `optimizer_algorithm` parameter of
# PySRRegressor (constant optimization at the end of each iteration).
VALID_OPTIMIZER_ALGORITHMS = ["NelderMead", "BFGS"]
class PySRRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
"""
High-performance symbolic regression algorithm.
This is the scikit-learn interface for SymbolicRegression.jl.
This model will automatically search for equations which fit
a given dataset subject to a particular loss and set of
constraints.
Most default parameters have been tuned over several example equations,
but you should adjust `niterations`, `binary_operators`, `unary_operators`
to your requirements. You can view more detailed explanations of the options
on the [options page](https://astroautomata.com/PySR/options) of the
documentation.
Parameters
----------
model_selection : str
Model selection criterion when selecting a final expression from
the list of best expression at each complexity.
Can be `'accuracy'`, `'best'`, or `'score'`. Default is `'best'`.
`'accuracy'` selects the candidate model with the lowest loss
(highest accuracy).
`'score'` selects the candidate model with the highest score.
Score is defined as the negated derivative of the log-loss with
respect to complexity - if an expression has a much better
loss at a slightly higher complexity, it is preferred.
`'best'` selects the candidate model with the highest score
among expressions with a loss better than at least 1.5x the
most accurate model.
binary_operators : list[str]
List of strings for binary operators used in the search.
See the [operators page](https://astroautomata.com/PySR/operators/)
for more details.
Default is `["+", "-", "*", "/"]`.
unary_operators : list[str]
Operators which only take a single scalar as input.
For example, `"cos"` or `"exp"`.
Default is `None`.
niterations : int
Number of iterations of the algorithm to run. The best
equations are printed and migrate between populations at the
end of each iteration.
Default is `40`.
populations : int
Number of populations running.
Default is `15`.
population_size : int
Number of individuals in each population.
Default is `33`.
max_evals : int
Limits the total number of evaluations of expressions to
this number. Default is `None`.
maxsize : int
Max complexity of an equation. Default is `20`.
maxdepth : int
Max depth of an equation. You can use both `maxsize` and
`maxdepth`. `maxdepth` is by default not used.
Default is `None`.
warmup_maxsize_by : float
Whether to slowly increase max size from a small number up to
the maxsize (if greater than 0). If greater than 0, says the
fraction of training time at which the current maxsize will
reach the user-passed maxsize.
Default is `0.0`.
timeout_in_seconds : float
Make the search return early once this many seconds have passed.
Default is `None`.
constraints : dict[str, int | tuple[int,int]]
Dictionary of int (unary) or 2-tuples (binary), this enforces
maxsize constraints on the individual arguments of operators.
E.g., `'pow': (-1, 1)` says that power laws can have any
complexity left argument, but only 1 complexity in the right
argument. Use this to force more interpretable solutions.
Default is `None`.
nested_constraints : dict[str, dict]
Specifies how many times a combination of operators can be
nested. For example, `{"sin": {"cos": 0}}, "cos": {"cos": 2}}`
specifies that `cos` may never appear within a `sin`, but `sin`
can be nested with itself an unlimited number of times. The
second term specifies that `cos` can be nested up to 2 times
within a `cos`, so that `cos(cos(cos(x)))` is allowed
(as well as any combination of `+` or `-` within it), but
`cos(cos(cos(cos(x))))` is not allowed. When an operator is not
specified, it is assumed that it can be nested an unlimited
number of times. This requires that there is no operator which
is used both in the unary operators and the binary operators
(e.g., `-` could be both subtract, and negation). For binary
operators, you only need to provide a single number: both
arguments are treated the same way, and the max of each
argument is constrained.
Default is `None`.
loss : str
String of Julia code specifying an elementwise loss function.
Can either be a loss from LossFunctions.jl, or your own loss
written as a function. Examples of custom written losses include:
`myloss(x, y) = abs(x-y)` for non-weighted, or
`myloss(x, y, w) = w*abs(x-y)` for weighted.
The included losses include:
Regression: `LPDistLoss{P}()`, `L1DistLoss()`,
`L2DistLoss()` (mean square), `LogitDistLoss()`,
`HuberLoss(d)`, `L1EpsilonInsLoss(ϵ)`, `L2EpsilonInsLoss(ϵ)`,
`PeriodicLoss(c)`, `QuantileLoss(τ)`.
Classification: `ZeroOneLoss()`, `PerceptronLoss()`,
`L1HingeLoss()`, `SmoothedL1HingeLoss(γ)`,
`ModifiedHuberLoss()`, `L2MarginLoss()`, `ExpLoss()`,
`SigmoidLoss()`, `DWDMarginLoss(q)`.
Default is `"L2DistLoss()"`.
full_objective : str
Alternatively, you can specify the full objective function as
a snippet of Julia code, including any sort of custom evaluation
(including symbolic manipulations beforehand), and any sort
of loss function or regularizations. The default `full_objective`
used in SymbolicRegression.jl is roughly equal to:
```julia
function eval_loss(tree, dataset::Dataset{T,L}, options)::L where {T,L}
prediction, flag = eval_tree_array(tree, dataset.X, options)
if !flag
return L(Inf)
end
return sum((prediction .- dataset.y) .^ 2) / dataset.n
end
```
where the example elementwise loss is mean-squared error.
You may pass a function with the same arguments as this (note
that the name of the function doesn't matter). Here,
both `prediction` and `dataset.y` are 1D arrays of length `dataset.n`.
Default is `None`.
complexity_of_operators : dict[str, float]
If you would like to use a complexity other than 1 for an
operator, specify the complexity here. For example,
`{"sin": 2, "+": 1}` would give a complexity of 2 for each use
of the `sin` operator, and a complexity of 1 for each use of
the `+` operator (which is the default). You may specify real
numbers for a complexity, and the total complexity of a tree
will be rounded to the nearest integer after computing.
Default is `None`.
complexity_of_constants : float
Complexity of constants. Default is `1`.
complexity_of_variables : float
Complexity of variables. Default is `1`.
parsimony : float
Multiplicative factor for how much to punish complexity.
Default is `0.0032`.
use_frequency : bool
Whether to measure the frequency of complexities, and use that
instead of parsimony to explore equation space. Will naturally
find equations of all complexities.
Default is `True`.
use_frequency_in_tournament : bool
Whether to use the frequency mentioned above in the tournament,
rather than just the simulated annealing.
Default is `True`.
adaptive_parsimony_scaling : float
If the adaptive parsimony strategy (`use_frequency` and
`use_frequency_in_tournament`), this is how much to (exponentially)
weight the contribution. If you find that the search is only optimizing
the most complex expressions while the simpler expressions remain stagnant,
you should increase this value.
Default is `20.0`.
alpha : float
Initial temperature for simulated annealing
(requires `annealing` to be `True`).
Default is `0.1`.
annealing : bool
Whether to use annealing. Default is `False`.
early_stop_condition : float | str
Stop the search early if this loss is reached. You may also
pass a string containing a Julia function which
takes a loss and complexity as input, for example:
`"f(loss, complexity) = (loss < 0.1) && (complexity < 10)"`.
Default is `None`.
ncyclesperiteration : int
Number of total mutations to run, per 10 samples of the
population, per iteration.
Default is `550`.
fraction_replaced : float
How much of population to replace with migrating equations from
other populations.
Default is `0.000364`.
fraction_replaced_hof : float
How much of population to replace with migrating equations from
hall of fame. Default is `0.035`.
weight_add_node : float
Relative likelihood for mutation to add a node.
Default is `0.79`.
weight_insert_node : float
Relative likelihood for mutation to insert a node.
Default is `5.1`.
weight_delete_node : float
Relative likelihood for mutation to delete a node.
Default is `1.7`.
weight_do_nothing : float
Relative likelihood for mutation to leave the individual.
Default is `0.21`.
weight_mutate_constant : float
Relative likelihood for mutation to change the constant slightly
in a random direction.
Default is `0.048`.
weight_mutate_operator : float
Relative likelihood for mutation to swap an operator.
Default is `0.47`.
weight_randomize : float
Relative likelihood for mutation to completely delete and then
randomly generate the equation
Default is `0.00023`.
weight_simplify : float
Relative likelihood for mutation to simplify constant parts by evaluation
Default is `0.0020`.
weight_optimize: float
Constant optimization can also be performed as a mutation, in addition to
the normal strategy controlled by `optimize_probability` which happens
every iteration. Using it as a mutation is useful if you want to use
a large `ncyclesperiteration`, and may not optimize very often.
Default is `0.0`.
crossover_probability : float
Absolute probability of crossover-type genetic operation, instead of a mutation.
Default is `0.066`.
skip_mutation_failures : bool
Whether to skip mutation and crossover failures, rather than
simply re-sampling the current member.
Default is `True`.
migration : bool
Whether to migrate. Default is `True`.
hof_migration : bool
Whether to have the hall of fame migrate. Default is `True`.
topn : int
How many top individuals migrate from each population.
Default is `12`.
should_simplify : bool
Whether to use algebraic simplification in the search. Note that only
a few simple rules are implemented. Default is `True`.
should_optimize_constants : bool
Whether to numerically optimize constants (Nelder-Mead/Newton)
at the end of each iteration. Default is `True`.
optimizer_algorithm : str
Optimization scheme to use for optimizing constants. Can currently
be `NelderMead` or `BFGS`.
Default is `"BFGS"`.
optimizer_nrestarts : int
Number of time to restart the constants optimization process with
different initial conditions.
Default is `2`.
optimize_probability : float
Probability of optimizing the constants during a single iteration of
the evolutionary algorithm.
Default is `0.14`.
optimizer_iterations : int
Number of iterations that the constants optimizer can take.
Default is `8`.
perturbation_factor : float
Constants are perturbed by a max factor of
(perturbation_factor*T + 1). Either multiplied by this or
divided by this.
Default is `0.076`.
tournament_selection_n : int
Number of expressions to consider in each tournament.
Default is `10`.
tournament_selection_p : float
Probability of selecting the best expression in each
tournament. The probability will decay as p*(1-p)^n for other
expressions, sorted by loss.
Default is `0.86`.
procs : int
Number of processes (=number of populations running).
Default is `cpu_count()`.
multithreading : bool
Use multithreading instead of distributed backend.
Using procs=0 will turn off both. Default is `True`.
cluster_manager : str
For distributed computing, this sets the job queue system. Set
to one of "slurm", "pbs", "lsf", "sge", "qrsh", "scyld", or
"htc". If set to one of these, PySR will run in distributed
mode, and use `procs` to figure out how many processes to launch.
Default is `None`.
batching : bool
Whether to compare population members on small batches during
evolution. Still uses full dataset for comparing against hall
of fame. Default is `False`.
batch_size : int
The amount of data to use if doing batching. Default is `50`.
fast_cycle : bool
Batch over population subsamples. This is a slightly different
algorithm than regularized evolution, but does cycles 15%
faster. May be algorithmically less efficient.
Default is `False`.
turbo: bool
(Experimental) Whether to use LoopVectorization.jl to speed up the
search evaluation. Certain operators may not be supported.
Does not support 16-bit precision floats.
Default is `False`.
precision : int
What precision to use for the data. By default this is `32`
(float32), but you can select `64` or `16` as well, giving
you 64 or 16 bits of floating point precision, respectively.
If you pass complex data, the corresponding complex precision
will be used (i.e., `64` for complex128, `32` for complex64).
Default is `32`.
enable_autodiff : bool
Whether to create derivative versions of operators for automatic
differentiation. This is only necessary if you wish to compute
the gradients of an expression within a custom loss function.
Default is `False`.
random_state : int, Numpy RandomState instance or None
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Default is `None`.
deterministic : bool
Make a PySR search give the same result every run.
To use this, you must turn off parallelism
(with `procs`=0, `multithreading`=False),
and set `random_state` to a fixed seed.
Default is `False`.
warm_start : bool
Tells fit to continue from where the last call to fit finished.
If false, each call to fit will be fresh, overwriting previous results.
Default is `False`.
verbosity : int
What verbosity level to use. 0 means minimal print statements.
Default is `1e9`.
update_verbosity : int
What verbosity level to use for package updates.
Will take value of `verbosity` if not given.
Default is `None`.
progress : bool
Whether to use a progress bar instead of printing to stdout.
Default is `True`.
equation_file : str
Where to save the files (.csv extension).
Default is `None`.
temp_equation_file : bool
Whether to put the hall of fame file in the temp directory.
Deletion is then controlled with the `delete_tempfiles`
parameter.
Default is `False`.
tempdir : str
directory for the temporary files. Default is `None`.
delete_tempfiles : bool
Whether to delete the temporary files after finishing.
Default is `True`.
julia_project : str
A Julia environment location containing a Project.toml
(and potentially the source code for SymbolicRegression.jl).
Default gives the Python package directory, where a
Project.toml file should be present from the install.
update: bool
Whether to automatically update Julia packages when `fit` is called.
You should make sure that PySR is up-to-date itself first, as
the packaged Julia packages may not necessarily include all
updated dependencies.
Default is `False`.
output_jax_format : bool
Whether to create a 'jax_format' column in the output,
containing jax-callable functions and the default parameters in
a jax array.
Default is `False`.
output_torch_format : bool
Whether to create a 'torch_format' column in the output,
containing a torch module with trainable parameters.
Default is `False`.
extra_sympy_mappings : dict[str, Callable]
Provides mappings between custom `binary_operators` or
`unary_operators` defined in julia strings, to those same
operators defined in sympy.
E.G if `unary_operators=["inv(x)=1/x"]`, then for the fitted
model to be export to sympy, `extra_sympy_mappings`
would be `{"inv": lambda x: 1/x}`.
Default is `None`.
extra_jax_mappings : dict[Callable, str]
Similar to `extra_sympy_mappings` but for model export
to jax. The dictionary maps sympy functions to jax functions.
For example: `extra_jax_mappings={sympy.sin: "jnp.sin"}` maps
the `sympy.sin` function to the equivalent jax expression `jnp.sin`.
Default is `None`.
extra_torch_mappings : dict[Callable, Callable]
The same as `extra_jax_mappings` but for model export
to pytorch. Note that the dictionary keys should be callable
pytorch expressions.
For example: `extra_torch_mappings={sympy.sin: torch.sin}`.
Default is `None`.
denoise : bool
Whether to use a Gaussian Process to denoise the data before
inputting to PySR. Can help PySR fit noisy data.
Default is `False`.
select_k_features : int
Whether to run feature selection in Python using random forests,
before passing to the symbolic regression code. None means no
feature selection; an int means select that many features.
Default is `None`.
julia_kwargs : dict
Keyword arguments to pass to `julia.core.Julia(...)` to initialize
the Julia runtime. The default, when `None`, is to set `threads` equal
to `procs`, and `optimize` to 3.
Default is `None`.
**kwargs : dict
Supports deprecated keyword arguments. Other arguments will
result in an error.
Attributes
----------
equations_ : pandas.DataFrame | list[pandas.DataFrame]
Processed DataFrame containing the results of model fitting.
n_features_in_ : int
Number of features seen during :term:`fit`.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
nout_ : int
Number of output dimensions.
selection_mask_ : list[int] of length `select_k_features`
List of indices for input features that are selected when
`select_k_features` is set.
tempdir_ : Path
Path to the temporary equations directory.
equation_file_ : str
Output equation file name produced by the julia backend.
raw_julia_state_ : tuple[list[PyCall.jlwrap], PyCall.jlwrap]
The state for the julia SymbolicRegression.jl backend post fitting.
equation_file_contents_ : list[pandas.DataFrame]
Contents of the equation file output by the Julia backend.
show_pickle_warnings_ : bool
Whether to show warnings about what attributes can be pickled.
Examples
--------
```python
>>> import numpy as np
>>> from pysr import PySRRegressor
>>> randstate = np.random.RandomState(0)
>>> X = 2 * randstate.randn(100, 5)
>>> # y = 2.5382 * cos(x_3) + x_0 - 0.5
>>> y = 2.5382 * np.cos(X[:, 3]) + X[:, 0] ** 2 - 0.5
>>> model = PySRRegressor(
... niterations=40,
... binary_operators=["+", "*"],
... unary_operators=[
... "cos",
... "exp",
... "sin",
... "inv(x) = 1/x", # Custom operator (julia syntax)
... ],
... model_selection="best",
... loss="loss(x, y) = (x - y)^2", # Custom loss function (julia syntax)
... )
>>> model.fit(X, y)
>>> model
PySRRegressor.equations_ = [
0 0.000000 3.8552167 3.360272e+01 1
1 1.189847 (x0 * x0) 3.110905e+00 3
2 0.010626 ((x0 * x0) + -0.25573406) 3.045491e+00 5
3 0.896632 (cos(x3) + (x0 * x0)) 1.242382e+00 6
4 0.811362 ((x0 * x0) + (cos(x3) * 2.4384754)) 2.451971e-01 8
5 >>>> 13.733371 (((cos(x3) * 2.5382) + (x0 * x0)) + -0.5) 2.889755e-13 10
6 0.194695 ((x0 * x0) + (((cos(x3) + -0.063180044) * 2.53... 1.957723e-13 12
7 0.006988 ((x0 * x0) + (((cos(x3) + -0.32505524) * 1.538... 1.944089e-13 13
8 0.000955 (((((x0 * x0) + cos(x3)) + -0.8251649) + (cos(... 1.940381e-13 15
]
>>> model.score(X, y)
1.0
>>> model.predict(np.array([1,2,3,4,5]))
array([-1.15907818, -1.15907818, -1.15907818, -1.15907818, -1.15907818])
```
"""
    def __init__(
        self,
        model_selection="best",
        *,
        binary_operators=None,
        unary_operators=None,
        niterations=40,
        populations=15,
        population_size=33,
        max_evals=None,
        maxsize=20,
        maxdepth=None,
        warmup_maxsize_by=0.0,
        timeout_in_seconds=None,
        constraints=None,
        nested_constraints=None,
        loss=None,
        full_objective=None,
        complexity_of_operators=None,
        complexity_of_constants=1,
        complexity_of_variables=1,
        parsimony=0.0032,
        use_frequency=True,
        use_frequency_in_tournament=True,
        adaptive_parsimony_scaling=20.0,
        alpha=0.1,
        annealing=False,
        early_stop_condition=None,
        ncyclesperiteration=550,
        fraction_replaced=0.000364,
        fraction_replaced_hof=0.035,
        weight_add_node=0.79,
        weight_insert_node=5.1,
        weight_delete_node=1.7,
        weight_do_nothing=0.21,
        weight_mutate_constant=0.048,
        weight_mutate_operator=0.47,
        weight_randomize=0.00023,
        weight_simplify=0.0020,
        weight_optimize=0.0,
        crossover_probability=0.066,
        skip_mutation_failures=True,
        migration=True,
        hof_migration=True,
        topn=12,
        should_simplify=None,
        should_optimize_constants=True,
        optimizer_algorithm="BFGS",
        optimizer_nrestarts=2,
        optimize_probability=0.14,
        optimizer_iterations=8,
        perturbation_factor=0.076,
        tournament_selection_n=10,
        tournament_selection_p=0.86,
        procs=cpu_count(),  # NOTE: default is evaluated once, at class-definition time
        multithreading=None,
        cluster_manager=None,
        batching=False,
        batch_size=50,
        fast_cycle=False,
        turbo=False,
        precision=32,
        enable_autodiff=False,
        random_state=None,
        deterministic=False,
        warm_start=False,
        verbosity=1e9,
        update_verbosity=None,
        progress=True,
        equation_file=None,
        temp_equation_file=False,
        tempdir=None,
        delete_tempfiles=True,
        julia_project=None,
        update=False,
        output_jax_format=False,
        output_torch_format=False,
        extra_sympy_mappings=None,
        extra_torch_mappings=None,
        extra_jax_mappings=None,
        denoise=False,
        select_k_features=None,
        julia_kwargs=None,
        **kwargs,
    ):
        """Store the regressor configuration.

        All arguments are assigned to ``self`` verbatim with no validation
        (validation and defaulting of `None` values happen later, in
        `_validate_and_set_init_params`, when `fit` is called). The trailing
        ``**kwargs`` exists only to translate or reject deprecated keyword
        arguments.
        """
        # Hyperparameters
        # - Model search parameters
        self.model_selection = model_selection
        self.binary_operators = binary_operators
        self.unary_operators = unary_operators
        self.niterations = niterations
        self.populations = populations
        self.population_size = population_size
        self.ncyclesperiteration = ncyclesperiteration
        # - Equation Constraints
        self.maxsize = maxsize
        self.maxdepth = maxdepth
        self.constraints = constraints
        self.nested_constraints = nested_constraints
        self.warmup_maxsize_by = warmup_maxsize_by
        self.should_simplify = should_simplify
        # - Early exit conditions:
        self.max_evals = max_evals
        self.timeout_in_seconds = timeout_in_seconds
        self.early_stop_condition = early_stop_condition
        # - Loss parameters
        self.loss = loss
        self.full_objective = full_objective
        self.complexity_of_operators = complexity_of_operators
        self.complexity_of_constants = complexity_of_constants
        self.complexity_of_variables = complexity_of_variables
        self.parsimony = parsimony
        self.use_frequency = use_frequency
        self.use_frequency_in_tournament = use_frequency_in_tournament
        self.adaptive_parsimony_scaling = adaptive_parsimony_scaling
        self.alpha = alpha
        self.annealing = annealing
        # - Evolutionary search parameters
        # -- Mutation parameters
        self.weight_add_node = weight_add_node
        self.weight_insert_node = weight_insert_node
        self.weight_delete_node = weight_delete_node
        self.weight_do_nothing = weight_do_nothing
        self.weight_mutate_constant = weight_mutate_constant
        self.weight_mutate_operator = weight_mutate_operator
        self.weight_randomize = weight_randomize
        self.weight_simplify = weight_simplify
        self.weight_optimize = weight_optimize
        self.crossover_probability = crossover_probability
        self.skip_mutation_failures = skip_mutation_failures
        # -- Migration parameters
        self.migration = migration
        self.hof_migration = hof_migration
        self.fraction_replaced = fraction_replaced
        self.fraction_replaced_hof = fraction_replaced_hof
        self.topn = topn
        # -- Constants parameters
        self.should_optimize_constants = should_optimize_constants
        self.optimizer_algorithm = optimizer_algorithm
        self.optimizer_nrestarts = optimizer_nrestarts
        self.optimize_probability = optimize_probability
        self.optimizer_iterations = optimizer_iterations
        self.perturbation_factor = perturbation_factor
        # -- Selection parameters
        self.tournament_selection_n = tournament_selection_n
        self.tournament_selection_p = tournament_selection_p
        # Solver parameters
        self.procs = procs
        self.multithreading = multithreading
        self.cluster_manager = cluster_manager
        self.batching = batching
        self.batch_size = batch_size
        self.fast_cycle = fast_cycle
        self.turbo = turbo
        self.precision = precision
        self.enable_autodiff = enable_autodiff
        self.random_state = random_state
        self.deterministic = deterministic
        self.warm_start = warm_start
        # Additional runtime parameters
        # - Runtime user interface
        self.verbosity = verbosity
        self.update_verbosity = update_verbosity
        self.progress = progress
        # - Project management
        self.equation_file = equation_file
        self.temp_equation_file = temp_equation_file
        self.tempdir = tempdir
        self.delete_tempfiles = delete_tempfiles
        self.julia_project = julia_project
        self.update = update
        self.output_jax_format = output_jax_format
        self.output_torch_format = output_torch_format
        self.extra_sympy_mappings = extra_sympy_mappings
        self.extra_jax_mappings = extra_jax_mappings
        self.extra_torch_mappings = extra_torch_mappings
        # Pre-modelling transformation
        self.denoise = denoise
        self.select_k_features = select_k_features
        self.julia_kwargs = julia_kwargs
        # Once all valid parameters have been assigned handle the
        # deprecated kwargs
        if len(kwargs) > 0:  # pragma: no cover
            deprecated_kwargs = make_deprecated_kwargs_for_pysr_regressor()
            for k, v in kwargs.items():
                # Handle renamed kwargs
                if k in deprecated_kwargs:
                    updated_kwarg_name = deprecated_kwargs[k]
                    setattr(self, updated_kwarg_name, v)
                    warnings.warn(
                        f"{k} has been renamed to {updated_kwarg_name} in PySRRegressor. "
                        "Please use that instead.",
                        FutureWarning,
                    )
                # Handle kwargs that have been moved to the fit method
                elif k in ["weights", "variable_names", "Xresampled"]:
                    warnings.warn(
                        f"{k} is a data dependant parameter so should be passed when fit is called. "
                        f"Ignoring parameter; please pass {k} during the call to fit instead.",
                        FutureWarning,
                    )
                else:
                    # Anything unrecognized is an error, not a silent no-op.
                    raise TypeError(
                        f"{k} is not a valid keyword argument for PySRRegressor."
                    )
    @classmethod
    def from_file(
        cls,
        equation_file,
        *,
        binary_operators=None,
        unary_operators=None,
        n_features_in=None,
        feature_names_in=None,
        selection_mask=None,
        nout=1,
        **pysr_kwargs,
    ):
        """
        Create a model from a saved model checkpoint or equation file.
        Parameters
        ----------
        equation_file : str
            Path to a pickle file containing a saved model, or a csv file
            containing equations.
        binary_operators : list[str]
            The same binary operators used when creating the model.
            Not needed if loading from a pickle file.
        unary_operators : list[str]
            The same unary operators used when creating the model.
            Not needed if loading from a pickle file.
        n_features_in : int
            Number of features passed to the model.
            Not needed if loading from a pickle file.
        feature_names_in : list[str]
            Names of the features passed to the model.
            Not needed if loading from a pickle file.
        selection_mask : list[bool]
            If using select_k_features, you must pass `model.selection_mask_` here.
            Not needed if loading from a pickle file.
        nout : int
            Number of outputs of the model.
            Not needed if loading from a pickle file.
            Default is `1`.
        **pysr_kwargs : dict
            Any other keyword arguments to initialize the PySRRegressor object.
            These will overwrite those stored in the pickle file.
            Not needed if loading from a pickle file.
        Returns
        -------
        model : PySRRegressor
            The model with fitted equations.
        """
        # Map a csv equation file to its companion pickle checkpoint.
        if os.path.splitext(equation_file)[1] != ".pkl":
            pkl_filename = _csv_filename_to_pkl_filename(equation_file)
        else:
            pkl_filename = equation_file
        # Try to load model from <equation_file>.pkl
        print(f"Checking if {pkl_filename} exists...")
        if os.path.exists(pkl_filename):
            print(f"Loading model from {pkl_filename}")
            # These must come from the pickle itself, not the arguments.
            assert binary_operators is None
            assert unary_operators is None
            assert n_features_in is None
            # NOTE(review): unpickling executes arbitrary code; only load
            # checkpoint files from trusted sources.
            with open(pkl_filename, "rb") as f:
                model = pkl.load(f)
            # Change equation_file_ to be in the same dir as the pickle file
            base_dir = os.path.dirname(pkl_filename)
            base_equation_file = os.path.basename(model.equation_file_)
            model.equation_file_ = os.path.join(base_dir, base_equation_file)
            # Update any parameters if necessary, such as
            # extra_sympy_mappings:
            model.set_params(**pysr_kwargs)
            if "equations_" not in model.__dict__ or model.equations_ is None:
                model.refresh()
            return model
        # Else, we re-create it.
        print(
            f"{equation_file} does not exist, "
            "so we must create the model from scratch."
        )
        assert binary_operators is not None
        assert unary_operators is not None
        assert n_features_in is not None
        # TODO: copy .bkup file if exists.
        model = cls(
            equation_file=equation_file,
            binary_operators=binary_operators,
            unary_operators=unary_operators,
            **pysr_kwargs,
        )
        model.nout_ = nout
        model.n_features_in_ = n_features_in
        if feature_names_in is None:
            model.feature_names_in_ = [f"x{i}" for i in range(n_features_in)]
        else:
            assert len(feature_names_in) == n_features_in
            model.feature_names_in_ = feature_names_in
        if selection_mask is None:
            # NOTE(review): this builds a boolean mask, while the class-level
            # docs describe `selection_mask_` as a list of indices — confirm
            # which form downstream consumers expect.
            model.selection_mask_ = np.ones(n_features_in, dtype=bool)
        else:
            model.selection_mask_ = selection_mask
        model.refresh(checkpoint_file=equation_file)
        return model
def __repr__(self):
"""
Print all current equations fitted by the model.
The string `>>>>` denotes which equation is selected by the
`model_selection`.
"""
if not hasattr(self, "equations_") or self.equations_ is None:
return "PySRRegressor.equations_ = None"
output = "PySRRegressor.equations_ = [\n"
equations = self.equations_
if not isinstance(equations, list):
all_equations = [equations]
else:
all_equations = equations
for i, equations in enumerate(all_equations):
selected = ["" for _ in range(len(equations))]
chosen_row = idx_model_selection(equations, self.model_selection)
selected[chosen_row] = ">>>>"
repr_equations = pd.DataFrame(
dict(
pick=selected,
score=equations["score"],
equation=equations["equation"],
loss=equations["loss"],
complexity=equations["complexity"],
)
)
if len(all_equations) > 1:
output += "[\n"
for line in repr_equations.__repr__().split("\n"):
output += "\t" + line + "\n"
if len(all_equations) > 1:
output += "]"
if i < len(all_equations) - 1:
output += ", "
output += "]"
return output
def __getstate__(self):
"""
Handle pickle serialization for PySRRegressor.
The Scikit-learn standard requires estimators to be serializable via
`pickle.dumps()`. However, `PyCall.jlwrap` does not support pickle
serialization.
Thus, for `PySRRegressor` to support pickle serialization, the
`raw_julia_state_` attribute must be hidden from pickle. This will
prevent the `warm_start` of any model that is loaded via `pickle.loads()`,
but does allow all other attributes of a fitted `PySRRegressor` estimator
to be serialized. Note: Jax and Torch format equations are also removed
from the pickled instance.
"""
state = self.__dict__
show_pickle_warning = not (
"show_pickle_warnings_" in state and not state["show_pickle_warnings_"]
)
if "raw_julia_state_" in state and show_pickle_warning:
warnings.warn(
"raw_julia_state_ cannot be pickled and will be removed from the "
"serialized instance. This will prevent a `warm_start` fit of any "
"model that is deserialized via `pickle.load()`."
)
state_keys_containing_lambdas = ["extra_sympy_mappings", "extra_torch_mappings"]
for state_key in state_keys_containing_lambdas:
if state[state_key] is not None and show_pickle_warning:
warnings.warn(
f"`{state_key}` cannot be pickled and will be removed from the "
"serialized instance. When loading the model, please redefine "
f"`{state_key}` at runtime."
)
state_keys_to_clear = ["raw_julia_state_"] + state_keys_containing_lambdas
pickled_state = {
key: (None if key in state_keys_to_clear else value)
for key, value in state.items()
}
if ("equations_" in pickled_state) and (
pickled_state["equations_"] is not None
):
pickled_state["output_torch_format"] = False
pickled_state["output_jax_format"] = False
if self.nout_ == 1:
pickled_columns = ~pickled_state["equations_"].columns.isin(
["jax_format", "torch_format"]
)
pickled_state["equations_"] = (
pickled_state["equations_"].loc[:, pickled_columns].copy()
)
else:
pickled_columns = [
~dataframe.columns.isin(["jax_format", "torch_format"])
for dataframe in pickled_state["equations_"]
]
pickled_state["equations_"] = [
dataframe.loc[:, signle_pickled_columns]
for dataframe, signle_pickled_columns in zip(
pickled_state["equations_"], pickled_columns
)
]
return pickled_state
def _checkpoint(self):
"""Save the model's current state to a checkpoint file.
This should only be used internally by PySRRegressor.
"""
# Save model state:
self.show_pickle_warnings_ = False
with open(_csv_filename_to_pkl_filename(self.equation_file_), "wb") as f:
pkl.dump(self, f)
self.show_pickle_warnings_ = True
@property
def equations(self): # pragma: no cover
warnings.warn(
"PySRRegressor.equations is now deprecated. "
"Please use PySRRegressor.equations_ instead.",
FutureWarning,
)
return self.equations_
def get_best(self, index=None):
"""
Get best equation using `model_selection`.
Parameters
----------
index : int | list[int]
If you wish to select a particular equation from `self.equations_`,
give the row number here. This overrides the `model_selection`
parameter. If there are multiple output features, then pass
a list of indices with the order the same as the output feature.
Returns
-------
best_equation : pandas.Series
Dictionary representing the best expression found.
Raises
------
NotImplementedError
Raised when an invalid model selection strategy is provided.
"""
check_is_fitted(self, attributes=["equations_"])
if self.equations_ is None:
raise ValueError("No equations have been generated yet.")
if index is not None:
if isinstance(self.equations_, list):
assert isinstance(
index, list
), "With multiple output features, index must be a list."
return [eq.iloc[i] for eq, i in zip(self.equations_, index)]
return self.equations_.iloc[index]
if isinstance(self.equations_, list):
return [
eq.iloc[idx_model_selection(eq, self.model_selection)]
for eq in self.equations_
]
return self.equations_.iloc[
idx_model_selection(self.equations_, self.model_selection)
]
def _setup_equation_file(self):
"""
Set the full pathname of the equation file.
This is performed using `tempdir` and
`equation_file`.
"""
# Cast tempdir string as a Path object
self.tempdir_ = Path(tempfile.mkdtemp(dir=self.tempdir))
if self.temp_equation_file:
self.equation_file_ = self.tempdir_ / "hall_of_fame.csv"
elif self.equation_file is None:
if self.warm_start and (
hasattr(self, "equation_file_") and self.equation_file_
):
pass
else:
date_time = datetime.now().strftime("%Y-%m-%d_%H%M%S.%f")[:-3]
self.equation_file_ = "hall_of_fame_" + date_time + ".csv"
else:
self.equation_file_ = self.equation_file
self.equation_file_contents_ = None
    def _validate_and_set_init_params(self):
        """
        Ensure parameters passed at initialization are valid.
        Also returns a dictionary of parameters to update from their
        values given at initialization.
        Returns
        -------
        packed_modified_params : dict
            Dictionary of parameters to modify from their initialized
            values. For example, default parameters are set here
            when a parameter is left set to `None`.
        """
        # Immutable parameter validation
        # Ensure instance parameters are allowable values:
        if self.tournament_selection_n > self.population_size:
            raise ValueError(
                "tournament_selection_n parameter must be smaller than population_size."
            )
        if self.maxsize > 40:
            warnings.warn(
                "Note: Using a large maxsize for the equation search will be "
                "exponentially slower and use significant memory. You should consider "
                "turning `use_frequency` to False, and perhaps use `warmup_maxsize_by`."
            )
        elif self.maxsize < 7:
            raise ValueError("PySR requires a maxsize of at least 7")
        # Determinism requires a seed, serial execution, and no threads.
        if self.deterministic and not (
            self.multithreading in [False, None]
            and self.procs == 0
            and self.random_state is not None
        ):
            raise ValueError(
                "To ensure deterministic searches, you must set `random_state` to a seed, "
                "`procs` to `0`, and `multithreading` to `False` or `None`."
            )
        if self.random_state is not None and (
            not self.deterministic or self.procs != 0
        ):
            warnings.warn(
                "Note: Setting `random_state` without also setting `deterministic` "
                "to True and `procs` to 0 will result in non-deterministic searches. "
            )
        # `loss` and `full_objective` are mutually exclusive loss specs.
        if self.loss is not None and self.full_objective is not None:
            raise ValueError("You cannot set both `loss` and `full_objective`.")
        # NotImplementedError - Values that could be supported at a later time
        if self.optimizer_algorithm not in VALID_OPTIMIZER_ALGORITHMS:
            raise NotImplementedError(
                f"PySR currently only supports the following optimizer algorithms: {VALID_OPTIMIZER_ALGORITHMS}"
            )
        # 'Mutable' parameter validation
        # `sys.stdout` has no `.buffer` in some notebook frontends; used
        # below to decide whether a progress bar can be rendered.
        buffer_available = "buffer" in sys.stdout.__dir__()
        # Params and their default values, if None is given:
        default_param_mapping = {
            "binary_operators": "+ * - /".split(" "),
            "unary_operators": [],
            "maxdepth": self.maxsize,
            "constraints": {},
            "multithreading": self.procs != 0 and self.cluster_manager is None,
            "batch_size": 1,
            "update_verbosity": self.verbosity,
            "progress": buffer_available,
        }
        packed_modified_params = {}
        for parameter, default_value in default_param_mapping.items():
            parameter_value = getattr(self, parameter)
            if parameter_value is None:
                parameter_value = default_value
            else:
                # Special cases such as when binary_operators is a string
                if parameter in ["binary_operators", "unary_operators"] and isinstance(
                    parameter_value, str
                ):
                    parameter_value = [parameter_value]
                elif parameter == "batch_size" and parameter_value < 1:
                    warnings.warn(
                        "Given `batch_size` must be greater than or equal to one. "
                        "`batch_size` has been increased to equal one."
                    )
                    parameter_value = 1
                elif parameter == "progress" and not buffer_available:
                    warnings.warn(
                        "Note: it looks like you are running in Jupyter. "
                        "The progress bar will be turned off."
                    )
                    parameter_value = False
            packed_modified_params[parameter] = parameter_value
        # The search needs at least one operator to build expressions from.
        assert (
            len(packed_modified_params["binary_operators"])
            + len(packed_modified_params["unary_operators"])
            > 0
        )
        # Fill in Julia interpreter defaults unless the user overrode them.
        julia_kwargs = {}
        if self.julia_kwargs is not None:
            for key, value in self.julia_kwargs.items():
                julia_kwargs[key] = value
        if "optimize" not in julia_kwargs:
            julia_kwargs["optimize"] = 3
        if "threads" not in julia_kwargs and packed_modified_params["multithreading"]:
            julia_kwargs["threads"] = self.procs
        packed_modified_params["julia_kwargs"] = julia_kwargs
        return packed_modified_params
    def _validate_and_set_fit_params(self, X, y, Xresampled, weights, variable_names):
        """
        Validate the parameters passed to the :term:`fit` method.
        This method also sets the `nout_` attribute.
        Parameters
        ----------
        X : ndarray | pandas.DataFrame
            Training data of shape `(n_samples, n_features)`.
        y : ndarray | pandas.DataFrame
            Target values of shape `(n_samples,)` or `(n_samples, n_targets)`.
            Will be cast to `X`'s dtype if necessary.
        Xresampled : ndarray | pandas.DataFrame
            Resampled training data used for denoising,
            of shape `(n_resampled, n_features)`.
        weights : ndarray | pandas.DataFrame
            Weight array of the same shape as `y`.
            Each element is how to weight the mean-square-error loss
            for that particular element of y.
        variable_names : list[str] of length n_features
            Names of each variable in the training dataset, `X`.
        Returns
        -------
        X_validated : ndarray of shape (n_samples, n_features)
            Validated training data.
        y_validated : ndarray of shape (n_samples,) or (n_samples, n_targets)
            Validated target data.
        Xresampled : ndarray of shape (n_resampled, n_features)
            Validated resampled training data used for denoising.
        weights_validated : ndarray | None
            Validated weight array, or `None` if no weights were given.
        variable_names_validated : list[str] of length n_features
            Validated list of variable names for each feature in `X`.
        """
        if isinstance(X, pd.DataFrame):
            # DataFrame column names take priority over `variable_names`.
            if variable_names:
                variable_names = None
                warnings.warn(
                    "`variable_names` has been reset to `None` as `X` is a DataFrame. "
                    "Using DataFrame column names instead."
                )
            if X.columns.is_object() and X.columns.str.contains(" ").any():
                X.columns = X.columns.str.replace(" ", "_")
                warnings.warn(
                    "Spaces in DataFrame column names are not supported. "
                    "Spaces have been replaced with underscores. \n"
                    "Please rename the columns to valid names."
                )
        elif variable_names and any([" " in name for name in variable_names]):
            variable_names = [name.replace(" ", "_") for name in variable_names]
            warnings.warn(
                "Spaces in `variable_names` are not supported. "
                "Spaces have been replaced with underscores. \n"
                "Please use valid names instead."
            )
        # Data validation and feature name fetching via sklearn
        # This method sets the n_features_in_ attribute
        if Xresampled is not None:
            Xresampled = check_array(Xresampled)
        if weights is not None:
            weights = check_array(weights, ensure_2d=False)
            check_consistent_length(weights, y)
        X, y = self._validate_data(X=X, y=y, reset=True, multi_output=True)
        self.feature_names_in_ = _check_feature_names_in(self, variable_names)
        variable_names = self.feature_names_in_
        # Handle multioutput data
        if len(y.shape) == 1 or (len(y.shape) == 2 and y.shape[1] == 1):
            # Single-target: flatten a column vector into a 1-D array.
            y = y.reshape(-1)
        elif len(y.shape) == 2:
            self.nout_ = y.shape[1]
        else:
            raise NotImplementedError("y shape not supported!")
        return X, y, Xresampled, weights, variable_names
    def _pre_transform_training_data(
        self, X, y, Xresampled, variable_names, random_state
    ):
        """
        Transform the training data before fitting the symbolic regressor.
        This method also updates/sets the `selection_mask_` attribute.
        Parameters
        ----------
        X : ndarray | pandas.DataFrame
            Training data of shape (n_samples, n_features).
        y : ndarray | pandas.DataFrame
            Target values of shape (n_samples,) or (n_samples, n_targets).
            Will be cast to X's dtype if necessary.
        Xresampled : ndarray | pandas.DataFrame
            Resampled training data, of shape `(n_resampled, n_features)`,
            used for denoising.
        variable_names : list[str]
            Names of each variable in the training dataset, `X`.
            Of length `n_features`.
        random_state : int | np.RandomState
            Pass an int for reproducible results across multiple function calls.
            See :term:`Glossary <random_state>`. Default is `None`.
        Returns
        -------
        X_transformed : ndarray of shape (n_samples, n_features)
            Transformed training data. n_samples will be equal to
            `Xresampled.shape[0]` if `self.denoise` is `True`,
            and `Xresampled is not None`, otherwise it will be
            equal to `X.shape[0]`. n_features will be equal to
            `self.select_k_features` if `self.select_k_features is not None`,
            otherwise it will be equal to `X.shape[1]`
        y_transformed : ndarray of shape (n_samples,) or (n_samples, n_outputs)
            Transformed target data. n_samples will be equal to
            `Xresampled.shape[0]` if `self.denoise` is `True`,
            and `Xresampled is not None`, otherwise it will be
            equal to `X.shape[0]`.
        variable_names_transformed : list[str] of length n_features
            Names of each variable in the transformed dataset,
            `X_transformed`.
        """
        # Feature selection transformation
        if self.select_k_features:
            self.selection_mask_ = run_feature_selection(
                X, y, self.select_k_features, random_state=random_state
            )
            X = X[:, self.selection_mask_]
            if Xresampled is not None:
                Xresampled = Xresampled[:, self.selection_mask_]
            # NOTE(review): the list comprehension below treats
            # `selection_mask_` as integer positions; if
            # `run_feature_selection` returned a boolean mask instead
            # (as `from_file` constructs), this would misindex — confirm
            # the mask's dtype.
            # Reduce variable_names to selection
            variable_names = [variable_names[i] for i in self.selection_mask_]
            # Re-perform data validation and feature name updating
            X, y = self._validate_data(X=X, y=y, reset=True, multi_output=True)
            # Update feature names with selected variable names
            self.feature_names_in_ = _check_feature_names_in(self, variable_names)
            print(f"Using features {self.feature_names_in_}")
        # Denoising transformation
        if self.denoise:
            if self.nout_ > 1:
                # Denoise each output column independently, then restack.
                y = np.stack(
                    [
                        _denoise(
                            X, y[:, i], Xresampled=Xresampled, random_state=random_state
                        )[1]
                        for i in range(self.nout_)
                    ],
                    axis=1,
                )
                if Xresampled is not None:
                    X = Xresampled
            else:
                X, y = _denoise(X, y, Xresampled=Xresampled, random_state=random_state)
        return X, y, variable_names
    def _run(self, X, y, mutated_params, weights, seed):
        """
        Run the symbolic regression fitting process on the julia backend.
        Parameters
        ----------
        X : ndarray | pandas.DataFrame
            Training data of shape `(n_samples, n_features)`.
        y : ndarray | pandas.DataFrame
            Target values of shape `(n_samples,)` or `(n_samples, n_targets)`.
            Will be cast to `X`'s dtype if necessary.
        mutated_params : dict[str, Any]
            Dictionary of mutated versions of some parameters passed in __init__.
        weights : ndarray | pandas.DataFrame
            Weight array of the same shape as `y`.
            Each element is how to weight the mean-square-error loss
            for that particular element of y.
        seed : int
            Random seed for julia backend process.
        Returns
        -------
        self : object
            Reference to `self` with fitted attributes.
        Raises
        ------
        ImportError
            Raised when the julia backend fails to import a package.
        """
        # Need to be global as we don't want to recreate/reinstate julia for
        # every new instance of PySRRegressor
        global already_ran
        global Main
        # These are the parameters which may be modified from the ones
        # specified in init, so we define them here locally:
        binary_operators = mutated_params["binary_operators"]
        unary_operators = mutated_params["unary_operators"]
        maxdepth = mutated_params["maxdepth"]
        constraints = mutated_params["constraints"]
        nested_constraints = self.nested_constraints
        complexity_of_operators = self.complexity_of_operators
        multithreading = mutated_params["multithreading"]
        cluster_manager = self.cluster_manager
        batch_size = mutated_params["batch_size"]
        update_verbosity = mutated_params["update_verbosity"]
        progress = mutated_params["progress"]
        julia_kwargs = mutated_params["julia_kwargs"]
        # Start julia backend processes
        if not already_ran and update_verbosity != 0:
            print("Compiling Julia backend...")
        Main = init_julia(self.julia_project, julia_kwargs=julia_kwargs)
        if cluster_manager is not None:
            cluster_manager = _load_cluster_manager(Main, cluster_manager)
        if self.update:
            _, is_shared = _process_julia_project(self.julia_project)
            io = "devnull" if update_verbosity == 0 else "stderr"
            io_arg = (
                f"io={io}" if is_julia_version_greater_eq(version=(1, 6, 0)) else ""
            )
            _update_julia_project(Main, is_shared, io_arg)
        SymbolicRegression = _load_backend(Main)
        # Expose basic arithmetic as named Julia functions for operator use.
        Main.plus = Main.eval("(+)")
        Main.sub = Main.eval("(-)")
        Main.mult = Main.eval("(*)")
        Main.pow = Main.eval("(^)")
        Main.div = Main.eval("(/)")
        # TODO(mcranmer): These functions should be part of this class.
        binary_operators, unary_operators = _maybe_create_inline_operators(
            binary_operators=binary_operators,
            unary_operators=unary_operators,
            extra_sympy_mappings=self.extra_sympy_mappings,
        )
        constraints = _process_constraints(
            binary_operators=binary_operators,
            unary_operators=unary_operators,
            constraints=constraints,
        )
        una_constraints = [constraints[op] for op in unary_operators]
        bin_constraints = [constraints[op] for op in binary_operators]
        # Parse dict into Julia Dict for nested constraints::
        if nested_constraints is not None:
            nested_constraints_str = "Dict("
            for outer_k, outer_v in nested_constraints.items():
                nested_constraints_str += f"({outer_k}) => Dict("
                for inner_k, inner_v in outer_v.items():
                    nested_constraints_str += f"({inner_k}) => {inner_v}, "
                nested_constraints_str += "), "
            nested_constraints_str += ")"
            nested_constraints = Main.eval(nested_constraints_str)
        # Parse dict into Julia Dict for complexities:
        if complexity_of_operators is not None:
            complexity_of_operators_str = "Dict("
            for k, v in complexity_of_operators.items():
                complexity_of_operators_str += f"({k}) => {v}, "
            complexity_of_operators_str += ")"
            complexity_of_operators = Main.eval(complexity_of_operators_str)
        # User-supplied loss/objective strings are Julia source code.
        custom_loss = Main.eval(self.loss)
        custom_full_objective = Main.eval(self.full_objective)
        early_stop_condition = Main.eval(
            str(self.early_stop_condition) if self.early_stop_condition else None
        )
        mutation_weights = SymbolicRegression.MutationWeights(
            mutate_constant=self.weight_mutate_constant,
            mutate_operator=self.weight_mutate_operator,
            add_node=self.weight_add_node,
            insert_node=self.weight_insert_node,
            delete_node=self.weight_delete_node,
            simplify=self.weight_simplify,
            randomize=self.weight_randomize,
            do_nothing=self.weight_do_nothing,
            optimize=self.weight_optimize,
        )
        # Call to Julia backend.
        # See https://github.com/MilesCranmer/SymbolicRegression.jl/blob/master/src/OptionsStruct.jl
        options = SymbolicRegression.Options(
            binary_operators=Main.eval(str(binary_operators).replace("'", "")),
            unary_operators=Main.eval(str(unary_operators).replace("'", "")),
            bin_constraints=bin_constraints,
            una_constraints=una_constraints,
            complexity_of_operators=complexity_of_operators,
            complexity_of_constants=self.complexity_of_constants,
            complexity_of_variables=self.complexity_of_variables,
            nested_constraints=nested_constraints,
            elementwise_loss=custom_loss,
            loss_function=custom_full_objective,
            maxsize=int(self.maxsize),
            output_file=_escape_filename(self.equation_file_),
            npopulations=int(self.populations),
            batching=self.batching,
            batch_size=int(min([batch_size, len(X)]) if self.batching else len(X)),
            mutation_weights=mutation_weights,
            tournament_selection_p=self.tournament_selection_p,
            tournament_selection_n=self.tournament_selection_n,
            # These have the same name:
            parsimony=self.parsimony,
            alpha=self.alpha,
            maxdepth=maxdepth,
            fast_cycle=self.fast_cycle,
            turbo=self.turbo,
            enable_autodiff=self.enable_autodiff,
            migration=self.migration,
            hof_migration=self.hof_migration,
            fraction_replaced_hof=self.fraction_replaced_hof,
            should_simplify=self.should_simplify,
            should_optimize_constants=self.should_optimize_constants,
            warmup_maxsize_by=self.warmup_maxsize_by,
            use_frequency=self.use_frequency,
            use_frequency_in_tournament=self.use_frequency_in_tournament,
            adaptive_parsimony_scaling=self.adaptive_parsimony_scaling,
            npop=self.population_size,
            ncycles_per_iteration=self.ncyclesperiteration,
            fraction_replaced=self.fraction_replaced,
            topn=self.topn,
            verbosity=self.verbosity,
            optimizer_algorithm=self.optimizer_algorithm,
            optimizer_nrestarts=self.optimizer_nrestarts,
            optimizer_probability=self.optimize_probability,
            optimizer_iterations=self.optimizer_iterations,
            perturbation_factor=self.perturbation_factor,
            annealing=self.annealing,
            progress=progress,
            timeout_in_seconds=self.timeout_in_seconds,
            crossover_probability=self.crossover_probability,
            skip_mutation_failures=self.skip_mutation_failures,
            max_evals=self.max_evals,
            early_stop_condition=early_stop_condition,
            seed=seed,
            deterministic=self.deterministic,
            define_helper_functions=False,
        )
        # Convert data to desired precision
        test_X = np.array(X)
        is_complex = np.issubdtype(test_X.dtype, np.complexfloating)
        is_real = not is_complex
        if is_real:
            np_dtype = {16: np.float16, 32: np.float32, 64: np.float64}[self.precision]
        else:
            np_dtype = {32: np.complex64, 64: np.complex128}[self.precision]
        # This converts the data into a Julia array:
        # Transposed so rows are features — presumably the backend's
        # expected layout; confirm against SymbolicRegression.jl docs.
        Main.X = np.array(X, dtype=np_dtype).T
        if len(y.shape) == 1:
            Main.y = np.array(y, dtype=np_dtype)
        else:
            Main.y = np.array(y, dtype=np_dtype).T
        if weights is not None:
            if len(weights.shape) == 1:
                Main.weights = np.array(weights, dtype=np_dtype)
            else:
                Main.weights = np.array(weights, dtype=np_dtype).T
        else:
            Main.weights = None
        if self.procs == 0 and not multithreading:
            parallelism = "serial"
        elif multithreading:
            parallelism = "multithreading"
        else:
            parallelism = "multiprocessing"
        cprocs = (
            None if parallelism in ["serial", "multithreading"] else int(self.procs)
        )
        # Call to Julia backend.
        # See https://github.com/MilesCranmer/SymbolicRegression.jl/blob/master/src/SymbolicRegression.jl
        self.raw_julia_state_ = SymbolicRegression.equation_search(
            Main.X,
            Main.y,
            weights=Main.weights,
            niterations=int(self.niterations),
            variable_names=self.feature_names_in_.tolist(),
            options=options,
            numprocs=cprocs,
            parallelism=parallelism,
            saved_state=self.raw_julia_state_,
            return_state=True,
            addprocs_function=cluster_manager,
        )
        # Set attributes
        self.equations_ = self.get_hof()
        if self.delete_tempfiles:
            shutil.rmtree(self.tempdir_)
        already_ran = True
        return self
def fit(
    self,
    X,
    y,
    Xresampled=None,
    weights=None,
    variable_names=None,
):
    """
    Search for equations to fit the dataset and store them in `self.equations_`.

    Parameters
    ----------
    X : ndarray | pandas.DataFrame
        Training data of shape (n_samples, n_features).
    y : ndarray | pandas.DataFrame
        Target values of shape (n_samples,) or (n_samples, n_targets).
        Will be cast to X's dtype if necessary.
    Xresampled : ndarray | pandas.DataFrame
        Resampled training data, of shape (n_resampled, n_features),
        to generate a denoised data on. This
        will be used as the training data, rather than `X`.
    weights : ndarray | pandas.DataFrame
        Weight array of the same shape as `y`.
        Each element is how to weight the mean-square-error loss
        for that particular element of `y`. Alternatively,
        if a custom `loss` was set, it will can be used
        in arbitrary ways.
    variable_names : list[str]
        A list of names for the variables, rather than "x0", "x1", etc.
        If `X` is a pandas dataframe, the column names will be used
        instead of `variable_names`. Cannot contain spaces or special
        characters. Avoid variable names which are also
        function names in `sympy`, such as "N".

    Returns
    -------
    self : object
        Fitted estimator.
    """
    # Init attributes that are not specified in BaseEstimator
    if self.warm_start and hasattr(self, "raw_julia_state_"):
        # Warm start: keep the Julia search state from a previous fit.
        pass
    else:
        if hasattr(self, "raw_julia_state_"):
            warnings.warn(
                "The discovered expressions are being reset. "
                "Please set `warm_start=True` if you wish to continue "
                "to start a search where you left off.",
            )
        # Cold start: discard any state left over from a previous fit.
        self.equations_ = None
        self.nout_ = 1
        self.selection_mask_ = None
        self.raw_julia_state_ = None

    random_state = check_random_state(self.random_state)  # For np random
    seed = random_state.get_state()[1][0]  # For julia random

    self._setup_equation_file()

    mutated_params = self._validate_and_set_init_params()

    X, y, Xresampled, weights, variable_names = self._validate_and_set_fit_params(
        X, y, Xresampled, weights, variable_names
    )

    if X.shape[0] > 10000 and not self.batching:
        warnings.warn(
            "Note: you are running with more than 10,000 datapoints. "
            "You should consider turning on batching (https://astroautomata.com/PySR/options/#batching). "
            "You should also reconsider if you need that many datapoints. "
            "Unless you have a large amount of noise (in which case you "
            "should smooth your dataset first), generally < 10,000 datapoints "
            "is enough to find a functional form with symbolic regression. "
            "More datapoints will lower the search speed."
        )

    # Pre transformations (feature selection and denoising)
    X, y, variable_names = self._pre_transform_training_data(
        X, y, Xresampled, variable_names, random_state
    )

    # Warn about large feature counts (still warn if feature count is large
    # after running feature selection)
    if self.n_features_in_ >= 10:
        warnings.warn(
            "Note: you are running with 10 features or more. "
            "Genetic algorithms like used in PySR scale poorly with large numbers of features. "
            "You should run PySR for more `niterations` to ensure it can find "
            "the correct variables, "
            "or, alternatively, do a dimensionality reduction beforehand. "
            "For example, `X = PCA(n_components=6).fit_transform(X)`, "
            "using scikit-learn's `PCA` class, "
            "will reduce the number of features to 6 in an interpretable way, "
            "as each resultant feature "
            "will be a linear combination of the original features. "
        )

    # Assertion checks
    use_custom_variable_names = variable_names is not None
    # TODO: this is always true.

    _check_assertions(
        X,
        use_custom_variable_names,
        variable_names,
        weights,
        y,
    )

    # Initially, just save model parameters, so that
    # it can be loaded from an early exit:
    if not self.temp_equation_file:
        self._checkpoint()

    # Perform the search:
    self._run(X, y, mutated_params, weights=weights, seed=seed)

    # Then, after fit, we save again, so the pickle file contains
    # the equations:
    if not self.temp_equation_file:
        self._checkpoint()

    return self
def refresh(self, checkpoint_file=None):
    """
    Update self.equations_ with any new options passed.

    For example, updating `extra_sympy_mappings`
    will require a `.refresh()` to update the equations.

    Parameters
    ----------
    checkpoint_file : str
        Path to checkpoint hall of fame file to be loaded.
        The default will use the set `equation_file_`.
    """
    if checkpoint_file:
        # Point at the new file and drop the cached contents so that
        # get_hof() re-reads from disk.
        self.equation_file_ = checkpoint_file
        self.equation_file_contents_ = None
    check_is_fitted(self, attributes=["equation_file_"])
    self.equations_ = self.get_hof()
def predict(self, X, index=None):
    """
    Predict y from input X using the equation chosen by `model_selection`.

    You may see what equation is used by printing this object. X should
    have the same columns as the training data.

    Parameters
    ----------
    X : ndarray | pandas.DataFrame
        Training data of shape `(n_samples, n_features)`.
    index : int | list[int]
        If you want to compute the output of an expression using a
        particular row of `self.equations_`, you may specify the index here.
        For multiple output equations, you must pass a list of indices
        in the same order.

    Returns
    -------
    y_predicted : ndarray of shape (n_samples, nout_)
        Values predicted by substituting `X` into the fitted symbolic
        regression model.

    Raises
    ------
    ValueError
        Raises if the `best_equation` cannot be evaluated.
    """
    check_is_fitted(
        self, attributes=["selection_mask_", "feature_names_in_", "nout_"]
    )
    best_equation = self.get_best(index=index)

    # When X is an numpy array or a pandas dataframe with a RangeIndex,
    # the self.feature_names_in_ generated during fit, for the same X,
    # will cause a warning to be thrown during _validate_data.
    # To avoid this, convert X to a dataframe, apply the selection mask,
    # and then set the column/feature_names of X to be equal to those
    # generated during fit.
    if not isinstance(X, pd.DataFrame):
        X = check_array(X)
        X = pd.DataFrame(X)
    if isinstance(X.columns, pd.RangeIndex):
        if self.selection_mask_ is not None:
            # RangeIndex enforces column order allowing columns to
            # be correctly filtered with self.selection_mask_
            X = X.iloc[:, self.selection_mask_]
        X.columns = self.feature_names_in_
    # Without feature information, CallableEquation/lambda_format equations
    # require that the column order of X matches that of the X used during
    # the fitting process. _validate_data removes this feature information
    # when it converts the dataframe to an np array. Thus, to ensure feature
    # order is preserved after conversion, the dataframe columns must be
    # reordered/reindexed to match those of the transformed (denoised and
    # feature selected) X in fit.
    X = X.reindex(columns=self.feature_names_in_)
    X = self._validate_data(X, reset=False)

    try:
        if self.nout_ > 1:
            # One fitted expression per output column; stack into (n, nout_).
            return np.stack(
                [eq["lambda_format"](X) for eq in best_equation], axis=1
            )
        return best_equation["lambda_format"](X)
    except Exception as error:
        # Typically a custom operator whose sympy mapping is missing.
        raise ValueError(
            "Failed to evaluate the expression. "
            "If you are using a custom operator, make sure to define it in `extra_sympy_mappings`, "
            "e.g., `model.set_params(extra_sympy_mappings={'inv': lambda x: 1/x})`, where "
            "`lambda x: 1/x` is a valid SymPy function defining the operator. "
            "You can then run `model.refresh()` to re-load the expressions."
        ) from error
def sympy(self, index=None):
    """
    Return sympy representation of the equation(s) chosen by `model_selection`.

    Parameters
    ----------
    index : int | list[int]
        Index of the equation to use instead of the one picked by
        `model_selection`. For multi-output models, pass one index per
        output feature, in output order.

    Returns
    -------
    best_equation : str, list[str] of length nout_
        SymPy representation of the best equation.
    """
    # Re-read the hall of fame so any updated mappings take effect.
    self.refresh()
    chosen = self.get_best(index=index)
    if self.nout_ > 1:
        return [eq["sympy_format"] for eq in chosen]
    return chosen["sympy_format"]
def latex(self, index=None, precision=3):
    """
    Return latex representation of the equation(s) chosen by `model_selection`.

    Parameters
    ----------
    index : int | list[int]
        Index of the equation to use instead of the one picked by
        `model_selection`. For multi-output models, pass one index per
        output feature, in output order.
    precision : int
        The number of significant figures shown in the LaTeX
        representation.
        Default is `3`.

    Returns
    -------
    best_equation : str or list[str] of length nout_
        LaTeX expression of the best equation.
    """
    self.refresh()
    sympy_repr = self.sympy(index=index)
    if self.nout_ > 1:
        # Render one LaTeX string per output feature.
        return [to_latex(expr, prec=precision) for expr in sympy_repr]
    return to_latex(sympy_repr, prec=precision)
def jax(self, index=None):
    """
    Return jax representation of the equation(s) chosen by `model_selection`.

    Each equation (multiple given if there are multiple outputs) is a dictionary
    containing {"callable": func, "parameters": params}. To call `func`, pass
    func(X, params). This function is differentiable using `jax.grad`.

    Parameters
    ----------
    index : int | list[int]
        Index of the equation to use instead of the one picked by
        `model_selection`. For multi-output models, pass one index per
        output feature, in output order.

    Returns
    -------
    best_equation : dict[str, Any]
        Dictionary of callable jax function in "callable" key,
        and jax array of parameters as "parameters" key.
    """
    # Make sure jax formats are generated when the hall of fame is read.
    self.set_params(output_jax_format=True)
    self.refresh()
    chosen = self.get_best(index=index)
    if self.nout_ > 1:
        return [eq["jax_format"] for eq in chosen]
    return chosen["jax_format"]
def pytorch(self, index=None):
    """
    Return pytorch representation of the equation(s) chosen by `model_selection`.

    Each equation (multiple given if there are multiple outputs) is a PyTorch module
    containing the parameters as trainable attributes. You can use the module like
    any other PyTorch module: `module(X)`, where `X` is a tensor with the same
    column ordering as trained with.

    Parameters
    ----------
    index : int | list[int]
        Index of the equation to use instead of the one picked by
        `model_selection`. For multi-output models, pass one index per
        output feature, in output order.

    Returns
    -------
    best_equation : torch.nn.Module
        PyTorch module representing the expression.
    """
    # Make sure torch formats are generated when the hall of fame is read.
    self.set_params(output_torch_format=True)
    self.refresh()
    chosen = self.get_best(index=index)
    if self.nout_ > 1:
        return [eq["torch_format"] for eq in chosen]
    return chosen["torch_format"]
def _read_equation_file(self):
    """Read the hall of fame file created by `SymbolicRegression.jl`.

    Returns
    -------
    all_outputs : list[pandas.DataFrame]
        One dataframe per output feature, with the backend's column
        names normalized to `complexity`, `loss`, and `equation`.

    Raises
    ------
    RuntimeError
        If no equation file exists (the search likely exited before a
        single iteration completed).
    """
    # Julia writes `Complexity`/`Loss`/`Equation`; normalize once here so
    # downstream code can rely on lowercase names.
    column_renames = {
        "Complexity": "complexity",
        "Loss": "loss",
        "Equation": "equation",
    }

    def _read_one(filename):
        # Prefer the `.bkup` file when present: it is the snapshot the
        # backend writes, and falls back to the plain file otherwise.
        backup = filename + ".bkup"
        with open(backup if os.path.exists(backup) else filename, "r") as f:
            buf = f.read()
        # Julia float/complex literals (e.g. `1.5f-3`, `2.0im`) are not
        # valid for pandas/sympy, so translate them to Python syntax.
        buf = _preprocess_julia_floats(buf)
        df = pd.read_csv(StringIO(buf))
        df.rename(columns=column_renames, inplace=True)
        return df

    try:
        if self.nout_ > 1:
            # One hall-of-fame file per output: `<file>.out1`, `.out2`, ...
            all_outputs = [
                _read_one(str(self.equation_file_) + f".out{i}")
                for i in range(1, self.nout_ + 1)
            ]
        else:
            all_outputs = [_read_one(str(self.equation_file_))]
    except FileNotFoundError as e:
        raise RuntimeError(
            "Couldn't find equation file! The equation search likely exited "
            "before a single iteration completed."
        ) from e
    return all_outputs
def get_hof(self):
    """Get the equations from a hall of fame file.

    If no arguments entered, the ones used
    previously from a call to PySR will be used.
    """
    check_is_fitted(
        self,
        attributes=[
            "nout_",
            "equation_file_",
            "selection_mask_",
            "feature_names_in_",
        ],
    )
    # Only hit the filesystem when the cache is absent or was
    # invalidated (e.g. by refresh()).
    if (
        not hasattr(self, "equation_file_contents_")
    ) or self.equation_file_contents_ is None:
        self.equation_file_contents_ = self._read_equation_file()

    # It is expected extra_jax/torch_mappings will be updated after fit.
    # Thus, validation is performed here instead of in _validate_init_params
    extra_jax_mappings = self.extra_jax_mappings
    extra_torch_mappings = self.extra_torch_mappings
    if extra_jax_mappings is not None:
        for value in extra_jax_mappings.values():
            if not isinstance(value, str):
                raise ValueError(
                    "extra_jax_mappings must have keys that are strings! "
                    "e.g., {sympy.sqrt: 'jnp.sqrt'}."
                )
    else:
        extra_jax_mappings = {}
    if extra_torch_mappings is not None:
        for value in extra_torch_mappings.values():
            if not callable(value):
                raise ValueError(
                    "extra_torch_mappings must be callable functions! "
                    "e.g., {sympy.sqrt: torch.sqrt}."
                )
    else:
        extra_torch_mappings = {}

    ret_outputs = []

    # Work on a deep copy so repeated calls never mutate the cached file
    # contents.
    equation_file_contents = copy.deepcopy(self.equation_file_contents_)

    for output in equation_file_contents:
        scores = []
        lastMSE = None
        lastComplexity = 0
        sympy_format = []
        lambda_format = []
        if self.output_jax_format:
            jax_format = []
        if self.output_torch_format:
            torch_format = []

        # Merge user mappings with the module defaults; note that
        # `sympy_mappings` is unpacked last, so it wins on duplicate keys.
        local_sympy_mappings = {
            **(self.extra_sympy_mappings if self.extra_sympy_mappings else {}),
            **sympy_mappings,
        }

        sympy_symbols = [
            sympy.Symbol(variable) for variable in self.feature_names_in_
        ]

        for _, eqn_row in output.iterrows():
            eqn = sympify(eqn_row["equation"], locals=local_sympy_mappings)
            sympy_format.append(eqn)

            # Numpy:
            lambda_format.append(
                CallableEquation(
                    sympy_symbols, eqn, self.selection_mask_, self.feature_names_in_
                )
            )

            # JAX:
            if self.output_jax_format:
                from .export_jax import sympy2jax

                func, params = sympy2jax(
                    eqn,
                    sympy_symbols,
                    selection=self.selection_mask_,
                    extra_jax_mappings=(
                        self.extra_jax_mappings if self.extra_jax_mappings else {}
                    ),
                )
                jax_format.append({"callable": func, "parameters": params})

            # Torch:
            if self.output_torch_format:
                from .export_torch import sympy2torch

                module = sympy2torch(
                    eqn,
                    sympy_symbols,
                    selection=self.selection_mask_,
                    extra_torch_mappings=(
                        self.extra_torch_mappings
                        if self.extra_torch_mappings
                        else {}
                    ),
                )
                torch_format.append(module)

            curMSE = eqn_row["loss"]
            curComplexity = eqn_row["complexity"]

            if lastMSE is None:
                # First (simplest) equation has no predecessor to compare to.
                cur_score = 0.0
            else:
                if curMSE > 0.0:
                    # Score is the negative log-ratio of successive losses
                    # per unit of added complexity.
                    # TODO Move this to more obvious function/file.
                    cur_score = -np.log(curMSE / lastMSE) / (
                        curComplexity - lastComplexity
                    )
                else:
                    # Perfect fit: infinitely good improvement.
                    cur_score = np.inf

            scores.append(cur_score)
            lastMSE = curMSE
            lastComplexity = curComplexity

        output["score"] = np.array(scores)
        output["sympy_format"] = sympy_format
        output["lambda_format"] = lambda_format
        output_cols = [
            "complexity",
            "loss",
            "score",
            "equation",
            "sympy_format",
            "lambda_format",
        ]
        if self.output_jax_format:
            output_cols += ["jax_format"]
            output["jax_format"] = jax_format
        if self.output_torch_format:
            output_cols += ["torch_format"]
            output["torch_format"] = torch_format

        ret_outputs.append(output[output_cols])

    # Multi-output models return one dataframe per output feature.
    if self.nout_ > 1:
        return ret_outputs

    return ret_outputs[0]
def latex_table(
    self,
    indices=None,
    precision=3,
    columns=["equation", "complexity", "loss", "score"],
):
    """Create a LaTeX/booktabs table for all, or some, of the equations.

    Parameters
    ----------
    indices : list[int] | list[list[int]]
        Row numbers of the equations to include; by default all are
        used. For multi-output models, pass one list of row numbers
        per output feature.
    precision : int
        The number of significant figures shown in the LaTeX
        representations.
        Default is `3`.
    columns : list[str]
        Which columns to include in the table.
        Default is `["equation", "complexity", "loss", "score"]`.

    Returns
    -------
    latex_table_str : str
        A string that will render a table in LaTeX of the equations.
    """
    self.refresh()

    multi_output = self.nout_ > 1
    if multi_output:
        if indices is not None:
            assert isinstance(indices, list)
            assert isinstance(indices[0], list)
            assert len(indices) == self.nout_
        generator_fnc = generate_multiple_tables
    else:
        if indices is not None:
            assert isinstance(indices, list)
            assert isinstance(indices[0], int)
        generator_fnc = generate_single_table

    table_string = generator_fnc(
        self.equations_, indices=indices, precision=precision, columns=columns
    )

    # Packages required to render the generated table.
    preamble_string = [
        r"\usepackage{breqn}",
        r"\usepackage{booktabs}",
        "",
        "...",
        "",
    ]
    return "\n".join(preamble_string + [table_string])
def idx_model_selection(equations: pd.DataFrame, model_selection: str) -> int:
    """Select an expression and return its index."""
    if model_selection == "accuracy":
        # Lowest loss, regardless of complexity.
        return equations["loss"].idxmin()
    if model_selection == "best":
        # Highest score among equations whose loss is within 1.5x of the
        # minimum loss.
        threshold = 1.5 * equations["loss"].min()
        candidates = equations[equations["loss"] <= threshold]
        return candidates["score"].idxmax()
    if model_selection == "score":
        return equations["score"].idxmax()
    raise NotImplementedError(
        f"{model_selection} is not a valid model selection strategy."
    )
def _denoise(X, y, Xresampled=None, random_state=None):
    """Denoise the dataset using a Gaussian process."""
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel

    # RBF over every feature + a white-noise term + a constant offset.
    kernel = RBF(np.ones(X.shape[1])) + WhiteKernel(1e-1) + ConstantKernel()
    regressor = GaussianProcessRegressor(
        kernel=kernel, n_restarts_optimizer=50, random_state=random_state
    )
    regressor.fit(X, y)

    # Predict on the resampled points if given; otherwise smooth y in place.
    eval_X = X if Xresampled is None else Xresampled
    return eval_X, regressor.predict(eval_X)
# Function has not been removed only due to usage in module tests
def _handle_feature_selection(X, select_k_features, y, variable_names):
    """Optionally reduce X to its `select_k_features` most important columns.

    Returns the (possibly column-filtered) X and the selected indices,
    or (X, None) when no selection was requested.
    """
    if select_k_features is None:
        return X, None
    selection = run_feature_selection(X, y, select_k_features)
    print(f"Using features {[variable_names[i] for i in selection]}")
    return X[:, selection], selection
def run_feature_selection(X, y, select_k_features, random_state=None):
    """
    Find most important features.

    Uses a random forest regressor as a proxy for finding
    the k most important features in X, returning indices for those
    features as output.

    Parameters
    ----------
    X : ndarray
        Training data of shape (n_samples, n_features).
    y : ndarray
        Target values.
    select_k_features : int
        Number of features to keep.
    random_state : int | RandomState
        Seed for the forest, forwarded to scikit-learn.

    Returns
    -------
    ndarray
        Indices of the selected features.
    """
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.feature_selection import SelectFromModel

    # Shallow trees keep the importance estimate cheap and robust.
    clf = RandomForestRegressor(
        n_estimators=100, max_depth=3, random_state=random_state
    )
    clf.fit(X, y)
    # threshold=-inf disables thresholding; max_features alone decides
    # how many features survive.
    selector = SelectFromModel(
        clf, threshold=-np.inf, max_features=select_k_features, prefit=True
    )
    return selector.get_support(indices=True)
def _csv_filename_to_pkl_filename(csv_filename) -> str:
# Assume that the csv filename is of the form "foo.csv"
assert str(csv_filename).endswith(".csv")
dirname = str(os.path.dirname(csv_filename))
basename = str(os.path.basename(csv_filename))
base = str(os.path.splitext(basename)[0])
pkl_basename = base + ".pkl"
return os.path.join(dirname, pkl_basename)
_regexp_im = re.compile(r"\b(\d+\.\d+)im\b")
_regexp_im_sci = re.compile(r"\b(\d+\.\d+)[eEfF]([+-]?\d+)im\b")
_regexp_sci = re.compile(r"\b(\d+\.\d+)[eEfF]([+-]?\d+)\b")
_apply_regexp_im = lambda x: _regexp_im.sub(r"\1j", x)
_apply_regexp_im_sci = lambda x: _regexp_im_sci.sub(r"\1e\2j", x)
_apply_regexp_sci = lambda x: _regexp_sci.sub(r"\1e\2", x)
def _preprocess_julia_floats(s: str) -> str:
    """Translate Julia float/complex literals inside *s* into Python syntax.

    Non-string inputs are returned unchanged.
    """
    if not isinstance(s, str):
        return s
    # Order matters: the `...im` forms must be rewritten before the plain
    # scientific form, which would otherwise consume the exponent.
    for translate in (_apply_regexp_im, _apply_regexp_im_sci, _apply_regexp_sci):
        s = translate(s)
    return s
| 98,031 | 39.643449 | 112 | py |
PySR | PySR-master/pysr/julia_helpers.py | """Functions for initializing the Julia environment and installing deps."""
import sys
import subprocess
import warnings
from pathlib import Path
import os
from julia.api import JuliaError
from .version import __version__, __symbolic_regression_jl_version__
# Cached result of probing the local Julia installation (see _load_juliainfo).
juliainfo = None
# Whether the embedded Julia runtime has been started in this process.
julia_initialized = False
# kwargs used for the first Julia() initialization; later calls with
# different kwargs only produce a warning, since Julia can start once.
julia_kwargs_at_initialization = None
# The Julia project most recently activated via Pkg.activate.
julia_activated_env = None
def _load_juliainfo():
    """Execute julia.core.JuliaInfo.load(), and store as juliainfo."""
    global juliainfo

    if juliainfo is not None:
        # The installation was already probed; reuse the cached result.
        return juliainfo

    from julia.core import JuliaInfo

    try:
        juliainfo = JuliaInfo.load(julia="julia")
    except FileNotFoundError:
        env_path = os.environ["PATH"]
        raise FileNotFoundError(
            f"Julia is not installed in your PATH. Please install Julia and add it to your PATH.\n\nCurrent PATH: {env_path}",
        )
    return juliainfo
def _get_julia_env_dir():
    """Return the directory of shared Julia environments (`Pkg.envdir()`).

    Raises
    ------
    FileNotFoundError
        If the `julia` executable cannot be found in PATH.
    """
    # Have to manually get env dir:
    try:
        # The flag and the expression must be separate argv entries:
        # subprocess performs no shell-style word splitting, and this
        # matches how `install()` invokes `julia -e` elsewhere in this file.
        julia_env_dir_str = subprocess.run(
            ["julia", "-e", "using Pkg; print(Pkg.envdir())"],
            capture_output=True,
            env=os.environ,
        ).stdout.decode()
    except FileNotFoundError:
        env_path = os.environ["PATH"]
        raise FileNotFoundError(
            f"Julia is not installed in your PATH. Please install Julia and add it to your PATH.\n\nCurrent PATH: {env_path}",
        )
    return Path(julia_env_dir_str)
def _set_julia_project_env(julia_project, is_shared):
    """Export JULIA_PROJECT so child Julia processes use *julia_project*."""
    if not is_shared:
        # Local project: just point at the directory.
        os.environ["JULIA_PROJECT"] = str(julia_project)
        return
    if is_julia_version_greater_eq(version=(1, 7, 0)):
        # Julia >= 1.7 understands the "@name" shared-environment syntax.
        os.environ["JULIA_PROJECT"] = "@" + str(julia_project)
    else:
        # Older Julia needs the full path to the shared environment.
        julia_env_dir = _get_julia_env_dir()
        os.environ["JULIA_PROJECT"] = str(julia_env_dir / julia_project)
def _get_io_arg(quiet):
    """Build the `io=...` keyword for Pkg calls (empty on Julia < 1.6)."""
    if not is_julia_version_greater_eq(version=(1, 6, 0)):
        # Pkg on old Julia versions does not accept an `io` keyword.
        return ""
    return "io=devnull" if quiet else "io=stderr"
def install(julia_project=None, quiet=False, precompile=None):  # pragma: no cover
    """
    Install PyCall.jl and all required dependencies for SymbolicRegression.jl.

    Also updates the local Julia registry.

    Parameters
    ----------
    julia_project : str
        Path to a Julia project, or a "@"-prefixed shared environment
        name. Defaults to PySR's versioned shared environment.
    quiet : bool
        Suppress Pkg output if True.
    precompile : bool
        Force (or, if False, skip) precompilation of Julia packages.
        By default, mirrors whether compiled modules are enabled.
    """
    import julia

    _julia_version_assertion()
    # Set JULIA_PROJECT so that we install in the pysr environment
    processed_julia_project, is_shared = _process_julia_project(julia_project)
    _set_julia_project_env(processed_julia_project, is_shared)

    if precompile == False:
        os.environ["JULIA_PKG_PRECOMPILE_AUTO"] = "0"

    try:
        julia.install(quiet=quiet)
    except julia.tools.PyCallInstallError:
        # Attempt to reset PyCall.jl's build:
        subprocess.run(
            [
                "julia",
                "-e",
                f'ENV["PYTHON"] = "{sys.executable}"; import Pkg; Pkg.build("PyCall")',
            ],
        )
        # Try installing again:
        julia.install(quiet=quiet)

    Main, init_log = init_julia(julia_project, quiet=quiet, return_aux=True)
    io_arg = _get_io_arg(quiet)

    if precompile is None:
        # Follow whatever init_julia decided about compiled modules.
        precompile = init_log["compiled_modules"]

    if not precompile:
        Main.eval('ENV["JULIA_PKG_PRECOMPILE_AUTO"] = 0')

    if is_shared:
        # Install SymbolicRegression.jl:
        _add_sr_to_julia_project(Main, io_arg)

    Main.eval("using Pkg")
    Main.eval(f"Pkg.instantiate({io_arg})")

    if precompile:
        Main.eval(f"Pkg.precompile({io_arg})")

    if not quiet:
        warnings.warn(
            "It is recommended to restart Python after installing PySR's dependencies,"
            " so that the Julia environment is properly initialized."
        )
def _import_error():
    """Return the error message shown when the Julia backend is missing or unbuilt."""
    return """
    Required dependencies are not installed or built. Run the following command in your terminal:
        python3 -m pysr install
    """
def _process_julia_project(julia_project):
if julia_project is None:
is_shared = True
processed_julia_project = f"pysr-{__version__}"
elif julia_project[0] == "@":
is_shared = True
processed_julia_project = julia_project[1:]
else:
is_shared = False
processed_julia_project = Path(julia_project)
return processed_julia_project, is_shared
def is_julia_version_greater_eq(juliainfo=None, version=(1, 6, 0)):
    """Check if Julia version is greater than specified version."""
    info = juliainfo if juliainfo is not None else _load_juliainfo()
    installed = (
        info.version_major,
        info.version_minor,
        info.version_patch,
    )
    # Tuple comparison is lexicographic: (major, minor, patch).
    return installed >= version
def _check_for_conflicting_libraries(): # pragma: no cover
"""Check whether there are conflicting modules, and display warnings."""
# See https://github.com/pytorch/pytorch/issues/78829: importing
# pytorch before running `pysr.fit` causes a segfault.
torch_is_loaded = "torch" in sys.modules
if torch_is_loaded:
warnings.warn(
"`torch` was loaded before the Julia instance started. "
"This may cause a segfault when running `PySRRegressor.fit`. "
"To avoid this, please run `pysr.julia_helpers.init_julia()` *before* "
"importing `torch`. "
"For updates, see https://github.com/pytorch/pytorch/issues/78829"
)
def init_julia(julia_project=None, quiet=False, julia_kwargs=None, return_aux=False):
    """Initialize julia binary, turning off compiled modules if needed.

    Parameters
    ----------
    julia_project : str
        Julia project path or "@"-prefixed shared-environment name.
    quiet : bool
        Suppress Pkg output if True.
    julia_kwargs : dict
        Keyword arguments forwarded to `julia.core.Julia`.
        Defaults to `{"optimize": 3}`.
    return_aux : bool
        If True, also return a dict of auxiliary info (currently
        whether compiled modules are enabled).

    Returns
    -------
    Main
        The PyJulia `Main` namespace (paired with the aux dict when
        `return_aux` is True).
    """
    global julia_initialized
    global julia_kwargs_at_initialization
    global julia_activated_env

    if not julia_initialized:
        _check_for_conflicting_libraries()

    if julia_kwargs is None:
        julia_kwargs = {"optimize": 3}

    from julia.core import JuliaInfo, UnsupportedPythonError

    _julia_version_assertion()
    processed_julia_project, is_shared = _process_julia_project(julia_project)
    _set_julia_project_env(processed_julia_project, is_shared)

    try:
        info = JuliaInfo.load(julia="julia")
    except FileNotFoundError:
        env_path = os.environ["PATH"]
        raise FileNotFoundError(
            f"Julia is not installed in your PATH. Please install Julia and add it to your PATH.\n\nCurrent PATH: {env_path}",
        )

    if not info.is_pycall_built():
        raise ImportError(_import_error())

    from julia.core import Julia

    try:
        Julia(**julia_kwargs)
    except UnsupportedPythonError:
        # Static python binary, so we turn off pre-compiled modules.
        julia_kwargs = {**julia_kwargs, "compiled_modules": False}
        Julia(**julia_kwargs)
        warnings.warn(
            "Your system's Python library is static (e.g., conda), so precompilation will be turned off. For a dynamic library, try using `pyenv` and installing with `--enable-shared`: https://github.com/pyenv/pyenv/blob/master/plugins/python-build/README.md#building-with---enable-shared."
        )

    # Compiled modules are on unless explicitly disabled above.
    using_compiled_modules = (not "compiled_modules" in julia_kwargs) or julia_kwargs[
        "compiled_modules"
    ]

    from julia import Main as _Main

    Main = _Main

    if julia_activated_env is None:
        julia_activated_env = processed_julia_project

    if julia_initialized and julia_kwargs_at_initialization is not None:
        # Check if the kwargs are the same as the previous initialization
        init_set = set(julia_kwargs_at_initialization.items())
        new_set = set(julia_kwargs.items())
        set_diff = new_set - init_set
        # Remove the `compiled_modules` key, since it is not a user-specified kwarg:
        set_diff = {k: v for k, v in set_diff if k != "compiled_modules"}
        if len(set_diff) > 0:
            # Julia can only be started once per process, so new options
            # cannot take effect.
            warnings.warn(
                "Julia has already started. The new Julia options "
                + str(set_diff)
                + " will be ignored."
            )

    if julia_initialized and julia_activated_env != processed_julia_project:
        # Switch the already-running session to the requested project.
        Main.eval("using Pkg")

        io_arg = _get_io_arg(quiet)
        # Can't pass IO to Julia call as it evaluates to PyObject, so just directly
        # use Main.eval:
        Main.eval(
            f'Pkg.activate("{_escape_filename(processed_julia_project)}",'
            f"shared = Bool({int(is_shared)}), "
            f"{io_arg})"
        )

        julia_activated_env = processed_julia_project

    if not julia_initialized:
        julia_kwargs_at_initialization = julia_kwargs

    julia_initialized = True
    if return_aux:
        return Main, {"compiled_modules": using_compiled_modules}
    return Main
def _add_sr_to_julia_project(Main, io_arg):
    """Add SymbolicRegression.jl and ClusterManagers.jl to the active project.

    Both packages are pinned: SymbolicRegression to the backend version
    this PySR release expects, ClusterManagers to a fixed commit.
    """
    Main.eval("using Pkg")
    Main.sr_spec = Main.PackageSpec(
        name="SymbolicRegression",
        url="https://github.com/MilesCranmer/SymbolicRegression.jl",
        rev="v" + __symbolic_regression_jl_version__,
    )
    Main.clustermanagers_spec = Main.PackageSpec(
        name="ClusterManagers",
        url="https://github.com/JuliaParallel/ClusterManagers.jl",
        rev="14e7302f068794099344d5d93f71979aaf4fbeb3",
    )
    # Add both in a single Pkg.add call to resolve them together.
    Main.eval(f"Pkg.add([sr_spec, clustermanagers_spec], {io_arg})")
def _escape_filename(filename):
"""Turn a path into a string with correctly escaped backslashes."""
str_repr = str(filename)
str_repr = str_repr.replace("\\", "\\\\")
return str_repr
def _julia_version_assertion():
    """Raise if the installed Julia is older than 1.6.0."""
    if is_julia_version_greater_eq(version=(1, 6, 0)):
        return
    raise NotImplementedError(
        "PySR requires Julia 1.6.0 or greater. "
        "Please update your Julia installation."
    )
def _backend_version_assertion(Main):
    """Warn if the loaded SymbolicRegression.jl version differs from the pin."""
    try:
        backend_version = Main.eval("string(SymbolicRegression.PACKAGE_VERSION)")
        expected_backend_version = __symbolic_regression_jl_version__
        if backend_version != expected_backend_version:  # pragma: no cover
            warnings.warn(
                f"PySR backend (SymbolicRegression.jl) version {backend_version} "
                f"does not match expected version {expected_backend_version}. "
                "Things may break. "
                "Please update your PySR installation with "
                "`python3 -m pysr install`."
            )
    except JuliaError:  # pragma: no cover
        # Backends too old to define PACKAGE_VERSION land here.
        warnings.warn(
            "You seem to have an outdated version of SymbolicRegression.jl. "
            "Things may break. "
            "Please update your PySR installation with "
            "`python3 -m pysr install`."
        )
def _load_cluster_manager(Main, cluster_manager):
Main.eval(f"import ClusterManagers: addprocs_{cluster_manager}")
return Main.eval(f"addprocs_{cluster_manager}")
def _update_julia_project(Main, is_shared, io_arg):
    """Resolve dependencies of the active Julia project.

    For shared environments, first ensures the pinned backend packages
    are present.
    """
    try:
        if is_shared:
            _add_sr_to_julia_project(Main, io_arg)
        Main.eval("using Pkg")
        Main.eval(f"Pkg.resolve({io_arg})")
    except (JuliaError, RuntimeError) as e:
        # Pkg failures here almost always mean the backend was never installed.
        raise ImportError(_import_error()) from e
def _load_backend(Main):
    """Load SymbolicRegression.jl into the running Julia session and return it."""
    try:
        # Load namespace, so that various internal operators work:
        Main.eval("using SymbolicRegression")
    except (JuliaError, RuntimeError) as e:
        raise ImportError(_import_error()) from e

    # Warn early if the backend version does not match the pin.
    _backend_version_assertion(Main)

    # Load Julia package SymbolicRegression.jl
    from julia import SymbolicRegression

    return SymbolicRegression
| 11,396 | 32.619469 | 290 | py |
PySR | PySR-master/pysr/export_torch.py | #####
# From https://github.com/patrick-kidger/sympytorch
# Copied here to allow PySR-specific tweaks
#####
import collections as co
import functools as ft
import sympy
def _reduce(fn):
def fn_(*args):
return ft.reduce(fn, args)
return fn_
# Lazy-initialization state: torch is only imported (and the module class
# built) on the first call to _initialize_torch / sympy2torch.
torch_initialized = False
torch = None
SingleSymPyModule = None
def _initialize_torch():
global torch_initialized
global torch
global SingleSymPyModule
# Way to lazy load torch, only if this is called,
# but still allow this module to be loaded in __init__
if not torch_initialized:
import torch as _torch
torch = _torch
_global_func_lookup = {
sympy.Mul: _reduce(torch.mul),
sympy.Add: _reduce(torch.add),
sympy.div: torch.div,
sympy.Abs: torch.abs,
sympy.sign: torch.sign,
# Note: May raise error for ints.
sympy.ceiling: torch.ceil,
sympy.floor: torch.floor,
sympy.log: torch.log,
sympy.exp: torch.exp,
sympy.sqrt: torch.sqrt,
sympy.cos: torch.cos,
sympy.acos: torch.acos,
sympy.sin: torch.sin,
sympy.asin: torch.asin,
sympy.tan: torch.tan,
sympy.atan: torch.atan,
sympy.atan2: torch.atan2,
# Note: May give NaN for complex results.
sympy.cosh: torch.cosh,
sympy.acosh: torch.acosh,
sympy.sinh: torch.sinh,
sympy.asinh: torch.asinh,
sympy.tanh: torch.tanh,
sympy.atanh: torch.atanh,
sympy.Pow: torch.pow,
sympy.re: torch.real,
sympy.im: torch.imag,
sympy.arg: torch.angle,
# Note: May raise error for ints and complexes
sympy.erf: torch.erf,
sympy.loggamma: torch.lgamma,
sympy.Eq: torch.eq,
sympy.Ne: torch.ne,
sympy.StrictGreaterThan: torch.gt,
sympy.StrictLessThan: torch.lt,
sympy.LessThan: torch.le,
sympy.GreaterThan: torch.ge,
sympy.And: torch.logical_and,
sympy.Or: torch.logical_or,
sympy.Not: torch.logical_not,
sympy.Max: torch.max,
sympy.Min: torch.min,
sympy.Mod: torch.remainder,
sympy.Heaviside: torch.heaviside,
sympy.core.numbers.Half: (lambda: 0.5),
sympy.core.numbers.One: (lambda: 1.0),
}
class _Node(torch.nn.Module):
    """SympyTorch code from https://github.com/patrick-kidger/sympytorch"""

    def __init__(self, *, expr, _memodict, _func_lookup, **kwargs):
        # Recursively wrap a sympy expression tree as a torch.nn.Module.
        # `_memodict` caches already-built sub-expression nodes so that shared
        # subtrees become shared submodules; `_func_lookup` maps sympy operator
        # classes to torch callables.
        super().__init__(**kwargs)
        self._sympy_func = expr.func
        if issubclass(expr.func, sympy.Float):
            # Float constants become trainable parameters.
            self._value = torch.nn.Parameter(torch.tensor(float(expr)))
            self._torch_func = lambda: self._value
            self._args = ()
        elif issubclass(expr.func, sympy.Rational):
            # This is some fraction fixed in the operator.
            self._value = float(expr)
            self._torch_func = lambda: self._value
            self._args = ()
        elif issubclass(expr.func, sympy.UnevaluatedExpr):
            # UnevaluatedExpr marks a float that must NOT be trainable:
            # store it as a (non-parameter) buffer.
            if len(expr.args) != 1 or not issubclass(
                expr.args[0].func, sympy.Float
            ):
                raise ValueError(
                    "UnevaluatedExpr should only be used to wrap floats."
                )
            self.register_buffer("_value", torch.tensor(float(expr.args[0])))
            self._torch_func = lambda: self._value
            self._args = ()
        elif issubclass(expr.func, sympy.Integer):
            # Can get here if expr is one of the Integer special cases,
            # e.g. NegativeOne
            self._value = int(expr)
            self._torch_func = lambda: self._value
            self._args = ()
        elif issubclass(expr.func, sympy.Symbol):
            # Symbols are resolved by name from the dict passed to forward().
            self._name = expr.name
            self._torch_func = lambda value: value
            self._args = ((lambda memodict: memodict[expr.name]),)
        else:
            # Generic operator: look up the torch implementation, then recurse
            # into the operator's arguments (re-using cached nodes on hits).
            try:
                self._torch_func = _func_lookup[expr.func]
            except KeyError:
                raise KeyError(
                    f"Function {expr.func} was not found in Torch function mappings."
                    "Please add it to extra_torch_mappings in the format, e.g., "
                    "{sympy.sqrt: torch.sqrt}."
                )
            args = []
            for arg in expr.args:
                try:
                    arg_ = _memodict[arg]
                except KeyError:
                    arg_ = type(self)(
                        expr=arg,
                        _memodict=_memodict,
                        _func_lookup=_func_lookup,
                        **kwargs,
                    )
                    _memodict[arg] = arg_
                args.append(arg_)
            self._args = torch.nn.ModuleList(args)

    def forward(self, memodict):
        # Evaluate child nodes first, memoizing each sub-node's result in
        # `memodict` so shared subtrees are computed once, then apply this
        # node's torch function.
        args = []
        for arg in self._args:
            try:
                arg_ = memodict[arg]
            except KeyError:
                arg_ = arg(memodict)
                memodict[arg] = arg_
            args.append(arg_)
        return self._torch_func(*args)
class _SingleSymPyModule(torch.nn.Module):
    """SympyTorch code from https://github.com/patrick-kidger/sympytorch

    Wraps a single sympy expression as a torch module. The columns of the
    input matrix are bound, in order, to the symbols in ``symbols_in``.
    """

    def __init__(
        self, expression, symbols_in, selection=None, extra_funcs=None, **kwargs
    ):
        super().__init__(**kwargs)
        user_funcs = {} if extra_funcs is None else extra_funcs
        # Global mappings take priority; user mappings fill in the gaps.
        lookup = co.ChainMap(_global_func_lookup, user_funcs)
        self._node = _Node(expr=expression, _memodict={}, _func_lookup=lookup)
        self._expression_string = str(expression)
        self._selection = selection
        self.symbols_in = [str(symbol) for symbol in symbols_in]

    def __repr__(self):
        return f"{type(self).__name__}(expression={self._expression_string})"

    def forward(self, X):
        # Optionally restrict to the selected feature columns first.
        if self._selection is not None:
            X = X[:, self._selection]
        # Bind each symbol name to its column of X.
        bindings = {}
        for column, name in enumerate(self.symbols_in):
            bindings[name] = X[:, column]
        return self._node(bindings)
SingleSymPyModule = _SingleSymPyModule
def sympy2torch(expression, symbols_in, selection=None, extra_torch_mappings=None):
    """Returns a module for a given sympy expression with trainable parameters;

    This function will assume the input to the module is a matrix X, where
    each column corresponds to each symbol you pass in `symbols_in`.
    """
    # Lazily import torch and build SingleSymPyModule (a module-level global
    # populated by _initialize_torch; reading it needs no `global` statement).
    _initialize_torch()
    module = SingleSymPyModule(
        expression,
        symbols_in,
        selection=selection,
        extra_funcs=extra_torch_mappings,
    )
    return module
| 7,576 | 36.509901 | 93 | py |
PySR | PySR-master/pysr/__init__.py | from . import sklearn_monkeypatch
from .version import __version__
from .sr import (
pysr,
PySRRegressor,
best,
best_tex,
best_callable,
best_row,
)
from .julia_helpers import install
from .feynman_problems import Problem, FeynmanProblem
from .export_jax import sympy2jax
from .export_torch import sympy2torch
| 334 | 21.333333 | 53 | py |
PySR | PySR-master/pysr/export_jax.py | import functools as ft
import sympy
import string
import random
# Special since need to reduce arguments.
# Mul/Add are n-ary in sympy, so sympy2jaxtext joins their rendered arguments
# with "*" / "+" instead of emitting a function call; these sentinel codes
# mark them in the lookup table below.
MUL = 0
ADD = 1

# Mapping from sympy operator classes to the jax.numpy / jax.scipy source
# strings emitted by sympy2jaxtext.
_jnp_func_lookup = {
    sympy.Mul: MUL,
    sympy.Add: ADD,
    sympy.div: "jnp.div",  # NOTE(review): jnp has no `div`; presumably jnp.divide — confirm
    sympy.Abs: "jnp.abs",
    sympy.sign: "jnp.sign",
    # Note: May raise error for ints.
    sympy.ceiling: "jnp.ceil",
    sympy.floor: "jnp.floor",
    sympy.log: "jnp.log",
    sympy.exp: "jnp.exp",
    sympy.sqrt: "jnp.sqrt",
    sympy.cos: "jnp.cos",
    sympy.acos: "jnp.acos",
    sympy.sin: "jnp.sin",
    sympy.asin: "jnp.asin",
    sympy.tan: "jnp.tan",
    sympy.atan: "jnp.atan",
    sympy.atan2: "jnp.atan2",
    # Note: Also may give NaN for complex results.
    sympy.cosh: "jnp.cosh",
    sympy.acosh: "jnp.acosh",
    sympy.sinh: "jnp.sinh",
    sympy.asinh: "jnp.asinh",
    sympy.tanh: "jnp.tanh",
    sympy.atanh: "jnp.atanh",
    sympy.Pow: "jnp.power",
    sympy.re: "jnp.real",
    sympy.im: "jnp.imag",
    sympy.arg: "jnp.angle",
    # Note: May raise error for ints and complexes
    sympy.erf: "jsp.erf",
    sympy.erfc: "jsp.erfc",
    sympy.LessThan: "jnp.less",
    sympy.GreaterThan: "jnp.greater",
    sympy.And: "jnp.logical_and",
    sympy.Or: "jnp.logical_or",
    sympy.Not: "jnp.logical_not",
    sympy.Max: "jnp.max",
    sympy.Min: "jnp.min",
    sympy.Mod: "jnp.mod",
    sympy.Heaviside: "jnp.heaviside",
    # Fixed numeric constants render as zero-argument lambdas.
    sympy.core.numbers.Half: "(lambda: 0.5)",
    sympy.core.numbers.One: "(lambda: 1.0)",
}
def sympy2jaxtext(expr, parameters, symbols_in, extra_jax_mappings=None):
    """Recursively render the sympy expression `expr` as JAX source text.

    Float constants encountered in the tree are appended to `parameters`
    (mutated in place) and referenced as ``parameters[k]`` in the emitted
    source, keeping them trainable. Returns the source string.
    """
    if issubclass(expr.func, sympy.Float):
        parameters.append(float(expr))
        return f"parameters[{len(parameters) - 1}]"
    elif issubclass(expr.func, sympy.Rational):
        # Fixed fraction baked into the expression.
        return f"{float(expr)}"
    elif issubclass(expr.func, sympy.Integer):
        return f"{int(expr)}"
    elif issubclass(expr.func, sympy.Symbol):
        # Symbols index into the columns of X, in the order of `symbols_in`.
        # (list.index is the idiomatic form of the old first-match scan.)
        return f"X[:, {symbols_in.index(expr)}]"
    if extra_jax_mappings is None:
        extra_jax_mappings = {}
    try:
        _func = {**_jnp_func_lookup, **extra_jax_mappings}[expr.func]
    except KeyError:
        # Fix: the two literals previously concatenated without a space,
        # producing "...mappings.Please add...".
        raise KeyError(
            f"Function {expr.func} was not found in JAX function mappings. "
            "Please add it to extra_jax_mappings in the format, e.g., "
            "{sympy.sqrt: 'jnp.sqrt'}."
        )
    args = [
        sympy2jaxtext(
            arg, parameters, symbols_in, extra_jax_mappings=extra_jax_mappings
        )
        for arg in expr.args
    ]
    # Mul/Add are n-ary: join parenthesized arguments with the operator.
    if _func == MUL:
        return " * ".join(["(" + arg + ")" for arg in args])
    if _func == ADD:
        return " + ".join(["(" + arg + ")" for arg in args])
    return f'{_func}({", ".join(args)})'
jax_initialized = False
jax = None
jnp = None
jsp = None
def _initialize_jax():
global jax_initialized
global jax
global jnp
global jsp
if not jax_initialized:
import jax as _jax
from jax import numpy as _jnp
from jax.scipy import special as _jsp
jax = _jax
jnp = _jnp
jsp = _jsp
def sympy2jax(expression, symbols_in, selection=None, extra_jax_mappings=None):
    """Returns a function f and its parameters;
    the function takes an input matrix, and a list of arguments:
    f(X, parameters)
    where the parameters appear in the JAX equation.

    # Examples:

    Let's create a function in SymPy:
    ```python
    x, y = symbols('x y')
    cosx = 1.0 * sympy.cos(x) + 3.2 * y
    ```
    Let's get the JAX version. We pass the equation, and
    the symbols required.
    ```python
    f, params = sympy2jax(cosx, [x, y])
    ```
    The order you supply the symbols is the same order
    you should supply the features when calling
    the function `f` (shape `[nrows, nfeatures]`).
    In this case, features=2 for x and y.
    The `params` in this case will be
    `jnp.array([1.0, 3.2])`. You pass these parameters
    when calling the function, which will let you change them
    and take gradients.

    Let's generate some JAX data to pass:
    ```python
    key = random.PRNGKey(0)
    X = random.normal(key, (10, 2))
    ```

    We can call the function with:
    ```python
    f(X, params)
    #> DeviceArray([-2.6080756 ,  0.72633684, -6.7557726 , -0.2963162 ,
    #               6.6014843 ,  5.032483  , -0.810931  ,  4.2520013 ,
    #               3.5427954 , -2.7479894 ], dtype=float32)
    ```

    We can take gradients with respect
    to the parameters for each row with JAX
    gradient parameters now:
    ```python
    jac_f = jax.jacobian(f, argnums=1)
    jac_f(X, params)
    #> DeviceArray([[ 0.49364874, -0.9692889 ],
    #               [ 0.8283714 , -0.0318858 ],
    #               [-0.7447336 , -1.8784496 ],
    #               [ 0.70755106, -0.3137085 ],
    #               [ 0.944834  ,  1.767703  ],
    #               [ 0.51673377,  1.4111717 ],
    #               [ 0.87347716, -0.52637756],
    #               [ 0.8760679 ,  1.0549792 ],
    #               [ 0.9961824 ,  0.79581654],
    #               [-0.88465923, -0.5822907 ]], dtype=float32)
    ```

    We can also JIT-compile our function:
    ```python
    compiled_f = jax.jit(f)
    compiled_f(X, params)
    #> DeviceArray([-2.6080756 ,  0.72633684, -6.7557726 , -0.2963162 ,
    #               6.6014843 ,  5.032483  , -0.810931  ,  4.2520013 ,
    #               3.5427954 , -2.7479894 ], dtype=float32)
    ```
    """
    # Fix: dropped four needless `global` declarations — the globals were
    # only read here, never assigned, so the statements had no effect.
    _initialize_jax()
    parameters = []
    functional_form_text = sympy2jaxtext(
        expression, parameters, symbols_in, extra_jax_mappings
    )
    # Unique-ish function name so repeated calls don't collide in `ldict`.
    hash_string = "A_" + str(abs(hash(str(expression) + str(symbols_in))))
    text = f"def {hash_string}(X, parameters):\n"
    if selection is not None:
        # Impose the feature selection:
        text += f"    X = X[:, {list(selection)}]\n"
    text += "    return "
    text += functional_form_text
    # Safe use of exec: `text` is generated above from the sympy tree, not
    # taken from external input.
    ldict = {}
    exec(text, globals(), ldict)
    return ldict[hash_string], jnp.array(parameters)
| 6,320 | 29.834146 | 86 | py |
PySR | PySR-master/pysr/test/test_jax.py | import unittest
import numpy as np
import pandas as pd
import sympy
from functools import partial
from .. import sympy2jax, PySRRegressor
class TestJAX(unittest.TestCase):
    """End-to-end tests of PySR's JAX export (`sympy2jax` / `model.jax()`)."""

    def setUp(self):
        np.random.seed(0)

    def test_sympy2jax(self):
        # Direct sympy -> jax conversion should match a hand-written jnp formula.
        from jax import numpy as jnp
        from jax import random

        x, y, z = sympy.symbols("x y z")
        cosx = 1.0 * sympy.cos(x) + y
        key = random.PRNGKey(0)
        X = random.normal(key, (1000, 2))
        true = 1.0 * jnp.cos(X[:, 0]) + X[:, 1]
        f, params = sympy2jax(cosx, [x, y, z])
        self.assertTrue(jnp.all(jnp.isclose(f(X, params), true)).item())

    def test_pipeline_pandas(self):
        # Full pipeline with a pandas DataFrame input: fit, then swap in a
        # hand-written equation file and check the exported JAX callable.
        from jax import numpy as jnp

        X = pd.DataFrame(np.random.randn(100, 10))
        y = np.ones(X.shape[0])
        model = PySRRegressor(
            progress=False,
            max_evals=10000,
            output_jax_format=True,
        )
        model.fit(X, y)
        equations = pd.DataFrame(
            {
                "Equation": ["1.0", "cos(x1)", "square(cos(x1))"],
                "Loss": [1.0, 0.1, 1e-5],
                "Complexity": [1, 2, 3],
            }
        )
        equations["Complexity Loss Equation".split(" ")].to_csv(
            "equation_file.csv.bkup"
        )
        model.refresh(checkpoint_file="equation_file.csv")
        jformat = model.jax()
        np.testing.assert_almost_equal(
            np.array(jformat["callable"](jnp.array(X), jformat["parameters"])),
            np.square(np.cos(X.values[:, 1])),  # Select feature 1
            decimal=3,
        )

    def test_pipeline(self):
        # Same as test_pipeline_pandas, but with plain numpy arrays.
        from jax import numpy as jnp

        X = np.random.randn(100, 10)
        y = np.ones(X.shape[0])
        model = PySRRegressor(progress=False, max_evals=10000, output_jax_format=True)
        model.fit(X, y)
        equations = pd.DataFrame(
            {
                "Equation": ["1.0", "cos(x1)", "square(cos(x1))"],
                "Loss": [1.0, 0.1, 1e-5],
                "Complexity": [1, 2, 3],
            }
        )
        equations["Complexity Loss Equation".split(" ")].to_csv(
            "equation_file.csv.bkup"
        )
        model.refresh(checkpoint_file="equation_file.csv")
        jformat = model.jax()
        np.testing.assert_almost_equal(
            np.array(jformat["callable"](jnp.array(X), jformat["parameters"])),
            np.square(np.cos(X[:, 1])),  # Select feature 1
            decimal=3,
        )

    def test_feature_selection_custom_operators(self):
        # Feature selection + a custom Julia operator, exported through both
        # the numpy (predict) and JAX paths; the two must agree with truth.
        rstate = np.random.RandomState(0)
        X = pd.DataFrame({f"k{i}": rstate.randn(2000) for i in range(10, 21)})
        cos_approx = lambda x: 1 - (x**2) / 2 + (x**4) / 24 + (x**6) / 720
        y = X["k15"] ** 2 + 2 * cos_approx(X["k20"])
        model = PySRRegressor(
            progress=False,
            unary_operators=["cos_approx(x) = 1 - x^2 / 2 + x^4 / 24 + x^6 / 720"],
            select_k_features=3,
            maxsize=10,
            early_stop_condition=1e-5,
            extra_sympy_mappings={"cos_approx": cos_approx},
            extra_jax_mappings={
                "cos_approx": "(lambda x: 1 - x**2 / 2 + x**4 / 24 + x**6 / 720)"
            },
            random_state=0,
            deterministic=True,
            procs=0,
            multithreading=False,
        )
        np.random.seed(0)
        model.fit(X.values, y.values)
        f, parameters = model.jax().values()
        np_prediction = model.predict
        jax_prediction = partial(f, parameters=parameters)
        np_output = np_prediction(X.values)
        jax_output = jax_prediction(X.values)
        np.testing.assert_almost_equal(y.values, np_output, decimal=3)
        np.testing.assert_almost_equal(y.values, jax_output, decimal=3)
def runtests():
    """Run all tests in test_jax.py."""
    suite = unittest.TestSuite()
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestJAX))
    return unittest.TextTestRunner().run(suite)
| 4,049 | 30.153846 | 86 | py |
PySR | PySR-master/pysr/test/__main__.py | """CLI for running PySR's test suite."""
import argparse
import os
from . import *
if __name__ == "__main__":
    # Build the CLI: accepts zero or more test-suite names.
    parser = argparse.ArgumentParser()
    parser.usage = "python -m pysr.test [tests...]"
    parser.add_argument(
        "test",
        nargs="*",
        help="Test to run. One or more of 'main', 'env', 'jax', 'torch', 'cli'.",
    )
    args = parser.parse_args()
    tests = args.test
    if not tests:
        # No suites requested: show usage and exit with failure.
        parser.print_help()
        raise SystemExit(1)
    # Dispatch table mapping each suite name to its runner
    # (all imported above via `from . import *`).
    runners = {
        "main": runtests,
        "env": runtests_env,
        "jax": runtests_jax,
        "torch": runtests_torch,
        "cli": runtests_cli,
    }
    for test in tests:
        if test not in runners:
            # Unknown suite name: show usage and exit with failure.
            parser.print_help()
            raise SystemExit(1)
        cur_dir = os.path.dirname(os.path.abspath(__file__))
        print(f"Running test from {cur_dir}")
        runners[test]()
| 1,157 | 25.318182 | 81 | py |
PySR | PySR-master/pysr/test/test.py | import os
import traceback
import inspect
import unittest
import numpy as np
from sklearn import model_selection
from sklearn.utils.estimator_checks import check_estimator
import sympy
import pandas as pd
import warnings
import pickle as pkl
import tempfile
from pathlib import Path
from .. import julia_helpers
from .. import PySRRegressor
from ..sr import (
run_feature_selection,
_handle_feature_selection,
_csv_filename_to_pkl_filename,
idx_model_selection,
)
from ..export_latex import to_latex
# Default hyperparameters read off the PySRRegressor signature, so the test
# fixtures below stay in sync with the library defaults automatically.
DEFAULT_PARAMS = inspect.signature(PySRRegressor.__init__).parameters
DEFAULT_NITERATIONS = DEFAULT_PARAMS["niterations"].default
DEFAULT_POPULATIONS = DEFAULT_PARAMS["populations"].default
DEFAULT_NCYCLES = DEFAULT_PARAMS["ncyclesperiteration"].default
class TestPipeline(unittest.TestCase):
    """End-to-end search tests: each one runs a real (short) PySR search and
    checks that the recovered equation reaches the expected loss."""

    def setUp(self):
        # Using inspect,
        # get default niterations from PySRRegressor, and double them:
        self.default_test_kwargs = dict(
            progress=False,
            model_selection="accuracy",
            niterations=DEFAULT_NITERATIONS * 2,
            populations=DEFAULT_POPULATIONS * 2,
            temp_equation_file=True,
        )
        self.rstate = np.random.RandomState(0)
        self.X = self.rstate.randn(100, 5)

    def test_linear_relation(self):
        y = self.X[:, 0]
        model = PySRRegressor(
            **self.default_test_kwargs,
            early_stop_condition="stop_if(loss, complexity) = loss < 1e-4 && complexity == 1",
        )
        model.fit(self.X, y)
        print(model.equations_)
        self.assertLessEqual(model.get_best()["loss"], 1e-4)

    def test_linear_relation_named(self):
        # Custom variable names should appear in the discovered equation.
        y = self.X[:, 0]
        model = PySRRegressor(
            **self.default_test_kwargs,
            early_stop_condition="stop_if(loss, complexity) = loss < 1e-4 && complexity == 1",
        )
        model.fit(self.X, y, variable_names=["c1", "c2", "c3", "c4", "c5"])
        self.assertIn("c1", model.equations_.iloc[-1]["equation"])

    def test_linear_relation_weighted(self):
        # All-ones weights must behave like the unweighted case.
        y = self.X[:, 0]
        weights = np.ones_like(y)
        model = PySRRegressor(
            **self.default_test_kwargs,
            early_stop_condition="stop_if(loss, complexity) = loss < 1e-4 && complexity == 1",
        )
        model.fit(self.X, y, weights=weights)
        print(model.equations_)
        self.assertLessEqual(model.get_best()["loss"], 1e-4)

    def test_multiprocessing_turbo_custom_objective(self):
        rstate = np.random.RandomState(0)
        y = self.X[:, 0]
        y += rstate.randn(*y.shape) * 1e-4
        model = PySRRegressor(
            **self.default_test_kwargs,
            # Turbo needs to work with unsafe operators:
            unary_operators=["sqrt"],
            procs=2,
            multithreading=False,
            turbo=True,
            early_stop_condition="stop_if(loss, complexity) = loss < 1e-10 && complexity == 1",
            # Custom Julia objective: mean cubed absolute error.
            full_objective="""
            function my_objective(tree::Node{T}, dataset::Dataset{T}, options::Options) where T
                prediction, flag = eval_tree_array(tree, dataset.X, options)
                !flag && return T(Inf)
                abs3(x) = abs(x) ^ 3
                return sum(abs3, prediction .- dataset.y) / length(prediction)
            end
            """,
        )
        model.fit(self.X, y)
        print(model.equations_)
        best_loss = model.equations_.iloc[-1]["loss"]
        self.assertLessEqual(best_loss, 1e-10)
        self.assertGreaterEqual(best_loss, 0.0)

    def test_high_precision_search_custom_loss(self):
        y = 1.23456789 * self.X[:, 0]
        model = PySRRegressor(
            **self.default_test_kwargs,
            early_stop_condition="stop_if(loss, complexity) = loss < 1e-4 && complexity == 3",
            loss="my_loss(prediction, target) = (prediction - target)^2",
            precision=64,
            parsimony=0.01,
            warm_start=True,
        )
        model.fit(self.X, y)
        from pysr.sr import Main

        # We should have that the model state is now a Float64 hof:
        Main.test_state = model.raw_julia_state_
        self.assertTrue(Main.eval("typeof(test_state[2]).parameters[1] == Float64"))

    def test_multioutput_custom_operator_quiet_custom_complexity(self):
        y = self.X[:, [0, 1]] ** 2
        model = PySRRegressor(
            unary_operators=["square_op(x) = x^2"],
            extra_sympy_mappings={"square_op": lambda x: x**2},
            complexity_of_operators={"square_op": 2, "plus": 1},
            binary_operators=["plus"],
            verbosity=0,
            **self.default_test_kwargs,
            procs=0,
            # Test custom operators with turbo:
            turbo=True,
            # Test custom operators with constraints:
            nested_constraints={"square_op": {"square_op": 3}},
            constraints={"square_op": 10},
            early_stop_condition="stop_if(loss, complexity) = loss < 1e-4 && complexity == 3",
        )
        model.fit(self.X, y)
        equations = model.equations_
        print(equations)
        self.assertIn("square_op", model.equations_[0].iloc[-1]["equation"])
        self.assertLessEqual(equations[0].iloc[-1]["loss"], 1e-4)
        self.assertLessEqual(equations[1].iloc[-1]["loss"], 1e-4)
        # Per-output index selection: [-1, -1] picks the best rows; [0, 0]
        # picks the worst (constant) rows and must predict badly.
        test_y1 = model.predict(self.X)
        test_y2 = model.predict(self.X, index=[-1, -1])
        mse1 = np.average((test_y1 - y) ** 2)
        mse2 = np.average((test_y2 - y) ** 2)
        self.assertLessEqual(mse1, 1e-4)
        self.assertLessEqual(mse2, 1e-4)
        bad_y = model.predict(self.X, index=[0, 0])
        bad_mse = np.average((bad_y - y) ** 2)
        self.assertGreater(bad_mse, 1e-4)

    def test_multioutput_weighted_with_callable_temp_equation(self):
        X = self.X.copy()
        y = X[:, [0, 1]] ** 2
        w = self.rstate.rand(*y.shape)
        w[w < 0.5] = 0.0
        w[w >= 0.5] = 1.0
        # Double equation when weights are 0:
        y = (2 - w) * y
        # Thus, pysr needs to use the weights to find the right equation!
        model = PySRRegressor(
            unary_operators=["sq(x) = x^2"],
            binary_operators=["plus"],
            extra_sympy_mappings={"sq": lambda x: x**2},
            **self.default_test_kwargs,
            procs=0,
            delete_tempfiles=False,
            early_stop_condition="stop_if(loss, complexity) = loss < 1e-4 && complexity == 2",
        )
        model.fit(X.copy(), y, weights=w)
        # These tests are flaky, so don't fail test:
        try:
            np.testing.assert_almost_equal(
                model.predict(X.copy())[:, 0], X[:, 0] ** 2, decimal=3
            )
        except AssertionError:
            print("Error in test_multioutput_weighted_with_callable_temp_equation")
            print("Model equations: ", model.sympy()[0])
            print("True equation: x0^2")
        try:
            np.testing.assert_almost_equal(
                model.predict(X.copy())[:, 1], X[:, 1] ** 2, decimal=3
            )
        except AssertionError:
            print("Error in test_multioutput_weighted_with_callable_temp_equation")
            print("Model equations: ", model.sympy()[1])
            print("True equation: x1^2")

    def test_complex_equations_anonymous_stop(self):
        # Complex-valued regression, with an anonymous-function stop condition.
        X = self.rstate.randn(100, 3) + 1j * self.rstate.randn(100, 3)
        y = (2 + 1j) * np.cos(X[:, 0] * (0.5 - 0.3j))
        model = PySRRegressor(
            binary_operators=["+", "-", "*"],
            unary_operators=["cos"],
            **self.default_test_kwargs,
            early_stop_condition="(loss, complexity) -> loss <= 1e-4 && complexity <= 6",
        )
        model.fit(X, y)
        test_y = model.predict(X)
        self.assertTrue(np.issubdtype(test_y.dtype, np.complexfloating))
        self.assertLessEqual(np.average(np.abs(test_y - y) ** 2), 1e-4)

    def test_empty_operators_single_input_warm_start(self):
        X = self.rstate.randn(100, 1)
        y = X[:, 0] + 3.0
        regressor = PySRRegressor(
            unary_operators=[],
            binary_operators=["plus"],
            **self.default_test_kwargs,
            early_stop_condition="stop_if(loss, complexity) = loss < 1e-4 && complexity == 3",
        )
        # repr flips from "None" placeholders to ">>>>"-marked rows after fit.
        self.assertTrue("None" in regressor.__repr__())
        regressor.fit(X, y)
        self.assertTrue("None" not in regressor.__repr__())
        self.assertTrue(">>>>" in regressor.__repr__())
        self.assertLessEqual(regressor.equations_.iloc[-1]["loss"], 1e-4)
        np.testing.assert_almost_equal(regressor.predict(X), y, decimal=1)
        # Test if repeated fit works:
        regressor.set_params(
            niterations=1,
            ncyclesperiteration=2,
            warm_start=True,
            early_stop_condition=None,
        )
        # Check that the julia state is saved:
        from pysr.sr import Main

        # We should have that the model state is now a Float32 hof:
        Main.test_state = regressor.raw_julia_state_
        self.assertTrue(Main.eval("typeof(test_state[2]).parameters[1] == Float32"))
        # This should exit almost immediately, and use the old equations
        regressor.fit(X, y)
        self.assertLessEqual(regressor.equations_.iloc[-1]["loss"], 1e-4)
        np.testing.assert_almost_equal(regressor.predict(X), y, decimal=1)
        # Tweak model selection:
        regressor.set_params(model_selection="best")
        self.assertEqual(regressor.get_params()["model_selection"], "best")
        self.assertTrue("None" not in regressor.__repr__())
        self.assertTrue(">>>>" in regressor.__repr__())

    def test_warm_start_set_at_init(self):
        # Smoke test for bug where warm_start=True is set at init
        y = self.X[:, 0]
        regressor = PySRRegressor(warm_start=True, max_evals=10)
        regressor.fit(self.X, y)

    def test_noisy(self):
        y = self.X[:, [0, 1]] ** 2 + self.rstate.randn(self.X.shape[0], 1) * 0.05
        model = PySRRegressor(
            # Test that passing a single operator works:
            unary_operators="sq(x) = x^2",
            binary_operators="plus",
            extra_sympy_mappings={"sq": lambda x: x**2},
            **self.default_test_kwargs,
            procs=0,
            denoise=True,
            early_stop_condition="stop_if(loss, complexity) = loss < 0.05 && complexity == 2",
        )
        # We expect in this case that the "best"
        # equation should be the right one:
        model.set_params(model_selection="best")
        # Also try without a temp equation file:
        model.set_params(temp_equation_file=False)
        model.fit(self.X, y)
        self.assertLessEqual(model.get_best()[1]["loss"], 1e-2)
        self.assertLessEqual(model.get_best()[1]["loss"], 1e-2)

    def test_pandas_resample_with_nested_constraints(self):
        X = pd.DataFrame(
            {
                "T": self.rstate.randn(500),
                "x": self.rstate.randn(500),
                "unused_feature": self.rstate.randn(500),
            }
        )
        true_fn = lambda x: np.array(x["T"] + x["x"] ** 2 + 1.323837)
        y = true_fn(X)
        noise = self.rstate.randn(500) * 0.01
        y = y + noise
        # We also test y as a pandas array:
        y = pd.Series(y)
        # Resampled array is a different order of features:
        Xresampled = pd.DataFrame(
            {
                "unused_feature": self.rstate.randn(100),
                "x": self.rstate.randn(100),
                "T": self.rstate.randn(100),
            }
        )
        model = PySRRegressor(
            unary_operators=[],
            binary_operators=["+", "*", "/", "-"],
            **self.default_test_kwargs,
            denoise=True,
            nested_constraints={"/": {"+": 1, "-": 1}, "+": {"*": 4}},
            early_stop_condition="stop_if(loss, complexity) = loss < 1e-3 && complexity == 7",
        )
        model.fit(X, y, Xresampled=Xresampled)
        # The unused column must not appear in the final equation.
        self.assertNotIn("unused_feature", model.latex())
        self.assertIn("T", model.latex())
        self.assertIn("x", model.latex())
        self.assertLessEqual(model.get_best()["loss"], 1e-1)
        fn = model.get_best()["lambda_format"]
        # Prediction must be column-name-aware (different column order here):
        X2 = pd.DataFrame(
            {
                "T": self.rstate.randn(100),
                "unused_feature": self.rstate.randn(100),
                "x": self.rstate.randn(100),
            }
        )
        self.assertLess(np.average((fn(X2) - true_fn(X2)) ** 2), 1e-1)
        self.assertLess(np.average((model.predict(X2) - true_fn(X2)) ** 2), 1e-1)

    def test_high_dim_selection_early_stop(self):
        X = pd.DataFrame({f"k{i}": self.rstate.randn(10000) for i in range(10)})
        Xresampled = pd.DataFrame({f"k{i}": self.rstate.randn(100) for i in range(10)})
        y = X["k7"] ** 2 + np.cos(X["k9"]) * 3
        model = PySRRegressor(
            unary_operators=["cos"],
            select_k_features=3,
            early_stop_condition=1e-4,  # Stop once most accurate equation is <1e-4 MSE
            maxsize=12,
            **self.default_test_kwargs,
        )
        model.set_params(model_selection="accuracy")
        model.fit(X, y, Xresampled=Xresampled)
        self.assertLess(np.average((model.predict(X) - y) ** 2), 1e-4)
        # Again, but with numpy arrays:
        model.fit(X.values, y.values, Xresampled=Xresampled.values)
        self.assertLess(np.average((model.predict(X.values) - y.values) ** 2), 1e-4)

    def test_load_model(self):
        """See if we can load a ran model from the equation file."""
        csv_file_data = """
        Complexity,Loss,Equation
        1,0.19951081,"1.9762075"
        3,0.12717344,"(f0 + 1.4724599)"
        4,0.104823045,"pow_abs(2.2683423, cos(f3))\""""
        # Strip the indents:
        csv_file_data = "\n".join([l.strip() for l in csv_file_data.split("\n")])
        # Loading should work from both the main file and the .bkup file.
        for from_backup in [False, True]:
            rand_dir = Path(tempfile.mkdtemp())
            equation_filename = str(rand_dir / "equation.csv")
            with open(equation_filename + (".bkup" if from_backup else ""), "w") as f:
                f.write(csv_file_data)
            model = PySRRegressor.from_file(
                equation_filename,
                n_features_in=5,
                feature_names_in=["f0", "f1", "f2", "f3", "f4"],
                binary_operators=["+", "*", "/", "-", "^"],
                unary_operators=["cos"],
            )
            X = self.rstate.rand(100, 5)
            y_truth = 2.2683423 ** np.cos(X[:, 3])
            y_test = model.predict(X, 2)
            np.testing.assert_allclose(y_truth, y_test)

    def test_load_model_simple(self):
        # Test that we can simply load a model from its equation file.
        y = self.X[:, [0, 1]] ** 2
        model = PySRRegressor(
            # Test that passing a single operator works:
            unary_operators="sq(x) = x^2",
            binary_operators="plus",
            extra_sympy_mappings={"sq": lambda x: x**2},
            **self.default_test_kwargs,
            procs=0,
            denoise=True,
            early_stop_condition="stop_if(loss, complexity) = loss < 0.05 && complexity == 2",
        )
        rand_dir = Path(tempfile.mkdtemp())
        equation_file = rand_dir / "equations.csv"
        model.set_params(temp_equation_file=False)
        model.set_params(equation_file=equation_file)
        model.fit(self.X, y)
        # lambda functions are removed from the pickling, so we need
        # to pass it during the loading:
        model2 = PySRRegressor.from_file(
            model.equation_file_, extra_sympy_mappings={"sq": lambda x: x**2}
        )
        np.testing.assert_allclose(model.predict(self.X), model2.predict(self.X))
        # Try again, but using only the pickle file:
        for file_to_delete in [str(equation_file), str(equation_file) + ".bkup"]:
            if os.path.exists(file_to_delete):
                os.remove(file_to_delete)
        # NOTE(review): `pickle_file` is never used below — from_file is called
        # with the original equation_file_ path; looks like dead code. Confirm.
        pickle_file = rand_dir / "equations.pkl"
        model3 = PySRRegressor.from_file(
            model.equation_file_, extra_sympy_mappings={"sq": lambda x: x**2}
        )
        np.testing.assert_allclose(model.predict(self.X), model3.predict(self.X))
def manually_create_model(equations, feature_names=None):
    """Build a PySRRegressor that looks as if it had already been fitted.

    `equations` is a DataFrame (single output) or list of DataFrames
    (multi-output) with "complexity", "loss" and "equation" columns; the
    frames are written to the backup csv files that `model.refresh()` reads.
    """
    if feature_names is None:
        feature_names = ["x0", "x1"]
    model = PySRRegressor(
        progress=False,
        niterations=1,
        extra_sympy_mappings={},
        output_jax_format=False,
        model_selection="accuracy",
        equation_file="equation_file.csv",
    )
    # Set up internal parameters as if it had been fitted.
    multi_output = isinstance(equations, list)
    model.equation_file_ = "equation_file.csv"
    model.nout_ = len(equations) if multi_output else 1
    model.selection_mask_ = None
    model.feature_names_in_ = np.array(feature_names, dtype=object)
    columns = "complexity loss equation".split(" ")
    if multi_output:
        # One backup file per output.
        for i, frame in enumerate(equations):
            frame[columns].to_csv(f"equation_file.csv.out{i+1}.bkup")
    else:
        equations[columns].to_csv("equation_file.csv.bkup")
    model.refresh()
    return model
class TestBest(unittest.TestCase):
    """Tests of equation selection (`sympy`, `latex`, `predict`) against a
    hand-built hall of fame with known losses and complexities."""

    def setUp(self):
        self.rstate = np.random.RandomState(0)
        self.X = self.rstate.randn(10, 2)
        self.y = np.cos(self.X[:, 0]) ** 2
        equations = pd.DataFrame(
            {
                "equation": ["1.0", "cos(x0)", "square(cos(x0))"],
                "loss": [1.0, 0.1, 1e-5],
                "complexity": [1, 2, 3],
            }
        )
        self.model = manually_create_model(equations)
        self.equations_ = self.model.equations_

    def test_best(self):
        # With model_selection="accuracy", the lowest-loss row wins.
        self.assertEqual(self.model.sympy(), sympy.cos(sympy.Symbol("x0")) ** 2)

    def test_index_selection(self):
        # Explicit index overrides model selection; -1 is the last row.
        self.assertEqual(self.model.sympy(-1), sympy.cos(sympy.Symbol("x0")) ** 2)
        self.assertEqual(self.model.sympy(2), sympy.cos(sympy.Symbol("x0")) ** 2)
        self.assertEqual(self.model.sympy(1), sympy.cos(sympy.Symbol("x0")))
        self.assertEqual(self.model.sympy(0), 1.0)

    def test_best_tex(self):
        self.assertEqual(self.model.latex(), "\\cos^{2}{\\left(x_{0} \\right)}")

    def test_best_lambda(self):
        # Both predict() and the stored lambda must evaluate the best row.
        X = self.X
        y = self.y
        for f in [self.model.predict, self.equations_.iloc[-1]["lambda_format"]]:
            np.testing.assert_almost_equal(f(X), y, decimal=3)

    def test_all_selection_strategies(self):
        equations = pd.DataFrame(
            dict(
                loss=[1.0, 0.1, 0.01, 0.001 * 1.4, 0.001],
                score=[0.5, 1.0, 0.5, 0.5, 0.3],
            )
        )
        idx_accuracy = idx_model_selection(equations, "accuracy")
        self.assertEqual(idx_accuracy, 4)
        idx_best = idx_model_selection(equations, "best")
        self.assertEqual(idx_best, 3)
        idx_score = idx_model_selection(equations, "score")
        self.assertEqual(idx_score, 1)
class TestFeatureSelection(unittest.TestCase):
    """Tests of the pre-search feature-selection helpers."""

    def setUp(self):
        self.rstate = np.random.RandomState(0)

    def test_feature_selection(self):
        # Only columns 2 and 3 carry signal; the selector must find them.
        X = self.rstate.randn(20000, 5)
        y = X[:, 2] ** 2 + X[:, 3] ** 2
        selected = run_feature_selection(X, y, select_k_features=2)
        self.assertEqual(sorted(selected), [2, 3])

    def test_feature_selection_handler(self):
        # The handler must also subset X and report the selected indices.
        X = self.rstate.randn(20000, 5)
        y = X[:, 2] ** 2 + X[:, 3] ** 2
        var_names = [f"x{i}" for i in range(5)]
        selected_X, selection = _handle_feature_selection(
            X,
            select_k_features=2,
            variable_names=var_names,
            y=y,
        )
        self.assertTrue((2 in selection) and (3 in selection))
        selected_var_names = [var_names[i] for i in selection]
        self.assertEqual(set(selected_var_names), set("x2 x3".split(" ")))
        np.testing.assert_array_equal(
            np.sort(selected_X, axis=1), np.sort(X[:, [2, 3]], axis=1)
        )
class TestMiscellaneous(unittest.TestCase):
"""Test miscellaneous functions."""
def test_csv_to_pkl_conversion(self):
"""Test that csv filename to pkl filename works as expected."""
tmpdir = Path(tempfile.mkdtemp())
equation_file = tmpdir / "equations.389479384.28378374.csv"
expected_pkl_file = tmpdir / "equations.389479384.28378374.pkl"
# First, test inputting the paths:
test_pkl_file = _csv_filename_to_pkl_filename(equation_file)
self.assertEqual(test_pkl_file, str(expected_pkl_file))
# Next, test inputting the strings.
test_pkl_file = _csv_filename_to_pkl_filename(str(equation_file))
self.assertEqual(test_pkl_file, str(expected_pkl_file))
def test_deprecation(self):
"""Ensure that deprecation works as expected.
This should give a warning, and sets the correct value.
"""
with self.assertWarns(FutureWarning):
model = PySRRegressor(fractionReplaced=0.2)
# This is a deprecated parameter, so we should get a warning.
# The correct value should be set:
self.assertEqual(model.fraction_replaced, 0.2)
def test_size_warning(self):
"""Ensure that a warning is given for a large input size."""
model = PySRRegressor()
X = np.random.randn(10001, 2)
y = np.random.randn(10001)
with warnings.catch_warnings():
warnings.simplefilter("error")
with self.assertRaises(Exception) as context:
model.fit(X, y)
self.assertIn("more than 10,000", str(context.exception))
def test_feature_warning(self):
"""Ensure that a warning is given for large number of features."""
model = PySRRegressor()
X = np.random.randn(100, 10)
y = np.random.randn(100)
with warnings.catch_warnings():
warnings.simplefilter("error")
with self.assertRaises(Exception) as context:
model.fit(X, y)
self.assertIn("with 10 features or more", str(context.exception))
def test_deterministic_warnings(self):
"""Ensure that warnings are given for determinism"""
model = PySRRegressor(random_state=0)
X = np.random.randn(100, 2)
y = np.random.randn(100)
with warnings.catch_warnings():
warnings.simplefilter("error")
with self.assertRaises(Exception) as context:
model.fit(X, y)
self.assertIn("`deterministic`", str(context.exception))
def test_deterministic_errors(self):
"""Setting deterministic without random_state should error"""
model = PySRRegressor(deterministic=True)
X = np.random.randn(100, 2)
y = np.random.randn(100)
with self.assertRaises(ValueError):
model.fit(X, y)
def test_changed_options_warning(self):
"""Check that a warning is given if Julia options are changed."""
if julia_helpers.julia_kwargs_at_initialization is None:
julia_helpers.init_julia(julia_kwargs={"threads": 2, "optimize": 3})
cur_init = julia_helpers.julia_kwargs_at_initialization
threads_to_change = cur_init["threads"] + 1
with warnings.catch_warnings():
warnings.simplefilter("error")
with self.assertRaises(Exception) as context:
julia_helpers.init_julia(
julia_kwargs={"threads": threads_to_change, "optimize": 3}
)
self.assertIn("Julia has already started", str(context.exception))
self.assertIn("threads", str(context.exception))
def test_extra_sympy_mappings_undefined(self):
"""extra_sympy_mappings=None errors for custom operators"""
model = PySRRegressor(unary_operators=["square2(x) = x^2"])
X = np.random.randn(100, 2)
y = np.random.randn(100)
with self.assertRaises(ValueError):
model.fit(X, y)
def test_sympy_function_fails_as_variable(self):
model = PySRRegressor()
X = np.random.randn(100, 2)
y = np.random.randn(100)
with self.assertRaises(ValueError) as cm:
model.fit(X, y, variable_names=["x1", "N"])
self.assertIn("Variable name", str(cm.exception))
def test_bad_variable_names_fail(self):
model = PySRRegressor()
X = np.random.randn(100, 1)
y = np.random.randn(100)
with self.assertRaises(ValueError) as cm:
model.fit(X, y, variable_names=["Tr(Tij)"])
self.assertIn("Invalid variable name", str(cm.exception))
with self.assertRaises(ValueError) as cm:
model.fit(X, y, variable_names=["f{c}"])
self.assertIn("Invalid variable name", str(cm.exception))
def test_pickle_with_temp_equation_file(self):
"""If we have a temporary equation file, unpickle the estimator."""
model = PySRRegressor(
populations=int(1 + DEFAULT_POPULATIONS / 5),
temp_equation_file=True,
procs=0,
multithreading=False,
)
nout = 3
X = np.random.randn(100, 2)
y = np.random.randn(100, nout)
model.fit(X, y)
contents = model.equation_file_contents_.copy()
y_predictions = model.predict(X)
equation_file_base = model.equation_file_
for i in range(1, nout + 1):
assert not os.path.exists(str(equation_file_base) + f".out{i}.bkup")
with tempfile.NamedTemporaryFile() as pickle_file:
pkl.dump(model, pickle_file)
pickle_file.seek(0)
model2 = pkl.load(pickle_file)
contents2 = model2.equation_file_contents_
cols_to_check = ["equation", "loss", "complexity"]
for frame1, frame2 in zip(contents, contents2):
pd.testing.assert_frame_equal(frame1[cols_to_check], frame2[cols_to_check])
y_predictions2 = model2.predict(X)
np.testing.assert_array_equal(y_predictions, y_predictions2)
def test_scikit_learn_compatibility(self):
    """Test PySRRegressor compatibility with scikit-learn."""
    # Keep the search tiny and deterministic so the estimator checks run fast.
    model = PySRRegressor(
        niterations=int(1 + DEFAULT_NITERATIONS / 10),
        populations=int(1 + DEFAULT_POPULATIONS / 3),
        ncyclesperiteration=int(2 + DEFAULT_NCYCLES / 10),
        verbosity=0,
        progress=False,
        random_state=0,
        deterministic=True,  # Deterministic as tests require this.
        procs=0,
        multithreading=False,
        warm_start=False,
        temp_equation_file=True,
    )  # Return early.
    check_generator = check_estimator(model, generate_only=True)
    exception_messages = []
    for _, check in check_generator:
        if check.func.__name__ == "check_complex_data":
            # We can use complex data, so avoid this check.
            continue
        try:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                check(model)
            print("Passed", check.func.__name__)
        except Exception:
            # Collect failures and report them all at the end instead of
            # stopping at the first failing estimator check.
            error_message = str(traceback.format_exc())
            exception_messages.append(
                f"{check.func.__name__}:\n" + error_message + "\n"
            )
            print("Failed", check.func.__name__, "with:")
            # Add a leading tab to error message, which
            # might be multi-line:
            print("\n".join([(" " * 4) + row for row in error_message.split("\n")]))
    # If any checks failed don't let the test pass.
    self.assertEqual(len(exception_messages), 0)
# Expected preamble emitted before every generated LaTeX table:
# breqn (auto-breaking long equations) and booktabs (table rules).
TRUE_PREAMBLE = "\n".join(
    [
        r"\usepackage{breqn}",
        r"\usepackage{booktabs}",
        "",
        "...",
        "",
    ]
)
class TestLaTeXTable(unittest.TestCase):
    """Tests for the LaTeX table rendering of PySRRegressor.latex_table()."""

    def setUp(self):
        # A small, known hall of fame to render in each test.
        equations = pd.DataFrame(
            dict(
                equation=["x0", "cos(x0)", "x0 + x1 - cos(x1 * x0)"],
                loss=[1.052, 0.02315, 1.12347e-15],
                complexity=[1, 2, 8],
            )
        )
        self.model = manually_create_model(equations)
        self.maxDiff = None

    def create_true_latex(self, middle_part, include_score=False):
        """Build the expected LaTeX table wrapping the given body rows."""
        if include_score:
            true_latex_table_str = r"""
\begin{table}[h]
\begin{center}
\begin{tabular}{@{}cccc@{}}
\toprule
Equation & Complexity & Loss & Score \\
\midrule"""
        else:
            true_latex_table_str = r"""
\begin{table}[h]
\begin{center}
\begin{tabular}{@{}ccc@{}}
\toprule
Equation & Complexity & Loss \\
\midrule"""
        true_latex_table_str += middle_part
        true_latex_table_str += r"""\bottomrule
\end{tabular}
\end{center}
\end{table}
"""
        # First, remove empty lines:
        true_latex_table_str = "\n".join(
            [line.strip() for line in true_latex_table_str.split("\n") if len(line) > 0]
        )
        return true_latex_table_str.strip()

    def test_simple_table(self):
        """Three-column table (no score column)."""
        latex_table_str = self.model.latex_table(
            columns=["equation", "complexity", "loss"]
        )
        middle_part = r"""
$y = x_{0}$ & $1$ & $1.05$ \\
$y = \cos{\left(x_{0} \right)}$ & $2$ & $0.0232$ \\
$y = x_{0} + x_{1} - \cos{\left(x_{0} x_{1} \right)}$ & $8$ & $1.12 \cdot 10^{-15}$ \\
"""
        true_latex_table_str = (
            TRUE_PREAMBLE + "\n" + self.create_true_latex(middle_part)
        )
        self.assertEqual(latex_table_str, true_latex_table_str)

    def test_other_precision(self):
        """Loss values honour a custom `precision` argument."""
        latex_table_str = self.model.latex_table(
            precision=5, columns=["equation", "complexity", "loss"]
        )
        middle_part = r"""
$y = x_{0}$ & $1$ & $1.0520$ \\
$y = \cos{\left(x_{0} \right)}$ & $2$ & $0.023150$ \\
$y = x_{0} + x_{1} - \cos{\left(x_{0} x_{1} \right)}$ & $8$ & $1.1235 \cdot 10^{-15}$ \\
"""
        true_latex_table_str = (
            TRUE_PREAMBLE + "\n" + self.create_true_latex(middle_part)
        )
        self.assertEqual(latex_table_str, true_latex_table_str)

    def test_include_score(self):
        """The default table includes a fourth Score column."""
        latex_table_str = self.model.latex_table()
        middle_part = r"""
$y = x_{0}$ & $1$ & $1.05$ & $0.0$ \\
$y = \cos{\left(x_{0} \right)}$ & $2$ & $0.0232$ & $3.82$ \\
$y = x_{0} + x_{1} - \cos{\left(x_{0} x_{1} \right)}$ & $8$ & $1.12 \cdot 10^{-15}$ & $5.11$ \\
"""
        true_latex_table_str = (
            TRUE_PREAMBLE
            + "\n"
            + self.create_true_latex(middle_part, include_score=True)
        )
        self.assertEqual(latex_table_str, true_latex_table_str)

    def test_last_equation(self):
        """`indices` restricts the table to a subset of rows."""
        latex_table_str = self.model.latex_table(
            indices=[2], columns=["equation", "complexity", "loss"]
        )
        middle_part = r"""
$y = x_{0} + x_{1} - \cos{\left(x_{0} x_{1} \right)}$ & $8$ & $1.12 \cdot 10^{-15}$ \\
"""
        true_latex_table_str = (
            TRUE_PREAMBLE + "\n" + self.create_true_latex(middle_part)
        )
        self.assertEqual(latex_table_str, true_latex_table_str)

    def test_multi_output(self):
        """Multi-output models render one table per output (y_0, y_1 labels)."""
        equations1 = pd.DataFrame(
            dict(
                equation=["x0", "cos(x0)", "x0 + x1 - cos(x1 * x0)"],
                loss=[1.052, 0.02315, 1.12347e-15],
                complexity=[1, 2, 8],
            )
        )
        equations2 = pd.DataFrame(
            dict(
                equation=["x1", "cos(x1)", "x0 * x0 * x1"],
                loss=[1.32, 0.052, 2e-15],
                complexity=[1, 2, 5],
            )
        )
        equations = [equations1, equations2]
        model = manually_create_model(equations)
        middle_part_1 = r"""
$y_{0} = x_{0}$ & $1$ & $1.05$ & $0.0$ \\
$y_{0} = \cos{\left(x_{0} \right)}$ & $2$ & $0.0232$ & $3.82$ \\
$y_{0} = x_{0} + x_{1} - \cos{\left(x_{0} x_{1} \right)}$ & $8$ & $1.12 \cdot 10^{-15}$ & $5.11$ \\
"""
        middle_part_2 = r"""
$y_{1} = x_{1}$ & $1$ & $1.32$ & $0.0$ \\
$y_{1} = \cos{\left(x_{1} \right)}$ & $2$ & $0.0520$ & $3.23$ \\
$y_{1} = x_{0}^{2} x_{1}$ & $5$ & $2.00 \cdot 10^{-15}$ & $10.3$ \\
"""
        true_latex_table_str = "\n\n".join(
            self.create_true_latex(part, include_score=True)
            for part in [middle_part_1, middle_part_2]
        )
        true_latex_table_str = TRUE_PREAMBLE + "\n" + true_latex_table_str
        latex_table_str = model.latex_table()
        self.assertEqual(latex_table_str, true_latex_table_str)

    def test_latex_float_precision(self):
        """Test that we can print latex expressions with custom precision"""
        expr = sympy.Float(4583.4485748, dps=50)
        self.assertEqual(to_latex(expr, prec=6), r"4583.45")
        self.assertEqual(to_latex(expr, prec=5), r"4583.4")
        self.assertEqual(to_latex(expr, prec=4), r"4583.")
        self.assertEqual(to_latex(expr, prec=3), r"4.58 \cdot 10^{3}")
        self.assertEqual(to_latex(expr, prec=2), r"4.6 \cdot 10^{3}")
        # Multiple numbers:
        x = sympy.Symbol("x")
        expr = x * 3232.324857384 - 1.4857485e-10
        self.assertEqual(
            to_latex(expr, prec=2), r"3.2 \cdot 10^{3} x - 1.5 \cdot 10^{-10}"
        )
        self.assertEqual(
            to_latex(expr, prec=3), r"3.23 \cdot 10^{3} x - 1.49 \cdot 10^{-10}"
        )
        self.assertEqual(
            to_latex(expr, prec=8), r"3232.3249 x - 1.4857485 \cdot 10^{-10}"
        )

    def test_latex_break_long_equation(self):
        """Test that we can break a long equation inside the table"""
        long_equation = """
- cos(x1 * x0) + 3.2 * x0 - 1.2 * x1 + x1 * x1 * x1 + x0 * x0 * x0
+ 5.2 * sin(0.3256 * sin(x2) - 2.6 * x0) + x0 * x0 * x0 * x0 * x0
+ cos(cos(x1 * x0) + 3.2 * x0 - 1.2 * x1 + x1 * x1 * x1 + x0 * x0 * x0)
"""
        long_equation = "".join(long_equation.split("\n")).strip()
        equations = pd.DataFrame(
            dict(
                equation=["x0", "cos(x0)", long_equation],
                loss=[1.052, 0.02315, 1.12347e-15],
                complexity=[1, 2, 30],
            )
        )
        model = manually_create_model(equations)
        latex_table_str = model.latex_table()
        # Long equations are wrapped in a dmath* minipage so breqn can break them.
        middle_part = r"""
$y = x_{0}$ & $1$ & $1.05$ & $0.0$ \\
$y = \cos{\left(x_{0} \right)}$ & $2$ & $0.0232$ & $3.82$ \\
\begin{minipage}{0.8\linewidth} \vspace{-1em} \begin{dmath*} y = x_{0}^{5} + x_{0}^{3} + 3.20 x_{0} + x_{1}^{3} - 1.20 x_{1} - 5.20 \sin{\left(2.60 x_{0} - 0.326 \sin{\left(x_{2} \right)} \right)} - \cos{\left(x_{0} x_{1} \right)} + \cos{\left(x_{0}^{3} + 3.20 x_{0} + x_{1}^{3} - 1.20 x_{1} + \cos{\left(x_{0} x_{1} \right)} \right)} \end{dmath*} \end{minipage} & $30$ & $1.12 \cdot 10^{-15}$ & $1.09$ \\
"""
        true_latex_table_str = (
            TRUE_PREAMBLE
            + "\n"
            + self.create_true_latex(middle_part, include_score=True)
        )
        self.assertEqual(latex_table_str, true_latex_table_str)
def runtests():
    """Run all tests in test.py."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for case in (
        TestPipeline,
        TestBest,
        TestFeatureSelection,
        TestMiscellaneous,
        TestLaTeXTable,
    ):
        suite.addTests(loader.loadTestsFromTestCase(case))
    return unittest.TextTestRunner().run(suite)
| 36,285 | 38.228108 | 413 | py |
PySR | PySR-master/pysr/test/test_torch.py | import unittest
import numpy as np
import pandas as pd
import platform
import sympy
from .. import sympy2torch, PySRRegressor
# Need to initialize Julia before importing torch...
def _import_torch():
if platform.system() == "Darwin":
# Import PyJulia, then Torch
from ..julia_helpers import init_julia
init_julia()
import torch
else:
# Import Torch, then PyJulia
# https://github.com/pytorch/pytorch/issues/78829
import torch
return torch
class TestTorch(unittest.TestCase):
    """End-to-end checks of exporting PySR equations to PyTorch modules."""

    def setUp(self):
        np.random.seed(0)

    def test_sympy2torch(self):
        """A plain sympy expression converts to an equivalent torch module."""
        torch = _import_torch()
        x, y, z = sympy.symbols("x y z")
        cosx = 1.0 * sympy.cos(x) + y
        X = torch.tensor(np.random.randn(1000, 3))
        true = 1.0 * torch.cos(X[:, 0]) + X[:, 1]
        torch_module = sympy2torch(cosx, [x, y, z])
        self.assertTrue(
            np.all(np.isclose(torch_module(X).detach().numpy(), true.detach().numpy()))
        )

    def test_pipeline_pandas(self):
        """A fitted model (pandas input) exports torch from a checkpoint file."""
        torch = _import_torch()
        X = pd.DataFrame(np.random.randn(100, 10))
        y = np.ones(X.shape[0])
        model = PySRRegressor(
            progress=False,
            max_evals=10000,
            model_selection="accuracy",
            extra_sympy_mappings={},
            output_torch_format=True,
        )
        model.fit(X, y)
        # Overwrite the discovered equations with a known hall of fame, then
        # reload it through refresh() so pytorch() exports a known expression.
        equations = pd.DataFrame(
            {
                "Equation": ["1.0", "cos(x1)", "square(cos(x1))"],
                "Loss": [1.0, 0.1, 1e-5],
                "Complexity": [1, 2, 3],
            }
        )
        equations["Complexity Loss Equation".split(" ")].to_csv(
            "equation_file.csv.bkup"
        )
        model.refresh(checkpoint_file="equation_file.csv")
        tformat = model.pytorch()
        self.assertEqual(str(tformat), "_SingleSymPyModule(expression=cos(x1)**2)")
        np.testing.assert_almost_equal(
            tformat(torch.tensor(X.values)).detach().numpy(),
            np.square(np.cos(X.values[:, 1])),  # Selection 1st feature
            decimal=3,
        )

    def test_pipeline(self):
        """Same as test_pipeline_pandas but with a raw numpy feature matrix."""
        torch = _import_torch()
        X = np.random.randn(100, 10)
        y = np.ones(X.shape[0])
        model = PySRRegressor(
            progress=False,
            max_evals=10000,
            model_selection="accuracy",
            output_torch_format=True,
        )
        model.fit(X, y)
        equations = pd.DataFrame(
            {
                "Equation": ["1.0", "cos(x1)", "square(cos(x1))"],
                "Loss": [1.0, 0.1, 1e-5],
                "Complexity": [1, 2, 3],
            }
        )
        equations["Complexity Loss Equation".split(" ")].to_csv(
            "equation_file.csv.bkup"
        )
        model.refresh(checkpoint_file="equation_file.csv")
        tformat = model.pytorch()
        self.assertEqual(str(tformat), "_SingleSymPyModule(expression=cos(x1)**2)")
        np.testing.assert_almost_equal(
            tformat(torch.tensor(X)).detach().numpy(),
            np.square(np.cos(X[:, 1])),  # 2nd feature
            decimal=3,
        )

    def test_mod_mapping(self):
        """sympy.Mod / atanh map onto torch.fmod / torch.atanh."""
        torch = _import_torch()
        x, y, z = sympy.symbols("x y z")
        expression = x**2 + sympy.atanh(sympy.Mod(y + 1, 2) - 1) * 3.2 * z
        module = sympy2torch(expression, [x, y, z])
        X = torch.rand(100, 3).float() * 10
        true_out = (
            X[:, 0] ** 2 + torch.atanh(torch.fmod(X[:, 1] + 1, 2) - 1) * 3.2 * X[:, 2]
        )
        torch_out = module(X)
        np.testing.assert_array_almost_equal(
            true_out.detach(), torch_out.detach(), decimal=3
        )

    def test_custom_operator(self):
        """extra_torch_mappings routes a custom operator to a torch callable."""
        torch = _import_torch()
        X = np.random.randn(100, 3)
        y = np.ones(X.shape[0])
        model = PySRRegressor(
            progress=False,
            max_evals=10000,
            model_selection="accuracy",
            output_torch_format=True,
        )
        model.fit(X, y)
        equations = pd.DataFrame(
            {
                "Equation": ["1.0", "mycustomoperator(x1)"],
                "Loss": [1.0, 0.1],
                "Complexity": [1, 2],
            }
        )
        equations["Complexity Loss Equation".split(" ")].to_csv(
            "equation_file_custom_operator.csv.bkup"
        )
        model.set_params(
            equation_file="equation_file_custom_operator.csv",
            extra_sympy_mappings={"mycustomoperator": sympy.sin},
            extra_torch_mappings={"mycustomoperator": torch.sin},
        )
        model.refresh(checkpoint_file="equation_file_custom_operator.csv")
        self.assertEqual(str(model.sympy()), "sin(x1)")
        # Will automatically use the set global state from get_hof.
        tformat = model.pytorch()
        self.assertEqual(str(tformat), "_SingleSymPyModule(expression=sin(x1))")
        np.testing.assert_almost_equal(
            tformat(torch.tensor(X)).detach().numpy(),
            np.sin(X[:, 1]),
            decimal=3,
        )

    def test_feature_selection_custom_operators(self):
        """Feature selection + custom operators stay consistent across
        numpy prediction and the exported torch module."""
        torch = _import_torch()
        rstate = np.random.RandomState(0)
        X = pd.DataFrame({f"k{i}": rstate.randn(2000) for i in range(10, 21)})
        # Taylor-style approximation of cos used both as the Julia operator
        # and as the sympy/torch mapping.
        cos_approx = lambda x: 1 - (x**2) / 2 + (x**4) / 24 + (x**6) / 720
        y = X["k15"] ** 2 + 2 * cos_approx(X["k20"])
        model = PySRRegressor(
            progress=False,
            unary_operators=["cos_approx(x) = 1 - x^2 / 2 + x^4 / 24 + x^6 / 720"],
            select_k_features=3,
            maxsize=10,
            early_stop_condition=1e-5,
            extra_sympy_mappings={"cos_approx": cos_approx},
            extra_torch_mappings={"cos_approx": cos_approx},
            random_state=0,
            deterministic=True,
            procs=0,
            multithreading=False,
        )
        np.random.seed(0)
        model.fit(X.values, y.values)
        torch_module = model.pytorch()
        np_output = model.predict(X.values)
        torch_output = torch_module(torch.tensor(X.values)).detach().numpy()
        np.testing.assert_almost_equal(y.values, np_output, decimal=3)
        np.testing.assert_almost_equal(y.values, torch_output, decimal=3)
def runtests():
    """Run all tests in test_torch.py."""
    suite = unittest.TestSuite()
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestTorch))
    return unittest.TextTestRunner().run(suite)
| 6,535 | 30.12381 | 87 | py |
PySR | PySR-master/pysr/test/__init__.py | from .test import runtests
from .test_env import runtests as runtests_env
from .test_jax import runtests as runtests_jax
from .test_torch import runtests as runtests_torch
from .test_cli import runtests as runtests_cli
| 219 | 35.666667 | 50 | py |
CongFu | CongFu-main/utils.py | import torch
from torch_geometric.data import Data
import pandas as pd
import numpy as np
from sklearn.metrics import precision_recall_curve, auc, roc_auc_score
from typing import Tuple
import json
from rdkit import Chem
from rdkit.Chem.rdchem import BondType as BT
from tqdm import tqdm
# Atomic numbers 1..118; an atom's type feature is its index in this list.
ATOM_LIST = list(range(1, 119))
# RDKit chirality tags; the index is used as the second node feature.
CHIRALITY_LIST = [
    Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
    Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
    Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
    Chem.rdchem.ChiralType.CHI_OTHER
]
# Bond types and bond directions; indices become categorical edge features.
BOND_LIST = [BT.SINGLE, BT.DOUBLE, BT.TRIPLE, BT.AROMATIC]
BONDDIR_LIST = [
    Chem.rdchem.BondDir.NONE,
    Chem.rdchem.BondDir.ENDUPRIGHT,
    Chem.rdchem.BondDir.ENDDOWNRIGHT
]
def split_fold(dataset, fold: dict[str, list[int]]):
    """Split *dataset* into (train, test) frames by positional indices.

    *fold* maps "train"/"test" to lists of integer row positions.
    """
    train_part = dataset.iloc[fold["train"]]
    test_part = dataset.iloc[fold["test"]]
    return train_part, test_part
def calculate_roc_auc(targets, preds):
    """Area under the ROC curve for binary targets vs. predicted scores."""
    score = roc_auc_score(targets, preds)
    return score
def calculate_auprc(targets, preds):
    """Area under the precision-recall curve (AUPRC)."""
    precision, recall, _ = precision_recall_curve(targets, preds)
    return auc(recall, precision)
def get_datasets(data_folder_path: str, fold_number: int, synergy_score: str, transductive: bool, inductive_set_name: str) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Load the synergy dataset, its train/test split, and cell-line features.

    Returns (full dataset, train split, test split, cell-line features).
    In the transductive setting the split is taken from a JSON fold file;
    otherwise pre-split inductive train/test feather files are read.
    """
    cell_lines = pd.read_feather(data_folder_path + f"cell_lines.feather").set_index("cell_line_name")
    cell_lines = cell_lines.astype(np.float32)
    if transductive:
        dataset = pd.read_feather(data_folder_path + f"{synergy_score}/{synergy_score}.feather")
        with open(data_folder_path + f"{synergy_score}/{synergy_score}.json") as f:
            folds = json.load(f)
        # Fold files hold several named splits; pick the requested one.
        fold = folds[f"fold_{fold_number}"]
        train_dataset, test_dataset = split_fold(dataset, fold)
    else:
        inductive_set_name = inductive_set_name  # no-op self-assignment kept from original
        train_dataset = pd.read_feather(data_folder_path + f"{synergy_score}/{inductive_set_name}/train_{fold_number}.feather")
        test_dataset = pd.read_feather(data_folder_path + f"{synergy_score}/{inductive_set_name}/test_{fold_number}.feather")
        # Reconstruct the full dataset so all drugs can be featurized at once.
        dataset = pd.concat((train_dataset, test_dataset))
    return dataset, train_dataset, test_dataset, cell_lines
def _get_drug_tokens(smiles):
    """Featurize a SMILES string as a torch_geometric ``Data`` graph.

    Nodes carry ``[atom-type index, chirality index]`` (long tensors); each
    chemical bond contributes two directed edges, both with
    ``[bond-type index, bond-direction index]`` features.
    """
    mol = Chem.MolFromSmiles(smiles)
    mol = Chem.AddHs(mol)  # include explicit hydrogens as graph nodes
    # (Removed unused locals from the original: atom/bond counts and the
    # collected atomic numbers were computed but never used.)
    type_idx = []
    chirality_idx = []
    for atom in mol.GetAtoms():
        type_idx.append(ATOM_LIST.index(atom.GetAtomicNum()))
        chirality_idx.append(CHIRALITY_LIST.index(atom.GetChiralTag()))
    x1 = torch.tensor(type_idx, dtype=torch.long).view(-1, 1)
    x2 = torch.tensor(chirality_idx, dtype=torch.long).view(-1, 1)
    x = torch.cat([x1, x2], dim=-1)
    row, col, edge_feat = [], [], []
    for bond in mol.GetBonds():
        start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
        # One undirected bond -> two directed edges with identical features.
        row += [start, end]
        col += [end, start]
        edge_feat.append([
            BOND_LIST.index(bond.GetBondType()),
            BONDDIR_LIST.index(bond.GetBondDir())
        ])
        edge_feat.append([
            BOND_LIST.index(bond.GetBondType()),
            BONDDIR_LIST.index(bond.GetBondDir())
        ])
    edge_index = torch.tensor([row, col], dtype=torch.long)
    edge_attr = torch.tensor(np.array(edge_feat), dtype=torch.long)
    return Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
def get_mol_dict(df):
    """Map every unique drug id in *df* to its featurized molecular graph."""
    first = df[["Drug1_ID", "Drug1"]].rename(columns={"Drug1_ID": "id", "Drug1": "drug"})
    second = df[["Drug2_ID", "Drug2"]].rename(columns={"Drug2_ID": "id", "Drug2": "drug"})
    mols = pd.concat([first, second], axis=0, ignore_index=True).drop_duplicates(subset=["id"])
    mapping = {}
    for _, entry in tqdm(mols.iterrows(), total=len(mols)):
        mapping[entry["id"]] = _get_drug_tokens(entry["drug"])
    return mapping
CongFu | CongFu-main/dataset.py | import torch
from torch.utils.data import Dataset
# Column names expected in the drug-combination dataframe.
DRUG1_ID_COLUMN_NAME = "Drug1_ID"
DRUG2_ID_COLUMN_NAME = "Drug2_ID"
CELL_LINE_COLUMN_NAME = "Cell_Line_ID"
class DrugCombDataset(Dataset):
    """Drug-pair samples: two molecular graphs, a cell-line vector, a label.

    *drugcomb* is the combination table, *cell_lines* a frame indexed by
    cell-line name, and *mol_mapping* maps drug ids to featurized graphs.
    An optional *transform* is applied to both drug graphs.
    """

    def __init__(self, drugcomb, cell_lines, mol_mapping, transform=None):
        self.drugcomb = drugcomb
        self.cell_lines = cell_lines
        self.mol_mapping = mol_mapping
        self.transform = transform
        # Labels are extracted once up front; float cast happens per item.
        self.targets = torch.from_numpy(drugcomb['target'].values)

    def __len__(self):
        return len(self.drugcomb)

    def __getitem__(self, idx):
        row = self.drugcomb.iloc[idx]
        first = self.mol_mapping[row[DRUG1_ID_COLUMN_NAME]]
        second = self.mol_mapping[row[DRUG2_ID_COLUMN_NAME]]
        if self.transform is not None:
            first = self.transform(first)
            second = self.transform(second)
        features = self.cell_lines.loc[row[CELL_LINE_COLUMN_NAME]].values.flatten()
        cell_line_embeddings = torch.tensor(features)
        label = self.targets[idx].unsqueeze(-1).float()
        return (first, second, cell_line_embeddings, label)
CongFu | CongFu-main/layers.py | import torch
from torch import nn
from torch.nn import functional as F
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.data import Data
from torch_geometric.utils import add_self_loops
from typing import Union, Tuple
class ContextPropagation(nn.Module):
    """Fuses a per-graph context vector into every node feature.

    Each node receives Linear(context) + Linear(x); the context row of a
    graph is repeated once per node of that graph (node counts come from
    ``graph.ptr``).
    """

    def __init__(self, context_input_dim: int, graph_input_dim: int, out_channels: int) -> None:
        super().__init__()
        self.context_linear = nn.Linear(context_input_dim, out_channels, bias=True)
        self.x_linear = nn.Linear(graph_input_dim, out_channels, bias=False)

    def forward(self, context: torch.Tensor, graph: Data) -> Data:
        # One context row per graph -> one row per node of that graph.
        nodes_per_graph = graph.ptr.diff()
        per_node_context = torch.repeat_interleave(context, nodes_per_graph, dim=0)
        graph.x = self.context_linear(per_node_context) + self.x_linear(graph.x)
        return graph
class GINEConv(MessagePassing):
    """GIN-style convolution with categorical edge features.

    NOTE(review): ``edge_embedding2`` (bond direction) is constructed and
    initialized but its contribution is commented out in ``forward`` —
    presumably ablated; confirm before removing the embedding itself.
    """

    def __init__(self, input_dim: int, output_dim: int, num_bond_type: int, num_bond_direction: int) -> None:
        super().__init__()
        # Node-update MLP applied to the aggregated messages.
        self.mlp = nn.Sequential(
            nn.Linear(input_dim, 2*input_dim),
            nn.ReLU(),
            nn.Linear(2*input_dim, output_dim)
        )
        self.edge_embedding1 = nn.Embedding(num_bond_type, input_dim)
        self.edge_embedding2 = nn.Embedding(num_bond_direction, input_dim)
        nn.init.xavier_uniform_(self.edge_embedding1.weight)
        nn.init.xavier_uniform_(self.edge_embedding2.weight)

    def forward(self, x: torch.Tensor, edge_index: torch.Tensor, edge_attr: torch.Tensor) -> torch.Tensor:
        # Add self-loops so each node also aggregates its own state.
        edge_index = add_self_loops(edge_index, num_nodes=x.size(0))[0]
        # Self-loop edges get a dedicated bond-type id (4) and direction 0.
        self_loop_attr = torch.zeros(x.size(0), 2)
        self_loop_attr[:, 0] = 4
        self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
        edge_attr = torch.cat((edge_attr, self_loop_attr), dim=0)
        edge_embeddings = self.edge_embedding1(edge_attr[:, 0])  # + self.edge_embedding2(edge_attr[:, 1])
        return self.propagate(edge_index, x=x, edge_attr=edge_embeddings)

    def message(self, x_j: torch.Tensor, edge_attr: torch.Tensor) -> torch.Tensor:
        # Message = ReLU(neighbor feature + edge embedding).
        return (x_j + edge_attr).relu()

    def update(self, aggr_out: torch.Tensor) -> torch.Tensor:
        return self.mlp(aggr_out)
class GraphUpdate(nn.Module):
    """One GNN step followed by batch-norm and an optional ReLU."""

    def __init__(self, gnn: MessagePassing, output_dim: int, use_relu: bool = True) -> None:
        super().__init__()
        self.gnn = gnn
        self.batch_norm = nn.BatchNorm1d(output_dim)
        self.use_relu = use_relu

    def forward(self, graph: Data) -> Data:
        out = self.gnn(graph.x, graph.edge_index, graph.edge_attr)
        out = self.batch_norm(out)
        # The final layer of a stack typically skips the non-linearity.
        graph.x = F.relu(out) if self.use_relu else out
        return graph
class Bottleneck(nn.Module):
    """Aggregates node information from both graphs back into the context."""

    def __init__(self, gnn: MessagePassing) -> None:
        super().__init__()
        self.gnn = gnn

    def forward(self, graphA: Data, graphB: Data, context: torch.Tensor) -> torch.Tensor:
        # Bipartite message passing: graph nodes -> per-graph context rows.
        from_a = self.gnn((graphA.x, context), graphA.context_x_edges)
        from_b = self.gnn((graphB.x, context), graphB.context_x_edges)
        return from_a + from_b
class BasicLayer(nn.Module):
    """Pre-fusion layer: the same graph update applied to both drug graphs."""

    def __init__(self, out_channels: int, graph_update_gnn: MessagePassing, last_layer: bool = False):
        super().__init__()
        self.graph_update = GraphUpdate(graph_update_gnn, out_channels, use_relu=not last_layer)

    def _forward(self, graph: Data) -> Data:
        return self.graph_update(graph)

    def forward(self, graphA: Data, graphB: Data) -> Tuple[Data, Data]:
        return self._forward(graphA), self._forward(graphB)
class CongFuLayer(nn.Module):
    """One CongFu block: context propagation -> graph update -> bottleneck."""

    def __init__(self, context_input_dim: int, graph_input_dim: int, out_channels: int,
                 graph_update_gnn: MessagePassing, bottleneck_gnn: MessagePassing, last_layer: bool = False):
        super().__init__()
        self.context_propagation = ContextPropagation(context_input_dim, graph_input_dim, out_channels)
        self.graph_update = GraphUpdate(graph_update_gnn, out_channels, use_relu=not last_layer)
        self.bottleneck = Bottleneck(bottleneck_gnn)

    def forward(self, graphA: Data, graphB: Data, context: torch.Tensor) -> Tuple[Data, Data, torch.Tensor]:
        # Inject the (unchanged) context into each drug graph and run one
        # GNN update, then refresh the context from both updated graphs.
        graphA = self.graph_update(self.context_propagation(context, graphA))
        graphB = self.graph_update(self.context_propagation(context, graphB))
        context = self.bottleneck(graphA, graphB, context)
        return graphA, graphB, context
class LinearBlock(nn.Module):
    """Linear -> activation (ReLU or LeakyReLU) -> Dropout.

    Args:
        input_dim: size of the incoming features.
        output_dim: size of the produced features.
        activation: "relu" or "leaky_relu".
        dropout: dropout probability (0.0 disables it).
        slope: negative slope used when activation == "leaky_relu".

    Raises:
        ValueError: if *activation* is not a supported name. (Previously an
        unknown name left ``self.activation`` unset, so construction
        succeeded and the error surfaced only later, inside ``forward``,
        as an AttributeError.)
    """

    def __init__(self, input_dim: int, output_dim: int, activation: str = "relu",
                 dropout: float = 0.0, slope: float = -0.01):
        super().__init__()
        self.fc = nn.Linear(input_dim, output_dim)
        if activation == "relu":
            self.activation = nn.ReLU()
        elif activation == "leaky_relu":
            self.activation = nn.LeakyReLU(slope)
        else:
            raise ValueError(f"unsupported activation: {activation!r}")
        self.dropout = nn.Dropout(dropout)

    def forward(self, X: torch.Tensor) -> torch.Tensor:
        return self.dropout(self.activation(self.fc(X)))
def create_mlp(input_dim: int, hidden_dims: list[int], output_dim: int, activation: str, dropout: float = 0.0, slope: float = -0.01) -> nn.Sequential:
    """Build an MLP of LinearBlocks topped with a plain Linear head.

    Hidden linear layers get Kaiming-uniform weights (matched to
    *activation*) and biases drawn from U(-1, 0); the head gets
    Xavier-normal weights with the same bias distribution.
    """
    dims = [input_dim] + list(hidden_dims)
    blocks = [
        LinearBlock(d_in, d_out, activation, dropout, slope=slope)
        for d_in, d_out in zip(dims, dims[1:])
    ]
    mlp = nn.Sequential(*blocks, nn.Linear(hidden_dims[-1], output_dim))
    linear_layers = [m for m in mlp.modules() if isinstance(m, nn.Linear)]
    for hidden in linear_layers[:-1]:
        nn.init.kaiming_uniform_(hidden.weight, a=slope, nonlinearity=activation)
        nn.init.uniform_(hidden.bias, -1, 0)
    head = linear_layers[-1]
    nn.init.xavier_normal_(head.weight)
    nn.init.uniform_(head.bias, -1, 0)
    return mlp
CongFu | CongFu-main/models.py | import torch
from torch import nn
from torch.nn import functional as F
from torch_geometric.nn import global_mean_pool
from torch_geometric.nn.conv import MessagePassing, GATConv
from torch_geometric.data import Data
from layers import CongFuLayer, BasicLayer, GINEConv, create_mlp
# Embedding-table sizes for the categorical node/edge features
# (atom type, chirality tag, bond type, bond direction).
NUM_ATOM_TYPE = 119
NUM_CHIRALITY_TAG = 3
NUM_BOND_TYPE = 5
NUM_BOND_DIRECTION = 3
class CongFuBasedModel(nn.Module):
    """CongFu model: two drug graphs fused with a cell-line context vector.

    The first ``inject_layer`` GNN layers run without the context; the
    remaining ``num_layers - inject_layer`` layers are CongFu layers that
    inject the context into both graphs and refresh it via a bottleneck.
    Mean-pooled graph embeddings plus the final context feed an MLP that
    emits a single synergy logit per sample.
    """

    def __init__(self, num_layers=5, inject_layer=3, emb_dim=300, mlp_hidden_dims = [256, 128, 64], feature_dim=512, context_dim=908, device=torch.device('cuda')) -> None:
        # NOTE(review): the mutable default `mlp_hidden_dims` is only read,
        # and the `device` default is evaluated at import time — confirm a
        # CUDA device exists wherever this module is imported with defaults.
        super().__init__()
        self.emb_dim = emb_dim
        self.device = device
        self.context_dim = context_dim
        # Categorical node-feature embeddings (atom type, chirality tag).
        self.x_embedding1 = nn.Embedding(NUM_ATOM_TYPE, emb_dim)
        self.x_embedding2 = nn.Embedding(NUM_CHIRALITY_TAG, emb_dim)
        nn.init.xavier_uniform_(self.x_embedding1.weight)
        nn.init.xavier_uniform_(self.x_embedding2.weight)
        basic_layers_number, congfu_layers_number = inject_layer, num_layers - inject_layer
        # NOTE(review): both layer stacks are plain Python lists, not
        # nn.ModuleList, so their parameters are NOT registered on this
        # module (model.parameters()/state_dict will miss them) — confirm.
        self.basic_layers = self._generate_basic_layers(basic_layers_number)
        self.congfu_layers = self._generate_congfu_layers(congfu_layers_number)
        # Projects the raw cell-line vector into the GNN embedding space.
        self.context_encoder = create_mlp(context_dim, [feature_dim], emb_dim, activation = "relu")
        self.output_transformation = create_mlp(emb_dim, [feature_dim], feature_dim//2, activation = "relu")
        # Head input: two pooled graphs (feature_dim//2 each) + context (emb_dim).
        self.mlp = create_mlp(self.emb_dim + feature_dim, mlp_hidden_dims, 1, activation="leaky_relu")

    def _generate_basic_layers(self, number_of_layers: int) -> list[MessagePassing]:
        """Context-free GINE layers; the last one skips its ReLU."""
        basic_layers = []
        for i in range(number_of_layers):
            graph_update_gnn = GINEConv(self.emb_dim, self.emb_dim, NUM_BOND_TYPE, NUM_BOND_DIRECTION).to(self.device)
            last_layer = i == number_of_layers - 1
            basic_layer = BasicLayer(self.emb_dim, graph_update_gnn, last_layer).to(self.device)
            basic_layers.append(basic_layer)
        return basic_layers

    def _generate_congfu_layers(self, number_of_layers: int) -> list[MessagePassing]:
        """CongFu layers with a GAT bottleneck from nodes back to the context."""
        congfu_layers = []
        for i in range(number_of_layers):
            graph_update_gnn = GINEConv(self.emb_dim, self.emb_dim, NUM_BOND_TYPE, NUM_BOND_DIRECTION).to(self.device)
            bottleneck_gnn = GATConv(in_channels=(-1, -1), out_channels=self.emb_dim, add_self_loops=False)
            last_layer = i == number_of_layers - 1
            congfu_layer = CongFuLayer(self.emb_dim, self.emb_dim, self.emb_dim, graph_update_gnn, bottleneck_gnn, last_layer).to(self.device)
            congfu_layers.append(congfu_layer)
        return congfu_layers

    def _create_context_graph_edges(self, graph: Data) -> torch.Tensor:
        # Bipartite edges: node i -> its graph's context row (graph.batch[i]).
        return torch.cat([
            torch.arange(graph.batch.size(0)).unsqueeze(0).to(self.device),
            graph.batch.unsqueeze(0),
        ], dim=0)

    def _embed_x(self, graph: Data) -> Data:
        # Replace raw [atom-type, chirality] indices with summed embeddings.
        embedding_1 = self.x_embedding1(graph.x[:, 0])
        embedding_2 = self.x_embedding2(graph.x[:, 1])
        graph.x = embedding_1 + embedding_2
        return graph

    def forward(self, graphA: Data, graphB: Data, context: torch.Tensor) -> torch.Tensor:
        """Predict one synergy logit per (drug A, drug B, cell line) triple."""
        graphA.context_x_edges = self._create_context_graph_edges(graphA)
        graphB.context_x_edges = self._create_context_graph_edges(graphB)
        graphA = self._embed_x(graphA)
        graphB = self._embed_x(graphB)
        context = self.context_encoder(context)
        for layer in self.basic_layers:
            graphA, graphB = layer(graphA, graphB)
        for layer in self.congfu_layers:
            graphA, graphB, context = layer(graphA, graphB, context)
        # Mean-pool node embeddings per graph, then reduce dimensionality.
        graphA.x = global_mean_pool(graphA.x, graphA.batch)
        graphA.x = self.output_transformation(graphA.x)
        graphB.x = global_mean_pool(graphB.x, graphB.batch)
        graphB.x = self.output_transformation(graphB.x)
        input_ = torch.concat((graphA.x, graphB.x, context), dim=1)
        return self.mlp(input_)
| 4,072 | 41.427083 | 171 | py |
CongFu | CongFu-main/train.py | import torch
from torch import nn
from torch_geometric.loader import DataLoader
import wandb
from dataclasses import dataclass
from tqdm import tqdm
import argparse
from dataset import DrugCombDataset
from models import CongFuBasedModel
from utils import get_datasets, get_mol_dict, calculate_auprc, calculate_roc_auc
# Weights & Biases logging destination; replace with your own project/entity.
WANDB_PROJECT = "your_wandb_project_name"
WANDB_ENTITY = "your_wandb_entity"
@dataclass
class TrainConfiguration:
    """Bundle of run settings consumed by train_model()."""

    synergy_score: str          # e.g. "loewe"; selects the dataset files
    transductive: bool          # True -> JSON fold split, False -> inductive files
    inductive_set_name: str     # only used when transductive is False
    fold_number: int            # which cross-validation fold to run
    batch_size: int
    lr: float                   # Adam learning rate
    number_of_epochs: int
    data_folder_path: str       # root of the preprocessed data
def evaluate_mlp(model: nn.Module, loader: DataLoader, loss_fn, device: torch.device) -> None:
    """Run one evaluation pass and log AUPRC / ROC-AUC / mean loss to wandb."""
    model.eval()
    all_preds, all_labels = [], []
    total_loss = 0.0
    for drugA, drugB, cell_line, target in loader:
        drugA, drugB, cell_line, target = (t.to(device) for t in (drugA, drugB, cell_line, target))
        with torch.no_grad():
            output = model(drugA, drugB, cell_line)
            loss = loss_fn(output, target)
        all_preds.append(output.detach().cpu())
        all_labels.append(target.detach().cpu())
        total_loss += loss.item()
    mean_loss = total_loss / len(loader)
    preds = torch.cat(all_preds)
    labels = torch.cat(all_labels)
    auprc = calculate_auprc(labels, preds)
    auc = calculate_roc_auc(labels, preds)
    if wandb.run is not None:
        wandb.log({"val_auprc": auprc, "val_auc": auc, "val_loss": mean_loss})
def train_model(model: nn.Module, config: TrainConfiguration, device: torch.device) -> None:
    """Train *model* on the configured synergy dataset with per-epoch evaluation."""
    dataset, train_dataset, test_dataset, cell_lines = get_datasets(config.data_folder_path, config.fold_number, config.synergy_score, config.transductive, config.inductive_set_name)
    # Featurize over the full dataset so drugs from both splits are covered.
    mol_mapping = get_mol_dict(dataset)
    train_set = DrugCombDataset(train_dataset, cell_lines, mol_mapping)
    test_set = DrugCombDataset(test_dataset, cell_lines, mol_mapping)
    train_loader = DataLoader(train_set, batch_size=config.batch_size, num_workers=2, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=config.batch_size, num_workers=2, shuffle=False)
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
    loss_fn = nn.BCEWithLogitsLoss()
    # NOTE(review): model.train() is only called once, before the first epoch,
    # while evaluate_mlp() switches to eval mode every epoch — so epochs after
    # the first appear to train in eval mode; confirm whether this is intended.
    model.train()
    for _ in tqdm(range(config.number_of_epochs)):
        epoch_preds, epoch_labels = [], []
        epoch_loss = 0.0
        for batch in train_loader:
            batch = [tensor.to(device) for tensor in batch]
            drugA, drugB, cell_line, target = batch
            optimizer.zero_grad()
            output = model(drugA, drugB, cell_line)
            loss = loss_fn(output, target)
            # Collected on CPU so epoch-level metrics don't hold GPU memory.
            epoch_preds.append(output.detach().cpu())
            epoch_labels.append(target.detach().cpu())
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
        epoch_loss = epoch_loss / len(train_loader)
        epoch_preds = torch.cat(epoch_preds)
        epoch_labels = torch.cat(epoch_labels)
        auprc = calculate_auprc(epoch_labels, epoch_preds)
        auc = calculate_roc_auc(epoch_labels, epoch_preds)
        if wandb.run is not None:
            wandb.log({"train_auprc": auprc, "train_auc": auc, "train_loss": epoch_loss})
        # Per-epoch validation pass on the held-out split.
        evaluate_mlp(model, test_loader, loss_fn, device)
def train(config):
    """Entry point: build a CongFu model from the parsed CLI *config* and train it."""
    if config.with_wandb:
        wandb.init(config=config, project=WANDB_PROJECT, entity=WANDB_ENTITY)
        print(f'Hyper parameters:\n {wandb.config}')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = CongFuBasedModel(
        num_layers=config.num_layers,
        inject_layer = config.inject_layer,
        emb_dim = config.emb_dim,
        feature_dim = config.feature_dim,
        context_dim = config.context_dim,
        device=device
    )
    # (sic) original variable name kept — "train_configuraion".
    train_configuraion = TrainConfiguration(
        synergy_score=config.synergy_score,
        transductive = config.transductive,
        inductive_set_name = config.inductive_set_name,
        lr = config.lr,
        number_of_epochs = config.number_of_epochs,
        data_folder_path=config.data_folder_path,
        fold_number = config.fold_number,
        batch_size=config.batch_size
    )
    train_model(model, train_configuraion, device)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train a CongFu-based model')
    # Model architecture.
    parser.add_argument('--num_layers', type=int, default=5)
    parser.add_argument('--inject_layer', type=int, default=3)
    parser.add_argument('--emb_dim', type=int, default=300)
    parser.add_argument('--feature_dim', type=int, default=512)
    parser.add_argument('--context_dim', type=int, default=908)
    # Dataset and split selection.
    parser.add_argument('--synergy_score', type=str, default="loewe")
    parser.add_argument('--transductive', action='store_true')
    parser.add_argument('--inductive_set_name', type=str, default="leave_comb")
    parser.add_argument('--fold_number', type=int, default=0)
    # Optimization settings.
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--number_of_epochs', type=int, default=100)
    parser.add_argument('--data_folder_path', type=str, default="data/preprocessed/")
    # Enables Weights & Biases logging (see WANDB_PROJECT / WANDB_ENTITY).
    parser.add_argument('--with_wandb', action='store_true')
    config = parser.parse_args()
    train(config)
| 5,447 | 34.376623 | 182 | py |
ilf | ilf-master/ilf/__main__.py | import sys
import numpy
import torch
import random
import argparse
import logging
from .fuzzers import Environment
from .fuzzers.random import PolicyRandom, ObsRandom
from .fuzzers.imitation import PolicyImitation, ObsImitation
from .fuzzers.symbolic import PolicySymbolic, ObsSymbolic
from .fuzzers.sym_plus import PolicySymPlus, ObsSymPlus
from .fuzzers.mix import PolicyMix, ObsMix
from .execution import Execution
from .common import set_logging
def get_args():
    """Build the fuzzer's CLI parser and return the parsed argument namespace."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--execution', dest='execution', type=str, default='./execution.so')
    cli.add_argument('--proj', dest='proj', type=str, default=None)
    cli.add_argument('--contract', dest='contract', type=str, default=None)
    cli.add_argument('--limit', dest='limit', type=int, default=100)
    cli.add_argument('--fuzzer', dest='fuzzer',
                     choices=['random', 'imitation', 'symbolic', 'sym_plus', 'mix'],
                     default='random')
    cli.add_argument('--model', dest='model', type=str, default=None)
    cli.add_argument('--seed', dest='seed', type=int, default=1)
    cli.add_argument('--log_to_file', dest='log_to_file', type=str, default=None)
    cli.add_argument('-v', dest='v', type=int, default=1, metavar='LOG_LEVEL',
                     help='Log levels: 0 - NOTSET, 1 - INFO, 2 - DEBUG, 3 - ERROR')
    cli.add_argument('--train_dir', dest='train_dir', type=str, default=None)
    cli.add_argument('--dataset_dump_path', dest='dataset_dump_path', type=str, default=None)
    return cli.parse_args()
def init(args):
    """Configure logging and seed every RNG source for reproducible runs."""
    seed = args.seed
    random.seed(seed)
    set_logging(args.v, args.log_to_file)
    torch.manual_seed(seed)
    numpy.random.seed(seed)
    # Symbolic execution can recurse deeply; raise the interpreter's limit.
    sys.setrecursionlimit(8000)
def main():
    """Entry point: parse arguments, build the chosen policy/observation pair
    and run the fuzzing loop.

    NOTE(review): `execution`, `contract_manager`, `account_manager` and
    `backend_loggers` are only bound when --proj is given; every fuzzer branch
    except imitation-training dereferences them, so omitting --proj raises
    NameError — confirm --proj is effectively mandatory.
    """
    args = get_args()
    init(args)
    LOG = logging.getLogger(__name__)
    LOG.info('fuzzing start')
    if args.proj is not None:
        # Attach the native execution backend and pull contract/account state.
        execution = Execution(args.execution)
        backend_loggers = execution.set_backend(args.proj)
        contract_manager = execution.get_contracts()
        if args.contract is not None:
            # Restrict fuzzing to a single named contract.
            contract_manager.set_fuzz_contracts([args.contract])
        account_manager = execution.get_accounts()
    if args.fuzzer == 'random':
        policy = PolicyRandom(execution, contract_manager, account_manager)
        obs = ObsRandom(contract_manager, account_manager, args.dataset_dump_path)
    elif args.fuzzer == 'imitation':
        assert args.model is not None, 'please specify model directory for using imitation learning policy'
        if args.train_dir is not None:
            # Training mode: no live execution backend is needed.
            policy = PolicyImitation(None, None, None, args)
            policy.start_train()
            return
        else:
            policy = PolicyImitation(execution, contract_manager, account_manager, args)
            policy.load_model()
        obs = ObsImitation(contract_manager, account_manager, args.dataset_dump_path)
    elif args.fuzzer == 'symbolic':
        policy = PolicySymbolic(execution, contract_manager, account_manager)
        obs = ObsSymbolic(contract_manager, account_manager, args.dataset_dump_path, backend_loggers)
    elif args.fuzzer == 'sym_plus':
        policy = PolicySymPlus(execution, contract_manager, account_manager)
        obs = ObsSymPlus(contract_manager, account_manager, args.dataset_dump_path, backend_loggers)
    elif args.fuzzer == 'mix':
        policy = PolicyMix(execution, contract_manager, account_manager, args)
        obs = ObsMix(contract_manager, account_manager, args.dataset_dump_path, backend_loggers)
    environment = Environment(args.limit, args.seed)
    environment.fuzz_loop(policy, obs)
if __name__ == '__main__':
main() | 3,708 | 39.315217 | 132 | py |
ilf | ilf-master/ilf/fuzzers/environment.py | import random
import numpy
import torch
import logging
from ..execution import Execution, Tx
from ..ethereum import Method
from .random import PolicyRandom
from .symbolic import PolicySymbolic
from .sym_plus import PolicySymPlus
from .mix import PolicyMix, ObsMix
from .imitation import PolicyImitation
LOG = logging.getLogger(__name__)
class Environment:
    """Drives a fuzzing campaign: warms up every contract, re-seeds the RNGs,
    then repeatedly asks the policy for transactions until the limit is hit."""

    def __init__(self, limit, seed):
        # Maximum number of main-loop iterations and the RNG seed for the run.
        self.limit = limit
        self.seed = seed

    def fuzz_loop(self, policy, obs):
        """Main fuzzing loop for one (policy, observation) pair."""
        obs.init()
        LOG.info(obs.stat)
        LOG.info('initial calls start')
        self.init_txs(policy, obs)
        LOG.info('initial calls end')

        # Re-seed all RNG sources so the main loop is reproducible regardless
        # of how many random draws the warm-up phase consumed.
        random.seed(self.seed)
        torch.manual_seed(self.seed)
        numpy.random.seed(self.seed)

        learned_policies = (PolicyRandom, PolicyImitation)
        symbolic_policies = (PolicySymbolic, PolicySymPlus)
        for step in range(1, self.limit + 1):
            # In the second half of the run, top up every fuzzed contract's
            # balance so value-dependent code paths stay reachable.
            if policy.__class__ in learned_policies and step > self.limit // 2:
                for contract_name in policy.contract_manager.fuzz_contract_names:
                    contract = policy.contract_manager[contract_name]
                    policy.execution.set_balance(contract.addresses[0], 10 ** 29)

            tx = policy.select_tx(obs)
            if tx is None:
                break
            logger = policy.execution.commit_tx(tx)
            coverage_before = obs.stat.get_insn_coverage(tx.contract)
            obs.update(logger, False)
            coverage_after = obs.stat.get_insn_coverage(tx.contract)
            # Symbolic policies terminate once a tx yields no new coverage.
            if policy.__class__ in symbolic_policies and coverage_after - coverage_before < 1e-5:
                break
            LOG.info(obs.stat)
            if policy.__class__ not in symbolic_policies and step % 50 == 0:
                # Periodically reset stateful policies / observations.
                policy.reset()
                if policy.__class__ == PolicyImitation:
                    policy.clear_history()
                if policy.__class__ == PolicyMix and policy.policy_fuzz.__class__ == PolicyImitation:
                    policy.policy_fuzz.clear_history()
                if obs.__class__ == ObsMix:
                    obs.reset()

    def init_txs(self, policy, obs):
        """Warm-up: exercise the fallback plus every non-payable method of
        each fuzzed contract once, using a plain random policy."""
        rand_policy = PolicyRandom(policy.execution, policy.contract_manager, policy.account_manager)
        for name in policy.contract_manager.fuzz_contract_names:
            contract = policy.contract_manager[name]
            if Method.FALLBACK not in contract.abi.methods_by_name:
                fallback_tx = Tx(rand_policy, contract.name, contract.addresses[0],
                                 Method.FALLBACK, bytes(), [], 0, 0, 0, True)
                obs.update(rand_policy.execution.commit_tx(fallback_tx), True)
                LOG.info(obs.stat)
            for method in contract.abi.methods:
                if contract.is_payable(method.name):
                    continue
                method_tx = rand_policy.select_tx_for_method(contract, method, obs)
                # NOTE(review): amount=1 on a non-payable method looks
                # deliberate (probing the revert path) — confirm.
                method_tx.amount = 1
                obs.update(rand_policy.execution.commit_tx(method_tx), True)
                LOG.info(obs.stat)
ilf | ilf-master/ilf/fuzzers/imitation/layers.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
class GraphConvolution(Module):
    """A single GCN layer (Kipf & Welling, https://arxiv.org/abs/1609.02907):
    computes ``adj @ (input @ weight) + bias``."""

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            # Registering None keeps `bias` a valid (absent) parameter slot.
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(out_features), 1/sqrt(out_features)]."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        # Dense feature transform followed by (sparse) neighborhood aggregation.
        propagated = torch.spmm(adj, torch.mm(input, self.weight))
        if self.bias is None:
            return propagated
        return propagated + self.bias

    def __repr__(self):
        return '{} ({} -> {})'.format(
            self.__class__.__name__, self.in_features, self.out_features)
| 1,350 | 29.704545 | 77 | py |
ilf | ilf-master/ilf/fuzzers/imitation/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers import GraphConvolution
from .amounts import AMOUNTS
from .addr_map import ADDR_MAP
HIDDEN_PARAMS = 100
class ArgsNet(nn.Module):
def __init__(self, input_size, hidden_size):
super(ArgsNet, self).__init__()
self.hidden_size = hidden_size
self.input_size = input_size
self.gru = nn.GRUCell(self.input_size, self.hidden_size)
self.fc1 = nn.Linear(self.hidden_size, 50)
self.fc2 = nn.Linear(50, self.input_size)
def forward(self, input, hidden):
new_hidden = self.gru(input, hidden)
out = F.relu(self.fc1(new_hidden))
out = self.fc2(out)
return out, new_hidden
class ParamsNet(nn.Module):
    """Two MLP heads over the RNN state vector: one scores candidate tx
    senders (over ADDR_MAP), the other scores candidate amounts (over
    AMOUNTS)."""

    def __init__(self, input_size):
        super(ParamsNet, self).__init__()
        self.input_size = input_size
        # Sender head.
        self.fc1_addr = nn.Linear(self.input_size, HIDDEN_PARAMS)
        self.fc2_addr = nn.Linear(HIDDEN_PARAMS, HIDDEN_PARAMS)
        self.final_fc_addr = nn.Linear(HIDDEN_PARAMS, len(ADDR_MAP))
        # Amount head.
        self.fc1_amount = nn.Linear(self.input_size, HIDDEN_PARAMS)
        self.fc2_amount = nn.Linear(HIDDEN_PARAMS, HIDDEN_PARAMS)
        self.fc3_amount = nn.Linear(HIDDEN_PARAMS, len(AMOUNTS))

    def predict_sender(self, x):
        """Return unnormalized sender logits for a batch of state vectors."""
        assert x.size()[1] == self.input_size
        hidden = F.relu(self.fc1_addr(x))
        hidden = F.relu(self.fc2_addr(hidden))
        return self.final_fc_addr(hidden)

    def predict_amount(self, x):
        """Return unnormalized amount logits for a batch of state vectors."""
        assert x.size()[1] == self.input_size
        hidden = F.relu(self.fc1_amount(x))
        hidden = F.relu(self.fc2_amount(hidden))
        return self.fc3_amount(hidden)
class EmbedGCN(nn.Module):
    """Stacked graph-convolution encoder that embeds each node of the
    contract graph into an `n_embed`-dimensional vector."""

    def __init__(self, n_feat, n_hid, n_embed):
        super(EmbedGCN, self).__init__()
        # Funnel architecture: n_feat -> 5*n_hid -> 3*n_hid -> n_hid -> n_embed.
        self.gc1 = GraphConvolution(n_feat, 5*n_hid)
        self.gc2 = GraphConvolution(5*n_hid, 3*n_hid)
        self.gc3 = GraphConvolution(3*n_hid, n_hid)
        # Attribute kept as `gc6` for checkpoint/state_dict compatibility.
        self.gc6 = GraphConvolution(n_hid, n_embed)

    def forward(self, x, adj):
        """Apply the convolution stack (ReLU between layers, none after the
        final projection)."""
        for layer in (self.gc1, self.gc2, self.gc3):
            x = F.relu(layer(x, adj))
        return self.gc6(x, adj)
class PolicyNet(nn.Module):
    """Method-selection network: compresses raw per-method feature vectors
    and scores (state, method) pairs with a single logit per method."""

    def __init__(self, raw_method_size, method_size, state_size):
        super(PolicyNet, self).__init__()
        self.raw_method_size = raw_method_size
        self.method_size = method_size
        self.state_size = state_size
        self.fc1 = nn.Linear(self.state_size, 200)
        self.bn1 = nn.BatchNorm1d(200)
        self.fc3 = nn.Linear(2*self.method_size, 200)
        self.bn3 = nn.BatchNorm1d(200)
        self.fc = nn.Linear(400, 100)
        self.bn = nn.BatchNorm1d(100)
        self.fc_function = nn.Linear(100, 50)
        self.bn_function = nn.BatchNorm1d(50)
        self.fc_function2 = nn.Linear(50, 1)
        # NOTE: the BatchNorm layers above are currently unused in the forward
        # pass but are kept so checkpoints and the RNG init stream stay stable.

        # Layers for compression of the raw method feature map.
        self.fc_feat1 = nn.Linear(self.raw_method_size, 200)
        self.fc_feat2 = nn.Linear(200, 100)
        self.fc_feat3 = nn.Linear(100, self.method_size)

    def compress_features(self, x):
        """Map raw method features (batch, raw_method_size) to a sigmoid-
        bounded (batch, method_size) embedding."""
        x = F.relu(self.fc_feat1(x))
        x = F.relu(self.fc_feat2(x))
        x = torch.sigmoid(self.fc_feat3(x))
        return x

    def predict_method(self, x_state, x_method):
        """Return one unnormalized logit per row for a (state, method) pair.

        x_state:  (batch, state_size) RNN state rows.
        x_method: (batch, 2*method_size) — compressed method features
                  concatenated with the method's graph embedding.
        """
        assert x_state.size()[1] == self.state_size, '{} vs {}'.format(x_state.size()[1], self.state_size)
        # Bug fix: the failure message used to print `method_size` while the
        # condition requires `2*method_size`; report the real expected width.
        assert x_method.size()[1] == 2*self.method_size, '{} vs {}'.format(x_method.size()[1], 2*self.method_size)
        x_state = F.relu(self.fc1(x_state))
        x_method = F.relu(self.fc3(x_method))
        x = torch.cat([x_state, x_method], dim=1)
        x = F.relu(self.fc(x))
        x = F.relu(self.fc_function(x))
        return self.fc_function2(x)
| 4,942 | 28.422619 | 112 | py |
ilf | ilf-master/ilf/fuzzers/imitation/policy_imitation.py | import time
import random
import numpy as np
import itertools
import pickle
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from datetime import datetime
from torch.optim import Adam, SGD
from torch.optim.lr_scheduler import StepLR
from sklearn.preprocessing import StandardScaler
from sklearn.externals import joblib
from enum import Enum
from tqdm import tqdm
from ..policy_base import PolicyBase
from ...ethereum import SolType
from ...execution import Tx
from .models import PolicyNet, EmbedGCN, ParamsNet, ArgsNet
from .dataset import Input, Sample, Dataset, GraphsCollection
from .constants import BOW_SIZE, GCN_HIDDEN, MINI_BATCH_SIZE, ADAM_LEARNING_RATE, NUM_EPOCHS
from .nlp import NLP
from .int_values import INT_VALUES
from .amounts import AMOUNTS
from .addr_map import ADDR_MAP
use_cuda = 'cuda' if torch.cuda.is_available() else 'cpu'
device = torch.device(use_cuda)
ADDR_FEAT = 10
RNN_HIDDEN_SIZE = 100
NUM_LAYERS = 1
RAW_FEATURE_SIZE = 65 + 300
INT_EXPLORE_RATE = -1
class PolicyImitation(PolicyBase):
    def __init__(self, execution, contract_manager, account_manager, args):
        """Build the imitation-learning policy: all sub-networks, their joint
        optimizer, and the per-contract recurrent bookkeeping.

        When `contract_manager` is None (training-only mode, see main()), the
        PolicyBase constructor is skipped entirely.
        """
        if contract_manager is not None:
            super().__init__(execution, contract_manager, account_manager)
        # Discrete vocabularies the heads classify over.
        self.addr_map = ADDR_MAP
        self.int_values = INT_VALUES
        self.amounts = AMOUNTS
        self.slice_size = 2
        self.raw_feature_size = RAW_FEATURE_SIZE
        self.feature_size = RNN_HIDDEN_SIZE
        self.state_size = RNN_HIDDEN_SIZE
        # Sub-networks: method scorer, graph encoder, sender/amount heads and
        # the two recurrent argument heads (+1 int slot = "no learned value").
        self.net = PolicyNet(self.raw_feature_size, self.feature_size, self.state_size).to(device)
        self.gcn = EmbedGCN(self.feature_size, GCN_HIDDEN, self.feature_size).to(device)
        self.params_net = ParamsNet(RNN_HIDDEN_SIZE).to(device)
        self.addr_args_net = ArgsNet(10, RNN_HIDDEN_SIZE).to(device)
        self.int_args_net = ArgsNet(len(self.int_values)+1, RNN_HIDDEN_SIZE).to(device)
        self.rnn = nn.GRU(self.feature_size, RNN_HIDDEN_SIZE, NUM_LAYERS, dropout=0.0).to(device)
        self.scaler = None
        self.graphs_col = GraphsCollection()
        # Per-contract recurrent state, keyed by contract name.
        self.last_method = dict()
        self.hidden = dict()
        self.first_hidden = dict()
        self.graph_embeddings = dict()
        self.args = args
        self.method_names = {}
        self.method_bows = {}
        self.nlp = NLP()
        # NOTE(review): the file handle is never closed — consider `with open(...)`.
        self.nlp.w2v = pickle.load(open('ilf_w2v.pkl', 'rb'))
        # One optimizer over every trainable sub-network.
        self.adam = Adam(list(self.net.parameters()) + list(self.params_net.parameters()) + list(self.gcn.parameters()) + \
                         list(self.int_args_net.parameters()) + list(self.addr_args_net.parameters()) + list(self.rnn.parameters()),
                         lr=ADAM_LEARNING_RATE,
                         weight_decay=1e-5)
        # gamma=1.0 means the LR is effectively constant.
        self.scheduler = StepLR(self.adam, step_size=1000, gamma=1.0)
def start_train(self):
print('starting training from {} of dataset'.format(self.args.train_dir))
method_names, gc, dataset = Dataset.load(self.args.train_dir, self.addr_map, self.int_values, self.amounts)
self.graphs_col = gc
self.method_names = method_names
self.train(dataset)
def load_model(self):
load_dir = self.args.model
self.scaler = joblib.load(os.path.join(load_dir, 'scaler.pkl'))
if use_cuda == 'cuda':
self.net.load_state_dict(torch.load(os.path.join(load_dir, 'net.pt')))
self.gcn.load_state_dict(torch.load(os.path.join(load_dir, 'gcn.pt')))
self.params_net.load_state_dict(torch.load(os.path.join(load_dir, 'params_net.pt')))
self.addr_args_net.load_state_dict(torch.load(os.path.join(load_dir, 'addr_args_net.pt')))
self.int_args_net.load_state_dict(torch.load(os.path.join(load_dir, 'int_args_net.pt')))
self.rnn.load_state_dict(torch.load(os.path.join(load_dir, 'rnn.pt')))
else:
self.net.load_state_dict(torch.load(os.path.join(load_dir, 'net.pt'), map_location='cpu'))
self.gcn.load_state_dict(torch.load(os.path.join(load_dir, 'gcn.pt'), map_location='cpu'))
self.params_net.load_state_dict(torch.load(os.path.join(load_dir, 'params_net.pt'), map_location='cpu'))
self.addr_args_net.load_state_dict(torch.load(os.path.join(load_dir, 'addr_args_net.pt'), map_location='cpu'))
self.int_args_net.load_state_dict(torch.load(os.path.join(load_dir, 'int_args_net.pt'), map_location='cpu'))
self.rnn.load_state_dict(torch.load(os.path.join(load_dir, 'rnn.pt'), map_location='cpu'))
self.net.eval()
self.gcn.eval()
self.params_net.eval()
self.addr_args_net.eval()
self.int_args_net.eval()
self.rnn.eval()
def calc_method_features(self, contract_name, method_features, scale=False):
num_methods = len(self.method_names[contract_name])
features = np.zeros((num_methods, self.raw_feature_size))
for i, method in enumerate(self.method_names[contract_name]):
method_w2v = self.nlp.embed_method(method)
method_feats = np.concatenate([np.array(method_features[method]), method_w2v], axis=0)
features[i, :self.raw_feature_size] = method_feats[:self.raw_feature_size]
if scale:
features = self.scaler.transform(features)
return features
    def compute_init_hidden(self, inputs):
        """Embed each contract's field/method graph with the GCN and return
        (per-contract initial state vectors, per-node graph embeddings).

        All contracts in `inputs` are merged into one block-diagonal graph so
        a single GCN pass covers the whole batch.
        """
        big_edges, big_features = [], []
        off = 0
        for input in inputs:
            num_methods = len(self.method_names[input.contract])
            # NOTE(review): raw_method_features is never used below.
            raw_method_features = input.method_features
            features = self.calc_method_features(input.contract, input.method_features, True)
            num_fields, edges = self.graphs_col.get(input.contract)
            # Field nodes carry zero features; method nodes carry theirs.
            big_features.append(np.zeros((num_fields, self.raw_feature_size)))
            big_features.append(features)
            # Shift edge indices by the running node offset (block diagonal).
            edges = torch.LongTensor(edges).to(device) + off
            big_edges.append(edges)
            off += num_methods + num_fields
        big_edges = torch.cat(big_edges, dim=0)
        big_features = np.concatenate(big_features, axis=0)
        big_features = torch.from_numpy(big_features).float().to(device)
        comp_features = self.net.compress_features(big_features)
        # Sparse adjacency with weight 1 on every edge.
        if use_cuda == 'cuda':
            adj = torch.cuda.sparse.FloatTensor(
                big_edges.t(),
                torch.ones(big_edges.size()[0]).to(device),
                torch.Size([off, off]))
        else:
            adj = torch.sparse.FloatTensor(
                big_edges.t(),
                torch.ones(big_edges.size()[0]).to(device),
                torch.Size([off, off]))
        graph_embeddings = self.gcn(comp_features, adj)
        all_state_feat = []
        off = 0
        for input in inputs:
            num_fields = self.graphs_col.get(input.contract)[0]
            num_methods = len(self.method_names[input.contract])
            idx_b, idx_e = off, off + num_fields + num_methods
            # A contract's state vector is the mean of its node embeddings.
            all_state_feat.append(torch.mean(graph_embeddings[idx_b:idx_e], dim=0).unsqueeze(0))
            off += num_methods + num_fields
        assert off == graph_embeddings.size()[0]
        return torch.cat(all_state_feat, dim=0), graph_embeddings
    def compute_rnn_inputs(self, batch, graph_embeddings):
        """Build the teacher-forced RNN input tensor of shape
        (seq_len, batch, feature_size): step j+1's input is the compressed
        feature row of the method actually chosen at step j; step 0 stays
        all-zero. Shorter sequences are implicitly zero-padded.
        """
        seq_len = 0
        for samples in batch:
            seq_len = max(seq_len, len(samples))
        inp = np.zeros((seq_len, len(batch), self.feature_size))
        for i, samples in enumerate(batch):
            for j in range(len(samples) - 1):
                next_feats = self.calc_method_features(samples[j+1].input.contract,
                                                       samples[j+1].input.method_features,
                                                       True)
                next_feats = torch.from_numpy(next_feats).float().to(device)
                next_feats = self.net.compress_features(next_feats)
                # Row of the method that was executed at step j (ground truth).
                target_idx = self.method_names[samples[j].input.contract].index(samples[j].output.method_name)
                inp[j + 1, i, :] = next_feats[target_idx].detach().cpu().numpy()
        return torch.from_numpy(inp).float().to(device)
    def compute_f(self, batch, rnn_out, graph_embeddings):
        """Score every candidate method of every sample: each method's
        compressed features are concatenated with its graph embedding and
        paired with the sample's RNN state; returns one logit per method row,
        flattened over the whole batch.
        """
        x_state, x_method_feat, x_method_graph = [], [], []
        off = 0
        for i, samples in enumerate(batch):
            for j, sample in enumerate(samples):
                num_fields = self.graphs_col.get(sample.input.contract)[0]
                num_methods = len(sample.input.method_features)
                feats = self.calc_method_features(sample.input.contract,
                                                  sample.input.method_features,
                                                  True)
                x_method_feat.append(torch.from_numpy(feats).float().to(device))
                # Method nodes come after the field nodes in the graph layout.
                x_method_graph.append(graph_embeddings[off+num_fields:off+num_fields+num_methods])
                # Broadcast this sample's RNN state over all of its methods.
                x_state.append(rnn_out[j, i].view(1, -1).repeat(num_methods, 1))
                off += num_fields + num_methods
        assert off == graph_embeddings.size()[0]
        x_method_feat = torch.cat(x_method_feat, dim=0).to(device)
        x_method_graph = torch.cat(x_method_graph, dim=0).to(device)
        x_method_feat = self.net.compress_features(x_method_feat)
        x_method = torch.cat([x_method_feat, x_method_graph], dim=1)
        x_state = torch.cat(x_state, dim=0)
        f_outs = self.net.predict_method(x_state, x_method)
        return f_outs
def compute_sender_amount(self, batch, rnn_out):
x_feat = []
for i, samples in enumerate(batch):
for j, sample in enumerate(samples):
x_feat.append(rnn_out[j, i].view(1, -1))
x_feat = torch.cat(x_feat, dim=0).to(device)
sender_outs = self.params_net.predict_sender(x_feat)
amount_outs = self.params_net.predict_amount(x_feat)
return sender_outs, amount_outs
    def compute_addr_args(self, batch, rnn_out):
        """Unroll the address-argument head for `max_args` steps over every
        sample, teacher-forcing each step with the ground-truth address of the
        previous slot. Returns a list of per-step logit matrices.
        """
        hidden, max_args = [], 0
        for i, samples in enumerate(batch):
            for j, sample in enumerate(samples):
                hidden.append(rnn_out[j, i].view(1, -1))
                max_args = max(max_args, len(sample.output.addr_args))
        hidden = torch.cat(hidden, dim=0)
        # First step's input is all-zero (no previous address).
        input = torch.zeros((hidden.size()[0], 10)).to(device)
        addr_outs = []
        for idx in range(max_args):
            out, hidden = self.addr_args_net(input, hidden)
            addr_outs.append(out)
            input = torch.zeros((hidden.size()[0], 10)).to(device)
            curr_idx = 0
            for i, samples in enumerate(batch):
                for j, sample in enumerate(samples):
                    # One-hot of the true address index, capped to the 10-slot
                    # input width; out-of-range samples keep a zero input.
                    if idx < len(sample.output.addr_args) and sample.output.addr_args[idx] < 10:
                        input[curr_idx, sample.output.addr_args[idx]] = 1
                    curr_idx += 1
            assert curr_idx == hidden.size()[0]
        return addr_outs
    def compute_int_args(self, batch, rnn_out):
        """Unroll the integer-argument head for `max_args` steps over every
        sample, teacher-forcing each step with the ground-truth int bucket of
        the previous slot. Returns a list of per-step logit matrices.
        """
        hidden, max_args = [], 0
        for i, samples in enumerate(batch):
            for j, sample in enumerate(samples):
                hidden.append(rnn_out[j, i].view(1, -1))
                max_args = max(max_args, len(sample.output.int_args))
        hidden = torch.cat(hidden, dim=0)
        # First step's input is all-zero (no previous int bucket).
        input = torch.zeros((hidden.size()[0], len(self.int_values)+1)).to(device)
        int_outs = []
        for idx in range(max_args):
            out, hidden = self.int_args_net(input, hidden)
            int_outs.append(out)
            input = torch.zeros((hidden.size()[0], len(self.int_values)+1)).to(device)
            curr_idx = 0
            for i, samples in enumerate(batch):
                for j, sample in enumerate(samples):
                    # One-hot of the true int bucket (last slot = "no value").
                    if idx < len(sample.output.int_args):
                        input[curr_idx, sample.output.int_args[idx]] = 1
                    curr_idx += 1
            assert curr_idx == hidden.size()[0]
        return int_outs
    def evaluate(self, dataset, epoch):
        """Run one optimization pass over `dataset` and return averaged
        metrics: (loss, amount_loss, method acc, sender acc, addr acc,
        int acc, amount acc).

        NOTE(review): despite its name this method *trains* — it calls
        backward() and adam.step() on every batch. `epoch`, `num_amount` and
        `amount_loss` (always 0) appear unused/dead — confirm.
        """
        batches = dataset.make_batches(MINI_BATCH_SIZE)
        tot_loss, tot_amount_loss, tot_facc, tot_sacc, tot_addr_acc, tot_int_acc, tot_amount_acc = 0, 0, 0, 0, 0, 0, 0
        tot_amount = 0
        assert len(batches) > 0
        for batch in tqdm(batches):
            # Initial hidden state comes from the GCN over each sequence's
            # first observation.
            init = []
            for samples in batch:
                init.append(samples[0].input)
            init_hidden = torch.zeros((NUM_LAYERS, len(batch), RNN_HIDDEN_SIZE)).to(device)
            first_hidden, graph_embeddings = self.compute_init_hidden(init)
            init_hidden[0] = first_hidden
            inp = self.compute_rnn_inputs(batch, graph_embeddings)
            rnn_out, _ = self.rnn(inp, init_hidden)
            # Forward all heads for the whole batch in one go.
            f_outs = self.compute_f(batch, rnn_out, graph_embeddings)
            sender_outs, amount_outs = self.compute_sender_amount(batch, rnn_out)
            addr_outs = self.compute_addr_args(batch, rnn_out)
            int_outs = self.compute_int_args(batch, rnn_out)
            batch_loss, amount_loss, batch_facc, batch_sacc, batch_addr_acc, batch_int_acc = 0, 0, 0, 0, 0, 0
            off, num_samples, num_pred_addr, num_pred_int, num_amount = 0, 0, 0, 0, 0
            # method
            # `off` walks the flat per-method logit rows; `idx` walks samples.
            off, idx = 0, 0
            for i, samples in enumerate(batch):
                for j, sample in enumerate(samples):
                    num_methods = len(sample.input.method_features)
                    num_addresses = sample.input.num_addresses
                    if not sample.output.use_train:
                        # Sample excluded from training: skip but keep offsets aligned.
                        off += num_methods
                        idx += 1
                        continue
                    f_log_probs = F.log_softmax(f_outs[off:off+num_methods].view(-1), dim=0)
                    sender_log_probs = F.log_softmax(sender_outs[idx], dim=0)
                    amount_log_probs = F.log_softmax(amount_outs[idx], dim=0)
                    # Address-argument NLL, averaged over this sample's slots.
                    addr_loss, addr_acc = 0, 0
                    for k in range(len(sample.output.addr_args)):
                        addr_log_probs = F.log_softmax(addr_outs[k][idx][:num_addresses], dim=0)
                        _, pred_addr = torch.max(addr_log_probs, dim=0)
                        addr_acc += 1 if pred_addr == sample.output.addr_args[k] else 0
                        addr_loss += -addr_log_probs[sample.output.addr_args[k]]
                        num_pred_addr += 1
                    if len(sample.output.addr_args) > 0:
                        addr_loss /= len(sample.output.addr_args)
                        batch_loss += addr_loss
                    # Int-argument NLL; the last bucket ("no value") is skipped.
                    int_loss, int_acc = 0, 0
                    for k in range(len(sample.output.int_args)):
                        if sample.output.int_args[k] == len(self.int_values):
                            continue
                        int_log_probs = F.log_softmax(int_outs[k][idx], dim=0)
                        _, pred_int = torch.max(int_log_probs, dim=0)
                        int_acc += 1 if pred_int == sample.output.int_args[k] else 0
                        int_loss += -int_log_probs[sample.output.int_args[k]]
                        num_pred_int += 1
                    if len(sample.output.int_args) > 0:
                        int_loss /= len(sample.output.int_args)
                        batch_loss += int_loss
                    target_f = self.method_names[sample.input.contract].index(sample.output.method_name)
                    target_sender = sample.output.sender
                    target_amount = sample.output.amount
                    _, pred_f = torch.max(f_log_probs, dim=0)
                    _, pred_sender = torch.max(sender_log_probs, dim=0)
                    _, pred_amount = torch.max(amount_log_probs, dim=0)
                    if target_amount is not None:
                        # Amount is only supervised when the sample carries one.
                        batch_loss += -amount_log_probs[target_amount]
                        tot_amount += 1
                        tot_amount_acc += 1 if pred_amount == target_amount else 0
                    batch_loss += -f_log_probs[target_f]
                    batch_loss += -sender_log_probs[target_sender]
                    batch_facc += 1 if pred_f == target_f else 0
                    batch_sacc += 1 if pred_sender == target_sender else 0
                    batch_addr_acc += addr_acc
                    batch_int_acc += int_acc
                    num_samples += 1
                    off += num_methods
                    idx += 1
            assert off == f_outs.size()[0]
            assert idx == sender_outs.size()[0]
            batch_loss /= num_samples
            batch_facc /= num_samples
            batch_sacc /= num_samples
            if num_pred_addr > 0:
                batch_addr_acc /= num_pred_addr
            if num_pred_int > 0:
                batch_int_acc /= num_pred_int
            # Optimization step on the averaged batch loss.
            self.adam.zero_grad()
            batch_loss.backward()
            self.adam.step()
            tot_loss += batch_loss.item()
            tot_amount_loss += amount_loss
            tot_facc += batch_facc
            tot_sacc += batch_sacc
            tot_addr_acc += batch_addr_acc
            tot_int_acc += batch_int_acc
        # Average every metric over the number of batches (amount accuracy
        # is averaged over supervised amount samples instead).
        tot_loss /= len(batches)
        tot_amount_loss /= len(batches)
        tot_facc /= len(batches)
        tot_sacc /= len(batches)
        tot_addr_acc /= len(batches)
        tot_int_acc /= len(batches)
        tot_amount_acc /= tot_amount
        return tot_loss, tot_amount_loss, tot_facc, tot_sacc, tot_addr_acc, tot_int_acc, tot_amount_acc
    def train(self, train, valid=None):
        """Fit the feature scaler on the training data, then run NUM_EPOCHS
        of optimization (via `evaluate`), checkpointing every sub-network
        each epoch into `self.args.model`.

        NOTE(review): the `train` parameter shadows this method's own name;
        `valid` and `prev_best_facc` are unused; `int_args_net` is never
        switched to train mode — confirm all are intended.
        """
        self.net.train()
        self.gcn.train()
        self.params_net.train()
        self.addr_args_net.train()
        self.rnn.train()
        savedir = self.args.model
        print('saving to ', savedir)
        try:
            os.makedirs(savedir)
        except FileExistsError:
            print('Warning: directory {} already exists, it will be overwritten!'.format(savedir))
        prev_best_facc = None
        # Fit the StandardScaler on (at most 5000 rows of) raw method features.
        all_feat = []
        for samples in train.data:
            for sample in samples:
                features = self.calc_method_features(sample.input.contract, sample.input.method_features)
                all_feat.append(features)
        all_feat = np.concatenate(all_feat, axis=0)
        if all_feat.shape[0] > 5000:
            all_feat = all_feat[:5000]
        self.scaler = StandardScaler()
        self.scaler.fit(all_feat)
        joblib.dump(self.scaler, os.path.join(savedir, 'scaler.pkl'))
        for epoch in range(NUM_EPOCHS):
            self.scheduler.step()
            train.shuffle()
            # `evaluate` performs the actual optimization pass.
            tot_loss, _, tot_facc, tot_sacc, tot_addr_acc, tot_int_acc, tot_amount_acc = self.evaluate(train, epoch)
            if epoch % 1 == 0:
                print('[TRAIN] Epoch = %d, Loss = %.4f, Acc@F = %.2lf, Acc@S = %.2lf, Acc@ADDR = %.2lf, Acc@INT = %.2lf, Acc@AMO = %.2lf' % (
                    epoch, tot_loss, tot_facc*100, tot_sacc*100, tot_addr_acc*100, tot_int_acc*100, tot_amount_acc*100))
            if epoch % 1 == 0:
                # Checkpoint every sub-network, suffixed with the epoch number.
                torch.save(self.net.state_dict(), os.path.join(savedir, 'net_{}.pt'.format(epoch)))
                torch.save(self.params_net.state_dict(), os.path.join(savedir, 'params_net_{}.pt'.format(epoch)))
                torch.save(self.gcn.state_dict(), os.path.join(savedir, 'gcn_{}.pt'.format(epoch)))
                torch.save(self.addr_args_net.state_dict(), os.path.join(savedir, 'addr_args_net_{}.pt'.format(epoch)))
                torch.save(self.int_args_net.state_dict(), os.path.join(savedir, 'int_args_net_{}.pt'.format(epoch)))
                torch.save(self.rnn.state_dict(), os.path.join(savedir, 'rnn_{}.pt'.format(epoch)))
def clear_history(self):
for contract in self.hidden:
self.hidden[contract][0] = self.first_hidden[contract]
    def select_method(self, contract, obs):
        """One policy step for `contract`: refresh per-method features,
        lazily build the contract's graph state on first sight, advance the
        RNN by the previously executed method and sample the next method.

        Returns (sampled method index, the Sample wrapper, the RNN output).
        """
        method_feats = {}
        for m in contract.abi.methods:
            self.method_bows[m.name] = m.bow
        # Dynamic observation features + the static bag-of-words per method.
        for method, feats in obs.record_manager.get_method_features(contract.name).items():
            method_feats[method] = feats + self.method_bows[method]
        trace_op_bow = obs.trace_bow
        curr_input = Input(contract.name, method_feats, trace_op_bow, len(self.addresses))
        if contract.name not in self.last_method:
            # First time we see this contract: set up its graph, method list
            # and GCN-derived initial hidden state.
            self.graphs_col.add_graph(contract.name, [m.storage_args for m in contract.abi.methods])
            self.method_names[contract.name] = [m.name for m in contract.abi.methods]
            # self.first_tx = False
            self.hidden[contract.name] = torch.zeros((NUM_LAYERS, 1, RNN_HIDDEN_SIZE)).to(device)
            first_hidden, self.graph_embeddings[contract.name] = self.compute_init_hidden([curr_input])
            self.first_hidden[contract.name] = first_hidden
            self.hidden[contract.name][0] = first_hidden
        if contract.name not in self.last_method:
            # No previous method yet: feed a zero input to the RNN.
            rnn_input = torch.zeros((1, 1, self.feature_size)).to(device)
        else:
            # Feed the compressed feature row of the last executed method.
            rnn_input = self.calc_method_features(contract.name, method_feats, True)
            rnn_input = torch.from_numpy(rnn_input[self.last_method[contract.name]]).float().to(device)
            rnn_input = self.net.compress_features(rnn_input)
            rnn_input = rnn_input.view((1, 1, self.feature_size))
        rnn_out, self.hidden[contract.name] = self.rnn(rnn_input, self.hidden[contract.name])
        sample = Sample(curr_input, None)
        f_outs = self.compute_f([[sample]], rnn_out, self.graph_embeddings[contract.name]).view(-1)
        # Sample (not argmax) from the softmax over method logits.
        f_probs = F.softmax(f_outs, dim=0).detach().cpu().numpy()
        pred_f = np.random.choice(len(contract.abi.methods), p=f_probs)
        return pred_f, sample, rnn_out
def update_tx(self, tx, obs):
# contract = self._select_contract()
contract = self.contract_manager[tx.contract]
self.select_method(contract.name, obs)
self.last_method[contract.name] = self.method_names[contract.name].index(tx.method)
    def select_tx(self, obs):
        """Assemble one full transaction: pick a contract, sample method,
        sender, amount and arguments from the learned heads, and return the
        Tx object (recording the chosen method for the next RNN step)."""
        # With 80% probability fix all dynamic-array lengths for this tx to a
        # small random size; otherwise leave them random per-argument.
        r = random.random()
        if r >= 0.2:
            self.slice_size = random.randint(1, 5)
        else:
            self.slice_size = None
        contract = self._select_contract()
        address = contract.addresses[0]
        pred_f, sample, rnn_out = self.select_method(contract, obs)
        sender_outs, amount_outs = self.compute_sender_amount([[sample]], rnn_out)
        sender_probs = F.softmax(sender_outs.view(-1), dim=0).detach().cpu().numpy()
        amount_probs = F.softmax(amount_outs.view(-1), dim=0).detach().cpu().numpy()
        pred_sender = np.random.choice(len(self.addr_map), p=sender_probs)
        pred_amount = np.random.choice(len(self.amounts), p=amount_probs)
        method = contract.abi.methods[pred_f]
        attacker_indices = self.account_manager.attacker_indices
        # Override the learned sender with a random attacker account at a
        # rate proportional to the attacker share of all accounts.
        if np.random.random() < len(attacker_indices) / len(self.account_manager.accounts):
            sender = int(np.random.choice(attacker_indices))
        else:
            sender = pred_sender
        arguments, addr_args, int_args = self._select_arguments(contract, method, sender, obs, rnn_out)
        amount = self._select_amount(contract, method, sender, obs, pred_amount)
        timestamp = self._select_timestamp(obs)
        # Condition the next RNN step on the method we just chose.
        self.last_method[contract.name] = pred_f
        tx = Tx(self, contract.name, address, method.name, bytes(), arguments, amount, sender, timestamp, True)
        return tx
def _select_contract(self):
contract_name = random.choice(self.contract_manager.fuzz_contract_names)
return self.contract_manager[contract_name]
def _select_amount(self, contract, method, sender, obs, pred_amount=None):
if sender in self.account_manager.attacker_indices:
return 0
if self.contract_manager.is_payable(contract.name, method.name):
if pred_amount is None:
amount = random.randint(0, self.account_manager[sender].amount)
else:
amount = self.amounts[pred_amount]
return amount
else:
return 0
def _select_sender(self):
return random.choice(range(0, len(self.account_manager.accounts)))
    def _select_arguments(self, contract, method, sender, obs, rnn_out):
        """Generate concrete argument values for every input of `method`,
        driven by the recurrent address/int argument heads.

        Returns (arguments, chosen address bucket indices, chosen int bucket
        indices); the bucket lists record the heads' sampled classes.
        """
        # Both argument heads start from the current RNN state with an
        # all-zero first input; each sampled class is fed back one-hot.
        hidden_addr = rnn_out[0, 0].view(1, -1)
        input_addr = torch.zeros((1, 10)).to(device)
        hidden_int = rnn_out[0, 0].view(1, -1)
        input_int = torch.zeros((1, len(self.int_values)+1)).to(device)
        arguments, addr_args, int_args = [], [], []
        for arg in method.inputs:
            t = arg.evm_type.t
            if t == SolType.IntTy or t == SolType.UintTy:
                s = random.random()
                # INT_EXPLORE_RATE is -1, so the learned head is always used.
                if s >= INT_EXPLORE_RATE:
                    out, hidden_int = self.int_args_net(input_int, hidden_int)
                    int_probs = F.softmax(out.view(-1), dim=0)
                    int_probs = int_probs.detach().cpu().numpy()
                    chosen_int = np.random.choice(len(self.int_values)+1, p=int_probs)
                    input_int = torch.zeros((1, len(self.int_values)+1)).to(device)
                    input_int[0, chosen_int] = 1
                    int_args.append(chosen_int)
                else:
                    chosen_int = None
                if t == SolType.IntTy:
                    arguments.append(self._select_int(contract, method, arg.evm_type.size, obs, chosen_int))
                elif t == SolType.UintTy:
                    arguments.append(self._select_uint(contract, method, arg.evm_type.size, obs, chosen_int))
            elif t == SolType.BoolTy:
                arguments.append(self._select_bool())
            elif t == SolType.StringTy:
                arguments.append(self._select_string(obs))
            elif t == SolType.SliceTy:
                # NOTE(review): rebinding `arg` shadows the loop variable.
                arg = self._select_slice(contract, method, sender, arg.evm_type.elem, obs, rnn_out)
                arguments.append(arg)
            elif t == SolType.ArrayTy:
                arg = self._select_array(contract, method, sender, arg.evm_type.size, arg.evm_type.elem, obs, rnn_out)
                arguments.append(arg)
            elif t == SolType.AddressTy:
                out, hidden_addr = self.addr_args_net(input_addr, hidden_addr)
                # Restrict the distribution to the known address pool.
                addr_probs = F.softmax(out.view(-1)[:len(self.addresses)], dim=0)
                addr_probs = addr_probs.detach().cpu().numpy()
                chosen_addr = np.random.choice(len(self.addresses[:len(addr_probs)]), p=addr_probs)
                arguments.append(self._select_address(sender, chosen_addr))
                input_addr = torch.zeros((1, 10)).to(device)
                input_addr[0, chosen_addr] = 1
                addr_args.append(chosen_addr)
            elif t == SolType.FixedBytesTy:
                arguments.append(self._select_fixed_bytes(arg.evm_type.size, obs))
            elif t == SolType.BytesTy:
                arguments.append(self._select_bytes(obs))
            else:
                assert False, 'type {} not supported'.format(t)
        return arguments, addr_args, int_args
def _select_int(self, contract, method, size, obs, chosen_int=None):
if chosen_int is not None and chosen_int != len(self.int_values):
value = self.int_values[chosen_int]
value &= ((1 << size) - 1)
if value & (1 << (size - 1)):
value -= (1 << size)
return value
p = 1 << (size - 1)
return random.randint(-p, p-1)
def _select_uint(self, contract, method, size, obs, chosen_int=None):
if chosen_int is not None and chosen_int != len(self.int_values):
value = self.int_values[chosen_int]
value &= ((1 << size) - 1)
return value
p = 1 << size
return random.randint(0, p-1)
def _select_address(self, sender, idx=None):
if sender in self.account_manager.attacker_indices:
if idx is None:
return random.choice(self.addresses)
else:
return self.addresses[idx]
else:
if idx is None or self.addresses[idx] in self.account_manager.attacker_addresses:
l = [addr for addr in self.addresses if addr not in self.account_manager.attacker_addresses]
return random.choice(l)
else:
return self.addresses[idx]
def _select_bool(self):
return random.choice([True, False])
def _select_string(self, obs):
bs = []
size = random.randint(0, 40)
for _ in range(size):
bs.append(random.randint(1, 127))
return bytearray(bs).decode('ascii')
def _select_slice(self, contract, method, sender, typ, obs, rnn_out):
if self.slice_size is None:
size = random.randint(1, 15)
else:
size = self.slice_size
return self._select_array(contract, method, sender, size, typ, obs, rnn_out)
    def _select_array(self, contract, method, sender, size, typ, obs, rnn_out):
        """Generate a `size`-element array argument whose elements have
        Solidity type `typ`.

        Integer and address elements are sampled from the int/addr policy
        RNNs (seeded from `rnn_out`); other element types use the per-type
        random selectors. Nested slice/array element types recurse.
        """
        # Both policy RNNs start from the same hidden state taken from rnn_out.
        hidden_addr = rnn_out[0, 0].view(1, -1)
        input_addr = torch.zeros((1, 10)).to(device)
        hidden_int = rnn_out[0, 0].view(1, -1)
        input_int = torch.zeros((1, len(self.int_values)+1)).to(device)
        t = typ.t
        arr = []
        for _ in range(size):
            if t in (SolType.IntTy, SolType.UintTy):
                s = random.random()
                # Exploit the int policy with prob (1 - INT_EXPLORE_RATE);
                # otherwise explore: chosen_int=None means "pick at random".
                if s >= INT_EXPLORE_RATE:
                    out, hidden_int = self.int_args_net(input_int, hidden_int)
                    int_probs = F.softmax(out.view(-1), dim=0)
                    int_probs = int_probs.detach().cpu().numpy()
                    chosen_int = np.random.choice(len(self.int_values)+1, p=int_probs)
                    # Feed the choice back as a one-hot input for the next step.
                    input_int = torch.zeros((1, len(self.int_values)+1)).to(device)
                    input_int[0, chosen_int] = 1
                else:
                    chosen_int = None
            if t == SolType.IntTy:
                arr.append(self._select_int(contract, method, typ.size, obs, chosen_int))
            elif t == SolType.UintTy:
                arr.append(self._select_uint(contract, method, typ.size, obs, chosen_int))
            elif t == SolType.BoolTy:
                arr.append(self._select_bool())
            elif t == SolType.StringTy:
                arr.append(self._select_string(obs))
            elif t == SolType.SliceTy:
                arg = self._select_slice(contract, method, sender, typ.elem, obs, rnn_out)
                arr.append(arg)
            elif t == SolType.ArrayTy:
                arg = self._select_array(contract, method, sender, typ.size, typ.elem, obs, rnn_out)
                arr.append(arg)
            elif t == SolType.AddressTy:
                out, hidden_addr = self.addr_args_net(input_addr, hidden_addr)
                # Only the first len(self.addresses) logits are valid choices.
                addr_probs = F.softmax(out.view(-1)[:len(self.addresses)], dim=0)
                addr_probs = addr_probs.detach().cpu().numpy()
                chosen_addr = np.random.choice(len(self.addresses[:len(addr_probs)]), p=addr_probs)
                input_addr = torch.zeros((1, 10)).to(device)
                input_addr[0, chosen_addr] = 1
                arr.append(self._select_address(sender, chosen_addr))
            elif t == SolType.FixedBytesTy:
                arr.append(self._select_fixed_bytes(typ.size, obs))
            elif t == SolType.BytesTy:
                arr.append(self._select_bytes(obs))
            else:
                assert False, 'type {} not supported'.format(t)
        return arr
def _select_fixed_bytes(self, size, obs):
bs = []
for _ in range(size):
bs.append(random.randint(0, 255))
return bs
def _select_bytes(self, obs):
size = random.randint(1, 15)
return self._select_fixed_bytes(size, obs)
| 32,081 | 45.094828 | 141 | py |
speech-privacy | speech-privacy-main/src/main.py | """
Script to train adversary classifier
"""
import argparse
import numpy as np
import os
import pickle
import random
import time
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from data_utils import *
from model import *
def count_parameters(model):
    """Number of trainable (requires_grad) scalar parameters in `model`."""
    return sum(param.numel() for param in model.parameters() if param.requires_grad)
def epoch_time(start_time, end_time):
    """Split an elapsed wall-clock interval into whole (minutes, seconds)."""
    elapsed = end_time - start_time
    minutes = int(elapsed / 60)
    seconds = int(elapsed - minutes * 60)
    return minutes, seconds
def train(model, loader, optimizer, criterion, clip, device):
    """Run one training epoch; return the mean loss over batches.

    Each (audio, label) batch is moved to CUDA `device`, the criterion loss
    is backpropagated, gradients are clipped to `clip`, and the optimizer
    steps.
    """
    model.train()
    total_loss = 0.0
    for audio, label in loader:
        audio = audio.cuda(device)
        label = label.cuda(device)
        optimizer.zero_grad()
        output = model(audio)
        loss = criterion(output, label)
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        total_loss += loss.item()
    return total_loss / len(loader)
def evaluate(model, loader, criterion, device):
    """Evaluate `model` over `loader`; return (mean loss, accuracy)."""
    model.eval()
    total_loss = 0.0
    correct = 0
    total_samples = 0
    with torch.no_grad():
        for audio, label in loader:
            audio = audio.cuda(device)
            label = label.cuda(device)
            output = model(audio)
            # Argmax over class logits, compared against labels cast to long.
            pred = output.data.max(1, keepdim=True)[1]
            matchings = pred.eq(label.data.view_as(pred).type(torch.cuda.LongTensor))
            correct = correct + matchings.sum()
            total_samples = total_samples + audio.size()[0]
            total_loss += criterion(output, label).item()
    accuracy = float(correct) / total_samples
    return total_loss / len(loader), accuracy
def main():
    """Train the gender-adversary classifier on LibriSpeech.

    Parses CLI options, seeds RNGs, builds a VGGVoxClf (optionally resuming
    from a checkpoint), then runs train/eval epochs, checkpointing on the
    best validation loss and logging curves to TensorBoard.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--load', default='', type=str, help='checkpoint path to load')
    parser.add_argument('--cuda', default=0, type=int, help='cuda device')
    parser.add_argument('-w', '--num-workers', default=1, type=int, help='number of workers')
    parser.add_argument('--lr', default=3e-4, type=float, help='learning rate')
    parser.add_argument('--expt', default='exp-ls', type=str, help='experiment id')
    parser.add_argument('--epochs', default=50, type=int, help='number of epochs')
    parser.add_argument('--batch-size', default=64, type=int, help='batch size')
    parser.add_argument('--batch-size-2', default=4, type=int, help='batch size')
    parser.add_argument('--clip', default=0.25, type=float, help='max norm of the gradients')
    parser.add_argument('--seed', default=0, type=int, help='random seed')
    parser.add_argument('--spkr-seed', default=-1, type=int, help='speaker seed')
    parser.add_argument('--mode', default='default', type=str, help='type of experiment')
    parser.add_argument('--num-spkr', default=1, type=int, help='# of train spkrs per gender')
    args = parser.parse_args()
    # Seed every RNG source for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    log_dir = 'runs/%s' % args.expt
    writer = SummaryWriter(log_dir)
    print('tensorboard log in %s' % log_dir)
    # Resume priority: explicit --load path, then an existing run checkpoint.
    # NOTE(review): load_w2v2_cp is set but never used below.
    load_w2v2_cp = True
    ckpt_path_to_load = None
    ckpt_path = '%s/model.pt' % log_dir
    if os.path.exists(args.load):
        load_w2v2_cp = False
        ckpt_path_to_load = args.load
    elif os.path.exists(ckpt_path):
        load_w2v2_cp = False
        ckpt_path_to_load = ckpt_path
    num_classes = 2
    model = VGGVoxClf()
    model = model.cuda(args.cuda)
    if ckpt_path_to_load is not None:
        print('loading model')
        model.load_state_dict(torch.load(ckpt_path_to_load))
    print('The model has %d trainable parameters' % count_parameters(model))
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    criterion = nn.CrossEntropyLoss()
    train_dataset = librispeech_dataset('train', mode=args.mode, num_spkr=args.num_spkr, spkr_seed=args.spkr_seed)
    val_dataset = librispeech_dataset('test', mode=args.mode, num_spkr=args.num_spkr)
    # Speaker-disjoint split sanity check.
    assert len(set(train_dataset.speaker_ids).intersection(set(val_dataset.speaker_ids))) == 0
    train_loader = DataLoader(train_dataset, collate_fn=collate_1d_float, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    val_loader = DataLoader(val_dataset, collate_fn=collate_1d_float, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
    print('created dataloaders')
    # Baseline validation pass before any training.
    start_time = time.time()
    valid_loss, valid_acc = evaluate(model, val_loader, criterion, args.cuda)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    print(f'Epoch: 0 | Time: {epoch_mins}m {epoch_secs}s | Acc: {valid_acc:.3f}')
    best_valid_loss = valid_loss
    for epoch in range(args.epochs):
        start_time = time.time()
        train_loss = train(model, train_loader, optimizer, criterion, args.clip, args.cuda)
        valid_loss, valid_acc = evaluate(model, val_loader, criterion, args.cuda)
        end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        # Keep only the best-validation-loss checkpoint.
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(model.state_dict(), ckpt_path)
        print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s | Acc: {valid_acc:.3f}')
        # Test metrics are not computed here; logged as 0.0 placeholders.
        test_acc = 0.0
        writer.add_scalars('scalar/accs', {'val': valid_acc, 'test': test_acc}, epoch)
        test_loss = 0.0
        writer.add_scalars('scalar/losses', {'train': train_loss, 'val': valid_loss, 'test': test_loss}, epoch)
if __name__ == "__main__":
    # Script entry point. The original call line was corrupted by fused
    # extraction metadata ("| 5,754 | ..."), which would raise a TypeError;
    # restored to a plain call.
    main()
speech-privacy | speech-privacy-main/src/model.py | """
Model definition
"""
import random
import torch
import torchaudio
import torch.nn as nn
import torch.nn.functional as F
class VGGVoxClf(nn.Module):
    """Based on https://github.com/zimmerrol/vggvox-pytorch

    VGGVox-style CNN classifier over 40-bin log-mel spectrograms with a
    2-way classification head.
    """
    def __init__(self, nOut=1024, log_input=True):
        # nOut: width of the embedding layer feeding the classifier head.
        # log_input: if True, take log of the mel spectrogram before norm.
        super(VGGVoxClf, self).__init__()
        self.log_input = log_input
        # VGG-style conv trunk over (batch, 1, mel=40, time) inputs.
        self.netcnn = nn.Sequential(
            nn.Conv2d(1, 96, kernel_size=(5,7), stride=(1,2), padding=(2,2)),
            nn.BatchNorm2d(96),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(1,3), stride=(1,2)),
            nn.Conv2d(96, 256, kernel_size=(5,5), stride=(2,2), padding=(1,1)),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(3,3), stride=(2,2)),
            nn.Conv2d(256, 384, kernel_size=(3,3), padding=(1,1)),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=(3,3), padding=(1,1)),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=(3,3), padding=(1,1)),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(3,3), stride=(2,2)),
            nn.Conv2d(256, 512, kernel_size=(4,1), padding=(0,0)),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
        )
        # Global max-pool to a single 512-d vector per utterance.
        self.encoder = nn.AdaptiveMaxPool2d((1,1))
        out_dim = 512
        self.fc = nn.Linear(out_dim, nOut)
        self.clf = nn.Sequential(nn.ReLU(), nn.Linear(nOut, 2))
        self.instancenorm = nn.InstanceNorm1d(40)
        self.torchfb = torchaudio.transforms.MelSpectrogram(sample_rate=16000, n_fft=512, win_length=400, hop_length=160, f_min=0.0, f_max=8000, pad=0, n_mels=40)
    def forward(self, x):
        """Waveform batch -> 2-class logits."""
        x = self.torchfb(x)+1e-6
        if self.log_input: x = x.log()
        # Feature extraction is detached: gradients do not reach the STFT.
        x = self.instancenorm(x).unsqueeze(1).detach()
        x = self.netcnn(x)
        x = self.encoder(x)
        x = x.view((x.size()[0], -1))
        x = self.fc(x)
        x = self.clf(x)
        return x
    def forward_emb(self, x):
        """Waveform batch -> nOut-dim embedding (classifier head skipped)."""
        x = self.torchfb(x)+1e-6
        if self.log_input: x = x.log()
        x = self.instancenorm(x).unsqueeze(1).detach()
        x = self.netcnn(x)
        x = self.encoder(x)
        x = x.view((x.size()[0], -1))
        x = self.fc(x)
        return x
| 2,424 | 30.907895 | 169 | py |
speech-privacy | speech-privacy-main/src/data_utils.py | """
Data loading utilities
"""
import glob
import math
import numpy as np
import os
import random
import soundfile as sf
import torch
from torch.utils.data import Dataset
def collate_1d_float(batch):
    """Collate (waveform, label) pairs into batched tensors.

    Each 1-D waveform is right-padded with zeros to the length of the
    longest waveform in the batch.

    Returns:
        (B, T_max) float tensor of waveforms, (B,) long tensor of labels.
    Raises:
        ValueError: if `batch` is empty.
    """
    if not batch:
        raise ValueError('collate_1d_float received an empty batch')
    # Idiomatic max() instead of the original manual scan.
    max_len = max(len(audio) for audio, _ in batch)
    padded = []
    labels = []
    for audio, label in batch:
        audio = np.pad(audio, (0, max_len - len(audio)), 'constant', constant_values=0)
        padded.append(torch.tensor(audio))
        labels.append(label)
    return torch.stack(padded).float(), torch.tensor(labels)
class librispeech_dataset(Dataset):
    """LibriSpeech gender-classification dataset.

    Scans train-clean-100 (phase 'train') or test-clean for flac files,
    labels each utterance by speaker gender from SPEAKERS.TXT (1 = F), and
    for training restricts to `num_spkr` speakers per gender from a fixed
    candidate list. `mode` selects voice-converted/pitch-shifted variants
    of each file at load time.
    """
    def __init__(self, phase, mode='default', num_spkr=1, spkr_seed=-1):
        self.mode = mode
        if phase == 'train':
            data_dir = 'LibriSpeech/train-clean-100'
        else:
            data_dir = 'LibriSpeech/test-clean'
        if not os.access(data_dir, os.R_OK):
            raise FileNotFoundError
        metadata_path = 'LibriSpeech/SPEAKERS.TXT'
        if not os.access(metadata_path, os.R_OK):
            raise FileNotFoundError
        with open(metadata_path, 'r') as inf:
            metadata = inf.readlines()
        # 30  | F | train-clean-360 | 25.19 | Annie Coleman Rothenberg
        # Strip comment lines (';') and split pipe-separated fields.
        metadata = [l.strip() for l in metadata]
        metadata = [l for l in metadata if len(l) > 0 and l[0] != ';']
        metadata = [l.split('|') for l in metadata]
        metadata = [[l.strip() for l in ll] for ll in metadata]
        id_to_gender = {l[0]:l[1] for l in metadata}
        ext = 'flac'
        search_path = os.path.join(data_dir, '**/*.' + ext)
        self.audio_paths = []
        self.labels = []
        self.speaker_ids = []
        file_paths = []
        for fname in glob.iglob(search_path, recursive=True):
            # Skip already-converted variants; only index the originals.
            if '-avc' not in fname and '-gvc' not in fname and '-rb' not in fname and '-pitch' not in fname:
                file_path = os.path.realpath(fname)
                # Speaker id is the grandparent directory name:
                # .../<speaker>/<chapter>/<utt>.flac
                slash_idx = file_path.rfind('/')
                end_idx = file_path[:slash_idx].rfind('/')
                start_idx = file_path[:end_idx].rfind('/')+1
                speaker_id = file_path[start_idx:end_idx]
                self.speaker_ids.append(speaker_id)
                self.audio_paths.append(file_path)
                gender = id_to_gender[speaker_id]
                self.labels.append(int(gender == 'F'))
                file_paths.append(file_path)
        if phase == 'train':
            speaker_list = ['311', '2843', '3664', '3168', '2518', '7190', '78', '831', '8630', '3830', '322', '2391', '7517', '8324', '19', '1898', '7078', '5339', '4051', '4640']
            m_list = [s for s in speaker_list if id_to_gender[s] == 'M']
            f_list = [s for s in speaker_list if id_to_gender[s] == 'F']
            # Optionally shuffle the gender-balanced candidate pools with a
            # dedicated seed before taking num_spkr per gender.
            if spkr_seed != -1:
                random.Random(spkr_seed).shuffle(m_list)
                random.Random(spkr_seed).shuffle(f_list)
                speaker_list = m_list[:num_spkr] + f_list[:num_spkr]
                print(speaker_list)
            else:
                speaker_list = m_list[:num_spkr] + f_list[:num_spkr]
            speaker_set = set(speaker_list)
            # Filter all parallel lists down to the chosen speakers.
            self.audio_paths = [p for i, p in enumerate(self.audio_paths) if self.speaker_ids[i] in speaker_set]
            self.labels = [p for i, p in enumerate(self.labels) if self.speaker_ids[i] in speaker_set]
            self.speaker_ids = [p for i, p in enumerate(self.speaker_ids) if self.speaker_ids[i] in speaker_set]
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, i):
        # Map `mode` to the matching converted-audio filename suffix.
        audio_path = self.audio_paths[i]
        if self.mode == 'avc':
            audio_path = audio_path.replace('.flac', '-avc.flac')
        elif self.mode == 'gvc':
            audio_path = audio_path.replace('.flac', '-gvc.flac')
        elif self.mode == 'pitch':
            audio_path = audio_path.replace('.flac', '-rb.flac')
        elif self.mode != 'default':
            audio_path = audio_path.replace('.flac', '-%s.flac' % self.mode)
        audio, sr = sf.read(audio_path)
        return audio.astype(float), self.labels[i]
| 4,239 | 37.899083 | 180 | py |
neural-lpcfg | neural-lpcfg-master/flow.py | from __future__ import print_function
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
import numpy as np
import pdb
class ReLUNet(nn.Module):
    """Feed-forward MLP with ReLU activations.

    Architecture: in_layer -> ReLU -> `hidden_layers` hidden Linear+ReLU
    cells -> out_layer (no final activation). Hidden cells are registered
    as attributes 'cell0'..'cell{n-1}'.
    """
    def __init__(self, hidden_layers, hidden_units, in_features, out_features):
        super(ReLUNet, self).__init__()
        self.hidden_layers = hidden_layers
        self.in_layer = nn.Linear(in_features, hidden_units, bias=True)
        self.out_layer = nn.Linear(hidden_units, out_features, bias=True)
        for i in range(hidden_layers):
            name = 'cell{}'.format(i)
            cell = nn.Linear(hidden_units, hidden_units, bias=True)
            setattr(self, name, cell)
    def reset_parameters(self):
        # Re-run the default nn.Linear initialization on every layer.
        self.in_layer.reset_parameters()
        self.out_layer.reset_parameters()
        for i in range(self.hidden_layers):
            name = 'cell{}'.format(i)
            getattr(self, name).reset_parameters()
    def init_identity(self):
        # Zero all weights/biases so the network outputs a constant zero;
        # used to initialize a coupling layer to the identity transform.
        self.in_layer.weight.data.zero_()
        self.in_layer.bias.data.zero_()
        self.out_layer.weight.data.zero_()
        self.out_layer.bias.data.zero_()
        for i in range(self.hidden_layers):
            name = 'cell{}'.format(i)
            getattr(self, name).weight.data.zero_()
            getattr(self, name).bias.data.zero_()
    def forward(self, input):
        """
        input: (batch_size, seq_length, in_features)
        output: (batch_size, seq_length, out_features)
        """
        h = self.in_layer(input)
        h = F.relu(h)
        for i in range(self.hidden_layers):
            name = 'cell{}'.format(i)
            h = getattr(self, name)(h)
            h = F.relu(h)
        return self.out_layer(h)
class NICETrans(nn.Module):
    """NICE additive-coupling flow over feature vectors.

    Applies `couple_layers` additive coupling layers, alternating which
    half of the features is transformed; each coupling function is a
    ReLUNet. Additive couplings are volume-preserving, so the log-Jacobian
    is always zero.
    """
    def __init__(self,
                 couple_layers,
                 cell_layers,
                 hidden_units,
                 features):
        super(NICETrans, self).__init__()
        self.couple_layers = couple_layers
        for i in range(couple_layers):
            name = 'cell{}'.format(i)
            cell = ReLUNet(cell_layers, hidden_units, features//2, features//2)
            setattr(self, name, cell)
    def reset_parameters(self):
        for i in range(self.couple_layers):
            name = 'cell{}'.format(i)
            getattr(self, name).reset_parameters()
    def init_identity(self):
        # Zero every coupling net so the whole flow is the identity map.
        for i in range(self.couple_layers):
            name = 'cell{}'.format(i)
            getattr(self, name).init_identity()
    def forward(self, input):
        """
        input: (seq_length, batch_size, features)
        h: (seq_length, batch_size, features)
        """
        # For NICE it is a constant
        jacobian_loss = torch.zeros(1, device=next(self.parameters()).device, requires_grad=False)
        ep_size = input.size()
        features = ep_size[-1]
        # h = odd_input
        h = input
        for i in range(self.couple_layers):
            name = 'cell{}'.format(i)
            h1, h2 = torch.split(h, features//2, dim=-1)
            # Even layers shift the second half by f(first half); odd layers
            # shift the first half by f(second half).
            if i%2 == 0:
                h = torch.cat((h1, h2 + getattr(self, name)(h1)), dim=-1)
            else:
                h = torch.cat((h1 + getattr(self, name)(h2), h2), dim=-1)
        return h, jacobian_loss
class FlowWordEmission(nn.Module):
    """Word-emission model p(word | state).

    Word embeddings are passed through a volume-preserving NICE flow and
    scored under per-state diagonal Gaussians whose mean and log-variance
    are predicted from the two halves of the state embedding.
    """
    def __init__(self, features, vocab_size, couple_layers, cell_layers, hidden_units):
        super(FlowWordEmission, self).__init__()
        self.features = features
        self.nice = NICETrans(couple_layers, cell_layers, hidden_units, features)
        self.mu_mlp = nn.Linear(features//2, features)
        self.logvar_mlp = nn.Linear(features//2, features)
        self.word_emb = nn.Embedding(vocab_size, features)
    def forward(self, state_emb, sents):
        """Return per-state Gaussian log densities for each word.

        state_emb: 4-D state embeddings; the first half of the last dim
        feeds the mean MLP, the second half the log-variance MLP.
        sents: integer word ids (one dim fewer than state_emb).
        """
        sents = self.word_emb(sents)
        sents, jacobian_loss = self.nice(sents)
        mu = self.mu_mlp(state_emb[:, :, :, :self.features//2])
        logvar = self.logvar_mlp(state_emb[:, :, :, self.features//2:])
        # Broadcast each flowed word vector over the state dimension.
        sents = sents.unsqueeze(2).expand_as(mu)
        # NOTE(review): this density omits the -0.5*sum(logvar) normalizer
        # and multiplies by exp(logvar) rather than exp(-logvar); kept as-is.
        log_prob = - 0.5 * (self.features * np.log(2 * np.pi)) - 0.5 * ((sents - mu) * (sents - mu) * torch.exp(logvar)).sum(dim=-1)
        # BUG FIX: the original return line was corrupted by fused extraction
        # metadata ("| 4,206 | ..."), which would raise at runtime.
        return log_prob
neural-lpcfg | neural-lpcfg-master/utils.py | #Norm!/usr/bin/env python3
import numpy as np
import itertools
import random
import torch
import nltk
import pickle
import pdb
def all_binary_trees(n):
    """Enumerate every binary tree over n leaves as a shift(0)/reduce(1)
    action sequence of length 2n-1, in lexicographic enumeration order.

    Fix: removed the dead num_shift/num_reduce counters from the original.
    """
    def is_tree(tree, n):
        # Valid iff there are exactly n-1 reduces and the running
        # shift/reduce stack never drops below two items at a reduce.
        if sum(tree) != n - 1:
            return False
        stack = 0
        for action in tree:
            if action == 0:
                stack += 1
            else:
                if stack < 2:
                    return False
                stack -= 1
            if stack < 0:
                return False
        return True

    num_actions = 2 * n - 1
    valid_trees = []
    # The first two actions are always shift and the last is always reduce,
    # so only the middle num_actions-3 positions need enumerating.
    for middle in map(list, itertools.product([0, 1], repeat=num_actions - 3)):
        candidate = [0, 0] + middle + [1]
        if is_tree(candidate, n):
            valid_trees.append(candidate)
    return valid_trees
def get_actions(tree, SHIFT = 0, REDUCE = 1, OPEN='(', CLOSE=')'):
    """Convert a bracketed binary tree string, e.g. "((A B) (C D))", into
    its shift/reduce action sequence, e.g. S S R S S R R."""
    tree = tree.strip()
    actions = []
    num_shift = 0
    num_reduce = 0
    for i, ch in enumerate(tree):
        if ch == OPEN:
            continue
        if ch == CLOSE:
            actions.append(REDUCE)
            num_reduce += 1
        elif ch != ' ':
            # Only the first character of a terminal token emits a SHIFT.
            if tree[i - 1] == OPEN or tree[i - 1] == ' ':
                actions.append(SHIFT)
                num_shift += 1
    assert(num_shift == num_reduce + 1)
    return actions
def get_tree(actions, sent = None, SHIFT = 0, REDUCE = 1):
    """Replay shift/reduce `actions` over `sent` (defaults to "0","1",...)
    and return the bracketed binary tree string, e.g. "((A B) (C D))"."""
    if sent is None:
        sent = [str(i) for i in range((len(actions) + 1) // 2)]
    stack = []
    pointer = 0
    for action in actions:
        if action == SHIFT:
            stack.append(sent[pointer])
            pointer += 1
        elif action == REDUCE:
            right = stack.pop()
            left = stack.pop()
            stack.append('({} {})'.format(left, right))
    assert(len(stack) == 1)
    return stack[-1]
def get_depth(tree, SHIFT = 0, REDUCE = 1):
    """Maximum bracket-nesting depth of a bracketed tree string.

    Fixes: the original shadowed the builtin `max` and carried unused
    `stack`/`depth` locals; behavior is unchanged.
    """
    depth = 0
    max_depth = 0
    for c in tree:
        if c == '(':
            depth += 1
            if depth > max_depth:
                max_depth = depth
        elif c == ')':
            depth -= 1
    assert(depth == 0)
    return max_depth
def get_spans(actions, SHIFT = 0, REDUCE = 1):
    """Replay binary shift/reduce actions over word positions and return
    the (start, end) span created by every REDUCE, in creation order."""
    pointer = 0
    stack = []
    spans = []
    for action in actions:
        if action == SHIFT:
            # A shifted word is represented by its position index.
            stack.append(pointer)
            pointer += 1
        elif action == REDUCE:
            right = stack.pop()
            left = stack.pop()
            if isinstance(left, int):
                left = (left, None)
            if isinstance(right, int):
                right = (None, right)
            span = (left[0], right[1])
            spans.append(span)
            stack.append(span)
    return spans
def get_stats(span1, span2):
    """Span-overlap counts of predicted `span1` against gold `span2`:
    returns (true positives, false positives, false negatives).

    Duplicates are counted individually, matching list membership semantics.
    """
    tp = sum(1 for span in span1 if span in span2)
    fp = len(span1) - tp
    fn = sum(1 for span in span2 if span not in span1)
    return tp, fp, fn
from collections import defaultdict
def get_stats_by_cat(span1, span2, gold_tree):
    """Per-category span counts.

    Returns (tp, all_) defaultdicts keyed by the label gold_tree[span][1]:
    tp counts predicted spans also present in `span2`; all_ counts every
    predicted span.
    """
    tp = defaultdict(int)
    all_ = defaultdict(int)
    for span in span1:
        label = gold_tree[span][1]
        if span in span2:
            tp[label] += 1
        all_[label] += 1
    return tp, all_
def update_stats(pred_span, gold_spans, stats):
    """Accumulate (tp, fp, fn) of `pred_span` against each gold span list
    into the parallel rows of `stats`, mutating them in place."""
    for gold_span, stat in zip(gold_spans, stats):
        for slot, delta in enumerate(get_stats(pred_span, gold_span)):
            stat[slot] += delta
def get_f1(stats):
    """F1 score (percent) for each (tp, fp, fn) row; 0 when undefined."""
    f1s = []
    for tp, fp, fn in stats:
        prec = tp / (tp + fp) if tp + fp > 0 else 0.
        recall = tp / (tp + fn) if tp + fn > 0 else 0.
        if prec + recall > 0:
            f1s.append(2 * prec * recall / (prec + recall) * 100)
        else:
            f1s.append(0.)
    return f1s
def get_random_tree(length, SHIFT = 0, REDUCE = 1):
    """Sample a random binary tree over `length` leaves as a shift/reduce
    sequence: a fair coin picks the next action, except when only one
    action is legal (stack too small to reduce, or no words left)."""
    actions = [SHIFT, SHIFT]
    stack = ['', '']
    num_shift = 2
    while len(actions) < 2 * length - 1:
        if len(stack) < 2:
            shift = True           # forced: nothing to reduce
        elif num_shift >= length:
            shift = False          # forced: every word already shifted
        else:
            shift = random.random() < 0.5
        if shift:
            actions.append(SHIFT)
            stack.append('')
            num_shift += 1
        else:
            actions.append(REDUCE)
            stack.pop()
    return actions
def span_str(start = None, end = None):
    """Render a (possibly half-open) span as a bracketed string fragment;
    at least one endpoint must be given."""
    assert(start is not None or end is not None)
    if start is None:
        return f' {end})'
    if end is None:
        return f'({start} '
    return f' ({start} {end}) '
def get_tree_from_binary_matrix(matrix, length):
    """Decode a 0/1 merge matrix into a bracketed tree string: for every
    span width (shortest first), matrix[s][t] == 1 merges the current
    subtrees rooted at positions s and t."""
    tree = {i: str(i) for i in range(length)}
    for offset in np.arange(1, length):
        for s in np.arange(length):
            t = s + offset
            if t > length - 1:
                break
            if matrix[s][t].item() == 1:
                merged = '(' + tree[s] + ' ' + tree[t] + ')'
                tree[s] = merged
                tree[t] = merged
    return tree[0]
def get_nonbinary_spans(actions, SHIFT = 0, REDUCE = 1):
    """Convert transition actions ("SHIFT" / "NT(..." / "REDUCE") into word
    spans plus binarized and non-binarized action lists.

    Returns (spans, binary_actions, nonbinary_actions); each span is a
    (left, right) pair over word positions, single-word spans excluded.
    """
    spans = []
    stack = []
    pointer = 0
    binary_actions = []
    nonbinary_actions = []
    num_shift = 0
    num_reduce = 0
    for action in actions:
        if action == "SHIFT":
            nonbinary_actions.append(SHIFT)
            stack.append((pointer, pointer))
            pointer += 1
            binary_actions.append(SHIFT)
            num_shift += 1
        elif action[:3] == 'NT(':
            # Open-constituent sentinel on the stack.
            stack.append('(')
        elif action == "REDUCE":
            nonbinary_actions.append(REDUCE)
            right = stack.pop()
            left = right
            n = 1
            # BUG FIX: the original compared with `is not '('` (object
            # identity on an interned literal, SyntaxWarning on modern
            # Python); use equality for well-defined behavior.
            while stack[-1] != '(':
                left = stack.pop()
                n += 1
            span = (left[0], right[1])
            if left[0] != right[1]:
                spans.append(span)
            stack.pop()
            stack.append(span)
            # A k-ary constituent binarizes into k-1 REDUCE actions.
            while n > 1:
                n -= 1
                binary_actions.append(REDUCE)
                num_reduce += 1
        else:
            assert False
    assert(len(stack) == 1)
    assert(num_shift == num_reduce + 1)
    return spans, binary_actions, nonbinary_actions
def get_nonbinary_tree(sent, tags, actions):
    """Rebuild a labeled bracketing from transition actions, attaching POS
    `tags` to each shifted word from `sent`."""
    pointer = 0
    pieces = []
    for action in actions:
        if action[:2] == "NT":
            # "NT(LABEL)" -> "(LABEL", dropping functional suffixes after '-'.
            label = action[:-1].split("NT")[1]
            pieces.append(label.split("-")[0])
        elif action == "REDUCE":
            pieces.append(")")
        elif action == "SHIFT":
            pieces.append("(" + tags[pointer] + " " + sent[pointer] + ")")
            pointer += 1
        else:
            assert(False)
    assert(pointer == len(sent))
    return " ".join(pieces).replace(" )", ")")
def build_tree(depth, sen):
    """Recursively build a nested-list tree over `sen`, splitting at the
    position of maximum `depth` (syntactic-distance style)."""
    assert len(depth) == len(sen)
    if len(depth) == 1:
        return sen[0]
    idx_max = np.argmax(depth)
    parse_tree = []
    if idx_max > 0:
        parse_tree.append(build_tree(depth[:idx_max], sen[:idx_max]))
    node = sen[idx_max]
    if idx_max + 1 < len(sen):
        node = [node, build_tree(depth[idx_max + 1:], sen[idx_max + 1:])]
    if parse_tree == []:
        return node
    parse_tree.append(node)
    return parse_tree
def get_brackets(tree, idx=0):
    """Collect (start, end) word-index brackets of width > 1 from a nested
    list / nltk.Tree; returns (bracket set, next word index)."""
    brackets = set()
    if not (isinstance(tree, list) or isinstance(tree, nltk.Tree)):
        # Leaf: consumes exactly one word position.
        return brackets, idx + 1
    for child in tree:
        child_brackets, next_idx = get_brackets(child, idx)
        if next_idx - idx > 1:
            brackets.add((idx, next_idx))
        brackets.update(child_brackets)
        idx = next_idx
    return brackets, idx
def get_nonbinary_spans_label(actions, SHIFT = 0, REDUCE = 1):
    """Like get_nonbinary_spans, but each span carries its constituent
    label: returns (spans, binary_actions) with spans (left, right, label).
    """
    spans = []
    stack = []
    pointer = 0
    binary_actions = []
    num_shift = 0
    num_reduce = 0
    for action in actions:
        if action == "SHIFT":
            stack.append((pointer, pointer))
            pointer += 1
            binary_actions.append(SHIFT)
            num_shift += 1
        elif action[:3] == 'NT(':
            # Keep the label with a '(' marker prefix: "NT(NP)" -> "(NP".
            label = "(" + action.split("(")[1][:-1]
            stack.append(label)
        elif action == "REDUCE":
            right = stack.pop()
            left = right
            n = 1
            # BUG FIX: the original used `is not '('` (object identity on an
            # interned literal); compare with != for well-defined behavior.
            while stack[-1][0] != '(':
                left = stack.pop()
                n += 1
            span = (left[0], right[1], stack[-1][1:])
            if left[0] != right[1]:
                spans.append(span)
            stack.pop()
            stack.append(span)
            while n > 1:
                n -= 1
                binary_actions.append(REDUCE)
                num_reduce += 1
        else:
            assert False
    assert(len(stack) == 1)
    assert(num_shift == num_reduce + 1)
    return spans, binary_actions
def get_tagged_parse(parse, spans):
    """Re-emit a bracketed parse string with each open bracket and each
    word prefixed by a "{cat}-{head}" tag drawn from `spans`.

    `spans` entries are consumed in (start asc, end desc) order, matching
    pre-order traversal; indices 2 and 3 of each span are the tag fields.
    NOTE(review): assumes len(spans) matches the total bracket+word count
    of `parse` — confirm against callers.
    """
    spans = sorted(spans, key=lambda x:(x[0], -x[1]))
    i = 0
    ret = ''
    for segment in parse.split():
        # A whitespace token looks like "(((word)))": count the leading '('
        # and trailing ')' around the bare word.
        word_start = 0
        word_end = len(segment)
        while(word_start < len(segment) and segment[word_start] == '('):
            word_start += 1
        while(word_end > 0 and segment[word_end-1] == ')'):
            word_end -= 1
        # One tagged open bracket per leading '('.
        for _ in range(0, word_start):
            ret += '('+'{}-{} '.format(spans[i][2], spans[i][3])
            i += 1
        # The word itself gets its own tag.
        ret += '{}-{} {} '.format(spans[i][2], spans[i][3], segment[word_start:word_end])
        i += 1
        for _ in range(word_end, len(segment)):
            ret += ')'
        ret += ' '
    return ret
def conll_sentences(file, indices):
    """Yield sentences from a CoNLL-style line stream.

    Sentences are blank-line separated; each yielded sentence is a list of
    rows restricted to the tab-separated columns listed in `indices`.
    """
    def project(rows):
        return [[row[i] for i in indices] for row in rows]

    sentence = []
    for line in file:
        if line != "\n":
            sentence.append(line.strip().split('\t'))
        else:
            yield project(sentence)
            sentence = []
    # Flush a trailing sentence with no terminating blank line.
    if len(sentence):
        yield project(sentence)
def read_conll(file, max_len=None):
    """Yield (words, heads) per sentence from a CoNLL stream (columns 1 and
    6), skipping sentences longer than `max_len` when it is given."""
    for rows in conll_sentences(file, [1, 6]):
        if max_len is not None and len(rows) > max_len:
            continue
        yield ([r[0] for r in rows], [int(r[1]) for r in rows])
def measures(gold_s, parse_s):
    """Directed/undirected attachment counts of gold arcs found among
    predicted arcs (gold arcs are 1-indexed, predictions 0-indexed).

    Note: if both orientations of an arc are predicted, the undirected
    count credits it twice, matching the original accounting.
    """
    directed = 0.0
    undirected = 0.0
    for (head, dep) in gold_s:
        head, dep = head - 1, dep - 1
        if (head, dep) in parse_s:
            directed += 1.0
            undirected += 1.0
        if (dep, head) in parse_s:
            undirected += 1.0
    return (directed, undirected)
def get_head(spans, predict_head, running_head=-1):
    """Recursively walk head-annotated spans and fill `predict_head` in
    place with each token's predicted head index.

    `spans` is consumed from the end; each entry is a tuple whose slots 0/1
    are the span boundaries and slot 3 its head token. `running_head` is
    the head of the enclosing span (-1 at the root). Returns the remaining,
    unconsumed spans.
    NOTE(review): each non-leaf span recurses exactly twice, i.e. assumes a
    binary span structure — confirm against the producer of `spans`.
    """
    this_span = spans[-1]
    spans = spans[:-1]
    if(this_span[3] != running_head):
        # This span introduces a new head: attach it to the enclosing head.
        predict_head[this_span[3]] = running_head
    if(this_span[0] != this_span[1]):
        spans = get_head(spans, predict_head, this_span[3])
        spans = get_head(spans, predict_head, this_span[3])
    return spans
def update_dep_stats(spans, heads, dep_stats):
    """Append [sentence length, directed hits, undirected hits] for one
    sentence's induced dependencies to `dep_stats` (mutated in place)."""
    predicted = [-1 for _ in heads]
    get_head(spans, predicted)
    gold_arcs = [(i + 1, h) for i, h in enumerate(heads)]
    directed, undirected = measures(gold_arcs, list(enumerate(predicted)))
    dep_stats.append([len(heads), directed, undirected])
def get_dep_acc(dep_stats):
    """Overall directed and undirected attachment accuracy (percent) from
    per-sentence [length, directed, undirected] rows."""
    total = sum(row[0] for row in dep_stats)
    directed = sum(row[1] for row in dep_stats)
    undirected = sum(row[2] for row in dep_stats)
    return directed / total * 100, undirected / total * 100
def get_word_emb_matrix(wv_file, idx2word):
    """Build a (vocab, dim) embedding matrix from a pickled word->vector
    dict; out-of-vocabulary words get a random unit-norm vector.

    NOTE(review): assumes 'a' is a key in the pickle (used to read `dim`)
    and that values expose numpy's `.size`.
    """
    wv = pickle.load(open(wv_file, "rb"))
    dim = wv['a'].size
    ret = []
    found_cnt, unfound_cnt = 0, 0
    for i in range(len(idx2word)):
        word = idx2word[i]
        try:
            word_vec = wv[word]
            found_cnt += 1
        except KeyError:
            # OOV: random direction on the unit sphere.
            word_vec = np.random.randn(dim)
            word_vec /= np.linalg.norm(word_vec, 2)
            unfound_cnt += 1
        ret.append(word_vec)
    print("WARNING: {} words found, and {} word not found".format(found_cnt, unfound_cnt))
    return np.stack(ret)
def get_span2head(spans, heads, gold_actions=None, gold_tags=None):
    """Map each constituent span to its (head token index, label).

    `heads` are 1-indexed gold dependency heads; `gold_actions` supplies
    nonterminal labels and `gold_tags` POS tags for single-word spans.
    Returns a dict {(left, right): (head_index, label)}.
    """
    from cfg2dep import parse_line
    def dfs(spans, heads, nts, tags):
        # Consume spans (sorted so the current constituent is last) and
        # recurse over its children; a child whose head lies outside [l, r]
        # is taken as the constituent's head.
        if(len(spans) == 0):
            return -1, {}
        l, r = spans[-1]
        label = nts.pop()
        spans.pop()
        root_list = []
        ret_dict = {}
        i = l
        while(i <= r):
            if(len(spans) == 0 or spans[-1][0] != i):
                # single word span
                root_list.append(i)
                ret_dict[(i, i)] = (i, tags.pop())
                i += 1
            else:
                i = spans[-1][1] + 1
                root, sub_dict = dfs(spans, heads, nts, tags)
                ret_dict.update(sub_dict)
                root_list.append(root)
        for i in root_list:
            if(heads[i] < l or heads[i] > r):
                ret_dict[(l, r)] = (i, label)
            # NOTE(review): this return sits inside the loop, so only the
            # first child root is ever considered — looks suspicious; kept
            # byte-identical. Verify against the original repo.
            return i, ret_dict
    def get_nts(gold_actions):
        # "NT(LABEL)" -> "LABEL"
        return [i[3:-1] for i in gold_actions if i[0] == "N"]
    heads_set = [i-1 for i in heads]
    # Sort so dfs sees constituents in pre-order when popping from the end.
    sorted_spans = sorted(spans, key=lambda x: (-x[0], x[1]))
    nts = list(reversed(get_nts(gold_actions))) if gold_actions else None
    tags = list(reversed(gold_tags)) if gold_tags else None
    _, span2head = dfs(sorted_spans, heads_set, nts, tags)
    return span2head
# Penn Treebank nonterminal and preterminal (POS) label inventories with
# stable label -> index mappings.
NT_list = ['NP', 'VP', 'S', 'ADVP', 'PP', 'ADJP', 'SBAR', 'WHADVP', 'WHNP', 'PRN', 'SINV', 'QP', 'PRT', 'NAC', 'NX', 'UCP', 'FRAG', 'INTJ', 'X', 'RRC', 'SQ', 'CONJP', 'WHPP', 'WHADJP', 'SBARQ', 'LST', 'PRT|ADVP']
PT_list = ['DT', 'JJ', 'NNS', 'VBD', 'NN', 'CC', 'RB', 'IN', 'JJS', 'NNP', 'CD', 'TO', 'JJR', 'VBG', 'POS', 'VBP', 'VBN', 'RBR', 'WRB', 'PRP', 'PRP$', 'WDT', 'EX', 'MD', 'VB', 'VBZ', 'NNPS', 'WP', 'RP', 'PDT', 'WP$', 'RBS', 'FW', 'UH', 'SYM', 'LS']
NT2ID = {label: i for i, label in enumerate(NT_list)}
# BUG FIX: the original PT2ID line was corrupted by fused extraction
# metadata ("| 13,274 | ..."), which would raise a TypeError at import.
PT2ID = {label: i for i, label in enumerate(PT_list)}
neural-lpcfg | neural-lpcfg-master/data.py | #!/usr/bin/env python3
import numpy as np
import torch
import pickle
class Dataset(object):
    """Pre-batched text dataset loaded from a preprocessed pickle.

    Sentences are stored flat and indexed by per-batch (start, size)
    offsets, so each __getitem__ returns one whole batch of same-length
    sentences together with their gold annotations.
    """
    def __init__(self, data_file, load_dep=False):
        # load_dep: also expose gold dependency heads per sentence.
        data = pickle.load(open(data_file, 'rb')) #get text data
        self.sents = self._convert(data['source']).long()
        self.other_data = data['other_data']
        self.sent_lengths = self._convert(data['source_l']).long()
        self.batch_size = self._convert(data['batch_l']).long()
        self.batch_idx = self._convert(data['batch_idx']).long()
        self.vocab_size = data['vocab_size'][0]
        self.num_batches = self.batch_idx.size(0)
        self.word2idx = data['word2idx']
        self.idx2word = data['idx2word']
        self.load_dep = load_dep
    def _convert(self, x):
        # numpy -> torch tensor (shared memory where possible).
        return torch.from_numpy(np.asarray(x))
    def __len__(self):
        return self.num_batches
    def __getitem__(self, idx):
        """Return the idx-th batch as
        [sents, length, batch_size, tags, actions, spans, binary_tree,
        other_data(, heads)]."""
        assert(idx < self.num_batches and idx >= 0)
        start_idx = self.batch_idx[idx]
        end_idx = start_idx + self.batch_size[idx]
        length = self.sent_lengths[idx].item()
        sents = self.sents[start_idx:end_idx]
        other_data = self.other_data[start_idx:end_idx]
        # Per-sentence annotation tuples: unpack the fields we serve.
        sent_str = [d[0] for d in other_data]
        tags = [d[1] for d in other_data]
        actions = [d[2] for d in other_data]
        binary_tree = [d[3] for d in other_data]
        spans = [d[5] for d in other_data]
        if(self.load_dep):
            heads = [d[7] for d in other_data]
        batch_size = self.batch_size[idx].item()
        # original data includes </s>, which we don't need
        data_batch = [sents[:, 1:length-1], length-2, batch_size, tags, actions,
                      spans, binary_tree, other_data]
        if(self.load_dep):
            data_batch.append(heads)
        return data_batch
| 1,666 | 34.468085 | 77 | py |
neural-lpcfg | neural-lpcfg-master/lexicalizedPCFG.py | #!/usr/bin/env python3
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import itertools
import random
from torch.cuda import memory_allocated
import pdb
class LexicalizedPCFG(nn.Module):
# Lexicalized PCFG:
# S → A[x] A ∈ N, x ∈ 𝚺
# A[x] → B[x] C[y] A, B, C ∈ N ∪ P, x, y ∈ 𝚺
# A[x] → B[y] C[x] A, B, C ∈ N ∪ P, x, y ∈ 𝚺
# T[x] → x T ∈ P, x ∈ 𝚺
    def __init__(self, nt_states, t_states, nt_emission=False, supervised_signals = []):
        """nt_states / t_states: number of nonterminal and preterminal
        symbols; nt_emission: whether nonterminals may also emit words.

        NOTE(review): `supervised_signals=[]` is a mutable default argument
        shared across instances — confirm no caller mutates it.
        """
        super(LexicalizedPCFG, self).__init__()
        self.nt_states = nt_states
        self.t_states = t_states
        self.states = nt_states + t_states
        self.nt_emission = nt_emission
        self.huge = 1e9
        # Which symbol indices may emit a word: all states when nonterminal
        # emission is on, otherwise only the preterminal tail [nt, nt+t).
        if(self.nt_emission):
            self.word_span_slice = slice(self.states)
        else:
            self.word_span_slice = slice(self.nt_states,self.states)
        self.supervised_signals = supervised_signals
# def logadd(self, x, y):
# d = torch.max(x,y)
# return torch.log(torch.exp(x-d) + torch.exp(y-d)) + d
def logadd(self, x, y):
names = x.names
assert names == y.names, "Two operants' names are not matched {} and {}.".format(names, y.names)
return torch.logsumexp(torch.stack([x.rename(None), y.rename(None)]), dim=0).refine_names(*names)
def logsumexp(self, x, dim=1):
d = torch.max(x, dim)[0]
if x.dim() == 1:
return torch.log(torch.exp(x - d).sum(dim)) + d
else:
return torch.log(torch.exp(x - d.unsqueeze(dim).expand_as(x)).sum(dim)) + d
def __get_scores(self, unary_scores, rule_scores, root_scores, dir_scores):
# INPUT
# unary scores : b x n x (NT + T)
# rule scores : b x (NT+T) x (NT+T) x (NT+T)
# root_scores : b x NT
# dir_scores : 2 x b x NT x (NT + T) x (NT + T) x N
# OUTPUT
# rule scores: 2 x B x (NT x T) x (NT x T) x (NT x T) x N
# (D, B, T, TL, TR, H)
# root_scores : b x NT x n
# (B, T, H)
assert unary_scores.names == ('B', 'H', 'T')
assert rule_scores.names == ('B', 'T', 'TL', 'TR')
assert root_scores.names == ('B', 'T')
assert dir_scores.names == ('D', 'B', 'T', 'H', 'TL', 'TR')
rule_shape = ('D', 'B', 'T', 'H', 'TL', 'TR')
root_shape = ('B', 'T', 'H')
rule_scores = rule_scores.align_to(*rule_shape) \
+ dir_scores.align_to(*rule_shape)
if rule_scores.size('H') == 1:
rule_scores = rule_scores.expand(-1, -1, -1, unary_scores.size('H'), -1, -1)
return rule_scores, root_scores, unary_scores
def __get_scores(self, unary_scores, rule_scores, root_scores):
# INPUT
# unary scores : b x n x (NT + T)
# rule scores : b x (NT+T) x (NT+T) x (NT+T)
# root_scores : b x NT
# dir_scores : 2 x b x NT x (NT + T) x (NT + T) x N
# OUTPUT
# rule scores: 2 x B x (NT x T) x (NT x T) x (NT x T) x N
# (D, B, T, TL, TR, H)
# root_scores : b x NT x n
# (B, T, H)
assert unary_scores.names == ('B', 'H', 'T')
assert rule_scores.names == ('B', 'T', 'H', 'TL', 'TR', 'D')
assert root_scores.names == ('B', 'T')
rule_shape = ('D', 'B', 'T', 'H', 'TL', 'TR')
root_shape = ('B', 'T', 'H')
rule_scores = rule_scores.align_to(*rule_shape)
if rule_scores.size('H') == 1:
rule_scores = rule_scores.expand(-1, -1, -1, unary_scores.size('H'), -1, -1)
return rule_scores, root_scores, unary_scores
def print_name_size(self, x):
print(x.size(), x.names)
def print_memory_usage(self, lineno, device="cuda:0"):
print("Line {}: {}M".format(lineno, int(memory_allocated(device)/1000000)))
def cross_bracket(self, l, r, gold_brackets):
for bl, br in gold_brackets:
if((bl<l<=br and r>br) or (l<bl and bl<=r<br)):
return True
return False
def get_mask(self, B, N, T, gold_tree):
mask = self.beta.new(B, N+1, N+1, T, N).fill_(0)
for i in range(B):
gold_brackets = gold_tree[i].keys()
if "phrase" in self.supervised_signals:
for l in range(N):
for r in range(l, N):
if(self.cross_bracket(l, r, gold_brackets)):
mask[i][l, r+1].fill_(-self.huge)
for l, r in gold_brackets:
mask[i][l, r+1].fill_(-self.huge)
acceptable_heads = slice(gold_tree[i][(l, r)][0], gold_tree[i][(l, r)][0] + 1)\
if "head" in self.supervised_signals else slice(l, r+1)
if(l == r):
if(gold_tree[i][(l, r)][1] < self.t_states and "tag" in self.supervised_signals):
mask[i][l, r+1, gold_tree[i][(l, r)][1] + self.nt_states, acceptable_heads] = 0
else:
mask[i][l, r+1, :, acceptable_heads] = 0
else:
if(gold_tree[i][(l, r)][1] < self.nt_states and "nt" in self.supervised_signals):
mask[i][l, r+1, gold_tree[i][(l, r)][1], acceptable_heads] = 0
else:
mask[i][l, r+1, :, acceptable_heads] = 0
return mask
def _inside(self, gold_tree=None, **kwargs):
#inside step
rule_scores, root_scores, unary_scores = self.__get_scores(**kwargs)
# statistics
B = rule_scores.size('B')
N = unary_scores.size('H')
T = self.states
# uses conventional python numbering scheme: [s, t] represents span [s, t)
# this scheme facilitates fast computation
# f[s, t] = logsumexp(f[s, :] * f[:, t])
self.beta = rule_scores.new(B, N + 1, N + 1, T, N).fill_(-self.huge).refine_names('B', 'L', 'R', 'T', 'H')
self.beta_ = rule_scores.new(B, N + 1, N + 1, T).fill_(-self.huge).refine_names('B', 'L', 'R', 'T')
if(not gold_tree is None):
mask = self.get_mask(B, N, T, gold_tree)
else:
mask = self.beta.new(B, N+1, N+1, T, N).fill_(0)
# initialization: f[k, k+1]
for k in range(N):
for state in range(self.states):
if(not self.nt_emission and state < self.nt_states):
continue
self.beta[:, k, k+1, state, k] = mask[:, k, k+1, state, k]
self.beta_[:, k, k+1, state] = unary_scores[:, k, state].rename(None) + mask[:, k, k+1, state, k].rename(None)
# span length w, at least 2
for W in np.arange(2, N+1):
# start point s
for l in range(N-W+1):
r = l + W
f = lambda x:torch.logsumexp(x.align_to('B', 'T', 'H', ...).rename(None).reshape(B, self.nt_states, W, -1), dim=3).refine_names('B', 'T', 'H')
left = lambda x, y, z: x.rename(T='TL').align_as(z) + y.rename(T='TR').align_as(z) + z
right = lambda x, y, z: x.rename(T='TL').align_as(z) + y.rename(T='TR').align_as(z) + z
g = lambda x, y, x_, y_, z: torch.cat((left(x, y_, z[0]).align_as(z),
right(x_, y, z[1]).align_as(z)), dim='D')
if W == 2:
tmp = g(self.beta[:, l, l+1, self.word_span_slice, l:r],
self.beta[:, l+1, r, self.word_span_slice, l:r],
self.beta_[:, l, l+1, self.word_span_slice],
self.beta_[:, l+1, r, self.word_span_slice],
rule_scores[:, :, :, l:r, self.word_span_slice, self.word_span_slice])
tmp = f(tmp)
elif W == 3:
tmp1 = g(self.beta[:, l, l+1, self.word_span_slice, l:r],
self.beta[:, l+1, r, :self.nt_states, l:r],
self.beta_[:, l, l+1, self.word_span_slice],
self.beta_[:, l+1, r, :self.nt_states],
rule_scores[:, :, :, l:r, self.word_span_slice, :self.nt_states])
tmp2 = g(self.beta[:, l, r-1, :self.nt_states, l:r],
self.beta[:, r-1, r, self.word_span_slice, l:r],
self.beta_[:, l, r-1, :self.nt_states],
self.beta_[:, r-1, r, self.word_span_slice],
rule_scores[:, :, :, l:r, :self.nt_states, self.word_span_slice])
tmp = self.logadd(f(tmp1), f(tmp2))
elif W >= 4:
tmp1 = g(self.beta[:, l, l+1, self.word_span_slice, l:r],
self.beta[:, l+1, r, :self.nt_states, l:r],
self.beta_[:, l, l+1, self.word_span_slice],
self.beta_[:, l+1, r, :self.nt_states],
rule_scores[:, :, :, l:r, self.word_span_slice, :self.nt_states])
tmp2 = g(self.beta[:, l, r-1, :self.nt_states, l:r],
self.beta[:, r-1, r, self.word_span_slice, l:r],
self.beta_[:, l, r-1, :self.nt_states],
self.beta_[:, r-1, r, self.word_span_slice],
rule_scores[:, :, :, l:r, :self.nt_states, self.word_span_slice])
tmp3 = g(self.beta[:, l, l+2:r-1, :self.nt_states, l:r].rename(R='U'),
self.beta[:, l+2:r-1, r, :self.nt_states, l:r].rename(L='U'),
self.beta_[:, l, l+2:r-1, :self.nt_states].rename(R='U'),
self.beta_[:, l+2:r-1, r, :self.nt_states].rename(L='U'),
rule_scores[:, :, :, l:r, :self.nt_states, :self.nt_states].align_to('D', 'B', 'T', 'H', 'U', ...))
tmp = self.logadd(self.logadd(f(tmp1), f(tmp2)), f(tmp3))
tmp = tmp + mask[:, l, r, :self.nt_states, l:r]
self.beta[:, l, r, :self.nt_states, l:r] = tmp.rename(None)
tmp_ = torch.logsumexp(tmp + unary_scores[:, l:r, :self.nt_states].align_as(tmp), dim='H')
self.beta_[:, l, r, :self.nt_states] = tmp_.rename(None)
log_Z = self.beta_[:, 0, N, :self.nt_states] + root_scores
log_Z = torch.logsumexp(log_Z, dim='T')
return log_Z
def _viterbi(self, **kwargs):
#unary scores : b x n x T
#rule scores : b x NT x (NT+T) x (NT+T)
rule_scores, root_scores, unary_scores = self.__get_scores(**kwargs)
# statistics
B = rule_scores.size('B')
N = unary_scores.size('H')
T = self.states
# # dummy rules
# rule_scores = torch.cat([rule_scores, \
# rule_scores.new(B, self.t_states, T, T) \
# .fill_(-self.huge)], dim=1)
self.scores = rule_scores.new(B, N+1, N+1, T, N).fill_(-self.huge).refine_names('B', 'L', 'R', 'T', 'H')
self.scores_ = rule_scores.new(B, N+1, N+1, T).fill_(-self.huge).refine_names('B', 'L', 'R', 'T')
self.bp = rule_scores.new(B, N+1, N+1, T, N).long().fill_(-1).refine_names('B', 'L', 'R', 'T', 'H')
self.left_bp = rule_scores.new(B, N+1, N+1, T, N).long().fill_(-1).refine_names('B', 'L', 'R', 'T', 'H')
self.right_bp = rule_scores.new(B, N+1, N+1, T, N).long().fill_(-1).refine_names('B', 'L', 'R', 'T', 'H')
self.dir_bp = rule_scores.new(B, N+1, N+1, T, N).long().fill_(-1).refine_names('B', 'L', 'R', 'T', 'H')
self.new_head_bp = rule_scores.new(B, N+1, N+1, T).long().fill_(-1).refine_names('B', 'L', 'R', 'T')
self.argmax = rule_scores.new(B, N, N).long().fill_(-1)
self.argmax_tags = rule_scores.new(B, N).long().fill_(-1)
self.spans = [[] for _ in range(B)]
# initialization: f[k, k+1]
for k in range(N):
for state in range(self.states):
if(not self.nt_emission and state < self.nt_states):
continue
self.scores[:, k, k+1, state, k] = 0
self.scores_[:, k, k+1, state] = unary_scores[:, k, state].rename(None)
self.new_head_bp[:, k, k+1, state] = k
for W in np.arange(2, N+1):
for l in range(N-W+1):
r = l + W
left = lambda x, y, z: x.rename(T='TL').align_as(z) + y.rename(T='TR').align_as(z) + z
right = lambda x, y, z: x.rename(T='TL').align_as(z) + y.rename(T='TR').align_as(z) + z
g = lambda x, y, x_, y_, z: torch.cat((left(x, y_, z[0]).align_as(z),
right(x_, y, z[1]).align_as(z)), dim='D')
# self.print_name_size(self.scores[:, l, l+1:r, :, l:r])
# self.print_name_size(rule_scores[:, :, :, l:r, :self.nt_states, :self.nt_states, l:r].align_to('D', 'B', 'T', 'H', 'U', ...))
tmp = g(self.scores[:, l, l+1:r, :, l:r].rename(R='U'),
self.scores[:, l+1:r, r, :, l:r].rename(L='U'),
self.scores_[:, l, l+1:r, :].rename(R='U'),
self.scores_[:, l+1:r, r, :].rename(L='U'),
rule_scores[:, :, :, l:r, :, :].align_to('D', 'B', 'T', 'H', 'U', ...))
tmp = tmp.align_to('B', 'T', 'H', 'D', 'U', 'TL', 'TR').flatten(['D', 'U', 'TL', 'TR'], 'position')
assert(tmp.size('position') == self.states * self.states * (W-1) * 2), "{}".format(tmp.size('position'))
# view once and marginalize
tmp, max_pos = torch.max(tmp, dim=3)
max_pos = max_pos.rename(None)
right_child = max_pos % self.states
max_pos /= self.states
left_child = max_pos % self.states
max_pos /= self.states
max_idx = max_pos % (W-1) + l + 1
max_pos = max_pos / int(W - 1)
max_dir = max_pos
self.scores[:, l, r, :self.nt_states, l:r] = tmp.rename(None)
tmp_ = tmp + unary_scores[:, l:r, :self.nt_states].align_as(tmp)
tmp_, new_head = torch.max(tmp_, dim='H')
self.scores_[:, l, r, :self.nt_states] = tmp_.rename(None)
self.bp[:, l, r, :self.nt_states, l:r] = max_idx
self.left_bp[:, l, r, :self.nt_states, l:r] = left_child
self.right_bp[:, l, r, :self.nt_states, l:r] = right_child
self.dir_bp[:, l, r, :self.nt_states, l:r] = max_dir
self.new_head_bp[:, l, r, :self.nt_states] = new_head.rename(None) + l
max_score = self.scores_[:, 0, N, :self.nt_states] + root_scores
max_score, max_idx = torch.max(max_score, dim='T')
for b in range(B):
self._backtrack(b, 0, N, max_idx[b].item())
return self.scores, self.argmax, self.spans
def _backtrack(self, b, s, t, state, head=-1):
if(head == -1):
head = int(self.new_head_bp[b][s][t][state])
u = int(self.bp[b][s][t][state][head])
assert(s < t), "s: %d, t %d"%(s, t)
left_state = int(self.left_bp[b][s][t][state][head])
right_state = int(self.right_bp[b][s][t][state][head])
direction = int(self.dir_bp[b][s][t][state][head])
self.argmax[b][s][t-1] = 1
if s == t-1:
self.spans[b].insert(0, (s, t-1, state, head))
self.argmax_tags[b][s] = state
return None
else:
self.spans[b].insert(0, (s, t-1, state, head))
if(direction == 0):
assert head < u, "head: {} < u: {}".format(head, u)
self._backtrack(b, s, u, left_state, head)
self._backtrack(b, u, t, right_state)
else:
assert head >= u, "head: {} >= u: {}".format(head, u)
self._backtrack(b, s, u, left_state)
self._backtrack(b, u, t, right_state, head)
return None | 14,743 | 42.880952 | 150 | py |
neural-lpcfg | neural-lpcfg-master/models.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
from PCFG import PCFG
from lexicalizedPCFG import LexicalizedPCFG
from random import shuffle
from flow import FlowWordEmission
import pdb
class ResidualLayer(nn.Module):
    """Two-layer ReLU MLP with an additive skip connection.

    Computes ``relu(lin2(relu(lin1(x)))) + x``.  The residual add requires
    the input feature size to equal ``out_dim`` (callers in this file always
    use in_dim == out_dim).
    """

    def __init__(self, in_dim=100, out_dim=100):
        super(ResidualLayer, self).__init__()
        self.lin1 = nn.Linear(in_dim, out_dim)
        self.lin2 = nn.Linear(out_dim, out_dim)

    def forward(self, x):
        hidden = F.relu(self.lin1(x))
        projected = F.relu(self.lin2(hidden))
        return projected + x
class CompPCFG(nn.Module):
    """Compound PCFG: rule/emission scores conditioned on a per-sentence
    latent vector z inferred by a BiLSTM encoder.

    When ``z_dim == 0`` the model degenerates to a plain neural PCFG
    (no encoder, zero KL).
    """

    def __init__(self, vocab = 100,
                 h_dim = 512,
                 w_dim = 512,
                 z_dim = 64,
                 state_dim = 256,
                 t_states = 10,
                 nt_states = 10,
                 **kwargs):
        """
        Args:
            vocab: vocabulary size.
            h_dim / w_dim: encoder LSTM hidden / embedding size.
            z_dim: latent dimension (0 disables the compound part).
            state_dim: symbol embedding dimension.
            t_states / nt_states: number of preterminals / nonterminals.
            **kwargs: ignored; kept so callers may pass a superset of options.
        """
        super(CompPCFG, self).__init__()
        self.state_dim = state_dim
        self.t_emb = nn.Parameter(torch.randn(t_states, state_dim))
        self.nt_emb = nn.Parameter(torch.randn(nt_states, state_dim))
        self.root_emb = nn.Parameter(torch.randn(1, state_dim))
        self.pcfg = PCFG(nt_states, t_states)
        self.nt_states = nt_states
        self.t_states = t_states
        self.all_states = nt_states + t_states
        self.dim = state_dim
        self.register_parameter('t_emb', self.t_emb)
        self.register_parameter('nt_emb', self.nt_emb)
        self.register_parameter('root_emb', self.root_emb)
        # A -> B C scores for every (B, C) pair, conditioned on [A-emb; z].
        self.rule_mlp = nn.Linear(state_dim+z_dim, self.all_states**2)
        self.root_mlp = nn.Sequential(nn.Linear(z_dim + state_dim, state_dim),
                                      ResidualLayer(state_dim, state_dim),
                                      ResidualLayer(state_dim, state_dim),
                                      nn.Linear(state_dim, self.nt_states))
        if z_dim > 0:
            # Variational encoder q(z | x); only built when z is used.
            self.enc_emb = nn.Embedding(vocab, w_dim)
            self.enc_rnn = nn.LSTM(w_dim, h_dim, bidirectional=True, num_layers = 1, batch_first = True)
            self.enc_params = nn.Linear(h_dim*2, z_dim*2)
        self.z_dim = z_dim
        self.vocab_mlp = nn.Sequential(nn.Linear(z_dim + state_dim, state_dim),
                                       ResidualLayer(state_dim, state_dim),
                                       ResidualLayer(state_dim, state_dim),
                                       nn.Linear(state_dim, vocab))

    def enc(self, x):
        """Encode word ids x (batch x n) into q(z|x) mean and log-variance."""
        emb = self.enc_emb(x)
        h, _ = self.enc_rnn(emb)
        # Max-pool over time before predicting the Gaussian parameters.
        params = self.enc_params(h.max(1)[0])
        mean = params[:, :self.z_dim]
        logvar = params[:, self.z_dim:]
        return mean, logvar

    def kl(self, mean, logvar):
        """Per-dimension KL(q(z|x) || N(0, I))."""
        result = -0.5 * (logvar - torch.pow(mean, 2) - torch.exp(logvar) + 1)
        return result

    def forward(self, x, argmax=False, use_mean=False, **kwargs):
        """Return (-log Z, KL) for x (batch x n); with ``argmax`` also the
        Viterbi binary span matrix and spans.

        ``use_mean`` replaces the sampled z with the posterior mean
        (deterministic evaluation).
        """
        n = x.size(1)
        batch_size = x.size(0)
        if self.z_dim > 0:
            mean, logvar = self.enc(x)
            # KL term of the ELBO (computed once; the original code
            # redundantly evaluated it twice).
            kl = self.kl(mean, logvar).sum(1)
            # Reparameterization trick.
            z = mean.new(batch_size, mean.size(1)).normal_(0, 1)
            z = (0.5*logvar).exp()*z + mean
            if use_mean:
                z = mean
            self.z = z
        else:
            self.z = torch.zeros(batch_size, 1).cuda()
        t_emb = self.t_emb
        nt_emb = self.nt_emb
        root_emb = self.root_emb
        root_emb = root_emb.expand(batch_size, self.state_dim)
        t_emb = t_emb.unsqueeze(0).unsqueeze(1).expand(batch_size, n, self.t_states, self.state_dim)
        nt_emb = nt_emb.unsqueeze(0).expand(batch_size, self.nt_states, self.state_dim)
        if self.z_dim > 0:
            # Condition every embedding on the sentence latent z.
            root_emb = torch.cat([root_emb, z], 1)
            z_expand = z.unsqueeze(1).expand(batch_size, n, self.z_dim)
            z_expand = z_expand.unsqueeze(2).expand(batch_size, n, self.t_states, self.z_dim)
            t_emb = torch.cat([t_emb, z_expand], 3)
            nt_emb = torch.cat([nt_emb, z.unsqueeze(1).expand(batch_size, self.nt_states,
                                                              self.z_dim)], 2)
        root_scores = F.log_softmax(self.root_mlp(root_emb), 1)
        unary_scores = F.log_softmax(self.vocab_mlp(t_emb), 3)
        # Gather each position's emission score for the observed word.
        x_expand = x.unsqueeze(2).expand(batch_size, x.size(1), self.t_states).unsqueeze(3)
        unary = torch.gather(unary_scores, 3, x_expand).squeeze(3)
        rule_score = F.log_softmax(self.rule_mlp(nt_emb), 2) # nt x t**2
        rule_scores = rule_score.view(batch_size, self.nt_states, self.all_states, self.all_states)
        log_Z = self.pcfg._inside(unary, rule_scores, root_scores)
        if self.z_dim == 0:
            kl = torch.zeros_like(log_Z)
        if argmax:
            with torch.no_grad():
                max_score, binary_matrix, spans = self.pcfg._viterbi(unary, rule_scores, root_scores)
                self.tags = self.pcfg.argmax_tags
            return -log_Z, kl, binary_matrix, spans
        else:
            return -log_Z, kl
class LexicalizedCompPCFG(nn.Module):
    """Compound lexicalized PCFG: neural parameterization on top of
    :class:`LexicalizedPCFG`, optionally conditioned on a per-sentence
    latent z inferred by a BiLSTM encoder.

    The ``variant`` flag selects how head-first rule probabilities are
    factorized (I-IV); ``head_first=False`` scores child pairs and the head
    direction jointly.
    """

    def __init__(self, vocab = 100,
                 h_dim = 512,
                 w_dim = 512,
                 z_dim = 64,
                 state_dim = 256,
                 t_states = 10,
                 nt_states = 10,
                 scalar_dir_scores = False,
                 seperate_nt_emb_for_emission = False,
                 head_first = False,
                 tie_word_emb = False,
                 variant='IV',
                 flow_word_emb=False,
                 couple_layers=4,
                 cell_layers=1,
                 pretrained_word_emb=None,
                 freeze_word_emb=False,
                 nt_emission = False,
                 supervised_signals=None):
        """
        Args:
            vocab: vocabulary size.
            h_dim / w_dim: encoder LSTM hidden / embedding size.
            z_dim: latent dimension (0 disables the compound part).
            state_dim: symbol and word embedding dimension.
            t_states / nt_states: number of preterminals / nonterminals.
            scalar_dir_scores: accepted for interface compatibility; the
                direction-score module that consumed it is no longer built.
            seperate_nt_emb_for_emission: use a separate nonterminal
                embedding table for the emission distribution.
            head_first: generate (head child, direction) before the sibling.
            tie_word_emb: tie emission weights to the word embedding table.
            variant: head-first factorization variant, one of 'I'..'IV'.
            flow_word_emb / couple_layers / cell_layers: flow-based emission.
            pretrained_word_emb: optional numpy array to initialize word embs.
            freeze_word_emb: stop gradients through the word embedding table.
            nt_emission: forwarded to LexicalizedPCFG.
            supervised_signals: forwarded to LexicalizedPCFG (a fresh empty
                list per instance when omitted — avoids the mutable-default
                pitfall of the previous ``=[]`` signature).
        """
        super(LexicalizedCompPCFG, self).__init__()
        # Normalize here so this class is safe regardless of how the
        # underlying LexicalizedPCFG handles its default.
        supervised_signals = [] if supervised_signals is None else supervised_signals
        self.state_dim = state_dim
        self.t_emb = nn.Parameter(torch.randn(t_states, state_dim))
        self.nt_emb = nn.Parameter(torch.randn(nt_states, state_dim))
        self.root_emb = nn.Parameter(torch.randn(1, state_dim))
        self.pcfg = LexicalizedPCFG(nt_states, t_states, nt_emission=nt_emission, supervised_signals=supervised_signals)
        self.nt_states = nt_states
        self.t_states = t_states
        self.all_states = nt_states + t_states
        self.dim = state_dim
        self.register_parameter('t_emb', self.t_emb)
        self.register_parameter('nt_emb', self.nt_emb)
        if seperate_nt_emb_for_emission:
            self.nt_emb_emission = nn.Parameter(torch.randn(nt_states, state_dim))
            self.register_parameter('nt_emb_emission', self.nt_emb_emission)
        else:
            self.nt_emb_emission = None
        self.register_parameter('root_emb', self.root_emb)
        self.head_first = head_first
        self.variant = variant
        if(not head_first):
            # Joint distribution over (left child, right child, direction).
            self.rule_mlp = nn.Linear(state_dim+state_dim+z_dim, 2 * self.all_states**2)
        else:
            if self.variant == 'I':
                self.head_mlp = nn.Linear(state_dim+state_dim+z_dim, 2 * self.all_states)
                self.rule_mlp = nn.Linear(state_dim+state_dim+z_dim, self.all_states**2)
            elif self.variant == 'II':
                self.head_mlp = nn.Sequential(nn.Linear(state_dim+state_dim+z_dim, state_dim),
                                              ResidualLayer(state_dim, state_dim),
                                              nn.Linear(state_dim, 2 * self.all_states))
                self.rule_mlp = nn.Linear(state_dim+state_dim+z_dim, self.all_states**2)
            elif self.variant == 'III':
                self.head_mlp = nn.Sequential(nn.Linear(state_dim+state_dim+z_dim, state_dim),
                                              ResidualLayer(state_dim, state_dim),
                                              nn.Linear(state_dim, 2 * self.all_states))
                # Sibling distribution does NOT see the head word here.
                self.rule_mlp = nn.Linear(state_dim+z_dim, self.all_states**2)
            elif self.variant == 'IV':
                self.head_mlp = nn.Sequential(nn.Linear(state_dim+state_dim+z_dim, state_dim),
                                              ResidualLayer(state_dim, state_dim),
                                              nn.Linear(state_dim, 2 * self.all_states))
                # Separate sibling distributions per head direction.
                self.left_rule_mlp = nn.Linear(state_dim+state_dim+z_dim, self.all_states**2)
                self.right_rule_mlp = nn.Linear(state_dim+state_dim+z_dim, self.all_states**2)
            else:
                raise NotImplementedError
        self.word_emb = nn.Embedding(vocab, state_dim)
        if not pretrained_word_emb is None:
            self.word_emb.load_state_dict({'weight': torch.from_numpy(pretrained_word_emb)})
        if freeze_word_emb:
            self.word_emb.weight.requires_grad = False
        self.root_mlp = nn.Sequential(nn.Linear(z_dim + state_dim, state_dim),
                                      ResidualLayer(state_dim, state_dim),
                                      ResidualLayer(state_dim, state_dim),
                                      nn.Linear(state_dim, self.nt_states))
        if z_dim > 0:
            # Variational encoder q(z | x); only built when z is used.
            self.enc_emb = nn.Embedding(vocab, w_dim)
            self.enc_rnn = nn.LSTM(w_dim, h_dim, bidirectional=True, num_layers = 1, batch_first = True)
            self.enc_params = nn.Linear(h_dim*2, z_dim*2)
        self.z_dim = z_dim
        self.flow_word_emb = flow_word_emb
        if self.flow_word_emb:
            self.vocab_mlp = nn.Sequential(nn.Linear(z_dim + state_dim, state_dim),
                                           ResidualLayer(state_dim, state_dim),
                                           ResidualLayer(state_dim, state_dim))
            self.emit_prob = FlowWordEmission(state_dim, vocab, couple_layers, cell_layers, state_dim)
            self.word_emb.weight.requires_grad = False
            if tie_word_emb:
                self.emit_prob.word_emb.weight = self.word_emb.weight
        else:
            self.vocab_mlp = nn.Sequential(nn.Linear(z_dim + state_dim, state_dim),
                                           ResidualLayer(state_dim, state_dim),
                                           ResidualLayer(state_dim, state_dim))
            self.emit_prob = nn.Linear(state_dim, vocab)
            if tie_word_emb:
                self.emit_prob.weight = self.word_emb.weight

    def enc(self, x):
        """Encode word ids x (batch x n) into q(z|x) mean and log-variance."""
        emb = self.enc_emb(x)
        h, _ = self.enc_rnn(emb)
        # Max-pool over time before predicting the Gaussian parameters.
        params = self.enc_params(h.max(1)[0])
        mean = params[:, :self.z_dim]
        logvar = params[:, self.z_dim:]
        return mean, logvar

    def kl(self, mean, logvar):
        """Per-dimension KL(q(z|x) || N(0, I))."""
        result = -0.5 * (logvar - torch.pow(mean, 2) - torch.exp(logvar) + 1)
        return result

    def forward(self, x, argmax=False, use_mean=False, gold_tree=None):
        """Return (-log Z, KL) for x (batch x n); with ``argmax`` also the
        Viterbi binary span matrix and spans.

        ``use_mean`` replaces the sampled z with the posterior mean;
        ``gold_tree`` enables the supervision mask inside the PCFG.
        """
        n = x.size(1)
        batch_size = x.size(0)
        if self.z_dim > 0:
            mean, logvar = self.enc(x)
            # KL term of the ELBO (computed once; the original code
            # redundantly evaluated it twice).
            kl = self.kl(mean, logvar).sum(1)
            # Reparameterization trick.
            z = mean.new(batch_size, mean.size(1)).normal_(0, 1)
            z = (0.5*logvar).exp()*z + mean
            if use_mean:
                z = mean
            self.z = z
        else:
            self.z = torch.zeros(batch_size, 1).cuda()
        t_emb = self.t_emb
        nt_emb = self.nt_emb
        nt_emb_emission = self.nt_emb_emission
        root_emb = self.root_emb
        root_emb = root_emb.expand(batch_size, self.state_dim)
        t_emb = t_emb.unsqueeze(0).unsqueeze(1).expand(batch_size, n, self.t_states, self.state_dim)
        nt_emb = nt_emb.unsqueeze(0).expand(batch_size, self.nt_states, self.state_dim)
        nt_emb_emission = nt_emb_emission.unsqueeze(0).expand(batch_size, self.nt_states, self.state_dim) \
            if not nt_emb_emission is None else None
        if self.z_dim > 0:
            # Condition every embedding on the sentence latent z.
            root_emb = torch.cat([root_emb, z], 1)
            z_expand = z.unsqueeze(1).expand(batch_size, n, self.z_dim)
            z_expand = z_expand.unsqueeze(2).expand(batch_size, n, self.t_states, self.z_dim)
            t_emb = torch.cat([t_emb, z_expand], 3)
            nt_emb = torch.cat([nt_emb, z.unsqueeze(1).expand(batch_size, self.nt_states,
                                                              self.z_dim)], 2)
            nt_emb_emission = torch.cat([nt_emb_emission, z.unsqueeze(1).expand(batch_size, self.nt_states,
                                                                                self.z_dim)], 2) \
                if not nt_emb_emission is None else None
        root_scores = F.log_softmax(self.root_mlp(root_emb), 1)
        # Emission embeddings for all symbols (nonterminals first, then
        # preterminals), optionally with a dedicated NT table.
        if nt_emb_emission is None:
            T_emb = torch.cat([nt_emb.unsqueeze(1).expand(-1, n, -1, -1),
                               t_emb], dim=2)
        else:
            T_emb = torch.cat([nt_emb_emission.unsqueeze(1).expand(-1, n, -1, -1),
                               t_emb], dim=2)
        if(self.flow_word_emb):
            unary = self.emit_prob(self.vocab_mlp(T_emb), x)
        else:
            unary_scores = F.log_softmax(self.emit_prob(self.vocab_mlp(T_emb)), 3)
            # Gather each position's emission score for the observed word.
            x_expand = x.unsqueeze(2).expand(batch_size, x.size(1), self.all_states).unsqueeze(3)
            unary = torch.gather(unary_scores, 3, x_expand).squeeze(3)
        unary = unary.refine_names('B', 'H', 'T')
        # Lexicalized parent representation: [head-word emb; parent NT emb (+z)].
        x_emb = self.word_emb(x)
        nt_x_emb = torch.cat([x_emb.unsqueeze(1).expand(-1, self.nt_states, -1, -1),
                              nt_emb.unsqueeze(2).expand(-1, -1, n, -1)], dim=3)
        if(not self.head_first):
            rule_score = F.log_softmax(self.rule_mlp(nt_x_emb), 3) # nt x t**2
            rule_scores = rule_score.view(batch_size, self.nt_states, n, self.all_states, self.all_states, 2)
        else:
            if self.variant in ['I', 'II']:
                rule_score = self.rule_mlp(nt_x_emb) # nt x t**2
                rule_scores = rule_score.view(batch_size, self.nt_states, n, self.all_states, self.all_states)
                head_score = F.log_softmax(self.head_mlp(nt_x_emb), 3) # nt x t**2
                head_scores = head_score.view(batch_size, self.nt_states, n, self.all_states, 2)
                # p(sibling | head) normalized per direction.
                left_scores = F.log_softmax(rule_scores, dim=4).unsqueeze(-1)
                right_scores = F.log_softmax(rule_scores, dim=3).unsqueeze(-1)
                rule_scores = torch.cat([head_scores[:, :, :, :, 0:1].unsqueeze(4) + left_scores,
                                         head_scores[:, :, :, :, 1:2].unsqueeze(3) + right_scores], dim=-1)
            elif self.variant == 'III':
                # Sibling distribution conditioned on the parent only.
                rule_score = self.rule_mlp(nt_emb.unsqueeze(2).expand(-1, -1, n, -1)) # nt x t**2
                rule_scores = rule_score.view(batch_size, self.nt_states, n, self.all_states, self.all_states)
                head_score = F.log_softmax(self.head_mlp(nt_x_emb), 3) # nt x t**2
                head_scores = head_score.view(batch_size, self.nt_states, n, self.all_states, 2)
                left_scores = F.log_softmax(rule_scores, dim=4).unsqueeze(-1)
                right_scores = F.log_softmax(rule_scores, dim=3).unsqueeze(-1)
                rule_scores = torch.cat([head_scores[:, :, :, :, 0:1].unsqueeze(4) + left_scores,
                                         head_scores[:, :, :, :, 1:2].unsqueeze(3) + right_scores], dim=-1)
            elif self.variant == 'IV':
                # Direction-specific sibling distributions.
                left_rule_score = self.left_rule_mlp(nt_x_emb) # nt x t**2
                right_rule_score = self.right_rule_mlp(nt_x_emb) # nt x t**2
                left_rule_scores = left_rule_score.view(batch_size, self.nt_states, n, self.all_states, self.all_states)
                right_rule_scores = right_rule_score.view(batch_size, self.nt_states, n, self.all_states, self.all_states)
                head_score = F.log_softmax(self.head_mlp(nt_x_emb), 3) # nt x t**2
                head_scores = head_score.view(batch_size, self.nt_states, n, self.all_states, 2)
                left_scores = F.log_softmax(left_rule_scores, dim=4).unsqueeze(-1)
                right_scores = F.log_softmax(right_rule_scores, dim=3).unsqueeze(-1)
                rule_scores = torch.cat([head_scores[:, :, :, :, 0:1].unsqueeze(4) + left_scores,
                                         head_scores[:, :, :, :, 1:2].unsqueeze(3) + right_scores], dim=-1)
            else:
                raise NotImplementedError
        rule_scores = rule_scores.refine_names('B', 'T', 'H', 'TL', 'TR', 'D')
        root_scores = root_scores.refine_names('B', 'T')
        log_Z = self.pcfg._inside(unary_scores = unary,
                                  rule_scores = rule_scores,
                                  root_scores = root_scores,
                                  gold_tree=gold_tree)
        if self.z_dim == 0:
            kl = torch.zeros_like(log_Z)
        # (A leftover `pdb.set_trace()` guard that fired when log_Z ≈ 0 was
        # removed: it was debug-only and forced a host sync every forward.)
        if argmax:
            with torch.no_grad():
                max_score, binary_matrix, spans = self.pcfg._viterbi(unary_scores = unary,
                                                                     rule_scores = rule_scores,
                                                                     root_scores = root_scores)
                self.tags = self.pcfg.argmax_tags
            return -log_Z, kl, binary_matrix, spans
        else:
            return -log_Z, kl
| 16,808 | 46.752841 | 116 | py |
neural-lpcfg | neural-lpcfg-master/train.py | #!/usr/bin/env python3
import sys
import os
import argparse
import json
import random
import shutil
import copy
from collections import defaultdict
import torch
from torch import cuda
import numpy as np
import time
import logging
from data import Dataset
from utils import *
from models import CompPCFG, LexicalizedCompPCFG
from torch.nn.init import xavier_uniform_
from torch.utils.tensorboard import SummaryWriter
# Optional NVIDIA Apex for mixed-precision training; degrade gracefully
# when it is not installed (APEX_AVAILABLE gates its use elsewhere).
try:
    from apex import amp
    APEX_AVAILABLE = True
except ModuleNotFoundError:
    APEX_AVAILABLE = False
import pdb
import warnings
warnings.filterwarnings("ignore")

# Command-line interface. NOTE: module-level parse_args() means this file
# has side effects on import; main() below consumes the resulting `args`.
parser = argparse.ArgumentParser()
# Program options
parser.add_argument('--mode', default='train', help='train/test')
parser.add_argument('--test_file', default='data/preprocessed/ptb-test.pkl')
# Data path options
parser.add_argument('--train_file', default='data/preprocessed/ptb-train.pkl')
parser.add_argument('--val_file', default='data/preprocessed/ptb-val.pkl')
parser.add_argument('--save_path', default='compound-pcfg.pt', help='where to save the model')
parser.add_argument('--pretrained_word_emb', default="", help="word emb file")
# Model options
parser.add_argument('--model', default='LexicalizedCompPCFG', type=str, help='model name')
parser.add_argument('--load_model', default='', type=str, help='checkpoint file of stored model')
parser.add_argument('--init_gain', default=1., type=float, help='gain of xaviar initialization')
parser.add_argument('--init_model', default='', help='initial lexicalized pcfg with compound pcfg')
# Generative model parameters
parser.add_argument('--z_dim', default=64, type=int, help='latent dimension')
parser.add_argument('--t_states', default=60, type=int, help='number of preterminal states')
parser.add_argument('--nt_states', default=30, type=int, help='number of nonterminal states')
parser.add_argument('--state_dim', default=256, type=int, help='symbol embedding dimension')
parser.add_argument('--nt_emission', action="store_true", help='allow a single word span with a non-terminal')
parser.add_argument('--scalar_dir_scores', action="store_true", help='using scalar dir scores instead neural ones')
parser.add_argument('--seperate_nt_emb_for_emission', action="store_true", help='seperate nt embeddings for emission probability')
parser.add_argument('--head_first', action="store_true", help="first generate head and direction")
parser.add_argument('--tie_word_emb', action="store_true", help="tie the word embeddings")
parser.add_argument('--flow_word_emb', action="store_true", help="emit words via invertible flow")
parser.add_argument('--freeze_word_emb', action="store_true", help="freeze word embeddings")
# Inference network parameters
parser.add_argument('--h_dim', default=512, type=int, help='hidden dim for variational LSTM')
parser.add_argument('--w_dim', default=512, type=int, help='embedding dim for variational LSTM')
# Optimization options
parser.add_argument('--num_epochs', default=10, type=int, help='number of training epochs')
parser.add_argument('--lr', default=0.001, type=float, help='starting learning rate')
parser.add_argument('--delay_step', default=1, type=int, help='number of backprop before step')
parser.add_argument('--max_grad_norm', default=3, type=float, help='gradient clipping parameter')
parser.add_argument('--max_length', default=30, type=float, help='max sentence length cutoff start')
parser.add_argument('--len_incr', default=1, type=int, help='increment max length each epoch')
parser.add_argument('--final_max_length', default=40, type=int, help='final max length cutoff')
parser.add_argument('--eval_max_length', default=None, type=int, help='max length in evaluation. set to the same as final_max_length by default')
parser.add_argument('--beta1', default=0.75, type=float, help='beta1 for adam')
parser.add_argument('--beta2', default=0.999, type=float, help='beta2 for adam')
parser.add_argument('--gpu', default=0, type=int, help='which gpu to use')
parser.add_argument('--seed', default=3435, type=int, help='random seed')
parser.add_argument('--print_every', type=int, default=1000, help='print stats after N batches')
parser.add_argument('--supervised_signals', nargs="*", default = [], help="supervised signals to use")
parser.add_argument('--opt_level', type=str, default="O0", help="mixed precision")
parser.add_argument('--t_emb_init', type=str, default="", help="initial value of t_emb")
parser.add_argument('--vocab_mlp_identity_init', action='store_true', help="initialize vocab_mlp as identity function")
# Evaluation options
parser.add_argument('--evaluate_dep', action='store_true', help='evaluate dependency parsing results')
parser.add_argument('--log_dir', type=str, default="", help='tensorboard logdir')
args = parser.parse_args()
# --eval_max_length defaults to the final training cutoff.
if(args.eval_max_length is None):
    args.eval_max_length = args.final_max_length
# tensorboard: default run directory unless --log_dir is given.
if(args.log_dir == ""):
    writer = SummaryWriter()
else:
    writer = SummaryWriter(log_dir=args.log_dir)
# Global batch counter shared by the training loop and add_scalars().
global_step = 0
def add_scalars(main_tag, tag_scalar_dict, global_step):
    """Log each scalar in ``tag_scalar_dict`` to the module-level tensorboard
    ``writer`` under the tag ``main_tag/<tag>`` at ``global_step``."""
    for tag, value in tag_scalar_dict.items():
        writer.add_scalar("{}/{}".format(main_tag, tag), value, global_step)
def main(args):
  """Train (or test) a compound / lexicalized compound PCFG.

  In ``train`` mode: runs the full curriculum-based training loop
  (sentence-length cutoff grows each epoch), evaluates on the validation
  set after every epoch, and checkpoints to ``args.save_path`` whenever
  the validation perplexity bound improves.  In any other mode: loads
  ``args.test_file`` and reports metrics once via ``eval``.

  Metrics are mirrored to the module-level tensorboard ``writer``.
  """
  global global_step
  # Seed numpy (batch order) and torch (init / sampling) for reproducibility.
  np.random.seed(args.seed)
  torch.manual_seed(args.seed)
  # ---- data loading -------------------------------------------------------
  if(args.mode == 'train'):
    train_data = Dataset(args.train_file, load_dep=args.evaluate_dep)
    val_data = Dataset(args.val_file, load_dep=args.evaluate_dep)
    train_sents = train_data.batch_size.sum()
    vocab_size = int(train_data.vocab_size)
    max_len = max(val_data.sents.size(1), train_data.sents.size(1))
    print('Train: %d sents / %d batches, Val: %d sents / %d batches' %
          (train_data.sents.size(0), len(train_data), val_data.sents.size(0), len(val_data)))
    if(not args.pretrained_word_emb == ""):
      pretrained_word_emb_matrix = get_word_emb_matrix(args.pretrained_word_emb, train_data.idx2word)
    else:
      pretrained_word_emb_matrix = None
  else:
    test_data = Dataset(args.test_file, load_dep=args.evaluate_dep)
    vocab_size = int(test_data.vocab_size)
    max_len = test_data.sents.size(1)
    print("Test: %d sents / %d batches" % (test_data.sents.size(0), len(test_data)))
    if(not args.pretrained_word_emb == ""):
      pretrained_word_emb_matrix = get_word_emb_matrix(args.pretrained_word_emb, test_data.idx2word)
    else:
      pretrained_word_emb_matrix = None
  print('Vocab size: %d, Max Sent Len: %d' % (vocab_size, max_len))
  print('Save Path', args.save_path)
  cuda.set_device(args.gpu)
  # ---- model construction -------------------------------------------------
  if(args.model == 'CompPCFG'):
    model = CompPCFG(vocab = vocab_size,
                     state_dim = args.state_dim,
                     t_states = args.t_states,
                     nt_states = args.nt_states,
                     h_dim = args.h_dim,
                     w_dim = args.w_dim,
                     z_dim = args.z_dim)
    init_model = None
  elif(args.model == 'LexicalizedCompPCFG'):
    if args.init_model != '':
      # A pretrained CompPCFG whose argmax parses will be used as
      # (pseudo-)supervision for the lexicalized model.
      init_model = CompPCFG(vocab = vocab_size,
                       state_dim = args.state_dim,
                       t_states = args.t_states,
                       nt_states = args.nt_states,
                       h_dim = args.h_dim,
                       w_dim = args.w_dim,
                       z_dim = args.z_dim)
      init_model.load_state_dict(torch.load(args.init_model)["model"])
      args.supervised_signals = ["phrase", "tag", "nt"]
    else:
      init_model = None
    model = LexicalizedCompPCFG(vocab = vocab_size,
                     state_dim = args.state_dim,
                     t_states = args.t_states,
                     nt_states = args.nt_states,
                     h_dim = args.h_dim,
                     w_dim = args.w_dim,
                     z_dim = args.z_dim,
                     nt_emission=args.nt_emission,
                     scalar_dir_scores=args.scalar_dir_scores,
                     seperate_nt_emb_for_emission=args.seperate_nt_emb_for_emission,
                     head_first=args.head_first,
                     tie_word_emb=args.tie_word_emb,
                     flow_word_emb=args.flow_word_emb,
                     freeze_word_emb=args.freeze_word_emb,
                     pretrained_word_emb=pretrained_word_emb_matrix,
                     supervised_signals=args.supervised_signals)
  else:
    raise NotImplementedError
  # ---- parameter initialization -------------------------------------------
  # Xavier-init every weight matrix (dim > 1 skips biases / embTables of rank 1).
  for name, param in model.named_parameters():
    if param.dim() > 1:
      xavier_uniform_(param, args.init_gain)
  if(args.t_emb_init != ""):
    t_emb_init = np.loadtxt(args.t_emb_init)
    model.t_emb.data.copy_(torch.from_numpy(t_emb_init))
  if(args.vocab_mlp_identity_init):
    # Initialize the first vocab_mlp layer as the identity on the state
    # dims and zero on the z dims.
    model.vocab_mlp[0].bias.data.copy_(torch.zeros(args.state_dim))
    model.vocab_mlp[0].weight.data.copy_(torch.cat([torch.eye(args.state_dim, args.state_dim), torch.zeros(args.state_dim, args.z_dim)], dim=1))
  if(args.load_model != ''):
    print("Loading model from {}.".format(args.load_model))
    model.load_state_dict(torch.load(args.load_model)["model"])
    print("Model loaded from {}.".format(args.load_model))
  print("model architecture")
  print(model)
  model.train()
  model.cuda()
  if init_model:
    init_model.eval()
    init_model.cuda()
  optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas = (args.beta1, args.beta2))
  if args.opt_level != "O0":
    # Mixed precision: shrink the -inf stand-in so it fits in fp16 range,
    # then let apex.amp wrap model + optimizer.
    model.pcfg.huge = 1e4
    model, optimizer = amp.initialize(
        model, optimizer, opt_level=args.opt_level,
        keep_batchnorm_fp32=True, loss_scale="dynamic"
    )
  if(args.mode == "test"):
    print('--------------------------------')
    print('Checking validation perf...')
    test_ppl, test_f1 = eval(test_data, model)
    print('--------------------------------')
    return
  # ---- training loop ------------------------------------------------------
  best_val_ppl = 1e5
  best_val_f1 = 0
  epoch = 0
  while epoch < args.num_epochs:
    start_time = time.time()
    epoch += 1
    print('Starting epoch %d' % epoch)
    train_nll = 0.
    train_kl = 0.
    num_sents = 0.
    num_words = 0.
    all_stats = [[0., 0., 0.]]
    if(args.evaluate_dep):
      dep_stats = [[0., 0., 0.]]
    # b: batches seen, b_: batches actually trained on (after length filter)
    b = b_ = 0
    # gradient accumulation counter: step the optimizer every `delay_step` backprops
    optimization_delay_count_down = args.delay_step
    for i in np.random.permutation(len(train_data)):
      b += 1
      gold_tree = None
      if(not args.evaluate_dep):
        sents, length, batch_size, _, _, gold_spans, gold_binary_trees, _ = train_data[i]
      else:
        sents, length, batch_size, gold_tags, gold_actions, gold_spans, gold_binary_trees, _, heads = train_data[i]
        if(len(args.supervised_signals)):
          # Build head-annotated gold trees: map each span to (head, label id),
          # stripping functional annotations like "NP-SBJ" / "NP=2" -> "NP".
          gold_tree = []
          for j in range(len(heads)):
            gold_tree.append(get_span2head(gold_spans[j], heads[j], gold_actions=gold_actions[j], gold_tags=gold_tags[j]))
            for span, (head, label) in gold_tree[j].items():
              if(span[0] == span[1]):
                gold_tree[j][span] = (head, PT2ID[label])
              else:
                f = lambda x : x[:x.find('-')] if x.find('-') != -1 else x
                g = lambda y : y[:y.find('=')] if y.find('=') != -1 else y
                gold_tree[j][span] = (head, NT2ID[f(g(label))])
      if length > args.max_length or length == 1: #length filter based on curriculum
        continue
      b_ += 1
      sents = sents.cuda()
      if init_model:
        # Replace the gold supervision with the pretrained model's argmax
        # parse (head index -1 marks "no head supervision").
        gold_tree = []
        with torch.no_grad():
          _, _, _, argmax_spans = init_model(sents, argmax=True)
          for j in range(len(argmax_spans)):
            gold_tree.append({})
            for span in argmax_spans[j]:
              if(span[0] == span[1]):
                gold_tree[j][(span[0], span[1])] = (-1, span[2] - args.nt_states)
              else:
                gold_tree[j][(span[0], span[1])] = (-1, span[2])
      nll, kl, binary_matrix, argmax_spans = model(sents, argmax=True, gold_tree=gold_tree)
      # ELBO objective: reconstruction NLL + KL of the latent z.
      loss = (nll + kl).mean()
      if(args.opt_level != "O0"):
        with amp.scale_loss(loss, optimizer) as scaled_loss:
          scaled_loss.backward()
      else:
        loss.backward()
      train_nll += nll.sum().item()
      train_kl += kl.sum().item()
      if(optimization_delay_count_down == 1):
        # Accumulated enough batches: clip, step, and reset the counter.
        if args.max_grad_norm > 0:
          if args.opt_level == "O0":
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
          else:
            torch.nn.utils.clip_grad_norm_(amp.master_params(
                optimizer), args.max_grad_norm)
        optimizer.step()
        optimizer.zero_grad()
        optimization_delay_count_down = args.delay_step
      else:
        optimization_delay_count_down -= 1
      num_sents += batch_size
      num_words += batch_size * (length + 1) # we implicitly generate </s> so we explicitly count it
      # Accumulate unlabeled span (and optionally dependency) statistics.
      for bb in range(batch_size):
        span_b = [(a[0], a[1]) for a in argmax_spans[bb] if a[0] != a[1]] #ignore labels
        span_b_set = set(span_b[:-1])
        update_stats(span_b_set, [set(gold_spans[bb][:-1])], all_stats)
        if(args.evaluate_dep):
          update_dep_stats(argmax_spans[bb], heads[bb], dep_stats)
      if b_ % args.print_every == 0:
        # ---- periodic logging ----
        all_f1 = get_f1(all_stats)
        dir_acc, undir_acc = get_dep_acc(dep_stats) if args.evaluate_dep else (0., 0.)
        param_norm = sum([p.norm()**2 for p in model.parameters()]).item()**0.5
        gparam_norm = sum([p.grad.norm()**2 for p in model.parameters()
                           if p.grad is not None]).item()**0.5
        log_str = 'Epoch: %d, Batch: %d/%d, |Param|: %.6f, |GParam|: %.2f, LR: %.4f, ' + \
                  'ReconPPL: %.2f, NLLloss: %.4f, KL: %.4f, PPLBound: %.2f, ValPPL: %.2f, ValF1: %.2f, ' + \
                  'CorpusF1: %.2f, DirAcc: %.2f, UndirAcc: %.2f, Throughput: %.2f examples/sec'
        print(log_str %
              (epoch, b, len(train_data), param_norm, gparam_norm, args.lr,
               np.exp(train_nll / num_words), train_nll / num_words, train_kl /num_sents,
               np.exp((train_nll + train_kl)/num_words), best_val_ppl, best_val_f1,
               all_f1[0], dir_acc, undir_acc, num_sents / (time.time() - start_time)))
        # print an example parse
        tree = get_tree_from_binary_matrix(binary_matrix[0], length)
        action = get_actions(tree)
        sent_str = [train_data.idx2word[word_idx] for word_idx in list(sents[0].cpu().numpy())]
        if(args.evaluate_dep):
          print("Pred Tree: %s" % get_tagged_parse(get_tree(action, sent_str), argmax_spans[0]))
        else:
          print("Pred Tree: %s" % get_tree(action, sent_str))
        print("Gold Tree: %s" % get_tree(gold_binary_trees[0], sent_str))
        # tensorboard
        global_step += args.print_every
        add_scalars(main_tag="train",
                    tag_scalar_dict={"ParamNorm": param_norm,
                                     "ParamGradNorm": gparam_norm,
                                     "ReconPPL": np.exp(train_nll / num_words),
                                     "KL": train_kl /num_sents,
                                     "PPLBound": np.exp((train_nll + train_kl)/num_words),
                                     "CorpusF1": all_f1[0],
                                     "DirAcc": dir_acc,
                                     "UndirAcc": undir_acc,
                                     "Throughput (examples/sec)": num_sents / (time.time() - start_time),
                                     "GPU memory usage": torch.cuda.memory_allocated()},
                    global_step=global_step)
        if(args.evaluate_dep):
          writer.add_text("Pred Tree", get_tagged_parse(get_tree(action, sent_str), argmax_spans[0]), global_step)
        else:
          writer.add_text("Pred Tree", get_tree(action, sent_str), global_step)
        writer.add_text("Gold Tree", get_tree(gold_binary_trees[0], sent_str), global_step)
    # Curriculum: raise the sentence-length cutoff for the next epoch.
    args.max_length = min(args.final_max_length, args.max_length + args.len_incr)
    print('--------------------------------')
    print('Checking validation perf...')
    val_ppl, val_f1 = eval(val_data, model)
    print('--------------------------------')
    if val_ppl < best_val_ppl:
      # New best validation perplexity: checkpoint model + vocab.
      best_val_ppl = val_ppl
      best_val_f1 = val_f1
      checkpoint = {
        'args': args.__dict__,
        'model': model.cpu().state_dict(),
        'word2idx': train_data.word2idx,
        'idx2word': train_data.idx2word
      }
      print('Saving checkpoint to %s' % args.save_path)
      torch.save(checkpoint, args.save_path)
      # model.cpu() above moved parameters to host; move back for training.
      model.cuda()
def eval(data, model):
  """Evaluate `model` on `data` and return (PPL upper bound, sentence F1 * 100).

  Computes the reconstruction PPL, KL, and the ELBO-based perplexity bound,
  plus unlabeled corpus-level and sentence-level F1 against the gold spans.
  When `args.evaluate_dep` is set, also reports directed/undirected
  dependency accuracy and per-category span recall (these need the
  head/tag annotations the data loader only provides in that mode).
  All metrics are mirrored to the module-level tensorboard `writer`;
  the model is restored to train mode before returning.
  """
  global global_step
  model.eval()
  num_sents = 0
  num_words = 0
  total_nll = 0.
  total_kl = 0.
  # (tp, fp, fn) accumulated over the whole corpus
  corpus_f1 = [0., 0., 0.]
  # [0]: true positives per gold category, [1]: gold count per category,
  # [2]: recall per category (filled in after the loop)
  corpus_f1_by_cat = [defaultdict(int), defaultdict(int), defaultdict(int)]
  dep_stats = [[0., 0., 0.]]
  sent_f1 = []
  with torch.no_grad():
    for i in range(len(data)):
      # The loader only returns tags/heads when it was built with load_dep.
      if(not args.evaluate_dep):
        sents, length, batch_size, _, gold_actions, gold_spans, gold_binary_trees, other_data = data[i]
      else:
        sents, length, batch_size, gold_tags, gold_actions, gold_spans, gold_binary_trees, other_data, heads = data[i]
      if length == 1 or length > args.eval_max_length:
        continue
      sents = sents.cuda()
      # note that for unsupervised parsing, we should do model(sents, argmax=True, use_mean = True)
      # but we don't for eval since we want a valid upper bound on PPL for early stopping
      # see eval.py for proper MAP inference
      nll, kl, binary_matrix, argmax_spans = model(sents, argmax=True)
      total_nll += nll.sum().item()
      total_kl += kl.sum().item()
      num_sents += batch_size
      num_words += batch_size * (length + 1)  # we implicitly generate </s> so we explicitly count it
      gold_tree = []
      if(args.evaluate_dep):
        # BUGFIX: `heads` / `gold_tags` only exist in evaluate_dep mode, so the
        # head-annotated gold trees (used by the per-category stats below) are
        # only built in that case; the original built them unconditionally and
        # raised NameError whenever evaluate_dep was off.
        for j in range(len(heads)):
          gold_tree.append(get_span2head(gold_spans[j], heads[j], gold_actions=gold_actions[j], gold_tags=gold_tags[j]))
          for span, (head, label) in gold_tree[j].items():
            if(span[0] == span[1]):
              gold_tree[j][span] = (head, PT2ID[label])
            else:
              # strip functional annotations, e.g. "NP-SBJ" / "NP=2" -> "NP"
              f = lambda x : x[:x.find('-')] if x.find('-') != -1 else x
              g = lambda y : y[:y.find('=')] if y.find('=') != -1 else y
              gold_tree[j][span] = (head, f(g(label)))
      for b in range(batch_size):
        span_b = [(a[0], a[1]) for a in argmax_spans[b] if a[0] != a[1]]  # ignore labels
        span_b_set = set(span_b[:-1])  # drop the trivial whole-sentence span
        gold_b_set = set(gold_spans[b][:-1])
        tp, fp, fn = get_stats(span_b_set, gold_b_set)
        corpus_f1[0] += tp
        corpus_f1[1] += fp
        corpus_f1[2] += fn
        if(args.evaluate_dep):
          tp_by_cat, all_by_cat = get_stats_by_cat(span_b_set, gold_b_set, gold_tree[b])
          for j in tp_by_cat:
            corpus_f1_by_cat[0][j] += tp_by_cat[j]
          for j in all_by_cat:
            corpus_f1_by_cat[1][j] += all_by_cat[j]
        # sent-level F1 is based on L83-89 from https://github.com/yikangshen/PRPN/test_phrase_grammar.py
        model_out = span_b_set
        std_out = gold_b_set
        overlap = model_out.intersection(std_out)
        prec = float(len(overlap)) / (len(model_out) + 1e-8)
        reca = float(len(overlap)) / (len(std_out) + 1e-8)
        if len(std_out) == 0:
          reca = 1.
        if len(model_out) == 0:
          prec = 1.
        f1 = 2 * prec * reca / (prec + reca + 1e-8)
        sent_f1.append(f1)
        if(args.evaluate_dep):
          update_dep_stats(argmax_spans[b], heads[b], dep_stats)
  tp, fp, fn = corpus_f1
  # Guard against an empty evaluation set / no predicted spans.
  prec = tp / (tp + fp) if tp + fp > 0 else 0.
  recall = tp / (tp + fn) if tp + fn > 0 else 0.
  corpus_f1 = 2*prec*recall/(prec+recall) if prec+recall > 0 else 0.
  for j in corpus_f1_by_cat[1]:
    # BUGFIX: divide the per-category counters, not the defaultdicts themselves
    # (the original `corpus_f1_by_cat[0] / corpus_f1_by_cat[1]` raised TypeError).
    corpus_f1_by_cat[2][j] = corpus_f1_by_cat[0][j] / corpus_f1_by_cat[1][j]
  sent_f1 = np.mean(np.array(sent_f1))
  dir_acc, undir_acc = get_dep_acc(dep_stats) if args.evaluate_dep else (0., 0.)
  recon_ppl = np.exp(total_nll / num_words)
  ppl_elbo = np.exp((total_nll + total_kl)/num_words)
  kl = total_kl /num_sents
  print('ReconPPL: %.2f, KL: %.4f, NLLloss: %.4f, PPL (Upper Bound): %.2f' %
        (recon_ppl, kl, total_nll / num_words, ppl_elbo))
  print('Corpus F1: %.2f, Sentence F1: %.2f' %
        (corpus_f1*100, sent_f1*100))
  if(args.evaluate_dep):
    print('DirAcc: %.2f, UndirAcc: %.2f'%(dir_acc, undir_acc))
    print('Corpus Recall by Category: {}'.format(corpus_f1_by_cat[2]))
  # tensorboard
  add_scalars(main_tag="validation",
              tag_scalar_dict={"ReconPPL": recon_ppl,
                               "KL": kl,
                               "PPL (Upper Bound)": ppl_elbo,
                               "Corpus F1": corpus_f1 * 100,
                               "Sentence F1": sent_f1*100,
                               "DirAcc": dir_acc if args.evaluate_dep else 0,
                               "UndirAcc": undir_acc if args.evaluate_dep else 0},
              global_step=global_step)
  model.train()
  return ppl_elbo, sent_f1*100
if __name__ == '__main__':
  # `args` and the tensorboard writer are created at module import time
  # (see the argparse / SummaryWriter block above).
  main(args)
| 21,640 | 46.045652 | 145 | py |
neural-lpcfg | neural-lpcfg-master/PCFG.py | #!/usr/bin/env python3
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import itertools
import random
class PCFG(nn.Module):
  """Batched CKY inside / Viterbi routines for a binary-branching PCFG.

  Symbol-index convention: indices [0, nt_states) are nonterminals,
  [nt_states, nt_states + t_states) are preterminals (word emitters).
  All scores handed to `_inside` / `_viterbi` are log-potentials.
  """
  def __init__(self, nt_states, t_states):
    super(PCFG, self).__init__()
    self.nt_states = nt_states
    self.t_states = t_states
    # total number of grammar symbols
    self.states = nt_states + t_states
    # large finite stand-in for +inf in log space (the trainer lowers this
    # to 1e4 under mixed precision so it fits in fp16)
    self.huge = 1e9

  def logadd(self, x, y):
    # numerically stable log(exp(x) + exp(y))
    d = torch.max(x,y)
    return torch.log(torch.exp(x-d) + torch.exp(y-d)) + d

  def logsumexp(self, x, dim=1):
    # numerically stable log-sum-exp reduction along `dim`
    d = torch.max(x, dim)[0]
    if x.dim() == 1:
      return torch.log(torch.exp(x - d).sum(dim)) + d
    else:
      return torch.log(torch.exp(x - d.unsqueeze(dim).expand_as(x)).sum(dim)) + d

  def _inside(self, unary_scores, rule_scores, root_scores):
    #inside step
    #unary scores : b x n x T
    #rule scores : b x NT x (NT+T) x (NT+T)
    #root : b x NT
    # Returns log Z (the log partition function), shape [b].
    # statistics
    batch_size = unary_scores.size(0)
    n = unary_scores.size(1)
    # uses conventional python numbering scheme: [s, t] represents span [s, t)
    # this scheme facilitates fast computation
    # f[s, t] = logsumexp(f[s, :] * f[:, t])
    self.beta = unary_scores.new(batch_size, n + 1, n + 1, self.states).fill_(-self.huge)
    # initialization: f[k, k+1]
    # (length-1 spans can only be preterminals, scored by the unaries)
    for k in range(n):
      for state in range(self.t_states):
        self.beta[:, k, k+1, self.nt_states + state] = unary_scores[:, k, state]
    # span length w, at least 2
    for w in np.arange(2, n+1):
      # start point s
      for s in range(n-w+1):
        t = s + w
        # marginalize jointly over (split point, left child, right child)
        f = lambda x:torch.logsumexp(x.view(batch_size, self.nt_states, -1), dim=2)
        # The three branches below enumerate which child spans can be
        # preterminals (length 1) vs nonterminals (length >= 2):
        if w == 2:
          # both children are single words -> both preterminal
          tmp = self.beta[:, s, s+1, self.nt_states:].unsqueeze(2).unsqueeze(1) \
              + self.beta[:, s+1, t, self.nt_states:].unsqueeze(1).unsqueeze(2) \
              + rule_scores[:, :, self.nt_states:, self.nt_states:]
          tmp = f(tmp)
        elif w == 3:
          # split is 1+2 (preterminal left) or 2+1 (preterminal right)
          tmp1 = self.beta[:, s, s+1, self.nt_states:].unsqueeze(2).unsqueeze(1) \
               + self.beta[:, s+1, t, :self.nt_states].unsqueeze(1).unsqueeze(2) \
               + rule_scores[:, :, self.nt_states:, :self.nt_states]
          tmp2 = self.beta[:, s, t-1, :self.nt_states].unsqueeze(2).unsqueeze(1) \
               + self.beta[:, t-1, t, self.nt_states:].unsqueeze(1).unsqueeze(2) \
               + rule_scores[:, :, :self.nt_states, self.nt_states:]
          tmp = self.logadd(f(tmp1), f(tmp2))
        elif w >= 4:
          # edge splits (preterminal at either end) plus all interior
          # splits where both children are nonterminals
          tmp1 = self.beta[:, s, s+1, self.nt_states:].unsqueeze(2).unsqueeze(1) \
               + self.beta[:, s+1, t, :self.nt_states].unsqueeze(1).unsqueeze(2) \
               + rule_scores[:, :, self.nt_states:, :self.nt_states]
          tmp2 = self.beta[:, s, t-1, :self.nt_states].unsqueeze(2).unsqueeze(1) \
               + self.beta[:, t-1, t, self.nt_states:].unsqueeze(1).unsqueeze(2) \
               + rule_scores[:, :, :self.nt_states, self.nt_states:]
          tmp3 = self.beta[:, s, s+2:t-1, :self.nt_states].unsqueeze(3).unsqueeze(1) \
               + self.beta[:, s+2:t-1, t, :self.nt_states].unsqueeze(1).unsqueeze(3) \
               + rule_scores[:, :, :self.nt_states, :self.nt_states].unsqueeze(2)
          tmp = self.logadd(self.logadd(f(tmp1), f(tmp2)), f(tmp3))
        self.beta[:, s, t, :self.nt_states] = tmp
    # combine the full-sentence span with the root distribution
    log_Z = self.beta[:, 0, n, :self.nt_states] + root_scores
    log_Z = self.logsumexp(log_Z, 1)
    return log_Z

  def _viterbi(self, unary_scores, rule_scores, root_scores):
    #unary scores : b x n x T
    #rule scores : b x NT x (NT+T) x (NT+T)
    # Max-product variant of `_inside`; returns (chart scores, a binary
    # span-indicator matrix, and per-sentence lists of (s, t, state) spans).
    batch_size = unary_scores.size(0)
    n = unary_scores.size(1)
    # dummy rules
    # (pad rule_scores so preterminals exist as parents with -inf score,
    # letting one max run over all `states` parents at once)
    rule_scores = torch.cat([rule_scores, \
                             rule_scores.new(batch_size, self.t_states, self.states, self.states) \
                             .fill_(-self.huge)], dim=1)
    self.scores = unary_scores.new(batch_size, n+1, n+1, self.states).fill_(-self.huge)
    # backpointers: split point, left-child state, right-child state
    self.bp = unary_scores.new(batch_size, n+1, n+1, self.states).fill_(-1)
    self.left_bp = unary_scores.new(batch_size, n+1, n+1, self.states).fill_(-1)
    self.right_bp = unary_scores.new(batch_size, n+1, n+1, self.states).fill_(-1)
    self.argmax = unary_scores.new(batch_size, n, n).fill_(-1)
    self.argmax_tags = unary_scores.new(batch_size, n).fill_(-1)
    self.spans = [[] for _ in range(batch_size)]
    for k in range(n):
      for state in range(self.t_states):
        self.scores[:, k, k + 1, self.nt_states + state] = unary_scores[:, k, state]
    for w in np.arange(2, n+1):
      for s in range(n-w+1):
        t = s + w
        tmp = self.scores[:, s, s+1:t, :].unsqueeze(3).unsqueeze(1) \
              + self.scores[:, s+1:t, t, :].unsqueeze(1).unsqueeze(3) \
              + rule_scores.unsqueeze(2)
        # view once and marginalize
        tmp, max_pos = torch.max(tmp.view(batch_size, self.states, -1), dim=2)
        # step by step marginalization
        # tmp = self.logsumexp(tmp, dim=4)
        # tmp = self.logsumexp(tmp, dim=3)
        # tmp = self.logsumexp(tmp, dim=2)
        # Decode the flat argmax position into (split, left, right).
        # NOTE(review): on modern PyTorch `/` on integer tensors is true
        # division (float result); the values are only consumed through
        # int() in _backtrack, where truncation recovers the intended floor
        # for these non-negative indices — floor division would be clearer.
        # TODO confirm and switch to `//` / torch.div(rounding_mode='floor').
        max_idx = max_pos / (self.states * self.states) + s + 1
        left_child = (max_pos % (self.states * self.states)) / self.states
        right_child = max_pos % self.states
        self.scores[:, s, t, :self.nt_states] = tmp[:, :self.nt_states]
        self.bp[:, s, t, :self.nt_states] = max_idx[:, :self.nt_states]
        self.left_bp[:, s, t, :self.nt_states] = left_child[:, :self.nt_states]
        self.right_bp[:, s, t, :self.nt_states] = right_child[:, :self.nt_states]
    max_score = self.scores[:, 0, n, :self.nt_states] + root_scores
    max_score, max_idx = torch.max(max_score, 1)
    for b in range(batch_size):
      self._backtrack(b, 0, n, max_idx[b].item())
    return self.scores, self.argmax, self.spans

  def _backtrack(self, b, s, t, state):
    # Recursively follow the backpointers for batch element `b`, recording
    # spans (converted to inclusive [s, t-1] indexing) front-to-back.
    u = int(self.bp[b][s][t][state])
    assert(s < t), "s: %d, t %d"%(s, t)
    left_state = int(self.left_bp[b][s][t][state])
    right_state = int(self.right_bp[b][s][t][state])
    self.argmax[b][s][t-1] = 1
    if s == t-1:
      # length-1 span: a preterminal tag
      self.spans[b].insert(0, (s, t-1, state))
      self.argmax_tags[b][s] = state - self.nt_states
      return None
    else:
      self.spans[b].insert(0, (s, t-1, state))
      self._backtrack(b, s, u, left_state)
      self._backtrack(b, u, t, right_state)
      return None
| 6,435 | 39.734177 | 99 | py |
RPMG | RPMG-main/ModelNet_PC/model.py | import torch
import torch.nn as nn
import sys
import os
from os.path import join as pjoin
BASEPATH = os.path.dirname(__file__)
sys.path.insert(0,pjoin(BASEPATH, '../..'))
import utils.tools as tools
from pointnets import PointNet2_cls
class Model(nn.Module):
    """Regress a rotation from a point cloud.

    A PointNet++ classifier produces a raw rotation representation whose
    dimensionality is determined by `out_rotation_mode`; `forward` converts
    that raw vector into a proper 3x3 rotation matrix with the matching
    routine from `utils.tools`.
    """

    def __init__(self, out_rotation_mode="Quaternion"):
        super(Model, self).__init__()
        self.out_rotation_mode = out_rotation_mode
        # representation name -> dimensionality of the raw network output
        mode_dims = {
            "Quaternion": 4,
            "ortho6d": 6,
            "svd9d": 9,
            "10d": 10,
            "euler": 3,
            "axisangle": 4,
        }
        if out_rotation_mode not in mode_dims:
            raise NotImplementedError
        self.out_channel = mode_dims[out_rotation_mode]
        print(out_rotation_mode)
        self.model = PointNet2_cls(self.out_channel)

    #pt b*point_num*3
    def forward(self, input):
        """Return (rotation matrices, raw network output) for a batch."""
        out_nd = self.model(input)
        # dispatch to the converter matching the configured representation
        converters = {
            "Quaternion": tools.compute_rotation_matrix_from_quaternion,
            "ortho6d": tools.compute_rotation_matrix_from_ortho6d,
            "svd9d": tools.symmetric_orthogonalization,
            "10d": tools.compute_rotation_matrix_from_10d,
            "euler": tools.compute_rotation_matrix_from_euler,
            "axisangle": tools.compute_rotation_matrix_from_axisAngle,
        }
        out_rmat = converters[self.out_rotation_mode](out_nd)  # b*3*3
        return out_rmat, out_nd
| 2,032 | 29.343284 | 84 | py |
RPMG | RPMG-main/ModelNet_PC/pointnet_utils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from time import time
import numpy as np
def timeit(tag, t):
    """Print the seconds elapsed since `t`, labelled `tag`, and return a fresh timestamp."""
    elapsed = time() - t
    print("{}: {}s".format(tag, elapsed))
    return time()
def square_distance(src, dst):
    """
    Calculate Euclid distance between each two points.

    Uses the expansion ||s - d||^2 = ||s||^2 + ||d||^2 - 2 * s.d so the
    cross term becomes a single batched matmul.

    Input:
        src: source points, [B, N, C]
        dst: target points, [B, M, C]
    Output:
        dist: per-point square distance, [B, N, M]
    """
    B, N, _ = src.shape
    _, M, _ = dst.shape
    cross = torch.matmul(src, dst.permute(0, 2, 1))          # [B, N, M]
    src_sq = torch.sum(src ** 2, -1).view(B, N, 1)           # ||s||^2
    dst_sq = torch.sum(dst ** 2, -1).view(B, 1, M)           # ||d||^2
    return src_sq + dst_sq - 2 * cross
def index_points(points, idx):
    """
    Gather per-batch point subsets: result[b, ...] = points[b, idx[b, ...], :].

    Input:
        points: input points data, [B, N, C]
        idx: sample index data, [B, S] (or [B, S, K], etc.)
    Return:
        new_points: indexed points data, [B, S, C] (extra idx dims preserved)
    """
    B = points.shape[0]
    # broadcast a [B, 1, ..., 1] batch-index tensor to idx's shape
    lead_shape = [B] + [1] * (idx.dim() - 1)
    batch_indices = torch.arange(B, dtype=torch.long, device=points.device) \
        .view(lead_shape).expand_as(idx)
    return points[batch_indices, idx, :]
def farthest_point_sample(xyz, npoint):
    """
    Iterative furthest point sampling.

    Input:
        xyz: pointcloud data, [B, N, 3]
        npoint: number of samples
    Return:
        centroids: sampled pointcloud index, [B, npoint]
    """
    device = xyz.device
    B, N, C = xyz.shape
    selected = torch.zeros(B, npoint, dtype=torch.long).to(device)
    # squared distance from each point to its nearest already-selected centroid
    nearest_dist = torch.ones(B, N).to(device) * 1e10
    # random starting point per batch element (seed-dependent)
    current = torch.randint(0, N, (B,), dtype=torch.long).to(device)
    batch_range = torch.arange(B, dtype=torch.long).to(device)
    for step in range(npoint):
        selected[:, step] = current
        anchor = xyz[batch_range, current, :].view(B, 1, 3)
        d = torch.sum((xyz - anchor) ** 2, -1)
        nearest_dist = torch.min(nearest_dist, d)
        # next centroid: the point furthest from all selected so far
        current = torch.max(nearest_dist, -1)[1]
    return selected
def query_ball_point(radius, nsample, xyz, new_xyz):
    """
    Input:
        radius: local region radius
        nsample: max sample number in local region
        xyz: all points, [B, N, 3]
        new_xyz: query points, [B, S, 3]
    Return:
        group_idx: grouped points index, [B, S, nsample]
    """
    device = xyz.device
    B, N, C = xyz.shape
    _, S, _ = new_xyz.shape
    # squared query-to-point distances (||q||^2 + ||p||^2 - 2 q.p), inlined
    # with the same accumulation order as square_distance()
    sqrdists = -2 * torch.matmul(new_xyz, xyz.permute(0, 2, 1))
    sqrdists += torch.sum(new_xyz ** 2, -1).view(B, S, 1)
    sqrdists += torch.sum(xyz ** 2, -1).view(B, 1, N)
    candidate = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
    candidate[sqrdists > radius ** 2] = N  # sentinel N = "outside the ball"
    # keep the nsample smallest indices; sentinels sort to the back
    candidate = candidate.sort(dim=-1)[0][:, :, :nsample]
    # pad any remaining sentinels with the first (closest-index) neighbour
    first = candidate[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
    pad = candidate == N
    candidate[pad] = first[pad]
    return candidate
def sample_and_group(npoint, radius, nsample, xyz, points, returnfps=False):
    """
    FPS-sample `npoint` centroids, then ball-query `nsample` neighbours of
    each and return their centred coordinates (optionally concatenated with
    per-point features).

    Input:
        npoint: number of centroids to sample
        radius: ball-query radius
        nsample: neighbours per centroid
        xyz: input points position data, [B, N, 3]
        points: input points data, [B, N, D] (or None)
    Return:
        new_xyz: sampled points position data, [B, npoint, 3]
        new_points: sampled points data, [B, npoint, nsample, 3+D]
        (plus grouped_xyz and fps_idx when returnfps is True)
    """
    B, N, C = xyz.shape
    fps_idx = farthest_point_sample(xyz, npoint)  # [B, npoint]
    new_xyz = index_points(xyz, fps_idx)
    neighbor_idx = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = index_points(xyz, neighbor_idx)  # [B, npoint, nsample, C]
    # centre each neighbourhood on its sampled centroid
    grouped_xyz_norm = grouped_xyz - new_xyz.view(B, npoint, 1, C)
    if points is None:
        new_points = grouped_xyz_norm
    else:
        new_points = torch.cat([grouped_xyz_norm, index_points(points, neighbor_idx)], dim=-1)
    if returnfps:
        return new_xyz, new_points, grouped_xyz, fps_idx
    return new_xyz, new_points
def sample_and_group_all(xyz, points):
    """
    Degenerate grouping: treat the whole cloud as one region centred at the
    origin.

    Input:
        xyz: input points position data, [B, N, 3]
        points: input points data, [B, N, D] (or None)
    Return:
        new_xyz: sampled points position data, [B, 1, 3] (all zeros)
        new_points: sampled points data, [B, 1, N, 3+D]
    """
    B, N, C = xyz.shape
    new_xyz = torch.zeros(B, 1, C).to(xyz.device)
    grouped_xyz = xyz.view(B, 1, N, C)
    if points is None:
        new_points = grouped_xyz
    else:
        new_points = torch.cat([grouped_xyz, points.view(B, 1, N, -1)], dim=-1)
    return new_xyz, new_points
class PointNetSetAbstraction(nn.Module):
    """Single-scale PointNet++ set-abstraction layer: sample centroids,
    group their neighbourhoods, run a shared MLP and max-pool per group."""

    def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all):
        super(PointNetSetAbstraction, self).__init__()
        self.npoint = npoint
        self.radius = radius
        self.nsample = nsample
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        prev = in_channel
        for width in mlp:
            self.mlp_convs.append(nn.Conv2d(prev, width, 1))
            self.mlp_bns.append(nn.BatchNorm2d(width))
            prev = width
        self.group_all = group_all

    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        xyz = xyz.permute(0, 2, 1)
        points = points.permute(0, 2, 1) if points is not None else None
        if self.group_all:
            new_xyz, new_points = sample_and_group_all(xyz, points)
        else:
            new_xyz, new_points = sample_and_group(self.npoint, self.radius, self.nsample, xyz, points)
        # new_xyz: [B, npoint, C]; new_points: [B, npoint, nsample, C+D]
        new_points = new_points.permute(0, 3, 2, 1)  # [B, C+D, nsample, npoint]
        for conv, bn in zip(self.mlp_convs, self.mlp_bns):
            new_points = F.relu(bn(conv(new_points)), inplace=True)
        # max-pool over each neighbourhood
        new_points = torch.max(new_points, 2)[0]
        return new_xyz.permute(0, 2, 1), new_points
class PointNetSetAbstractionMsg(nn.Module):
    """Multi-scale-grouping (MSG) set abstraction: groups neighbours of each
    sampled centroid at several radii, runs a small MLP per scale and
    concatenates the per-scale pooled features."""

    def __init__(self, npoint, radius_list, nsample_list, in_channel, mlp_list):
        super(PointNetSetAbstractionMsg, self).__init__()
        self.npoint = npoint
        self.radius_list = radius_list
        self.nsample_list = nsample_list
        self.conv_blocks = nn.ModuleList()
        self.bn_blocks = nn.ModuleList()
        for widths in mlp_list:
            convs = nn.ModuleList()
            bns = nn.ModuleList()
            prev = in_channel + 3  # +3 for the relative xyz offsets
            for width in widths:
                convs.append(nn.Conv2d(prev, width, 1))
                bns.append(nn.BatchNorm2d(width))
                prev = width
            self.conv_blocks.append(convs)
            self.bn_blocks.append(bns)

    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        xyz = xyz.permute(0, 2, 1)
        points = points.permute(0, 2, 1) if points is not None else None
        B, N, C = xyz.shape
        S = self.npoint
        new_xyz = index_points(xyz, farthest_point_sample(xyz, S))
        per_scale = []
        for i, radius in enumerate(self.radius_list):
            K = self.nsample_list[i]
            group_idx = query_ball_point(radius, K, xyz, new_xyz)
            grouped_xyz = index_points(xyz, group_idx)
            grouped_xyz -= new_xyz.view(B, S, 1, C)  # centre on each centroid
            if points is None:
                grouped = grouped_xyz
            else:
                grouped = torch.cat([index_points(points, group_idx), grouped_xyz], dim=-1)
            grouped = grouped.permute(0, 3, 2, 1)  # [B, D, K, S]
            for conv, bn in zip(self.conv_blocks[i], self.bn_blocks[i]):
                grouped = F.relu(bn(conv(grouped)), inplace=True)
            per_scale.append(torch.max(grouped, 2)[0])  # [B, D', S]
        return new_xyz.permute(0, 2, 1), torch.cat(per_scale, dim=1)
class PointNetFeaturePropagation(nn.Module):
    """Feature propagation (upsampling) layer: interpolate coarse-level
    features back onto a denser point set, optionally concatenate skip
    features, then refine with a shared 1D MLP."""

    def __init__(self, in_channel, mlp):
        super(PointNetFeaturePropagation, self).__init__()
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        prev = in_channel
        for width in mlp:
            self.mlp_convs.append(nn.Conv1d(prev, width, 1))
            self.mlp_bns.append(nn.BatchNorm1d(width))
            prev = width

    def forward(self, xyz1, xyz2, points1, points2):
        """
        Input:
            xyz1: input points position data, [B, C, N]
            xyz2: sampled input points position data, [B, C, S]
            points1: input points data, [B, D, N] (skip features, or None)
            points2: input points data, [B, D, S]
        Return:
            new_points: upsampled points data, [B, D', N]
        """
        xyz1 = xyz1.permute(0, 2, 1)
        xyz2 = xyz2.permute(0, 2, 1)
        points2 = points2.permute(0, 2, 1)
        B, N, C = xyz1.shape
        S = xyz2.shape[1]
        if S == 1:
            # single source point: broadcast its features to every target
            interpolated_points = points2.repeat(1, N, 1)
        else:
            # inverse-distance weighted average of the 3 nearest sources
            dists = square_distance(xyz1, xyz2)
            dists, idx = dists.sort(dim=-1)
            dists, idx = dists[:, :, :3], idx[:, :, :3]  # [B, N, 3]
            dist_recip = 1.0 / (dists + 1e-8)
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = dist_recip / norm
            interpolated_points = torch.sum(index_points(points2, idx) * weight.view(B, N, 3, 1), dim=2)
        if points1 is None:
            new_points = interpolated_points
        else:
            new_points = torch.cat([points1.permute(0, 2, 1), interpolated_points], dim=-1)
        new_points = new_points.permute(0, 2, 1)
        for conv, bn in zip(self.mlp_convs, self.mlp_bns):
            new_points = F.relu(bn(conv(new_points)))
        return new_points
| 11,031 | 35.052288 | 104 | py |
RPMG | RPMG-main/ModelNet_PC/pointnets.py | import torch.nn as nn
import torch
import torch.nn.functional as F
import os
import sys
BASEPATH = os.path.dirname(__file__)
sys.path.insert(0, BASEPATH)
from pointnet_utils import PointNetSetAbstractionMsg,PointNetSetAbstraction,PointNetFeaturePropagation
class PointNet2_seg(nn.Module):
    """PointNet++ (MSG) encoder-decoder for per-point prediction: three
    set-abstraction stages down, three feature-propagation stages up, then
    a small per-point head producing `out_channel` values per point."""

    def __init__(self, out_channel):
        super(PointNet2_seg, self).__init__()
        # encoder (the last stage pools the whole cloud)
        self.sa1 = PointNetSetAbstractionMsg(512, [0.1, 0.2, 0.4], [32, 64, 128], 3, [[32, 32, 64], [64, 64, 128], [64, 96, 128]])
        self.sa2 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], 128+128+64, [[128, 128, 256], [128, 196, 256]])
        self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512, 1024], group_all=True)
        # decoder
        self.fp3 = PointNetFeaturePropagation(in_channel=1536, mlp=[256, 256])
        self.fp2 = PointNetFeaturePropagation(in_channel=576, mlp=[256, 128])
        self.fp1 = PointNetFeaturePropagation(in_channel=134, mlp=[128, 128])
        # per-point prediction head
        self.conv1 = nn.Conv1d(128, 128, 1)
        self.bn1 = nn.BatchNorm1d(128)
        self.drop1 = nn.Dropout(0.5)
        self.conv2 = nn.Conv1d(128, out_channel, 1)

    def forward(self, xyz):
        """Map a [B, 3, N] cloud to [B, out_channel, N] per-point outputs."""
        xyz_l0, feat_l0 = xyz, xyz
        # encoder: progressively coarser point sets
        xyz_l1, feat_l1 = self.sa1(xyz_l0, feat_l0)
        xyz_l2, feat_l2 = self.sa2(xyz_l1, feat_l1)
        xyz_l3, feat_l3 = self.sa3(xyz_l2, feat_l2)
        # decoder: propagate coarse features back to finer levels
        feat_l2 = self.fp3(xyz_l2, xyz_l3, feat_l2, feat_l3)
        feat_l1 = self.fp2(xyz_l1, xyz_l2, feat_l1, feat_l2)
        feat_l0 = self.fp1(xyz_l0, xyz_l1, torch.cat([xyz_l0, feat_l0], 1), feat_l1)
        # head
        hidden = F.relu(self.bn1(self.conv1(feat_l0)))
        return self.conv2(self.drop1(hidden))
class PointNet_cls(nn.Module):
    """Vanilla PointNet classifier: per-point shared MLP, global max-pool,
    then a two-layer head producing `out_channel` outputs per cloud."""

    def __init__(self, out_channel):
        super(PointNet_cls, self).__init__()
        # shared per-point feature extractor ending in a global max-pool
        extractor_layers = [
            nn.Conv1d(3, 64, kernel_size=1),
            nn.LeakyReLU(),
            nn.Conv1d(64, 128, kernel_size=1),
            nn.LeakyReLU(),
            nn.Conv1d(128, 1024, kernel_size=1),
            nn.AdaptiveMaxPool1d(output_size=1),
        ]
        self.feature_extracter = nn.Sequential(*extractor_layers)
        # classification / regression head on the pooled global feature
        self.mlp = nn.Sequential(
            nn.Linear(1024, 512),
            nn.LeakyReLU(),
            nn.Linear(512, out_channel))

    def forward(self, x):
        """Map a [B, 3, N] cloud to [B, out_channel] global outputs."""
        global_feat = self.feature_extracter(x)
        global_feat = global_feat.view(x.shape[0], -1)
        return self.mlp(global_feat)
class PointNet2_cls(nn.Module):
    """PointNet++ (MSG) classifier: three set-abstraction stages (the last
    pools globally) followed by a two-layer head."""

    def __init__(self, out_channel):
        super(PointNet2_cls, self).__init__()
        self.sa1 = PointNetSetAbstractionMsg(512, [0.1, 0.2, 0.4], [32, 64, 128], 3, [[32, 32, 64], [64, 64, 128], [64, 96, 128]])
        self.sa2 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], 128+128+64, [[128, 128, 256], [128, 196, 256]])
        self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512, 1024], group_all=True)
        # head on the globally pooled 1024-d feature
        self.mlp = nn.Sequential(
            nn.Linear(1024, 512),
            nn.LeakyReLU(),
            nn.Linear(512, out_channel))

    def forward(self, xyz):
        """Map a [B, 3, N] cloud to [B, out_channel] global outputs."""
        coords, feats = xyz, xyz
        coords, feats = self.sa1(coords, feats)
        coords, feats = self.sa2(coords, feats)
        coords, feats = self.sa3(coords, feats)
        # sa3 pools to a single point; drop the trailing length-1 dim
        return self.mlp(feats.squeeze(-1))
class PointNet_seg(nn.Module):
    """Vanilla PointNet segmentation head.

    Each point gets a 64-d local feature (f1); a global 1024-d descriptor is
    max-pooled over all points (f2); the two are concatenated per point and
    mapped to `out_channel` scores.  forward(): (B, 3, N) -> (B, out_channel, N).
    """
    def __init__(self, out_channel):
        super(PointNet_seg, self).__init__()
        # Per-point local features.
        self.f1 = nn.Sequential(
            nn.Conv1d(3, 64, kernel_size=1),
            nn.LeakyReLU()
        )
        # Global descriptor via max-pool over the point dimension.
        self.f2 = nn.Sequential(
            nn.Conv1d(64, 128, kernel_size=1),
            nn.LeakyReLU(),
            nn.Conv1d(128, 1024, kernel_size=1),
            nn.AdaptiveMaxPool1d(output_size=1)
        )
        # Per-point head on [local(64) | global(1024)] = 1088 channels.
        self.mlp = nn.Sequential(
            nn.Conv1d(1088, 512, kernel_size=1),
            nn.LeakyReLU(),
            nn.Conv1d(512, 128, kernel_size=1),
            nn.LeakyReLU(),
            nn.Conv1d(128, out_channel, kernel_size=1)
        )
    def forward(self, x):
        """x: (B, 3, N) point cloud -> (B, out_channel, N) per-point scores."""
        point_feat = self.f1(x)            # (B, 64, N)
        global_feat = self.f2(point_feat)  # (B, 1024, 1)
        # FIX: broadcast the global descriptor to every point. The original
        # hard-coded `repeat(1, 1, 1024)`, which only worked for exactly
        # 1024 input points; using x.shape[2] supports any cloud size and is
        # identical for N == 1024.
        fused = torch.cat([point_feat, global_feat.repeat(1, 1, x.shape[2])], 1)
        return self.mlp(fused)
| 4,523 | 36.7 | 139 | py |
RPMG | RPMG-main/ModelNet_PC/pointnet_lib/setup.py | from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
# Build the custom CUDA kernels (ball query, grouping, interpolation,
# furthest-point sampling) into one extension importable as `pointnet2_cuda`.
setup(
    name='pointnet2',
    ext_modules=[
        CUDAExtension('pointnet2_cuda', [
            'src/pointnet2_api.cpp',
            'src/ball_query.cpp',
            'src/ball_query_gpu.cu',
            'src/group_points.cpp',
            'src/group_points_gpu.cu',
            'src/interpolate.cpp',
            'src/interpolate_gpu.cu',
            'src/sampling.cpp',
            'src/sampling_gpu.cu',
        ],
        # -g: debug symbols for host code; -O2: optimized device code.
        extra_compile_args={'cxx': ['-g'],
                            'nvcc': ['-O2']})
    ],
    cmdclass={'build_ext': BuildExtension}
)
| 679 | 27.333333 | 67 | py |
RPMG | RPMG-main/ModelNet_PC/pointnet_lib/pointnet2_utils.py | import torch
from torch.autograd import Variable
from torch.autograd import Function
import torch.nn as nn
from typing import Tuple
import pointnet2_cuda as pointnet2
class FurthestPointSampling(Function):
    """Autograd wrapper over the CUDA furthest-point-sampling kernel.

    Non-differentiable: backward returns (None, None).
    """
    @staticmethod
    def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
        """
        Uses iterative furthest point sampling to select a set of npoint
        features that have the largest minimum distance.
        :param ctx:
        :param xyz: (B, N, 3) where N > npoint
        :param npoint: int, number of features in the sampled set
        :return:
            output: (B, npoint) int tensor of selected point indices
        """
        xyz = xyz.contiguous()
        # assert xyz.is_contiguous()
        B, N, _ = xyz.size()
        output = torch.cuda.IntTensor(B, npoint)
        # temp holds the running min squared distance per source point.
        temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
        pointnet2.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
        return output
    @staticmethod
    def backward(xyz, a=None):
        # Index selection has no useful gradient.
        return None, None
furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
    """Autograd wrapper over the CUDA gather kernel (select columns by index)."""
    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param features: (B, C, N)
        :param idx: (B, npoint) index tensor of the features to gather
        :return:
            output: (B, C, npoint)
        """
        features = features.contiguous()
        idx = idx.contiguous()
        assert features.is_contiguous()
        assert idx.is_contiguous()
        B, npoint = idx.size()
        _, C, N = features.size()
        output = torch.cuda.FloatTensor(B, C, npoint)
        pointnet2.gather_points_wrapper(B, C, N, npoint, features, idx, output)
        # Saved for backward: gradients scatter back through the same indices.
        ctx.for_backwards = (idx, C, N)
        return output
    @staticmethod
    def backward(ctx, grad_out):
        idx, C, N = ctx.for_backwards
        B, npoint = idx.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data)
        # No gradient w.r.t. the integer indices.
        return grad_features, None
gather_operation = GatherOperation.apply
class KNN(Function):
    """Autograd wrapper over the CUDA k-nearest-neighbour kernel.

    Non-differentiable: backward returns all Nones.
    """
    @staticmethod
    def forward(ctx, k: int, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Find the k nearest neighbors of each `unknown` point among `known`.
        :param ctx:
        :param unknown: (B, N, 3) query points
        :param known: (B, M, 3) reference points
        :return:
            dist: (B, N, k) l2 distance to the k nearest neighbors
            idx: (B, N, k) index of the k nearest neighbors (into `known`)
        """
        unknown = unknown.contiguous()
        known = known.contiguous()
        assert unknown.is_contiguous()
        assert known.is_contiguous()
        B, N, _ = unknown.size()
        m = known.size(1)
        dist2 = torch.cuda.FloatTensor(B, N, k)
        idx = torch.cuda.IntTensor(B, N, k)
        pointnet2.knn_wrapper(B, N, m, k, unknown, known, dist2, idx)
        # Kernel returns squared distances; convert to euclidean.
        return torch.sqrt(dist2), idx
    @staticmethod
    def backward(ctx, a=None, b=None):
        return None, None, None
knn = KNN.apply
class ThreeNN(Function):
    """Autograd wrapper over the CUDA 3-nearest-neighbour kernel.

    Non-differentiable: backward returns (None, None).
    """
    @staticmethod
    def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Find the three nearest neighbors of unknown in known
        :param ctx:
        :param unknown: (B, N, 3)
        :param known: (B, M, 3)
        :return:
            dist: (B, N, 3) l2 distance to the three nearest neighbors
            idx: (B, N, 3) index of 3 nearest neighbors
        """
        unknown = unknown.contiguous()
        known = known.contiguous()
        assert unknown.is_contiguous()
        assert known.is_contiguous()
        B, N, _ = unknown.size()
        m = known.size(1)
        dist2 = torch.cuda.FloatTensor(B, N, 3)
        idx = torch.cuda.IntTensor(B, N, 3)
        pointnet2.three_nn_wrapper(B, N, m, unknown, known, dist2, idx)
        # Kernel returns squared distances; convert to euclidean.
        return torch.sqrt(dist2), idx
    @staticmethod
    def backward(ctx, a=None, b=None):
        return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
    """Autograd wrapper over the CUDA weighted 3-neighbour interpolation kernel.

    Differentiable w.r.t. `features` only; idx/weight get no gradient.
    """
    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """
        Performs weight linear interpolation on 3 features
        :param ctx:
        :param features: (B, C, M) Features descriptors to be interpolated from
        :param idx: (B, n, 3) three nearest neighbors of the target features in features
        :param weight: (B, n, 3) weights
        :return:
            output: (B, C, N) tensor of the interpolated features
        """
        features = features.contiguous()
        idx = idx.contiguous()
        weight = weight.contiguous()
        assert features.is_contiguous()
        assert idx.is_contiguous()
        assert weight.is_contiguous()
        B, c, m = features.size()
        n = idx.size(1)
        # idx/weight are reused in backward to scatter gradients.
        ctx.three_interpolate_for_backward = (idx, weight, m)
        output = torch.cuda.FloatTensor(B, c, n)
        pointnet2.three_interpolate_wrapper(B, c, m, n, features, idx, weight, output)
        return output
    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, C, N) tensor with gradients of outputs
        :return:
            grad_features: (B, C, M) tensor with gradients of features
            None:
            None:
        """
        idx, weight, m = ctx.three_interpolate_for_backward
        B, c, n = grad_out.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.three_interpolate_grad_wrapper(B, c, n, m, grad_out_data, idx, weight, grad_features.data)
        return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
    """Autograd wrapper over the CUDA grouping kernel.

    Gathers, for every output location, `nsample` feature columns selected
    by an index tensor. Differentiable w.r.t. `features` only.
    """
    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param features: (B, C, N) tensor of features to group
        :param idx: (B, npoint, nsample) tensor containing the indicies of features to group with
        :return:
            output: (B, C, npoint, nsample) tensor
        """
        features = features.contiguous()
        idx = idx.contiguous()
        assert features.is_contiguous()
        assert idx.is_contiguous()
        # Kernel expects int32 indices.
        idx = idx.int()
        B, nfeatures, nsample = idx.size()
        _, C, N = features.size()
        output = torch.cuda.FloatTensor(B, C, nfeatures, nsample)
        pointnet2.group_points_wrapper(B, C, N, nfeatures, nsample, features, idx, output)
        ctx.for_backwards = (idx, N)
        return output
    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward
        :return:
            grad_features: (B, C, N) gradient of the features
        """
        idx, N = ctx.for_backwards
        B, C, npoint, nsample = grad_out.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.group_points_grad_wrapper(B, C, N, npoint, nsample, grad_out_data, idx, grad_features.data)
        return grad_features, None
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
    """Autograd wrapper over the CUDA ball-query kernel.

    Non-differentiable: backward returns all Nones.
    """
    @staticmethod
    def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param radius: float, radius of the balls
        :param nsample: int, maximum number of features in the balls
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centers of the ball query
        :return:
            idx: (B, npoint, nsample) tensor with the indicies of the features that form the query balls
        """
        new_xyz = new_xyz.contiguous()
        xyz = xyz.contiguous()
        assert new_xyz.is_contiguous()
        assert xyz.is_contiguous()
        B, N, _ = xyz.size()
        npoint = new_xyz.size(1)
        idx = torch.cuda.IntTensor(B, npoint, nsample).zero_()
        pointnet2.ball_query_wrapper(B, N, npoint, radius, nsample, new_xyz, xyz, idx)
        return idx
    @staticmethod
    def backward(ctx, a=None):
        return None, None, None, None
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
    """Ball-query grouping: for each centroid, gather up to `nsample`
    neighbours within `radius` and return their (recentred) coordinates,
    optionally concatenated with their feature descriptors.
    """
    def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
        """
        :param radius: float, radius of ball
        :param nsample: int, maximum number of features to gather in the ball
        :param use_xyz: if True, concatenate relative xyz onto grouped features
        """
        super().__init__()
        self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centroids
        :param features: (B, C, N) descriptors of the features
        :return:
            new_features: (B, 3 + C, npoint, nsample)
        """
        idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
        xyz_trans = xyz.transpose(1, 2).contiguous()
        grouped_xyz = grouping_operation(xyz_trans, idx)  # (B, 3, npoint, nsample)
        # Recenter each group on its centroid.
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
        if features is not None:
            grouped_features = grouping_operation(features, idx)
            if self.use_xyz:
                new_features = torch.cat([grouped_features, grouped_xyz] , dim=1)  # (B, C + 3, npoint, nsample)
            else:
                new_features = grouped_features
        else:
            assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
            new_features = grouped_xyz
        return new_features
class GroupAll(nn.Module):
    """Degenerate grouper that treats the entire cloud as a single group."""
    def __init__(self, use_xyz: bool = True):
        super().__init__()
        self.use_xyz = use_xyz
    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None):
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: ignored
        :param features: (B, C, N) descriptors of the features
        :return:
            new_features: (B, C + 3, 1, N)
        """
        grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)  # (B, 3, 1, N)
        if features is None:
            return grouped_xyz
        grouped_features = features.unsqueeze(2)  # (B, C, 1, N)
        if not self.use_xyz:
            return grouped_features
        # (B, 3 + C, 1, N): coordinates stacked on top of the descriptors.
        return torch.cat([grouped_xyz, grouped_features], dim=1)
class KNNAndGroup(nn.Module):
    """k-NN grouping: for each query point gather its `nsample` nearest
    neighbours and return their recentred coordinates, optionally
    concatenated with their feature descriptors.
    """
    def __init__(self, radius:float, nsample: int, use_xyz: bool = True):
        """
        :param radius: float, kept for interface compatibility (not used by the k-NN search)
        :param nsample: int, number of neighbours to gather per query point
        :param use_xyz: if True, concatenate relative xyz onto grouped features
        """
        super().__init__()
        self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, idx: torch.Tensor = None, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, M, 3) query centroids (defaults to xyz)
        :param idx: (B, M, K) optional precomputed neighbour indices
        :param features: (B, C, N) descriptors of the features
        :return:
            new_features: (B, 3 + C, M, K) if use_xyz = True else (B, C, M, K)
        """
        if new_xyz is None:
            new_xyz = xyz
        if idx is None:
            # BUGFIX: knn is KNN.apply with signature (k, unknown, known) and
            # returns (dist, idx). The previous call
            # `knn(xyz, new_xyz, self.radius, self.nsample)` passed the
            # arguments in the wrong order/arity and kept the whole tuple.
            _, idx = knn(self.nsample, new_xyz, xyz)  # idx: (B, M, K), indices into xyz
            idx = idx.detach()
        xyz_trans = xyz.transpose(1, 2).contiguous()
        new_xyz_trans = new_xyz.transpose(1, 2).contiguous()
        grouped_xyz = grouping_operation(xyz_trans, idx)  # B, 3, M, K
        # Recenter each neighbourhood on its query point.
        grouped_xyz -= new_xyz_trans.unsqueeze(-1)  # B, 3, M, K
        if features is not None:
            grouped_features = grouping_operation(features, idx)  # B, C, M, K
            if self.use_xyz:
                new_features = torch.cat([grouped_xyz, grouped_features], dim=1)  # (B, C + 3, M, K)
            else:
                new_features = grouped_features
        else:
            assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
            new_features = grouped_xyz
        return new_features
| 13,245 | 33.22739 | 151 | py |
RPMG | RPMG-main/ModelNet_PC/pointnet_lib/pointnet2_modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import os
BASEPATH = os.path.dirname(__file__)
sys.path.insert(0, BASEPATH)
CUDA = torch.cuda.is_available()
if CUDA:
import pointnet2_utils as futils
def knn_point(k, pos2, pos1):
    '''
    Input:
        k: int32, number of k in k-nn search
        pos1: (batch_size, ndataset, c) float32 array, input points
        pos2: (batch_size, npoint, c) float32 array, query points
    Output:
        val: (batch_size, npoint, k) float32 array, L2 distances
        idx: (batch_size, npoint, k) int32 array, indices to input points
    '''
    if CUDA:
        # GPU path: delegate to the compiled kernel.
        val, idx = futils.knn(k, pos2, pos1)
        return val, idx.long()
    # CPU fallback: brute-force pairwise distances.
    B, N, C = pos1.shape
    M = pos2.shape[1]
    pos1 = pos1.view(B, 1, N, -1).repeat(1, M, 1, 1)
    pos2 = pos2.view(B, M, 1, -1).repeat(1, 1, N, 1)
    # Negated squared distances so that topk (largest) picks the nearest.
    dist = torch.sum(-(pos1 - pos2) ** 2, -1)
    val, idx = dist.topk(k=k, dim=-1)
    # Undo the negation and convert back to euclidean distance.
    return torch.sqrt(-val), idx
def three_nn(xyz1, xyz2):
    """For each point in xyz1 (B, N, 3), find its 3 nearest neighbours in
    xyz2 (B, M, 3). Returns (dists, idx), each (B, N, 3).
    NOTE: the CPU path returns squared distances (no sqrt), while the CUDA
    path returns euclidean distances — callers only use relative weights.
    """
    if CUDA:
        dists, idx = futils.three_nn(xyz1, xyz2)
        return dists, idx.long()
    dists = square_distance(xyz1, xyz2)
    dists, idx = dists.sort(dim=-1)
    dists, idx = dists[:, :, :3], idx[:, :, :3]  # [B, N, 3]
    return dists, idx
def three_interpolate(points, idx, weight):  # points: [B, C, M], idx: [B, N, 3], returns [B, C, N]
    """Weighted interpolation of 3 neighbour features per output point."""
    if CUDA:
        return futils.three_interpolate(points, idx.int(), weight)
    B, N = idx.shape[:2]
    points = points.permute(0, 2, 1)  # [B, M, C] --> [B, N, 3, C]
    # Weighted sum over the 3 gathered neighbours.
    interpolated_points = torch.sum(index_points(points, idx) * weight.view(B, N, 3, 1), dim=2)
    return interpolated_points.permute(0, 2, 1)
def square_distance(src, dst):
    """
    Pairwise squared Euclidean distance between two point sets, via the
    expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b.
    Input:
        src: source points, [B, N, C]
        dst: target points, [B, M, C]
    Output:
        dist: per-point square distance, [B, N, M]
    """
    sq_src = torch.sum(src ** 2, dim=-1)               # [B, N]
    sq_dst = torch.sum(dst ** 2, dim=-1)               # [B, M]
    cross = torch.matmul(src, dst.transpose(-1, -2))   # [B, N, M]
    return sq_src.unsqueeze(-1) + sq_dst.unsqueeze(-2) - 2 * cross
def index_points(points, idx):
    """
    Gather point subsets by index.
    Input:
        points: input points data, [B, N, C]
        idx: sample index data, [B, S] or [B, S1, S2, ..Sk]
    Return:
        new_points: indexed points data, [B, S, C] or [B, S1, S2, ..Sk, C]
    """
    B = points.shape[0]
    # Batch index broadcast to the same shape as idx so that advanced
    # indexing picks row idx[b, ...] from batch b.
    view_shape = [B] + [1] * (idx.dim() - 1)
    batch_idx = torch.arange(B, dtype=torch.long, device=points.device)
    batch_idx = batch_idx.view(view_shape).expand_as(idx)
    return points[batch_idx, idx, :]
def gather_operation(feature, idx):  # [B, C, N], [B, npoint] -> [B, C, npoint]
    """Select feature columns by index; CUDA kernel or CPU fallback."""
    if CUDA:
        return futils.gather_operation(feature, idx)
    return index_points(feature.transpose(-1, -2), idx).transpose(-1, -2)
def group_operation(feature, idx):  # [B, C, N], idx [B, npoint, nsample] --> [B, C, npoint, nsample]
    """Gather neighbourhood feature columns; CUDA kernel or CPU fallback."""
    if CUDA:
        return futils.grouping_operation(feature, idx)
    return index_points(feature.transpose(-1, -2), idx).permute(0, 3, 1, 2)
def farthest_point_sample(xyz, npoint):
    """
    Input:
        xyz: pointcloud data, [B, N, 3]
        npoint: number of samples
    Return:
        centroids: sampled pointcloud index, [B, npoint]
    Note: the CPU fallback starts from a random seed point, so results are
    not deterministic across calls unless the RNG is seeded.
    """
    if CUDA:
        idx = futils.furthest_point_sample(xyz, npoint).long()
        return idx
    device = xyz.device
    B, N, C = xyz.shape
    centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
    # distance[b, i] = squared distance from point i to its nearest
    # already-selected centroid.
    distance = torch.ones(B, N).to(device) * 1e10
    farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
    batch_indices = torch.arange(B, dtype=torch.long).to(device)
    for i in range(npoint):
        centroids[:, i] = farthest
        centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
        dist = torch.sum((xyz - centroid) ** 2, -1)
        mask = dist < distance
        distance[mask] = dist[mask]
        # Next centroid: the point farthest from all selected so far.
        farthest = torch.max(distance, -1)[1]
    return centroids
def query_ball_point(radius, nsample, xyz, new_xyz):
    """
    Input:
        radius: local region radius
        nsample: max sample number in local region
        xyz: all points, [B, N, 3]
        new_xyz: query points, [B, S, 3]
    Return:
        group_idx: grouped points index, [B, S, nsample]
    """
    if CUDA:
        return futils.ball_query(radius, nsample, xyz, new_xyz).long()
    device = xyz.device
    B, N, C = xyz.shape
    _, S, _ = new_xyz.shape
    group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
    sqrdists = square_distance(new_xyz, xyz)
    # Mark out-of-radius points with the sentinel N so they sort last.
    group_idx[sqrdists > radius ** 2] = N
    group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
    # Pad short groups by repeating the first (closest-index) in-ball point.
    group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
    mask_first = group_first == N
    group_first[mask_first] = 0
    mask = group_idx == N
    group_idx[mask] = group_first[mask]
    return group_idx
def sample_and_group_all(xyz, points):
    """
    Group the whole cloud around a single centroid at the origin.
    Input:
        xyz: input points position data, [B, N, 3]
        points: input points data, [B, N, D]
    Return:
        new_xyz: sampled points position data, [B, 1, 3]
        new_points: sampled points data, [B, 1, N, 3+D]
    """
    B, N, C = xyz.shape
    new_xyz = torch.zeros(B, 1, C, device=xyz.device)
    grouped_xyz = xyz.view(B, 1, N, C)
    if points is None:
        return new_xyz, grouped_xyz
    # Append per-point descriptors after the coordinates.
    grouped_points = points.view(B, 1, N, -1)
    return new_xyz, torch.cat([grouped_xyz, grouped_points], dim=-1)
class PointNetSetAbstractionMsg(nn.Module):
    """PointNet++ set abstraction with multi-scale grouping (MSG).

    Samples `npoint` centroids via FPS, then for each radius/nsample pair
    groups neighbours, runs a small shared Conv2d/BN/ReLU MLP, max-pools over
    the neighbourhood, and concatenates the per-scale features channel-wise.
    """
    def __init__(self, npoint, radius_list, nsample_list, in_channel, mlp_list, knn=False):
        super(PointNetSetAbstractionMsg, self).__init__()
        self.npoint = npoint
        self.radius_list = radius_list
        self.nsample_list = nsample_list
        self.conv_blocks = nn.ModuleList()
        self.bn_blocks = nn.ModuleList()
        # Total output channels = sum of each scale's last MLP width.
        self.out_channel = 0
        for i in range(len(mlp_list)):
            convs = nn.ModuleList()
            bns = nn.ModuleList()
            last_channel = in_channel
            for out_channel in mlp_list[i]:
                convs.append(nn.Conv2d(last_channel, out_channel, 1))
                bns.append(nn.BatchNorm2d(out_channel))
                last_channel = out_channel
            self.out_channel += last_channel
            self.conv_blocks.append(convs)
            self.bn_blocks.append(bns)
        self.knn = knn
    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        B, C, N = xyz.shape
        S = self.npoint
        fps_idx = farthest_point_sample(xyz.permute(0, 2, 1), S).int()
        new_xyz = gather_operation(xyz, fps_idx)  # [B, C, S]
        new_points_list = []
        for i, radius in enumerate(self.radius_list):
            K = self.nsample_list[i]
            if self.knn:
                _, group_idx = knn_point(K, new_xyz.transpose(-1, -2), xyz.transpose(-1, -2))
            else:
                group_idx = query_ball_point(radius, K, xyz.transpose(-1, -2), new_xyz.transpose(-1, -2))  # [B, S, nsample]
            grouped_xyz = group_operation(xyz, group_idx)  # [B, C, S, nsample]
            # Recenter each neighbourhood on its centroid.
            grouped_xyz -= new_xyz.view(B, C, S, 1)
            if points is not None:
                grouped_points = group_operation(points, group_idx)  # [B, D, S, nsample]
                grouped_points = torch.cat([grouped_points, grouped_xyz], dim=1)
            else:
                grouped_points = grouped_xyz
            for j in range(len(self.conv_blocks[i])):
                conv = self.conv_blocks[i][j]
                bn = self.bn_blocks[i][j]
                grouped_points = F.relu(bn(conv(grouped_points)))  # [B, D, S, nsample]
            # Max-pool over the neighbourhood dimension.
            new_points = torch.max(grouped_points, -1)[0]  # [B, D', S]
            new_points_list.append(new_points)
        new_points_concat = torch.cat(new_points_list, dim=1)
        return new_xyz, new_points_concat
class PointNetSetAbstraction(nn.Module):
    """PointNet++ single-scale set abstraction.

    Only the group_all variant is implemented here: the whole cloud is
    pooled into one global feature vector.
    """
    def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all, knn=False):
        super(PointNetSetAbstraction, self).__init__()
        self.npoint = npoint
        self.radius = radius
        self.nsample = nsample
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        last_channel = in_channel
        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1))
            self.mlp_bns.append(nn.BatchNorm2d(out_channel))
            last_channel = out_channel
        self.out_channel = last_channel
        self.group_all = group_all
        self.knn = knn
    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        xyz = xyz.permute(0, 2, 1)
        if points is not None:
            points = points.permute(0, 2, 1)
        if self.group_all:
            new_xyz, new_points = sample_and_group_all(xyz, points)
        else:
            # Sampled (non-global) abstraction is not supported by this class.
            assert 0, 'Not Implemented'
        new_points = new_points.permute(0, 3, 2, 1)  # [B, 1, N, 3 + D] --> [B, 3 + D, N, 1]
        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            new_points = F.relu(bn(conv(new_points)))
        # Max-pool over all points -> one feature vector per cloud.
        new_points = torch.max(new_points, 2)[0]
        new_xyz = new_xyz.permute(0, 2, 1)
        return new_xyz, new_points
class PointNetFeaturePropagation(nn.Module):
    """PointNet++ feature propagation (upsampling) layer.

    Interpolates coarse-level features back onto the dense point set using
    inverse-distance weighting over 3 nearest neighbours, optionally
    concatenates skip features, and refines with a Conv1d/BN/ReLU MLP.
    """
    def __init__(self, in_channel, mlp):
        super(PointNetFeaturePropagation, self).__init__()
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        last_channel = in_channel
        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))
            self.mlp_bns.append(nn.BatchNorm1d(out_channel))
            last_channel = out_channel
        self.out_channel = last_channel
    def forward(self, xyz1, xyz2, points1, points2):
        """
        Input:
            xyz1: input points position data, [B, C, N]
            xyz2: sampled input points position data, [B, C, S]
            points1: input points data, [B, D, N]
            points2: input points data, [B, D, S]
        Return:
            new_points: upsampled points data, [B, D', N]
        """
        xyz1 = xyz1.permute(0, 2, 1)
        xyz2 = xyz2.permute(0, 2, 1)
        B, N, C = xyz1.shape
        _, S, _ = xyz2.shape
        if S == 1:
            # Single source point: just broadcast its feature everywhere.
            interpolated_points = points2.repeat(1, 1, N)
        else:
            dist, idx = three_nn(xyz1, xyz2)
            # Inverse-distance weights, normalised over the 3 neighbours.
            dist_recip = 1.0 / (dist + 1e-8)
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = dist_recip / norm
            interpolated_points = three_interpolate(points2, idx, weight)  # [B, C, N]
        if points1 is not None:
            # Concatenate skip-connection features along the channel dim.
            new_points = torch.cat([points1, interpolated_points], dim=-2)
        else:
            new_points = interpolated_points
        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            new_points = F.relu(bn(conv(new_points)))
        return new_points
| 12,024 | 33.754335 | 124 | py |
RPMG | RPMG-main/ModelNet_PC/pointnet_lib/pytorch_utils.py | import torch.nn as nn
from typing import List, Tuple
class SharedMLP(nn.Sequential):
    """Stack of shared 1x1 Conv2d layers defined by a channel-width list.

    With preact+first, the very first layer skips its norm/activation —
    see the (not first or not preact or (i != 0)) guards below.
    """
    def __init__(
            self,
            args: List[int],
            *,
            bn: bool = False,
            activation=nn.ReLU(inplace=True),
            preact: bool = False,
            first: bool = False,
            name: str = "",
            instance_norm: bool = False,
    ):
        super().__init__()
        for i in range(len(args) - 1):
            self.add_module(
                name + 'layer{}'.format(i),
                Conv2d(
                    args[i],
                    args[i + 1],
                    bn=(not first or not preact or (i != 0)) and bn,
                    activation=activation
                    if (not first or not preact or (i != 0)) else None,
                    preact=preact,
                    instance_norm=instance_norm
                )
            )
class _ConvBase(nn.Sequential):
    """Shared builder for Conv1d/Conv2d wrappers: conv + optional norm +
    optional activation, in pre- or post-activation order.

    Module registration order (and hence state_dict keys) depends on
    `preact`; do not reorder the add_module calls.
    """
    def __init__(
            self,
            in_size,
            out_size,
            kernel_size,
            stride,
            padding,
            activation,
            bn,
            init,
            conv=None,
            batch_norm=None,
            bias=True,
            preact=False,
            name="",
            instance_norm=False,
            instance_norm_func=None
    ):
        super().__init__()
        # Bias is redundant when followed by batch norm.
        bias = bias and (not bn)
        conv_unit = conv(
            in_size,
            out_size,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=bias
        )
        init(conv_unit.weight)
        if bias:
            nn.init.constant_(conv_unit.bias, 0)
        if bn:
            # Pre-activation normalises the input channels, post the output.
            if not preact:
                bn_unit = batch_norm(out_size)
            else:
                bn_unit = batch_norm(in_size)
        if instance_norm:
            if not preact:
                in_unit = instance_norm_func(out_size, affine=False, track_running_stats=False)
            else:
                in_unit = instance_norm_func(in_size, affine=False, track_running_stats=False)
        if preact:
            if bn:
                self.add_module(name + 'bn', bn_unit)
            if activation is not None:
                self.add_module(name + 'activation', activation)
            if not bn and instance_norm:
                self.add_module(name + 'in', in_unit)
        self.add_module(name + 'conv', conv_unit)
        if not preact:
            if bn:
                self.add_module(name + 'bn', bn_unit)
            if activation is not None:
                self.add_module(name + 'activation', activation)
            if not bn and instance_norm:
                self.add_module(name + 'in', in_unit)
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=""):
super().__init__()
self.add_module(name + "bn", batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0)
class BatchNorm1d(_BNBase):
    """1D batch norm with identity-initialised affine parameters."""
    def __init__(self, in_size: int, *, name: str = ""):
        super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class BatchNorm2d(_BNBase):
    """2D batch norm with identity-initialised affine parameters."""
    def __init__(self, in_size: int, name: str = ""):
        super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
class Conv1d(_ConvBase):
    """Conv1d + optional BatchNorm1d/InstanceNorm1d + activation block."""
    def __init__(
            self,
            in_size: int,
            out_size: int,
            *,
            kernel_size: int = 1,
            stride: int = 1,
            padding: int = 0,
            activation=nn.ReLU(inplace=True),
            bn: bool = False,
            init=nn.init.kaiming_normal_,
            bias: bool = True,
            preact: bool = False,
            name: str = "",
            instance_norm=False
    ):
        super().__init__(
            in_size,
            out_size,
            kernel_size,
            stride,
            padding,
            activation,
            bn,
            init,
            conv=nn.Conv1d,
            batch_norm=BatchNorm1d,
            bias=bias,
            preact=preact,
            name=name,
            instance_norm=instance_norm,
            instance_norm_func=nn.InstanceNorm1d
        )
class Conv2d(_ConvBase):
    """Conv2d + optional BatchNorm2d/InstanceNorm2d + activation block."""
    def __init__(
            self,
            in_size: int,
            out_size: int,
            *,
            kernel_size: Tuple[int, int] = (1, 1),
            stride: Tuple[int, int] = (1, 1),
            padding: Tuple[int, int] = (0, 0),
            activation=nn.ReLU(inplace=True),
            bn: bool = False,
            init=nn.init.kaiming_normal_,
            bias: bool = True,
            preact: bool = False,
            name: str = "",
            instance_norm=False
    ):
        super().__init__(
            in_size,
            out_size,
            kernel_size,
            stride,
            padding,
            activation,
            bn,
            init,
            conv=nn.Conv2d,
            batch_norm=BatchNorm2d,
            bias=bias,
            preact=preact,
            name=name,
            instance_norm=instance_norm,
            instance_norm_func=nn.InstanceNorm2d
        )
class FC(nn.Sequential):
    """Linear layer with optional batch norm and activation, in pre- or
    post-activation order (mirrors _ConvBase's module-registration logic)."""
    def __init__(
            self,
            in_size: int,
            out_size: int,
            *,
            activation=nn.ReLU(inplace=True),
            bn: bool = False,
            init=None,
            preact: bool = False,
            name: str = ""
    ):
        super().__init__()
        # Bias is redundant when followed by batch norm.
        fc = nn.Linear(in_size, out_size, bias=not bn)
        if init is not None:
            init(fc.weight)
        if not bn:
            # FIX: nn.init.constant is the deprecated alias (removed in newer
            # torch); use the in-place constant_ like the rest of this file.
            nn.init.constant_(fc.bias, 0)
        if preact:
            if bn:
                self.add_module(name + 'bn', BatchNorm1d(in_size))
            if activation is not None:
                self.add_module(name + 'activation', activation)
        self.add_module(name + 'fc', fc)
        if not preact:
            if bn:
                self.add_module(name + 'bn', BatchNorm1d(out_size))
            if activation is not None:
                self.add_module(name + 'activation', activation)
| 6,173 | 25.050633 | 95 | py |
RPMG | RPMG-main/ModelNet_PC/code/test.py | import torch
import numpy as np
import random
import os
from os.path import join as pjoin
import sys
import argparse
BASEPATH = os.path.dirname(__file__)
sys.path.insert(0,pjoin(BASEPATH, '../..'))
sys.path.insert(0,pjoin(BASEPATH, '..'))
import config as Config
import utils.tools as tools
from model import Model
def test(test_folder, model):
    """Evaluate `model` on every fixed test file in `test_folder`.

    Each file (saved by prepare_data.py) holds rotated clouds 'pc' and
    ground-truth rotations 'rgt'. Returns (geodesic errors in degrees as a
    flat array over all instances, mean squared rotation-matrix loss).
    """
    # Fixed seeds so the evaluation is reproducible.
    seed = 1
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    geodesic_errors_lst = np.array([])
    l = 0
    test_path_list = [os.path.join(test_folder, i) for i in os.listdir(test_folder)]
    for i in range(len(test_path_list)):
        path = test_path_list[i]
        tmp = torch.load(path)
        pc2 = tmp['pc'].cpu().cuda()
        gt_rmat = tmp['rgt'].cpu().cuda()
        out_rmat, out_nd = model(pc2.transpose(1, 2))
        l += ((gt_rmat - out_rmat) ** 2).sum()
        geodesic_errors = np.array(
            tools.compute_geodesic_distance_from_two_matrices(gt_rmat, out_rmat).data.tolist())  # batch
        # Radians -> degrees.
        geodesic_errors = geodesic_errors / np.pi * 180
        geodesic_errors_lst = np.append(geodesic_errors_lst, geodesic_errors)
    l /= len(test_path_list)
    return geodesic_errors_lst, l
if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--config", type=str, required=True, help="Path to config")
    arg_parser.add_argument("-c", "--checkpoint", type=int, default=-1, help="checkpoint number")
    args = arg_parser.parse_args()
    param=Config.Parameters()
    param.read_config(pjoin("../configs", args.config))
    test_folder = pjoin(param.data_folder, 'test_fix')
    # Checkpoint -1 means "latest": pick the last file in sorted order.
    if args.checkpoint == -1:
        allcp = os.listdir(param.write_weight_folder)
        allcp.sort()
        weight_path = pjoin(param.write_weight_folder, allcp[-1])
    else:
        weight_path = pjoin(param.write_weight_folder, "model_%07d.weight"%args.checkpoint)
    with torch.no_grad():
        model = Model(out_rotation_mode=param.out_rotation_mode)
        print("Load " + weight_path)
        f = torch.load(weight_path)
        model.load_state_dict(f['model'])
        model.cuda()
        model.eval()
        errors, l = test(test_folder, model)
        # Persist the raw per-instance errors next to the weight folder.
        np.save(param.write_weight_folder.replace('/weight',''), errors)
        print("Loss: ", l)
        print("median:"+str(np.round(np.median(errors),2)))
        print("avg:" + str(np.round(errors.mean(), 2)))
        print("max:" + str(np.round(errors.max(), 2)))
        print("std:" + str(np.round(np.std(errors), 2)))
        print("1 accuracy:"+str(np.round((errors<1).sum()/len(errors),3)))
        print("3 accuracy:" + str(np.round((errors < 3).sum() / len(errors), 3)))
print("5 accuracy:"+str(np.round((errors<5).sum()/len(errors),3))) | 2,761 | 35.826667 | 104 | py |
RPMG | RPMG-main/ModelNet_PC/code/prepare_data.py | '''
from mesh to normalized pc
'''
import numpy as np
import torch
import os
from os.path import join as pjoin
import trimesh
import argparse
import sys
import tqdm
BASEPATH = os.path.dirname(__file__)
sys.path.insert(0,pjoin(BASEPATH, '../..'))
import utils.tools as tools
def pc_normalize(pc):
    """Center a cloud on its bounding-box midpoint and scale by the norm of
    the bounding-box extents.

    pc: (N, 3) array. Returns (normalized_pc, centroid, scale).
    """
    hi = np.max(pc, axis=0)
    lo = np.min(pc, axis=0)
    centroid = (hi + lo) / 2
    centered = pc - centroid
    extent = np.max(centered, axis=0) - np.min(centered, axis=0)
    scale = np.linalg.norm(extent)
    return centered / scale, centroid, scale
if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-d", "--data_dir", type=str, default='dataset/modelnet40_manually_aligned', help="Path to modelnet dataset")
    arg_parser.add_argument("-c", "--category", type=str, default='airplane', help="category")
    arg_parser.add_argument("-f", "--fix_test", action='store_false', help="for fair comparision")
    args = arg_parser.parse_args()
    sample_num = 4096
    # Convert every .off mesh into a normalised surface point cloud.
    for mode in ['train', 'test']:
        in_folder = pjoin(args.data_dir, args.category, mode)
        out_folder = pjoin(args.data_dir, args.category, mode + '_pc')
        os.makedirs(out_folder, exist_ok=True)
        lst = [i for i in os.listdir(in_folder) if i[-4:] == '.off']
        lst.sort()
        for p in tqdm.tqdm(lst):
            in_path = pjoin(in_folder, p)
            out_path = pjoin(out_folder, p.replace('.off','.pts'))
            # Training clouds are resumable; test clouds are regenerated so
            # the fixed rotated copies below stay in sync.
            if os.path.exists(out_path) and mode == 'train':
                continue
            mesh = trimesh.load(in_path, force='mesh')
            pc, _ = trimesh.sample.sample_surface(mesh, sample_num)
            pc = np.array(pc)
            pc, centroid, scale = pc_normalize(pc)
            np.savetxt(out_path, pc)
            if mode == 'test' and args.fix_test:
                fix_folder = pjoin(args.data_dir, args.category, mode + '_fix')
                os.makedirs(fix_folder, exist_ok=True)
                fix_path = pjoin(fix_folder, p.replace('.off','.pt'))
                pc = np.random.permutation(pc)[:1024,:]
                #each instance sample 10 rotations for test
                rgt = tools.get_sampled_rotation_matrices_by_axisAngle(10).cpu()
                pc = torch.bmm(rgt, torch.Tensor(pc).unsqueeze(0).repeat(10,1,1).transpose(2,1))
                data_dict = {'pc':pc.transpose(1,2), 'rgt':rgt,'centroid':centroid, 'scale':scale}
                torch.save(data_dict, fix_path)
| 2,445 | 39.766667 | 137 | py |
RPMG | RPMG-main/ModelNet_PC/code/dataset.py | import torch
import os
import numpy as np
class ModelNetDataset(torch.utils.data.Dataset):
    """Loads point clouds saved as whitespace text files (one point per line)
    from `data_folder` and returns a random `sample_num`-point subset.

    FIX: the last line of this class carried stray non-Python text fused onto
    `return self.size` (extraction artifact); the class is restored verbatim
    with documentation added.
    """
    def __init__(self, data_folder, sample_num=1024):
        super(ModelNetDataset, self).__init__()
        self.paths = [os.path.join(data_folder, i) for i in os.listdir(data_folder)]
        self.sample_num = sample_num
        self.size = len(self.paths)
        print(f"dataset size: {self.size}")
    def __getitem__(self, index):
        fpath = self.paths[index % self.size]
        pc = np.loadtxt(fpath)
        # Random permutation gives a different point subset each epoch.
        pc = np.random.permutation(pc)
        return pc[:self.sample_num, :].astype(float)
    def __len__(self):
        return self.size
RPMG | RPMG-main/ModelNet_PC/code/train.py | import torch
import numpy as np
import os
from os.path import join as pjoin
import argparse
import sys
BASEPATH = os.path.dirname(__file__)
sys.path.insert(0,pjoin(BASEPATH, '../..'))
sys.path.insert(0,pjoin(BASEPATH, '..'))
import utils.tools as tools
import utils.rpmg as rpmg
import config as Config
from dataset import ModelNetDataset
from model import Model
from test import test
def train_one_iteraton(pc, param, model, optimizer, iteration, tau):
    """One optimization step: rotate a batch of point clouds by randomly
    sampled ground-truth rotations, predict the rotation from the rotated
    cloud, and step the optimizer on either a plain L2 loss or the RPMG loss.

    pc: (batch, sample_num, 3) clouds; tau: RPMG step size (-1 lets RPMG
    derive it -- see the strategy table in train()). Returns the loss tensor.
    """
    optimizer.zero_grad()
    batch=pc.shape[0]
    point_num = param.sample_num
    ###get training data######
    pc1 = torch.autograd.Variable(pc.float().cuda()) #num*3
    gt_rmat = tools.get_sampled_rotation_matrices_by_axisAngle(batch)#batch*3*3
    gt_rmats = gt_rmat.contiguous().view(batch,1,3,3).expand(batch, point_num, 3,3 ).contiguous().view(-1,3,3)
    pc2 = torch.bmm(gt_rmats, pc1.view(-1,3,1))#(batch*point_num)*3*1
    pc2 = pc2.view(batch, point_num, 3) ##batch,p_num,3
    ###network forward########
    out_rmat,out_nd = model(pc2.transpose(1,2)) #output [batch(*sample_num),3,3]
    ####compute loss##########
    if not param.use_rpmg:
        loss = ((gt_rmat - out_rmat) ** 2).mean()
    else:
        out_9d = rpmg.RPMG.apply(out_nd, tau, param.rpmg_lambda, gt_rmat, iteration)
        # note here L2 loss should be sum! Or it will affect tau.
        loss = ((gt_rmat - out_9d)**2).sum()
        # # flow loss. need to use tau=50
        # loss = ((pc2 - torch.matmul(pc1, out_9d.transpose(-1,-2)))**2).mean()
        # # geodesic loss. need to use tau=1/10 -> 1/2
        # theta = tools.compute_geodesic_distance_from_two_matrices(gt_rmat, out_9d)
        # loss = (theta **2).sum()
    loss.backward()
    optimizer.step()
    if iteration % 100 == 0:
        # periodic scalar logging to TensorBoard-style logger
        param.logger.add_scalar('train_loss', loss.item(), iteration)
        if param.use_rpmg:
            param.logger.add_scalar('k', tau, iteration)
            param.logger.add_scalar('lambda', param.rpmg_lambda, iteration)
            param.logger.add_scalar('nd_norm', out_nd.norm(dim=1).mean().item(), iteration)
    return loss
# pc_lst: [point_num*3]
def train(param):
    """Full training loop for rotation regression on ModelNet point clouds.

    Builds the data loader, optionally resumes from a checkpoint, applies a
    stepwise LR decay and the configured RPMG tau schedule per iteration, and
    periodically validates and checkpoints until param.total_iteration.
    """
    torch.cuda.set_device(param.device)
    print ("####Initiate model")
    model = Model(out_rotation_mode=param.out_rotation_mode).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=param.lr)
    if param.start_iteration != 0:
        # resume model/optimizer/iteration state from an existing checkpoint
        read_path = pjoin(param.write_weight_folder, "model_%07d.weight"%param.start_iteration)
        print("Load " + read_path)
        checkpoint = torch.load(read_path)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_iteration = checkpoint['iteration']
    else:
        print('start from beginning')
        start_iteration = param.start_iteration
    print ("start train")
    train_folder = os.path.join(param.data_folder, 'train_pc')
    val_folder = os.path.join(param.data_folder, 'test_fix')
    train_dataset = ModelNetDataset(train_folder, sample_num=param.sample_num)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=param.batch,
        shuffle=True,
        num_workers=4,
        pin_memory=True
    )
    iteration = start_iteration
    while True:
        for data in train_loader:
            model.train()
            #lr decay
            # multiply LR by 0.7 every tenth of the run, floored at 1e-5
            lr = max(param.lr * (0.7 ** (iteration // (param.total_iteration//10))), 1e-5)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
            iteration += 1
            # tau schedule for RPMG; -1 (strategy 4) means "use ground truth"
            # (see the legend printed in __main__)
            if param.rpmg_tau_strategy == 1:
                tau = 1/4
            elif param.rpmg_tau_strategy == 2:
                tau = 1/20
            elif param.rpmg_tau_strategy == 3:
                tau = 1 / 20 + (1 / 4 - 1 / 20) / 9 * min(iteration // (param.total_iteration//10), 9)
            elif param.rpmg_tau_strategy == 4:
                tau = -1
            elif param.rpmg_tau_strategy == 5:
                tau = 1 / 10 + (1 / 2 - 1 / 10) / 9 * min(iteration // (param.total_iteration//10), 9)
            elif param.rpmg_tau_strategy == 6:
                tau = 50
            train_loss = train_one_iteraton(data, param, model, optimizer, iteration, tau)
            if (iteration % param.save_weight_iteration == 0):
                # periodic validation + checkpoint
                print("############# Iteration " + str(iteration) + " #####################")
                print('train loss: ' + str(train_loss.item()))
                model.eval()
                with torch.no_grad():
                    angle_list, val_loss = test(val_folder, model)
                print('val loss: ' + str( val_loss.item()) )
                param.logger.add_scalar('val_loss', val_loss.item(), iteration)
                param.logger.add_scalar('val_median',np.median(angle_list),iteration)
                param.logger.add_scalar('val_mean', angle_list.mean(),iteration)
                param.logger.add_scalar('val_max', angle_list.max(),iteration)
                param.logger.add_scalar('val_5accuracy', (angle_list < 5).sum()/len(angle_list), iteration)
                param.logger.add_scalar('val_3accuracy', (angle_list < 3).sum() / len(angle_list), iteration)
                param.logger.add_scalar('val_1accuracy', (angle_list < 1).sum() / len(angle_list), iteration)
                param.logger.add_scalar('lr', lr, iteration)
                path = pjoin(param.write_weight_folder, "model_%07d.weight"%iteration)
                state = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'iteration': iteration}
                torch.save(state, path)
        if iteration >= param.total_iteration:
            break
if __name__ == "__main__":
    # Entry point: parse the config name, load parameters, print the
    # tau-strategy legend matching the schedule used in train(), then train.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--config", type=str, required=True, help="Path to config")
    args = arg_parser.parse_args()
    param=Config.Parameters()
    param.read_config(pjoin("../configs", args.config))
    print(f'use RPMG: {param.use_rpmg}')
    print(f'lambda = {param.rpmg_lambda}')
    # Human-readable legend for the numeric tau strategies
    if param.rpmg_tau_strategy == 1:
        print('Tau = 1/4')
    elif param.rpmg_tau_strategy == 2:
        print('Tau = 1/20')
    elif param.rpmg_tau_strategy == 3:
        print('Tau = 1/20->1/4')
    elif param.rpmg_tau_strategy == 4:
        print('Tau = gt')
    elif param.rpmg_tau_strategy == 5:
        print('Tau = 1/10->1/2')
    elif param.rpmg_tau_strategy == 6:
        print('Tau = 50')
    rpmg.logger_init(param.logger)
    os.makedirs(param.write_weight_folder, exist_ok=True)
    train(param)
| 6,589 | 37.764706 | 114 | py |
RPMG | RPMG-main/ModelNet_PC/code/visualize.py | import matplotlib.pyplot as plt
import torch
def visualize(pc, pred_r, gt_r):
    """Scatter-plot a point cloud next to its predicted and ground-truth
    rotated versions and save the figure to 'x.png'.

    pc: (batch, N, 3) points; pred_r, gt_r: (batch, 3, 3) rotation matrices.
    Tensors are detached and moved to CPU so the function also works on CUDA
    tensors inside a training loop.
    """
    with torch.no_grad():
        pc = pc.detach().cpu()
        pc_pred = torch.bmm(pc, pred_r.detach().cpu())
        pc_gt = torch.bmm(pc, gt_r.detach().cpu())
    fig = plt.figure(figsize=(8, 8))
    # Use three distinct panels; the original re-issued add_subplot(111, ...)
    # three times, which targets the same subplot position and collapses all
    # clouds into one shared axes.
    panels = [(pc, 'input'), (pc_pred, 'pred'), (pc_gt, 'gt')]
    for pos, (cloud, title) in enumerate(panels, start=1):
        ax = fig.add_subplot(1, 3, pos, projection='3d')
        ax.set_title(title)
        ax.scatter(cloud[..., 0], cloud[..., 1], cloud[..., 2])
    plt.savefig('x.png')
| 525 | 29.941176 | 65 | py |
RPMG | RPMG-main/ModelNet_PC/code_selfsup/test.py | import torch
import numpy as np
import random
import os
from os.path import join as pjoin
import sys
import argparse
from chamfer_distance import ChamferDistance
BASEPATH = os.path.dirname(__file__)
sys.path.insert(0,pjoin(BASEPATH, '../..'))
sys.path.insert(0,pjoin(BASEPATH, '..'))
import config as Config
import utils.tools as tools
from model import Model
def test(test_loader, model, pgt):
    """Evaluate `model` on randomly rotated clouds from `test_loader`.

    pgt: (1, N, 3) canonical (un-rotated) cloud used as the chamfer target.
    Returns (per-sample geodesic errors in degrees as a numpy array, mean
    symmetric chamfer loss). Re-seeds torch/numpy/random so the sampled test
    rotations are repeatable across runs.
    """
    seed = 1
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    geodesic_errors_lst = np.array([])
    l = 0
    chamfer_loss = ChamferDistance()
    for pc1 in test_loader:
        pc1 = pc1.float().cuda()
        batch, point_num, _ = pc1.shape
        gt_rmat = tools.get_sampled_rotation_matrices_by_axisAngle(batch)#batch*3*3
        gt_rmats = gt_rmat.contiguous().view(batch,1,3,3).expand(batch, point_num, 3,3 ).contiguous().view(-1,3,3)
        pc2 = torch.bmm(gt_rmats, pc1.view(-1,3,1))#(batch*point_num)*3*1
        pc2 = pc2.view(batch, point_num, 3)
        gt_rmat = gt_rmat.float().cuda()
        out_rmat, out_nd = model(pc2.transpose(1, 2))
        gt = pgt.repeat(pc2.shape[0],1,1)
        # chamfer in both directions between un-rotated prediction and gt cloud
        dist1, dist2 = chamfer_loss(torch.bmm(pc2, out_rmat), gt)
        l += dist2.mean()+dist1.mean()
        geodesic_errors = np.array(
            tools.compute_geodesic_distance_from_two_matrices(gt_rmat, out_rmat).data.tolist())  # batch
        geodesic_errors = geodesic_errors / np.pi * 180
        geodesic_errors_lst = np.append(geodesic_errors_lst, geodesic_errors)
    l /= len(test_loader)
    return geodesic_errors_lst, l
if __name__ == "__main__":
    # Standalone evaluation: load a checkpoint (latest by default) and print
    # geodesic-error statistics.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--config", type=str, required=True, help="Path to config")
    arg_parser.add_argument("-c", "--checkpoint", type=int, default=-1, help="checkpoint number")
    args = arg_parser.parse_args()
    param=Config.Parameters()
    param.read_config(pjoin("../configs", args.config))
    test_folder = pjoin(param.data_folder, 'test_fix')
    if args.checkpoint == -1:
        # default: evaluate the lexicographically last checkpoint file
        allcp = os.listdir(param.write_weight_folder)
        allcp.sort()
        weight_path = pjoin(param.write_weight_folder, allcp[-1])
    else:
        weight_path = pjoin(param.write_weight_folder, "model_%07d.weight"%args.checkpoint)
    with torch.no_grad():
        model = Model(out_rotation_mode=param.out_rotation_mode)
        print("Load " + weight_path)
        f = torch.load(weight_path)
        model.load_state_dict(f['model'])
        model.cuda()
        model.eval()
        # NOTE(review): test() is defined as test(test_loader, model, pgt) but
        # is called here with a folder path and without pgt -- this call looks
        # broken as written; confirm the intended entry point before use.
        errors, l = test(test_folder, model)
        print("Loss: ", l)
        print("median:"+str(np.round(np.median(errors),2)))
        print("avg:" + str(np.round(errors.mean(), 2)))
        print("max:" + str(np.round(errors.max(), 2)))
        print("std:" + str(np.round(np.std(errors), 2)))
        print("1 accuracy:"+str(np.round((errors<1).sum()/len(errors),3)))
        print("3 accuracy:" + str(np.round((errors < 3).sum() / len(errors), 3)))
        print("5 accuracy:"+str(np.round((errors<5).sum()/len(errors),3)))
RPMG | RPMG-main/ModelNet_PC/code_selfsup/dataset.py | import torch
import numpy as np
import trimesh
def pc_normalize(pc):
    """Center a point cloud at its bounding-box midpoint and scale it by the
    bounding-box diagonal length.

    pc: (N, 3) array. Returns (normalized_pc, centroid, scale).
    """
    hi = np.max(pc, axis=0)
    lo = np.min(pc, axis=0)
    centroid = (hi + lo) / 2
    shifted = pc - centroid
    # diagonal of the axis-aligned bounding box (translation-invariant)
    scale = np.linalg.norm(np.max(shifted, axis=0) - np.min(shifted, axis=0))
    return shifted / scale, centroid, scale
class SingleInstanceDataset(torch.utils.data.Dataset):
    """Dataset built from one chair mesh: every item is a fresh random
    permutation of the normalized vertex cloud, truncated to sample_num."""

    def __init__(self, sample_num=1024, size=800):
        super(SingleInstanceDataset, self).__init__()
        mesh = trimesh.load('chair_0003.obj')
        self.pgt, _, _ = pc_normalize(np.array(mesh.vertices))
        self.sample_num = sample_num
        self.size = size
        print(f"use single instance! dataset size: {self.size}")

    def __getitem__(self, index):
        shuffled = np.random.permutation(self.pgt)
        return shuffled[:self.sample_num, :].astype(float)

    def __len__(self):
        return self.size

    def get_gt(self):
        # canonical (un-permuted) cloud, truncated the same way as items
        return self.pgt[:self.sample_num]
RPMG | RPMG-main/ModelNet_PC/code_selfsup/train.py | import torch
import numpy as np
import os
from os.path import join as pjoin
import argparse
import sys
from chamfer_distance import ChamferDistance
BASEPATH = os.path.dirname(__file__)
sys.path.insert(0,pjoin(BASEPATH, '../..'))
sys.path.insert(0,pjoin(BASEPATH, '..'))
import utils.tools as tools
import utils.rpmg as rpmg
import config as Config
from dataset import SingleInstanceDataset
from model import Model
from test import test
def train_one_iteraton(pc, pgt, param, model, optimizer, iteration, tau):
    """One self-supervised step: rotate clouds by random ground-truth
    rotations, predict the rotation, and minimize the symmetric chamfer
    distance between the un-rotated prediction and the canonical cloud pgt
    (1, N, 3). Returns the scalar loss tensor.
    """
    optimizer.zero_grad()
    batch=pc.shape[0]
    point_num = param.sample_num
    ###get training data######
    pc1 = torch.autograd.Variable(pc.float().cuda()) #num*3
    gt_rmat = tools.get_sampled_rotation_matrices_by_axisAngle(batch)#batch*3*3
    gt_rmats = gt_rmat.contiguous().view(batch,1,3,3).expand(batch, point_num, 3,3 ).contiguous().view(-1,3,3)
    pc2 = torch.bmm(gt_rmats, pc1.view(-1,3,1))#(batch*point_num)*3*1
    pc2 = pc2.view(batch, point_num, 3) ##batch,p_num,3
    ###network forward########
    out_rmat,out_nd = model(pc2.transpose(1,2)) #output [batch(*sample_num),3,3]
    ####compute loss##########
    if not param.use_rpmg:
        chamfer_loss = ChamferDistance()
        dist1, dist2 = chamfer_loss(torch.bmm(pc2, out_rmat), pgt.repeat(batch,1,1))
        loss = dist1.mean()+dist2.mean()
    else:
        if param.rpmg_tau_strategy == 1:
            # strategy 1: use the raw 9D output directly (svd9d mode),
            # skipping the RPMG manifold projection
            out_9d = out_nd.reshape(-1,3,3)
        else:
            out_9d = rpmg.RPMG.apply(out_nd, tau, param.rpmg_lambda, gt_rmat, iteration)
        chamfer_loss = ChamferDistance()
        dist1, dist2 = chamfer_loss(torch.bmm(pc2, out_9d), pgt.repeat(batch,1,1))
        loss = dist1.mean()+dist2.mean()
    loss.backward()
    optimizer.step()
    if iteration % 100 == 0:
        # periodic scalar logging
        param.logger.add_scalar('train_loss', loss.item(), iteration)
        if param.use_rpmg:
            param.logger.add_scalar('k', tau, iteration)
            param.logger.add_scalar('lambda', param.rpmg_lambda, iteration)
            param.logger.add_scalar('nd_norm', out_nd.norm(dim=1).mean().item(), iteration)
    return loss
# pc_lst: [point_num*3]
def train(param):
    """Self-supervised training loop on a single mesh instance.

    Builds train/val datasets from SingleInstanceDataset, resumes from a
    checkpoint if requested, applies stepwise LR decay, picks tau by the
    configured strategy, and periodically validates and checkpoints until
    param.total_iteration is reached.
    """
    torch.cuda.set_device(param.device)
    print ("####Initiate model")
    model = Model(out_rotation_mode=param.out_rotation_mode).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=param.lr)
    if param.start_iteration != 0:
        # resume model/optimizer/iteration state from checkpoint
        read_path = pjoin(param.write_weight_folder, "model_%07d.weight"%param.start_iteration)
        print("Load " + read_path)
        checkpoint = torch.load(read_path)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_iteration = checkpoint['iteration']
    else:
        print('start from beginning')
        start_iteration = param.start_iteration
    print ("start train")
    train_dataset = SingleInstanceDataset(size=800)
    val_dataset = SingleInstanceDataset(size=200)
    # canonical (un-rotated) cloud used as the chamfer target
    pgt = torch.tensor(train_dataset.get_gt()).to(param.device)[None,:].float()
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=param.batch,
        shuffle=True,
        num_workers=4,
        pin_memory=True
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=param.batch,
        shuffle=False,
        num_workers=4,
        pin_memory=True
    )
    iteration = start_iteration
    while True:
        for data in train_loader:
            model.train()
            #lr decay
            # multiply LR by 0.7 every tenth of the run, floored at 1e-5
            lr = max(param.lr * (0.7 ** (iteration // (param.total_iteration//10))), 1e-5)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
            iteration += 1
            if param.rpmg_tau_strategy == 1:
                tau = -1
            elif param.rpmg_tau_strategy == 2:
                tau = 2
            train_loss = train_one_iteraton(data, pgt, param, model, optimizer, iteration, tau)
            if (iteration % param.save_weight_iteration == 0):
                # periodic validation + checkpoint
                print("############# Iteration " + str(iteration) + " #####################")
                print('train loss: ' + str(train_loss.item()))
                model.eval()
                with torch.no_grad():
                    angle_list, val_loss = test(val_loader, model, pgt)
                print('val loss: ' + str( val_loss.item()) )
                param.logger.add_scalar('val_loss', val_loss.item(), iteration)
                param.logger.add_scalar('val_median',np.median(angle_list),iteration)
                param.logger.add_scalar('val_mean', angle_list.mean(),iteration)
                param.logger.add_scalar('val_max', angle_list.max(),iteration)
                param.logger.add_scalar('val_5accuracy', (angle_list < 5).sum()/len(angle_list), iteration)
                param.logger.add_scalar('val_3accuracy', (angle_list < 3).sum() / len(angle_list), iteration)
                param.logger.add_scalar('val_1accuracy', (angle_list < 1).sum() / len(angle_list), iteration)
                param.logger.add_scalar('lr', lr, iteration)
                path = pjoin(param.write_weight_folder, "model_%07d.weight"%iteration)
                state = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'iteration': iteration}
                torch.save(state, path)
        if iteration >= param.total_iteration:
            break
if __name__ == "__main__":
    # Entry point: load the named config, report the tau strategy, initialize
    # RPMG logging, and launch self-supervised training.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--config", type=str, required=True, help="Path to config")
    args = arg_parser.parse_args()
    param=Config.Parameters()
    param.read_config(pjoin("../configs", args.config))
    print(f'use RPMG: {param.use_rpmg}')
    print(f'lambda = {param.rpmg_lambda}')
    if param.rpmg_tau_strategy == 1:
        # strategy 1 feeds the raw 9D output straight through, which only
        # makes sense with the svd9d output mode
        assert param.out_rotation_mode == 'svd9d'
        print('Tau = Tgt')
    elif param.rpmg_tau_strategy == 2:
        print('Tau = 2')
    rpmg.logger_init(param.logger)
    os.makedirs(param.write_weight_folder, exist_ok=True)
    train(param)
| 6,095 | 36.398773 | 114 | py |
RPMG | RPMG-main/ModelNet_PC/code_selfsup/visualize.py | import matplotlib.pyplot as plt
import torch
def visualize(pc, pred_r, gt_r):
    """Debug helper: scatter the input cloud and its pred/gt rotated versions
    and save the figure to 'x.png'.

    pc: (batch, N, 3) points; pred_r, gt_r: (batch, 3, 3) rotations.
    NOTE(review): all three add_subplot(111, ...) calls target the same
    subplot position, so the clouds are drawn into one shared axes (exact
    behavior depends on the matplotlib version) -- confirm the overlay is
    intended.
    """
    pc_pred = torch.bmm(pc, pred_r)
    pc_gt = torch.bmm(pc, gt_r)
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(pc[...,0], pc[...,1],pc[...,2])
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(pc_pred[..., 0], pc_pred[..., 1], pc_pred[..., 2])
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(pc_gt[..., 0], pc_gt[..., 1], pc_gt[..., 2])
    plt.savefig('x.png')
| 525 | 29.941176 | 65 | py |
RPMG | RPMG-main/ModelNet_PC/code_selfsup/chamfer_distance/chamfer_distance.py |
import torch
from torch.utils.cpp_extension import load
# JIT-compile the C++/CUDA chamfer-distance extension at import time.
cd = load(name="cd",
          sources=["chamfer_distance/chamfer_distance.cpp",
                   "chamfer_distance/chamfer_distance.cu"])
class ChamferDistanceFunction(torch.autograd.Function):
    """Autograd bridge to the JIT-compiled `cd` extension.

    forward returns (dist1, dist2): per-point nearest-neighbor squared
    distances xyz1 -> xyz2 and xyz2 -> xyz1. The nearest-neighbor indices are
    saved so backward can route gradients through the matched pairs.
    """
    @staticmethod
    def forward(ctx, xyz1, xyz2):
        # xyz1: (batch, n, 3); xyz2: (batch, m, 3)
        batchsize, n, _ = xyz1.size()
        _, m, _ = xyz2.size()
        xyz1 = xyz1.contiguous()
        xyz2 = xyz2.contiguous()
        dist1 = torch.zeros(batchsize, n)
        dist2 = torch.zeros(batchsize, m)
        idx1 = torch.zeros(batchsize, n, dtype=torch.int)
        idx2 = torch.zeros(batchsize, m, dtype=torch.int)
        if not xyz1.is_cuda:
            cd.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
        else:
            # the CUDA kernel needs its output buffers on the GPU
            dist1 = dist1.cuda()
            dist2 = dist2.cuda()
            idx1 = idx1.cuda()
            idx2 = idx2.cuda()
            cd.forward_cuda(xyz1, xyz2, dist1, dist2, idx1, idx2)
        ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
        return dist1, dist2
    @staticmethod
    def backward(ctx, graddist1, graddist2):
        # Route incoming gradients back to the points via the saved indices.
        xyz1, xyz2, idx1, idx2 = ctx.saved_tensors
        graddist1 = graddist1.contiguous()
        graddist2 = graddist2.contiguous()
        gradxyz1 = torch.zeros(xyz1.size())
        gradxyz2 = torch.zeros(xyz2.size())
        if not graddist1.is_cuda:
            cd.backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
        else:
            gradxyz1 = gradxyz1.cuda()
            gradxyz2 = gradxyz2.cuda()
            cd.backward_cuda(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
        return gradxyz1, gradxyz2
return gradxyz1, gradxyz2
class ChamferDistance(torch.nn.Module):
    """nn.Module wrapper delegating to the custom ChamferDistanceFunction."""

    def forward(self, xyz1, xyz2):
        apply_fn = ChamferDistanceFunction.apply
        return apply_fn(xyz1, xyz2)
| 1,798 | 30.017241 | 94 | py |
RPMG | RPMG-main/utils/tools.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
#rotation5d batch*5
def normalize_5d_rotation(r5d):
    """Normalize a 5D rotation representation: unit-normalize the (sin, cos)
    pair and the 3D axis independently.

    r5d: (batch, 5) with columns [sin, cos, x, y, z] -> (batch, 5).
    Device/dtype agnostic: the original hard-coded a CUDA DoubleTensor
    epsilon, which broke CPU and float32 inputs; clamp_min keeps the same
    1e-8 floor on any device.
    """
    sin_cos = r5d[:, 0:2]                                          # batch*2
    sin_cos_mag = sin_cos.pow(2).sum(1).sqrt().clamp_min(1e-8).unsqueeze(1)
    sin_cos = sin_cos / sin_cos_mag
    axis = r5d[:, 2:5]                                             # batch*3
    axis_mag = axis.pow(2).sum(1).sqrt().clamp_min(1e-8).unsqueeze(1)
    axis = axis / axis_mag
    return torch.cat((sin_cos, axis), 1)                           # batch*5
#rotation5d batch*5
#out matrix batch*3*3
def rotation5d_to_matrix(r5d):
    """Convert [sin, cos, x, y, z] (angle plus unit axis) into a rotation
    matrix via the Rodrigues formula. r5d: (batch, 5) -> (batch, 3, 3)."""
    batch = r5d.shape[0]
    sin, cos, x, y, z = (r5d[:, i].view(batch, 1) for i in range(5))
    one_c = 1 - cos
    top = torch.cat((cos + x * x * one_c, x * y * one_c - z * sin, x * z * one_c + y * sin), 1)
    mid = torch.cat((y * x * one_c + z * sin, cos + y * y * one_c, y * z * one_c - x * sin), 1)
    bot = torch.cat((z * x * one_c - y * sin, z * y * one_c + x * sin, cos + z * z * one_c), 1)
    stacked = torch.cat((top.view(-1, 1, 3), mid.view(-1, 1, 3), bot.view(-1, 1, 3)), 1)
    return stacked.view(batch, 3, 3)
#T_poses num*3
#r_matrix batch*3*3
def compute_pose_from_rotation_matrix(T_pose, r_matrix):
    """Apply each rotation in the batch to every joint of the T-pose.

    T_pose: (joint_num, 3); r_matrix: (batch, 3, 3) -> (batch, joint_num, 3).
    """
    batch = r_matrix.shape[0]
    joint_num = T_pose.shape[0]
    rot = r_matrix.view(batch, 1, 3, 3).expand(batch, joint_num, 3, 3)
    rot = rot.reshape(batch * joint_num, 3, 3)
    pts = T_pose.view(1, joint_num, 3, 1).expand(batch, joint_num, 3, 1)
    pts = pts.reshape(batch * joint_num, 3, 1)
    return torch.matmul(rot, pts).view(batch, joint_num, 3)
# batch*n
def normalize_vector(v):
    """L2-normalize each row of v, flooring the norm at 1e-8 (on v's device)
    to avoid division by zero. v: (batch, n) -> (batch, n)."""
    batch = v.shape[0]
    mag = torch.sqrt(v.pow(2).sum(1))
    floor = torch.autograd.Variable(torch.FloatTensor([1e-8]).to(v.device))
    mag = torch.max(mag, floor)
    mag = mag.view(batch, 1).expand(batch, v.shape[1])
    return v / mag
# u, v batch*n
def cross_product(u, v):
    """Row-wise 3D cross product. u, v: (batch, 3) -> (batch, 3)."""
    ci = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1]
    cj = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2]
    ck = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0]
    return torch.stack((ci, cj, ck), dim=1)
#poses batch*6
#poses
def compute_rotation_matrix_from_ortho6d(poses):
    """Gram-Schmidt the 6D rotation representation into a rotation matrix:
    normalize the first 3-vector, derive z and y by cross products.
    poses: (batch, 6) -> (batch, 3, 3)."""
    x = normalize_vector(poses[:, 0:3])
    z = normalize_vector(cross_product(x, poses[:, 3:6]))
    y = cross_product(z, x)
    columns = (x.view(-1, 3, 1), y.view(-1, 3, 1), z.view(-1, 3, 1))
    return torch.cat(columns, 2)
#u,a batch*3
#out batch*3
def proj_u_a(u,a):
    """Project each row of a onto the corresponding row of u:
    proj_u(a) = (<u, a> / <u, u>) * u, with a 1e-8 floor on <u, u>.

    u, a: (batch, 3) -> (batch, 3). Device/dtype agnostic: the original
    allocated its epsilon tensor with .cuda(), breaking CPU inputs;
    clamp_min applies the same floor on any device.
    """
    batch = u.shape[0]
    top = (u * a).sum(1)
    bottom = u.pow(2).sum(1).clamp_min(1e-8)
    factor = (top / bottom).view(batch, 1).expand(batch, 3)
    return factor * u
#matrices batch*3*3
def compute_rotation_matrix_from_matrix(matrices):
    """Orthonormalize the three columns of each 3x3 matrix with classic
    Gram-Schmidt. matrices: (batch, 3, 3) -> (batch, 3, 3)."""
    b = matrices.shape[0]
    a1 = matrices[:, :, 0]
    a2 = matrices[:, :, 1]
    a3 = matrices[:, :, 2]
    u1 = a1
    u2 = a2 - proj_u_a(u1, a2)
    u3 = a3 - proj_u_a(u1, a3) - proj_u_a(u2, a3)
    basis = [normalize_vector(u).view(b, 3, 1) for u in (u1, u2, u3)]
    return torch.cat(basis, 2)
#in batch*5
#out batch*6
def stereographic_unproject_old(a):
    """Inverse stereographic projection R^5 -> S^5 (subset of R^6); the pole
    coordinate is appended last. a: (batch, 5) -> (batch, 6) unit vectors."""
    s2 = a.pow(2).sum(1)                              # batch
    denom = (s2 + 1).view(-1, 1).repeat(1, 5)
    unproj = 2 * a / denom                            # batch*5
    pole = ((s2 - 1) / (s2 + 1)).view(-1, 1)          # batch*1
    return torch.cat((unproj, pole), 1)               # batch*6
#in a batch*5, axis int
def stereographic_unproject(a, axis=None):
    """
    Inverse of stereographic projection: increases dimension by one.

    The new (pole) coordinate is inserted at index `axis`; by default it is
    appended as the last coordinate. a: (batch, m) -> (batch, m+1) on the
    unit sphere. Device/dtype agnostic: the original allocated the output
    with .cuda(), breaking CPU inputs.
    """
    batch = a.shape[0]
    if axis is None:
        axis = a.shape[1]
    s2 = a.pow(2).sum(1)                                          # batch
    ans = torch.zeros(batch, a.shape[1] + 1, device=a.device, dtype=a.dtype)
    unproj = 2 * a / (s2 + 1).view(batch, 1).repeat(1, a.shape[1])
    if axis > 0:
        ans[:, :axis] = unproj[:, :axis]
    ans[:, axis] = (s2 - 1) / (s2 + 1)                            # pole coordinate
    # no-op when axis is the (default) last position
    ans[:, axis + 1:] = unproj[:, axis:]
    return ans
#a batch*5
#out batch*3*3
def compute_rotation_matrix_from_ortho5d(a):
    """Map the 5D rotation representation to a rotation matrix: scale and
    stereographically unproject a[:, 2:5] to 4D, renormalize its tail, then
    reuse the 6D (Gram-Schmidt) construction.

    a: (batch, 5) -> (batch, 3, 3). Allocates the scale constants on a's
    device (the original hard-coded .cuda(), breaking CPU inputs).
    """
    batch = a.shape[0]
    proj_scale = torch.tensor([np.sqrt(2) + 1, np.sqrt(2) + 1, np.sqrt(2)],
                              device=a.device, dtype=a.dtype)
    proj_scale = proj_scale.view(1, 3).repeat(batch, 1)            # batch*3
    u = stereographic_unproject(a[:, 2:5] * proj_scale, axis=0)    # batch*4
    norm = torch.sqrt(u[:, 1:].pow(2).sum(1))                      # batch
    u = u / norm.view(batch, 1).repeat(1, u.shape[1])              # batch*4
    b = torch.cat((a[:, 0:2], u), 1)                               # batch*6
    return compute_rotation_matrix_from_ortho6d(b)
#quaternion batch*4
def compute_rotation_matrix_from_quaternion(quaternion, n_flag=True):
    """Quaternion (w, x, y, z) -> rotation matrix. When n_flag is True the
    quaternion is unit-normalized first. (batch, 4) -> (batch, 3, 3)."""
    batch = quaternion.shape[0]
    quat = normalize_vector(quaternion) if n_flag else quaternion
    qw, qx, qy, qz = (quat[..., i].view(batch, 1) for i in range(4))
    # products reused across the matrix entries
    xx, yy, zz = qx * qx, qy * qy, qz * qz
    xy, xz, yz = qx * qy, qx * qz, qy * qz
    xw, yw, zw = qx * qw, qy * qw, qz * qw
    top = torch.cat((1 - 2 * yy - 2 * zz, 2 * xy - 2 * zw, 2 * xz + 2 * yw), 1)
    mid = torch.cat((2 * xy + 2 * zw, 1 - 2 * xx - 2 * zz, 2 * yz - 2 * xw), 1)
    bot = torch.cat((2 * xz - 2 * yw, 2 * yz + 2 * xw, 1 - 2 * xx - 2 * yy), 1)
    return torch.cat((top.view(batch, 1, 3), mid.view(batch, 1, 3), bot.view(batch, 1, 3)), 1)
#axisAngle batch*4 angle, x,y,z
def compute_rotation_matrix_from_axisAngle(axisAngle):
    """Axis-angle rows [theta, x, y, z] -> rotation matrices, going through
    an intermediate unit quaternion (half-angle formulation).
    axisAngle: (batch, 4) -> (batch, 3, 3)."""
    batch = axisAngle.shape[0]
    half = axisAngle[:, 0] / 2
    axis = normalize_vector(axisAngle[:, 1:4])          # batch*3
    sin_half = torch.sin(half)
    qw = torch.cos(half)
    qx = axis[:, 0] * sin_half
    qy = axis[:, 1] * sin_half
    qz = axis[:, 2] * sin_half
    col = lambda t: t.view(batch, 1)
    xx, yy, zz = col(qx * qx), col(qy * qy), col(qz * qz)
    xy, xz, yz = col(qx * qy), col(qx * qz), col(qy * qz)
    xw, yw, zw = col(qx * qw), col(qy * qw), col(qz * qw)
    top = torch.cat((1 - 2 * yy - 2 * zz, 2 * xy - 2 * zw, 2 * xz + 2 * yw), 1)
    mid = torch.cat((2 * xy + 2 * zw, 1 - 2 * xx - 2 * zz, 2 * yz - 2 * xw), 1)
    bot = torch.cat((2 * xz - 2 * yw, 2 * yz + 2 * xw, 1 - 2 * xx - 2 * yy), 1)
    return torch.cat((top.view(batch, 1, 3), mid.view(batch, 1, 3), bot.view(batch, 1, 3)), 1)
#axisAngle batch*3 a,b,c
def compute_rotation_matrix_from_hopf(hopf):
    """Map an unconstrained 3-vector through tanh onto Hopf coordinates
    (theta in [0, pi], phi and tao in [0, 2pi)), build the corresponding
    unit quaternion, and return its rotation matrix.
    hopf: (batch, 3) -> (batch, 3, 3)."""
    batch = hopf.shape[0]
    theta = (torch.tanh(hopf[:, 0]) + 1.0) * np.pi / 2.0
    phi = (torch.tanh(hopf[:, 1]) + 1.0) * np.pi
    tao = (torch.tanh(hopf[:, 2]) + 1.0) * np.pi
    qw = torch.cos(theta / 2) * torch.cos(tao / 2)
    qx = torch.cos(theta / 2) * torch.sin(tao / 2)
    qy = torch.sin(theta / 2) * torch.cos(phi + tao / 2)
    qz = torch.sin(theta / 2) * torch.sin(phi + tao / 2)
    col = lambda t: t.view(batch, 1)
    xx, yy, zz = col(qx * qx), col(qy * qy), col(qz * qz)
    xy, xz, yz = col(qx * qy), col(qx * qz), col(qy * qz)
    xw, yw, zw = col(qx * qw), col(qy * qw), col(qz * qw)
    top = torch.cat((1 - 2 * yy - 2 * zz, 2 * xy - 2 * zw, 2 * xz + 2 * yw), 1)
    mid = torch.cat((2 * xy + 2 * zw, 1 - 2 * xx - 2 * zz, 2 * yz - 2 * xw), 1)
    bot = torch.cat((2 * xz - 2 * yw, 2 * yz + 2 * xw, 1 - 2 * xx - 2 * yy), 1)
    return torch.cat((top.view(batch, 1, 3), mid.view(batch, 1, 3), bot.view(batch, 1, 3)), 1)
#euler batch*4
#output cuda batch*3*3 matrices in the rotation order of XZ'Y'' (intrinsic) or YZX (extrinsic)
def compute_rotation_matrix_from_euler(euler):
    """Euler angles -> rotation matrices, composed as XZ'Y'' intrinsic
    (equivalently YZX extrinsic). Column 0 is the X angle, column 1 the Y
    angle, column 2 the Z angle. euler: (batch, 3) -> (batch, 3, 3)."""
    batch = euler.shape[0]
    col = lambda t: t.view(batch, 1)
    c1, s1 = col(torch.cos(euler[:, 0])), col(torch.sin(euler[:, 0]))
    c2, s2 = col(torch.cos(euler[:, 2])), col(torch.sin(euler[:, 2]))
    c3, s3 = col(torch.cos(euler[:, 1])), col(torch.sin(euler[:, 1]))
    top = torch.cat((c2 * c3, -s2, c2 * s3), 1).view(-1, 1, 3)
    mid = torch.cat((c1 * s2 * c3 + s1 * s3, c1 * c2, c1 * s2 * s3 - s1 * c3), 1).view(-1, 1, 3)
    bot = torch.cat((s1 * s2 * c3 - c1 * s3, s1 * c2, s1 * s2 * s3 + c1 * c3), 1).view(-1, 1, 3)
    return torch.cat((top, mid, bot), 1)
#m batch*3*3
#out batch*4*4
def get_44_rotation_matrix_from_33_rotation_matrix(m):
    """Embed (batch, 3, 3) rotations into (batch, 4, 4) homogeneous matrices
    (zero translation, bottom-right element 1).

    Allocates the padding on m's device and dtype -- the original hard-coded
    .cuda(), which broke CPU inputs.
    """
    batch = m.shape[0]
    row4 = torch.zeros(batch, 1, 3, device=m.device, dtype=m.dtype)
    m43 = torch.cat((m, row4), 1)                                  # batch*4*3
    col4 = torch.zeros(batch, 4, 1, device=m.device, dtype=m.dtype)
    col4[:, 3, 0] = 1
    return torch.cat((m43, col4), 2)                               # batch*4*4
#matrices batch*3*3
#both matrix are orthogonal rotation matrices
#out theta between 0 to 180 degree batch
def compute_geodesic_distance_from_two_matrices(m1, m2):
    """Geodesic angle (radians, in [0, pi]) between two batches of rotation
    matrices: theta = arccos((trace(m1 m2^T) - 1) / 2).

    m1, m2: (batch, 3, 3) -> (batch,). Uses torch.clamp as the numerical
    guard instead of the original CUDA-only min/max tensors, so CPU inputs
    work too.
    """
    m = torch.bmm(m1, m2.transpose(1, 2))                          # batch*3*3
    cos = (m[:, 0, 0] + m[:, 1, 1] + m[:, 2, 2] - 1) / 2
    # clamp guards acos against |cos| marginally exceeding 1 from round-off
    return torch.acos(torch.clamp(cos, -1.0, 1.0))
#matrices batch*3*3
#both matrix are orthogonal rotation matrices
#out theta between 0 to pi batch
def compute_angle_from_r_matrices(m):
    """Rotation angle (radians, in [0, pi]) of each rotation matrix:
    theta = arccos((trace(m) - 1) / 2).

    m: (batch, 3, 3) -> (batch,). torch.clamp replaces the original
    CUDA-only min/max guard so CPU inputs work.
    """
    cos = (m[:, 0, 0] + m[:, 1, 1] + m[:, 2, 2] - 1) / 2
    # clamp guards acos against |cos| marginally exceeding 1 from round-off
    return torch.acos(torch.clamp(cos, -1.0, 1.0))
def get_sampled_rotation_matrices_by_quat(batch):
    """Sample `batch` random rotation matrices from Gaussian 4-vectors
    (normalized to unit quaternions inside the conversion)."""
    raw = torch.randn(batch, 4).cuda()
    quat = torch.autograd.Variable(raw)
    return compute_rotation_matrix_from_quaternion(quat)
def get_sampled_rotation_matrices_by_hpof(batch):
    """Sample `batch` rotation matrices from uniformly drawn Hopf coordinates
    (theta in [0, pi], phi and tao in [0, 2pi)) via the quaternion map."""
    theta = torch.autograd.Variable(torch.FloatTensor(np.random.uniform(0, 1, batch) * np.pi).cuda())
    phi = torch.autograd.Variable(torch.FloatTensor(np.random.uniform(0, 2, batch) * np.pi).cuda())
    tao = torch.autograd.Variable(torch.FloatTensor(np.random.uniform(0, 2, batch) * np.pi).cuda())
    qw = torch.cos(theta / 2) * torch.cos(tao / 2)
    qx = torch.cos(theta / 2) * torch.sin(tao / 2)
    qy = torch.sin(theta / 2) * torch.cos(phi + tao / 2)
    qz = torch.sin(theta / 2) * torch.sin(phi + tao / 2)
    col = lambda t: t.view(batch, 1)
    xx, yy, zz = col(qx * qx), col(qy * qy), col(qz * qz)
    xy, xz, yz = col(qx * qy), col(qx * qz), col(qy * qz)
    xw, yw, zw = col(qx * qw), col(qy * qw), col(qz * qw)
    top = torch.cat((1 - 2 * yy - 2 * zz, 2 * xy - 2 * zw, 2 * xz + 2 * yw), 1)
    mid = torch.cat((2 * xy + 2 * zw, 1 - 2 * xx - 2 * zz, 2 * yz - 2 * xw), 1)
    bot = torch.cat((2 * xz - 2 * yw, 2 * yz + 2 * xw, 1 - 2 * xx - 2 * yy), 1)
    return torch.cat((top.view(batch, 1, 3), mid.view(batch, 1, 3), bot.view(batch, 1, 3)), 1)
#axisAngle batch*3*3s angle, x,y,z
def get_sampled_rotation_matrices_by_axisAngle( batch):
    """Sample `batch` rotation matrices from a random unit axis and a random
    theta drawn uniformly from [-pi, pi].

    NOTE(review): unlike compute_rotation_matrix_from_axisAngle (which uses
    theta/2 for the quaternion), the quaternion here is built from
    cos(theta)/sin(theta) directly, so the realized rotation angle is
    2*theta -- confirm this sampling is intentional before changing it.
    """
    theta = torch.autograd.Variable(torch.FloatTensor(np.random.uniform(-1,1, batch)*np.pi).cuda()) #[0, pi] #[-180, 180]
    sin = torch.sin(theta)
    axis = torch.autograd.Variable(torch.randn(batch, 3).cuda())
    axis = normalize_vector(axis) #batch*3
    qw = torch.cos(theta)
    qx = axis[:,0]*sin
    qy = axis[:,1]*sin
    qz = axis[:,2]*sin
    # Unit quaternion rotation matrices computatation
    xx = (qx*qx).view(batch,1)
    yy = (qy*qy).view(batch,1)
    zz = (qz*qz).view(batch,1)
    xy = (qx*qy).view(batch,1)
    xz = (qx*qz).view(batch,1)
    yz = (qy*qz).view(batch,1)
    xw = (qx*qw).view(batch,1)
    yw = (qy*qw).view(batch,1)
    zw = (qz*qw).view(batch,1)
    row0 = torch.cat((1-2*yy-2*zz, 2*xy - 2*zw, 2*xz + 2*yw), 1) #batch*3
    row1 = torch.cat((2*xy+ 2*zw, 1-2*xx-2*zz, 2*yz-2*xw ), 1) #batch*3
    row2 = torch.cat((2*xz-2*yw, 2*yz+2*xw, 1-2*xx-2*yy), 1) #batch*3
    matrix = torch.cat((row0.view(batch, 1, 3), row1.view(batch,1,3), row2.view(batch,1,3)),1) #batch*3*3
    return matrix
#input batch*4*4 or batch*3*3
#output torch batch*3 x, y, z in radiant
#the rotation is in the sequence of x,y,z
def compute_euler_angles_from_rotation_matrices(rotation_matrices):
    """Recover (x, y, z) Euler angles in radians (x-y-z sequence) from
    rotation matrices, with a gimbal-lock fallback when
    sqrt(R00^2 + R10^2) < 1e-6.

    rotation_matrices: (batch, 3, 3) or (batch, 4, 4) -> (batch, 3).
    Allocates the output on the input's device (the original hard-coded
    .cuda(), breaking CPU inputs).
    """
    batch = rotation_matrices.shape[0]
    R = rotation_matrices
    sy = torch.sqrt(R[:, 0, 0] * R[:, 0, 0] + R[:, 1, 0] * R[:, 1, 0])
    singular = (sy < 1e-6).float()
    # regular branch
    x = torch.atan2(R[:, 2, 1], R[:, 2, 2])
    y = torch.atan2(-R[:, 2, 0], sy)
    z = torch.atan2(R[:, 1, 0], R[:, 0, 0])
    # gimbal-lock branch (z fixed at 0)
    xs = torch.atan2(-R[:, 1, 2], R[:, 1, 1])
    ys = torch.atan2(-R[:, 2, 0], sy)
    zs = R[:, 1, 0] * 0
    out_euler = torch.zeros(batch, 3, device=R.device, dtype=R.dtype)
    out_euler[:, 0] = x * (1 - singular) + xs * singular
    out_euler[:, 1] = y * (1 - singular) + ys * singular
    out_euler[:, 2] = z * (1 - singular) + zs * singular
    return out_euler
#input batch*4
#output batch*4
# NOTE(review): this is a module-level function yet it takes `self` as its
# first parameter (likely copied out of a class) -- callers must pass a dummy
# first argument. Confirm intended usage before relying on it.
def compute_quaternions_from_axisAngles(self, axisAngles):
    """Convert axis-angle rows [theta, x, y, z] to quaternions [w, x, y, z].

    Assumes the (x, y, z) axis is already unit length -- TODO confirm.
    """
    w = torch.cos(axisAngles[:,0]/2)
    sin = torch.sin(axisAngles[:,0]/2)
    x = sin*axisAngles[:,1]
    y = sin*axisAngles[:,2]
    z = sin*axisAngles[:,3]
    quat = torch.cat((w.view(-1,1), x.view(-1,1), y.view(-1,1), z.view(-1,1)), 1)
    return quat
#quaternions batch*4,
#matrices batch*4*4 or batch*3*3
def compute_quaternions_from_rotation_matrices(matrices):
    """Extract unit quaternions (w, x, y, z) from rotation matrices using the
    trace-based formula, flooring w at 1e-8 to avoid division by zero.

    matrices: (batch, 3, 3) or (batch, 4, 4) -> (batch, 4). Device agnostic:
    the original allocated its guard tensors with .cuda(), breaking CPU
    inputs; clamp_min applies the same floors on any device.
    """
    batch = matrices.shape[0]
    trace_term = 1.0 + matrices[:, 0, 0] + matrices[:, 1, 1] + matrices[:, 2, 2]
    w = torch.sqrt(torch.clamp_min(trace_term, 0.0)) / 2.0
    w = torch.clamp_min(w, 1e-8)                                   # batch
    w4 = 4.0 * w
    x = (matrices[:, 2, 1] - matrices[:, 1, 2]) / w4
    y = (matrices[:, 0, 2] - matrices[:, 2, 0]) / w4
    z = (matrices[:, 1, 0] - matrices[:, 0, 1]) / w4
    quats = torch.cat((w.view(batch, 1), x.view(batch, 1), y.view(batch, 1), z.view(batch, 1)), 1)
    return normalize_vector(quats)
def compute_v_wave(u, r_new):
    """Rotate toward the first column of r_new by half the angle between u
    and that column, then return the second row of the transported frame.

    u: (batch, 3); r_new: (batch, 3, 3) -> (batch, 3). NOTE(review): the
    geometry is intricate and relies on sibling helpers; kept byte-identical,
    comments only.
    """
    u_star = r_new[:, :, 0]  # first column of each rotation
    u_out = normalize_vector(u)
    u_2 = normalize_vector(cross_product(u_out, u_star))  # rotation axis
    # angle between u and u_star, clamped for acos stability
    real_angle = torch.acos(torch.clamp((u_out * u_star).sum(dim=1, keepdim=True), -1, 1))
    # rotate by half the angle about u_2
    ro = compute_rotation_matrix_from_axisAngle(torch.cat([real_angle / 2, u_2], 1))
    v_new = torch.bmm(r_new.transpose(1, 2), ro)[:, 1, :]
    return v_new
def symmetric_orthogonalization(x):
    """Map 9D vectors onto SO(3) via symmetric orthogonalization (SVD).

    x: (batch, 9) -> (batch, 3, 3); each output has determinant +1.
    The SVD runs on CPU and the result is moved back to x's device.
    """
    m = x.view(-1, 3, 3)
    device = m.device
    u, _, v = torch.svd(m.cpu())
    u, v = u.to(device), v.to(device)
    vt = v.transpose(1, 2)
    det = torch.det(torch.bmm(u, vt)).view(-1, 1, 1)
    # Flip the last row of V^T where needed so det(R) == +1.
    vt = torch.cat((vt[:, :2, :], vt[:, -1:, :] * det), 1)
    return torch.bmm(u, vt)
def compute_SVD_nearest_Mnlsew(R, Rg):
    """Find M = S * Rg (with S symmetric) minimizing L2(R - S*Rg): S is the
    symmetric part of R * Rg^T.

    R, Rg: (batch, 3, 3) -> flattened M of shape (batch, 9).
    """
    rel = torch.bmm(R, Rg.transpose(2, 1))
    S = (rel + rel.transpose(2, 1)) / 2
    return torch.bmm(S, Rg).reshape(-1, 9)
def convert_Avec_to_A(A_vec):
    """Expand upper-triangular vectors (BxM) into symmetric matrices (BxNxN).

    M = N*(N+1)/2; only N=4 (M=10) and N=10 (M=55) are supported.
    A 1-D input is treated as a batch of one.
    """
    if A_vec.dim() < 2:
        A_vec = A_vec.unsqueeze(dim=0)
    m = A_vec.shape[1]
    if m == 10:
        A_dim = 4
    elif m == 55:
        A_dim = 10
    else:
        raise ValueError("Arbitrary A_vec not yet implemented")
    rows, cols = torch.triu_indices(A_dim, A_dim)
    A = A_vec.new_zeros((A_vec.shape[0], A_dim, A_dim))
    # write the upper triangle, then mirror it to make A symmetric
    A[:, rows, cols] = A_vec
    A[:, cols, rows] = A_vec
    return A
def convert_A_to_Avec(A):
    """ Convert BxNxN symmetric matrices to BxM tensor"""
    """ M = N*(N+1)/2"""
    # Generalized: N is read from the input shape instead of being hard-coded
    # to 4, so 10x10 matrices (M=55) from convert_Avec_to_A round-trip too.
    if A.dim() < 3:
        # accept a single unbatched NxN matrix as a batch of one
        A = A.unsqueeze(dim=0)
    idx = torch.triu_indices(A.shape[-2], A.shape[-1])
    A_vec = A[:, idx[0], idx[1]]
    return A_vec
def compute_rotation_matrix_from_10d(x):
    """Map the 10-D representation (a symmetric 4x4 matrix A) to a rotation.

    The quaternion is the eigenvector of A for the smallest eigenvalue.
    :param x: [B, 10] raw network output
    :return: [B, 3, 3] rotation matrices
    """
    A = convert_Avec_to_A(x)
    d = A.device
    # torch.symeig was removed from recent PyTorch; torch.linalg.eigh returns
    # eigenvalues in the same ascending order with eigenvectors as columns,
    # so evs[:, :, 0] is still the smallest-eigenvalue eigenvector.
    if hasattr(torch, "symeig"):
        _, evs = torch.symeig(A.cpu(), eigenvectors=True)
    else:
        _, evs = torch.linalg.eigh(A.cpu())
    evs = evs.to(d)
    q = evs[:,:,0]
    # eigenvector sign ambiguity (q vs -q) maps to the same rotation
    return compute_rotation_matrix_from_quaternion(q,n_flag=False)
#x: [B, 10] raw output of network
#qg: [B, 4] updated quaternion
def compute_nearest_10d(x, qg, prev_eigenval=None):
    """Project x to the nearest 10-D vector whose matrix has qg as eigenvector.

    Solves the equality-constrained least squares min ||new_x - x|| subject to
    A(new_x) qg = eigenval * qg via a KKT system. Returns [B, 10].
    NOTE(review): interpretation inferred from the algebra below — confirm.
    """
    # [4,4]*[4,1] -> [4,10]*[10,1]
    d = qg.device
    b = qg.shape[0]
    assert len(qg.shape) == 2
    # X_matrix is the linear map vec10 -> A(vec10) @ qg; row i gathers the
    # entries of the symmetric 4x4 A touching row i (upper-triangular storage).
    X_matrix = torch.zeros((b,4,10),device=d)
    Id = torch.eye(10,device=d)[None,...].repeat(b,1,1)
    Ze = torch.zeros((b,4,4),device=d)
    X_matrix[:, 0,0:4] = qg
    X_matrix[:, 1,[1,4,5,6]] = qg
    X_matrix[:, 2,[2,5,7,8]] = qg
    X_matrix[:, 3,[3,6,8,9]] = qg
    #[[I, X_m^T],[X_m, 0]]
    KKT_l = torch.cat([Id, X_matrix], dim=1)
    KKT_r = torch.cat([X_matrix.transpose(-1,-2), Ze], dim=1)
    KKT = torch.cat([KKT_l, KKT_r], dim=2)
    # top-right 10x4 block of KKT^-1: maps the constraint residual to the primal update
    KKT_part = torch.inverse(KKT)[:, :10, -4:]
    qgs = qg.unsqueeze(-1)
    A = convert_Avec_to_A(x)
    Aqs = torch.bmm(A, qgs)
    if prev_eigenval is None:
        # least-squares estimate of the eigenvalue paired with qg
        KKT_M = torch.bmm(KKT_part.transpose(-1,-2), KKT_part)
        eigenval = (torch.bmm(torch.bmm(qgs.transpose(-1,-2), KKT_M),Aqs)+torch.bmm(torch.bmm(Aqs.transpose(-1,-2), KKT_M), qgs))/(2*torch.bmm(torch.bmm(qgs.transpose(-1,-2), KKT_M), qgs))
    else:
        eigenval = prev_eigenval
    # primal correction added back onto the original representation
    new_M = torch.bmm(KKT_part, eigenval*qgs-Aqs)
    new_x = new_M.squeeze()+x
return new_x | 19,121 | 32.606327 | 188 | py |
RPMG | RPMG-main/utils/rpmg.py | import torch
import sys
import os
BASEPATH = os.path.dirname(__file__)
sys.path.append(BASEPATH)
import tools
def Rodrigues(w):
    '''
    axis angle -> rotation (Rodrigues' formula: R = I + sin(t) K + (1-cos(t)) K^2)
    :param w: [b,3] axis-angle vectors (direction = axis, norm = angle)
    :return: R: [b,3,3]
    '''
    # broadcast w to [b,3,3,3] so each component can scale a 3x3 generator below
    w = w.unsqueeze(2).unsqueeze(3).repeat(1, 1, 3, 3)
    b = w.shape[0]
    # [b,3,3]; every entry equals ||w|| (components are replicated over dims 2,3)
    theta = w.norm(dim=1)
    #print(theta[0])
    #theta = torch.where(t>math.pi/16, torch.Tensor([math.pi/16]).cuda(), t)
    # +0.001 avoids division by zero for (near-)zero rotations, at the cost of a small bias
    wnorm = w / (w.norm(dim=1,keepdim=True)+0.001)
    #wnorm = torch.nn.functional.normalize(w,dim=1)
    # CUDA-only as written: get_device() assumes w lives on a GPU
    I = torch.eye(3, device=w.get_device()).repeat(b, 1, 1)
    # help1/2/3 are the so(3) generators [e_x]_x, [e_y]_x, [e_z]_x
    help1 = torch.zeros((b,1,3, 3), device=w.get_device())
    help2 = torch.zeros((b,1,3, 3), device=w.get_device())
    help3 = torch.zeros((b,1,3, 3), device=w.get_device())
    help1[:,:,1, 2] = -1
    help1[:,:,2, 1] = 1
    help2[:,:,0, 2] = 1
    help2[:,:,2, 0] = -1
    help3[:,:,0, 1] = -1
    help3[:,:,1, 0] = 1
    # Jwnorm = [axis]_x, the skew-symmetric cross-product matrix of the unit axis
    Jwnorm = (torch.cat([help1,help2,help3],1)*wnorm).sum(dim=1)
    return I + torch.sin(theta) * Jwnorm + (1 - torch.cos(theta)) * torch.bmm(Jwnorm, Jwnorm)
logger = 0  # module-level logger handle (e.g. a tensorboard SummaryWriter); 0 until logger_init() runs
def logger_init(ll):
    """Install the module-level logger used by RPMG.backward for diagnostics."""
    global logger
    logger = ll
    print('logger init')
class RPMG(torch.autograd.Function):
    '''
    full version. See "simple_RPMG()" for a simplified version.
    Tips:
    1. Use "logger_init()" to initialize the logger, if you want to record some intermediate variables by tensorboard.
    2. Use sum of L2/geodesic loss instead of mean, since our tau_converge is derived without considering the scalar introduced by mean loss.
        See <ModelNet_PC> for an example.
    3. Pass "weight=$YOUR_WEIGHT" instead of directly multiplying the weight on rotation loss, if you want to reweight R loss and other losses.
        See <poselstm-pytorch> for an example.
    '''
    @staticmethod
    def forward(ctx, in_nd, tau, lam, rgt, iter, weight=1):
        # Project the raw n-D representation (4/6/9/10-D) onto SO(3).
        proj_kind = in_nd.shape[1]
        if proj_kind == 6:
            r0 = tools.compute_rotation_matrix_from_ortho6d(in_nd)
        elif proj_kind == 9:
            r0 = tools.symmetric_orthogonalization(in_nd)
        elif proj_kind == 4:
            r0 = tools.compute_rotation_matrix_from_quaternion(in_nd)
        elif proj_kind == 10:
            r0 = tools.compute_rotation_matrix_from_10d(in_nd)
        else:
            raise NotImplementedError
        # stash hyper-parameters as a tensor so they survive save_for_backward
        ctx.save_for_backward(in_nd, r0, torch.Tensor([tau,lam, iter, weight]), rgt)
        return r0

    @staticmethod
    def backward(ctx, grad_in):
        in_nd, r0, config,rgt, = ctx.saved_tensors
        tau = config[0]
        lam = config[1]
        b = r0.shape[0]
        iter = config[2]  # training iteration index (only used for periodic logging)
        weight = config[3]
        proj_kind = in_nd.shape[1]
        # use Riemannian optimization to get the next goal R
        if tau == -1:
            # shortcut: jump straight to the ground-truth rotation
            r_new = rgt
        else:
            # Euclidean gradient -> Riemannian gradient: project grad_in onto
            # the tangent space spanned by the three so(3) generators at r0
            Jx = torch.zeros((b, 3, 3)).cuda()
            Jx[:, 2, 1] = 1
            Jx[:, 1, 2] = -1
            Jy = torch.zeros((b, 3, 3)).cuda()
            Jy[:, 0, 2] = 1
            Jy[:, 2, 0] = -1
            Jz = torch.zeros((b, 3, 3)).cuda()
            Jz[:, 0, 1] = -1
            Jz[:, 1, 0] = 1
            gx = (grad_in*torch.bmm(r0, Jx)).reshape(-1,9).sum(dim=1,keepdim=True)
            gy = (grad_in * torch.bmm(r0, Jy)).reshape(-1, 9).sum(dim=1,keepdim=True)
            gz = (grad_in * torch.bmm(r0, Jz)).reshape(-1, 9).sum(dim=1,keepdim=True)
            g = torch.cat([gx,gy,gz],1)
            # take one step of size tau along the negative Riemannian gradient
            delta_w = -tau * g
            # update R via the exponential map (Rodrigues)
            r_new = torch.bmm(r0, Rodrigues(delta_w))
            #this can help you to tune the tau if you don't use L2/geodesic loss.
            if iter % 100 == 0:
                logger.add_scalar('next_goal_angle_mean', delta_w.norm(dim=1).mean(), iter)
                logger.add_scalar('next_goal_angle_max', delta_w.norm(dim=1).max(), iter)
                R0_Rgt = tools.compute_geodesic_distance_from_two_matrices(r0, rgt)
                logger.add_scalar('r0_rgt_angle', R0_Rgt.mean(), iter)
        # inverse & project: turn the goal rotation back into a gradient for
        # the n-D input, plus a lam-weighted regularization toward the manifold
        if proj_kind == 6:
            r_proj_1 = (r_new[:, :, 0] * in_nd[:, :3]).sum(dim=1, keepdim=True) * r_new[:, :, 0]
            r_proj_2 = (r_new[:, :, 0] * in_nd[:, 3:]).sum(dim=1, keepdim=True) * r_new[:, :, 0] \
                       + (r_new[:, :, 1] * in_nd[:, 3:]).sum(dim=1, keepdim=True) * r_new[:, :, 1]
            r_reg_1 = lam * (r_proj_1 - r_new[:, :, 0])
            r_reg_2 = lam * (r_proj_2 - r_new[:, :, 1])
            gradient_nd = torch.cat([in_nd[:, :3] - r_proj_1 + r_reg_1, in_nd[:, 3:] - r_proj_2 + r_reg_2], 1)
        elif proj_kind == 9:
            SVD_proj = tools.compute_SVD_nearest_Mnlsew(in_nd.reshape(-1,3,3), r_new)
            gradient_nd = in_nd - SVD_proj + lam * (SVD_proj - r_new.reshape(-1,9))
            R_proj_g = tools.symmetric_orthogonalization(SVD_proj)
            if iter % 100 == 0:
                logger.add_scalar('9d_reflection', (((R_proj_g-r_new).reshape(-1,9).abs().sum(dim=1))>5e-1).sum(), iter)
                logger.add_scalar('reg', (SVD_proj - r_new.reshape(-1, 9)).norm(dim=1).mean(), iter)
                logger.add_scalar('main', (in_nd - SVD_proj).norm(dim=1).mean(), iter)
        elif proj_kind == 4:
            # pick whichever of the two equivalent quaternions (q, -q) is
            # closer to the (normalized) network output
            q_1 = tools.compute_quaternions_from_rotation_matrices(r_new)
            q_2 = -q_1
            normalized_nd = tools.normalize_vector(in_nd)
            q_new = torch.where(
                (q_1 - normalized_nd).norm(dim=1, keepdim=True) < (q_2 - normalized_nd).norm(dim=1, keepdim=True),
                q_1, q_2)
            q_proj = (in_nd * q_new).sum(dim=1, keepdim=True) * q_new
            gradient_nd = in_nd - q_proj + lam * (q_proj - q_new)
        elif proj_kind == 10:
            qg = tools.compute_quaternions_from_rotation_matrices(r_new)
            new_x = tools.compute_nearest_10d(in_nd, qg)
            reg_A = torch.eye(4, device=qg.device)[None].repeat(qg.shape[0],1,1) - torch.bmm(qg.unsqueeze(-1), qg.unsqueeze(-2))
            reg_x = tools.convert_A_to_Avec(reg_A)
            gradient_nd = in_nd - new_x + lam * (new_x - reg_x)
            if iter % 100 == 0:
                logger.add_scalar('reg', (new_x - reg_x).norm(dim=1).mean(), iter)
                logger.add_scalar('main', (in_nd - new_x).norm(dim=1).mean(), iter)
        # one gradient per forward input: (in_nd, tau, lam, rgt, iter, weight)
        return gradient_nd * weight, None, None,None,None,None
class simple_RPMG(torch.autograd.Function):
    '''
    Simplified version without tensorboard logging and without the
    ground-truth shortcut (tau == -1) of the full RPMG class.

    forward : project an n-D rotation representation (n = 4 quaternion,
              6 ortho6d, 9 SVD, 10 symmetric-matrix) to a rotation matrix.
    backward: one Riemannian gradient step on SO(3) to obtain a goal rotation,
              then an inverse projection (plus lam-weighted regularization)
              to turn it into a gradient for the n-D input.
    '''
    @staticmethod
    def forward(ctx, in_nd, tau, lam, weight=1):
        # Choose the projection by representation size.
        proj_kind = in_nd.shape[1]
        if proj_kind == 6:
            r0 = tools.compute_rotation_matrix_from_ortho6d(in_nd)
        elif proj_kind == 9:
            r0 = tools.symmetric_orthogonalization(in_nd)
        elif proj_kind == 4:
            r0 = tools.compute_rotation_matrix_from_quaternion(in_nd)
        elif proj_kind == 10:
            r0 = tools.compute_rotation_matrix_from_10d(in_nd)
        else:
            raise NotImplementedError
        # stash hyper-parameters as a tensor so they survive save_for_backward
        ctx.save_for_backward(in_nd, r0, torch.Tensor([tau, lam, weight]))
        return r0

    @staticmethod
    def backward(ctx, grad_in):
        in_nd, r0, config, = ctx.saved_tensors
        tau = config[0]
        lam = config[1]
        weight = config[2]
        b = r0.shape[0]
        dev = r0.device  # follow the input's device instead of hard-coding .cuda()
        proj_kind = in_nd.shape[1]
        # --- Riemannian optimization: one step on SO(3) toward lower loss ---
        # Euclidean gradient -> Riemannian gradient: project grad_in onto the
        # tangent space spanned by the three so(3) generators at r0
        Jx = torch.zeros((b, 3, 3), device=dev)
        Jx[:, 2, 1] = 1
        Jx[:, 1, 2] = -1
        Jy = torch.zeros((b, 3, 3), device=dev)
        Jy[:, 0, 2] = 1
        Jy[:, 2, 0] = -1
        Jz = torch.zeros((b, 3, 3), device=dev)
        Jz[:, 0, 1] = -1
        Jz[:, 1, 0] = 1
        gx = (grad_in * torch.bmm(r0, Jx)).reshape(-1, 9).sum(dim=1, keepdim=True)
        gy = (grad_in * torch.bmm(r0, Jy)).reshape(-1, 9).sum(dim=1, keepdim=True)
        gz = (grad_in * torch.bmm(r0, Jz)).reshape(-1, 9).sum(dim=1, keepdim=True)
        g = torch.cat([gx, gy, gz], 1)
        # take one step of size tau
        delta_w = -tau * g
        # update R via the exponential map (Rodrigues)
        r_new = torch.bmm(r0, Rodrigues(delta_w))
        # --- inverse & project: gradient for the n-D representation ---
        if proj_kind == 6:
            r_proj_1 = (r_new[:, :, 0] * in_nd[:, :3]).sum(dim=1, keepdim=True) * r_new[:, :, 0]
            r_proj_2 = (r_new[:, :, 0] * in_nd[:, 3:]).sum(dim=1, keepdim=True) * r_new[:, :, 0] \
                       + (r_new[:, :, 1] * in_nd[:, 3:]).sum(dim=1, keepdim=True) * r_new[:, :, 1]
            r_reg_1 = lam * (r_proj_1 - r_new[:, :, 0])
            r_reg_2 = lam * (r_proj_2 - r_new[:, :, 1])
            gradient_nd = torch.cat([in_nd[:, :3] - r_proj_1 + r_reg_1, in_nd[:, 3:] - r_proj_2 + r_reg_2], 1)
        elif proj_kind == 9:
            SVD_proj = tools.compute_SVD_nearest_Mnlsew(in_nd.reshape(-1, 3, 3), r_new)
            gradient_nd = in_nd - SVD_proj + lam * (SVD_proj - r_new.reshape(-1, 9))
        elif proj_kind == 4:
            # pick whichever of the equivalent quaternions (q, -q) is closer
            # to the normalized network output
            q_1 = tools.compute_quaternions_from_rotation_matrices(r_new)
            q_2 = -q_1
            normalized_nd = tools.normalize_vector(in_nd)
            q_new = torch.where(
                (q_1 - normalized_nd).norm(dim=1, keepdim=True) < (q_2 - normalized_nd).norm(dim=1, keepdim=True),
                q_1, q_2)
            q_proj = (in_nd * q_new).sum(dim=1, keepdim=True) * q_new
            gradient_nd = in_nd - q_proj + lam * (q_proj - q_new)
        elif proj_kind == 10:
            qg = tools.compute_quaternions_from_rotation_matrices(r_new)
            new_x = tools.compute_nearest_10d(in_nd, qg)
            reg_A = torch.eye(4, device=qg.device)[None].repeat(qg.shape[0], 1, 1) - torch.bmm(qg.unsqueeze(-1), qg.unsqueeze(-2))
            reg_x = tools.convert_A_to_Avec(reg_A)
            gradient_nd = in_nd - new_x + lam * (new_x - reg_x)
        # BUGFIX: backward must return exactly one value per forward input
        # (in_nd, tau, lam, weight) -> 4 values, not 6; extra entries make
        # autograd raise "returned an incorrect number of gradients".
        return gradient_nd * weight, None, None, None
| 9,942 | 42.801762 | 148 | py |
RPMG | RPMG-main/ModelNet_Img/S3.3D_Rotation/dataset.py | from lib.datasets.Dataset_Base import Dataset_Base
import os
import numpy as np
import torch
import sys
BASEPATH = os.path.dirname(__file__)
sys.path.append(os.path.join(BASEPATH, '..', '..', 'utils'))
import tools
from torch.utils.data import Dataset, DataLoader
import cv2
class MyDataset(Dataset_Base):
    def __getitem__(self, idx):
        """Return one sample dict: index, class label, GT quaternion, its
        rotation-matrix form, and the image tensor."""
        record = self.recs[idx]
        quat = torch.from_numpy(record.so3.quaternion)
        image = torch.from_numpy(self._get_image(record))
        return dict(
            idx=idx,
            label=self.cate2ind[record.category],
            quat=quat,
            rot_mat=tools.compute_rotation_matrix_from_quaternion(quat[None]).squeeze(),
            img=image,
        )
def get_dataloader(phase, config, sampling=1.):
    """Build a DataLoader over MyDataset for the given phase ('train'/'test');
    training data is shuffled, evaluation data is not."""
    dataset = MyDataset(config.category, collection=phase, sampling=sampling, net_arch='vgg16')
    return DataLoader(dataset,
                      batch_size=config.batch_size,
                      shuffle=(phase == 'train'),
                      num_workers=config.num_workers)
def data_len():
    """Print the test-set size for each of the ten ModelNet10 categories."""
    for cate in ['bathtub', 'bed', 'chair', 'desk', 'dresser',
                 'monitor', 'night_stand', 'sofa', 'table', 'toilet']:
        print(cate)
        print(len(MyDataset(cate, collection='test', sampling=1, net_arch='vgg16')))
if __name__ == '__main__':
    # Smoke test: build the train loader and dump one de-normalized batch as PNGs.
    from config import get_config
    config = get_config('train')
    dloader = get_dataloader('train', config, sampling=1)
    print(len(dloader))
    sample = next(iter(dloader))
    # NCHW float tensor -> NHWC numpy, then undo the mean-pixel normalization
    imgs = sample.get('img').cpu().numpy().transpose((0, 2, 3, 1))
    imgs = imgs * 255 + dloader.dataset.mean_pxl
    imgs = imgs.astype(np.uint8)
    for i, img in enumerate(imgs):
        # NOTE: hard-coded debug output directory; adjust for your machine
        cv2.imwrite(f'/home/megabeast/test_bingham/data/{i}.png', img)
    print()
| 1,876 | 28.793651 | 118 | py |
RPMG | RPMG-main/ModelNet_Img/S3.3D_Rotation/networks.py | import torch
from torch import nn
from torch import Tensor
from typing import Callable, Any, Optional, List
def get_network(config):
    """Factory: build the MobileNetV2 regressor with config.num_classes outputs."""
    net = MobileNetV2(config.num_classes)
    return net
def set_requires_grad(nets, requires_grad=False):
    """Toggle gradient computation to avoid unnecessary backward work.

    Parameters:
        nets (network or list) -- a network or a list of networks; None
                                  entries are skipped
        requires_grad (bool)   -- flag applied to every parameter
    """
    net_list = nets if isinstance(nets, list) else [nets]
    for net in net_list:
        if net is None:
            continue
        for param in net.parameters():
            param.requires_grad = requires_grad
def _make_divisible(v: float, divisor: int, min_value: Optional[int] = None) -> int:
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class ConvBNActivation(nn.Sequential):
    """Conv2d -> normalization -> activation, defaulting to BatchNorm2d + ReLU6.

    Padding is derived from kernel_size and dilation so spatial size is
    preserved at stride 1.
    """

    def __init__(
        self,
        in_planes: int,
        out_planes: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        activation_layer: Optional[Callable[..., nn.Module]] = None,
        dilation: int = 1,
    ) -> None:
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if activation_layer is None:
            activation_layer = nn.ReLU6
        pad = (kernel_size - 1) // 2 * dilation
        super().__init__(
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad,
                      dilation=dilation, groups=groups, bias=False),
            norm_layer(out_planes),
            activation_layer(inplace=True),
        )
        self.out_channels = out_planes


# necessary for backwards compatibility
ConvBNReLU = ConvBNActivation
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: pointwise expand -> depthwise ->
    pointwise-linear project, with a residual connection when stride == 1 and
    the channel count is unchanged."""

    def __init__(
        self,
        inp: int,
        oup: int,
        stride: int,
        expand_ratio: int,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = stride == 1 and inp == oup

        layers: List[nn.Module] = []
        if expand_ratio != 1:
            # pointwise expansion
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer))
        layers += [
            # depthwise
            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, norm_layer=norm_layer),
            # pointwise-linear projection (no activation)
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            norm_layer(oup),
        ]
        self.conv = nn.Sequential(*layers)
        self.out_channels = oup
        self._is_cn = stride > 1

    def forward(self, x: Tensor) -> Tensor:
        out = self.conv(x)
        return x + out if self.use_res_connect else out
class MobileNetV2(nn.Module):
    def __init__(
        self,
        num_classes: int = 1000,
        width_mult: float = 1.0,
        inverted_residual_setting: Optional[List[List[int]]] = None,
        round_nearest: int = 8,
        block: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of output units (here: the dimension of
                the rotation representation regressed by the head)
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
        """
        super(MobileNetV2, self).__init__()

        if block is None:
            block = InvertedResidual

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        input_channel = 32
        last_channel = 1280

        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t, c, n, s  (expand ratio, channels, repeats, first stride)
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]

        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(inverted_residual_setting))

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features: List[nn.Module] = [ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # stride s only on the first block of each group
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer))
        # make it nn.Sequential
        self.features = nn.Sequential(*features)

        # building classifier: a custom 1280 -> 256 -> 64 -> num_classes MLP
        # head (differs from torchvision's single Linear classifier)
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, 256),
            nn.BatchNorm1d(256),
            nn.ReLU6(inplace=True),
            nn.Linear(256, 64),
            nn.BatchNorm1d(64),
            nn.ReLU6(inplace=True),
            nn.Linear(64, num_classes),
        )

        # weight initialization (kaiming for convs, normal for linears)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # This exists since TorchScript doesn't support inheritance, so the superclass method
        # (this one) needs to have a name other than `forward` that can be accessed in a subclass
        x = self.features(x)
        # Cannot use "squeeze" as batch-size can be 1 => must use reshape with x.shape[0]
        x = nn.functional.adaptive_avg_pool2d(x, (1, 1)).reshape(x.shape[0], -1)
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
if __name__ == '__main__':
    # Smoke test: 6-D rotation head on a batch of 4 RGB crops at 227x227.
    net = MobileNetV2(6)
    input = torch.randn(4, 3, 227, 227)
    print(net(input).shape)  # expected: torch.Size([4, 6])
| 8,009 | 35.244344 | 116 | py |
RPMG | RPMG-main/ModelNet_Img/S3.3D_Rotation/trainval_workdir.py | """
@Author : Shuai Liao
"""
# import matplotlib
# matplotlib.use('Agg') # server mode.
#
import torch
import torch.optim
#
import os, sys, time
from time import gmtime, strftime
import numpy as np
from math import pi
from easydict import EasyDict as edict
from collections import OrderedDict as odict
#
from basic.common import Open, env, add_path, RefObj as rdict, argv2dict, is_py3
this_dir = os.path.dirname(os.path.realpath(__file__))
import pickle
#
from pytorch_util.libtrain.yaml_netconf import parse_yaml, import_module_v2
from pytorch_util.libtrain.tools import get_stripped_DataParallel_state_dict, patch_saved_DataParallel_state_dict
from txt_table_v1 import TxtTable
#
from tensorboardX import SummaryWriter
from tqdm import tqdm
# =========== Parsing from working path =========
# Method family/type are inferred from the directory layout instead of CLI flags.
pwd = os.getcwd() # Assume: $base_dir/S3.3D_Rotation/{MtdFamily}/{MtdType}
MtdFamily, MtdType = pwd.split(os.sep)[-2:]
# ================================================

# ------- args from convenient run yaml ---------
# Default run arguments as a YAML string; overridable from the command line below.
convenient_run_argv_yaml = \
'''
MtdFamily : {MtdFamily} # e.g. regQuatNet
MtdType : {MtdType} # e.g. reg_Direct, reg_Sexp, reg_Sflat
net_module : {net_module} # same as MtdType here, namely import from 'MtdType'.py
net_arch : {net_arch} # e.g. alexnet, vgg16
base_dir : {base_dir} # e.g. path/to/S3.3D_Rotation
LIB_DIR : {base_dir}/lib
train_view : 100V # 20V #
work_dir : './snapshots/{net_arch}'
nr_epoch : 150
test_step_epoch : 10
this_dir : {this_dir}
base_lr : 0.001
'''.format(net_arch='alexnet',
           base_dir=this_dir,
           this_dir=pwd,
           #
           MtdFamily=MtdFamily,
           MtdType=MtdType,
           net_module=MtdFamily,
           )
run_args = parse_yaml(convenient_run_argv_yaml)

# ------- arg from argparse -----------------
import argparse

parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('conf_yml_file', default='', type=str, metavar='PATH',
                    help='path to conf_yml_file (default: none)')
# parser.add_argument('work_dir' , default='', type=str, metavar='PATH',)
#                     help='path to work_dir (default: none)')
parser.add_argument('gpu_ids', default='', type=str, metavar='PATH',
                    help='e.g. 0 or 0,1,2,3')
parser.add_argument('--resume', action="store_true", default=False,
                    help='to resume by the last checkpoint')
parser.add_argument('--pretrain', default=None, type=str, metavar='PATH',
                    help='path to pretrained checkpoint (default: none)')
parser.add_argument('--optimizer', default='SGD', type=str, help='SGD or Adam')
parser.add_argument('--test_only', action="store_true", default=False,
                    help='only do test once.')
_pargs, _rest = parser.parse_known_args() # parser.parse_args()
# parse the rest undefined args with "--key=value" form.
_cmd_args = argv2dict(_rest)
_cmd_args.update(vars(_pargs))
#
# CLI arguments take precedence over the YAML defaults.
run_args.update(_cmd_args)

from string import Template

# Instantiate the full config template with the merged run arguments.
template_str = open(os.path.join(this_dir, 'conf_template.yml')).read()
template = Template(template_str)
print(run_args)
conf_yml_str = template.substitute(run_args)
# -- parse module_yml_file
opt = parse_yaml(conf_yml_str)
#
opt.update(run_args)
#
from ordered_easydict import Ordered_EasyDict as oedict
opt = oedict(opt) # Use opt for reference all configurations.
# ------ Import modules ----------
# Modules (dataset class, network factory, eval helpers) are resolved at
# runtime from dotted paths declared in the YAML config.
[(_dataset_module, _dataset_kwargs), netcfg] = import_module_v2(opt.IMPORT_dataset) # pred2angle
[(_net_module, _net_kwargs)] = import_module_v2(opt.IMPORT_makenet) # [_net_type]
[(eval_cates, _), (compute_geo_dists, _)] = import_module_v2(opt.IMPORT_eval.GTbox)

net_arch = opt.net_arch # or _net_kwargs.net_arch
_cfg = netcfg[net_arch] # [opt.net_arch]

# Seed numpy / torch (and CUDA) for reproducibility.
np.random.seed(_cfg.RNG_SEED)
torch.manual_seed(_cfg.RNG_SEED)
if opt.use_gpu:
    torch.cuda.manual_seed(_cfg.RNG_SEED)

# ---------------------------------------------------------------------------------------------------[dataset]
dataset_test = _dataset_module(collection='test', sampling=0.2, **_dataset_kwargs) #
dataset_train = _dataset_module(collection='train', **_dataset_kwargs) #
# change the sampling of dataset:  e.g.  sampling: {imagenet:1.0, synthetic:1.0}
# 'ModelNet10/SO3_100V.white_BG_golden_FG'

# From default.run.conf.yml.sh
if opt.cates is None:
    opt.cates = dataset_train.cates
cates = opt.cates

# Batch size from config if present, otherwise the net-arch default.
if 'batch_size' in opt:
    batch_size = opt.batch_size
else:
    batch_size = _cfg.TRAIN.BATCH_SIZE

nr_GPUs = len(opt.gpu_ids)
assert nr_GPUs >= 1, opt.gpu_ids
if nr_GPUs > 1:
    print('--------------------- Use multiple-GPU %s -------------------------' % opt.gpu_ids)
    print('   batch_size  = %s' % batch_size) # (batch_size*nr_GPUs)
    print('   num_workers = %s' % (opt.num_workers * nr_GPUs))
#
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=batch_size, shuffle=True, # batch_size*nr_GPUs
                                           num_workers=opt.num_workers * nr_GPUs, pin_memory=opt.pin_memory,
                                           sampler=None)
# ---------------------------------------------------------------------------------------------------[model]
print('[makenet] nr_cate: ', len(cates))
model = _net_module(nr_cate=len(cates), **_net_kwargs) # len(_cfg.cates))
if 'fix_conv1_conv2' in opt.keys() and opt.fix_conv1_conv2:
    # optionally freeze the earliest conv layers
    model.fix_conv1_conv2()
#
watch_targets = model.targets

# ---------------------------------------------------------------------------------------------------[optimizer]
# Only parameters with requires_grad participate in optimization.
params = []
for name, param in model.named_parameters():
    print('----(*) ', name)
    if param.requires_grad:
        params.append(param)

print('[Optimizer] %s' % opt.optimizer)
if opt.optimizer == 'Adam':
    optimizer = torch.optim.Adam(params, lr=opt.base_lr) # , weight_decay=opt.weight_decay)
elif opt.optimizer == 'SGD':
    optimizer = torch.optim.SGD(params, opt.base_lr, # model.parameters(), opt.base_lr,
                                momentum=opt.momentum,
                                weight_decay=opt.weight_decay)
else:
    raise NotImplementedError

# Snapshot directory is suffixed with the training-view setting (e.g. ".100V").
work_dir = opt.work_dir
work_dir += '.%s' % opt.train_view
_short_work_dir = os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir)) + 1:]

# global state variables (updated when resuming from a checkpoint below)
start_it = 0
start_epoch = 0

from pytorch_util.libtrain import rm_models, list_models
from pytorch_util.libtrain.reducer import reducer, reducer_group

# Log file.
script_name, _ = os.path.splitext(os.path.basename(__file__))
log_filename = '%s/%s.log' % (work_dir, script_name)
if os.path.exists(log_filename): # backup previous content.
    pre_log_content = open(log_filename).read()
logf = Open(log_filename, 'w')
def logprint(s):
    """Append `s` to the run log file and echo it to stdout."""
    logf.write(s + "\n")
    print("\r%s " % s)
# -- Resume or use pretrained (Note not imagenet pretrain.)
assert not (opt.resume and opt.pretrain is not None), 'Only resume or pretrain can exist.'
if opt.resume:
    # Resume from the newest checkpoint in work_dir (restores iteration/epoch
    # counters, model weights and optimizer state).
    iter_nums, net_name = list_models(work_dir) # ('snapshots')
    assert len(iter_nums) > 0, "No models available"
    latest_model_name = os.path.join(work_dir, '%s_iter_%s.pth.tar' % (net_name, iter_nums[-1]))
    print('\n\nResuming from: %s \n\n' % latest_model_name)
    if os.path.isfile(latest_model_name):
        print("=> loading checkpoint '{}'".format(latest_model_name))
        checkpoint = torch.load(latest_model_name)
        start_it, start_epoch = checkpoint['it_and_epoch'] # mainly for control lr: (it, epoch)
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        # fix for pytorch 4.0.x [https://github.com/jwyang/faster-rcnn.pytorch/issues/222]
        # optimizer state tensors must be moved back to the GPU by hand
        for state in optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.cuda()
        print("=> loaded checkpoint '{}' (it_and_epoch {})"
              .format(latest_model_name, checkpoint['it_and_epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(latest_model_name)) # unnecessary line
elif opt.pretrain is not None:
    # Warm-start weights/optimizer from an explicit checkpoint path, but keep
    # iteration/epoch counters at zero.
    print('\n\nUsing pretrained: %s \n\n' % opt.pretrain)
    if os.path.isfile(opt.pretrain):
        print("=> loading checkpoint '{}'".format(opt.pretrain))
        checkpoint = torch.load(opt.pretrain)
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (it_and_epoch {})"
              .format(opt.pretrain, checkpoint['it_and_epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(opt.pretrain))

# Check if use multi-gpus.
# Note should be after any "model.load_state_dict()" call!
if opt.use_gpu:
    # model.cuda()
    if nr_GPUs > 1: # multi-GPUs opt['mGPUs']:
        # see: https://github.com/pytorch/examples/blob/master/imagenet/main.py
        # Wrap only the convolutional trunk in DataParallel (arch-dependent attr).
        if net_arch.startswith('alexnet'):
            model.trunk.Convs = torch.nn.DataParallel(model.trunk.Convs)
        elif net_arch.startswith('vgg'):
            model.trunk.features = torch.nn.DataParallel(model.trunk.features)
        else:
            model.trunk = torch.nn.DataParallel(model.trunk)
    model.cuda()

if not os.path.exists(work_dir):
    print("[Make new dir] ", work_dir)
    os.makedirs(work_dir)

disp_interval = 10 if ('disp_interval' not in opt) else opt['disp_interval']
"""
(nr_iter * batch_size)/nr_train = nr_epoch     where nr_train=28647/29786
  (40000*200)/29786. = 268.6
  (40000* 50)/29786. = 67.1
 (40000*_cfg.TRAIN.BATCH_SIZE)/29786. / 2 /10
"""
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every _N_ epochs"""
    # period derived from a ~40k-iteration budget; '2' means decay at most twice
    decay_period = int((40000 * batch_size) / 29786. / 2 / 10) * 10
    lr = opt.base_lr * (0.1 ** (epoch // decay_period))
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
def adjust_learning_rate_by_iter(optimizer, cur_iter, max_iter):
    """Sets the learning rate to the initial LR decayed by 10 every _N_ iterations"""
    period = max_iter // 3  # adjust the learning rate 3 times over the run
    lr = opt.base_lr * (0.1 ** (max(cur_iter, 0) // period))
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
def adjust_learning_rate_by_epoch(optimizer, cur_epoch, max_epoch):
    """Sets the learning rate to the initial LR decayed by 10 every _N_ epochs"""
    period = max_epoch // 3  # adjust the learning rate 3 times over training
    lr = opt.base_lr * (0.1 ** (max(cur_epoch, 0) // period))
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
def save_checkpoint(state, filename='checkpoint.pth.tar'):
    """Serialize a training-state dict (weights, optimizer, counters) to disk."""
    torch.save(state, filename)
def test(dataset_test, work_dir, test_model=None, marker='epoch'):
    """Evaluate the latest checkpoint in work_dir on dataset_test.

    Loads the newest '<net>_<marker>_<num>.pth.tar' snapshot, runs inference,
    dumps predicted quaternions to a text table, then runs the category-level
    evaluation. Returns (MedError, Acc@pi/6, Acc@pi/12, Acc@pi/24).
    Relies on module globals: model, _cfg, opt, nr_GPUs, watch_targets,
    _short_work_dir.
    """
    out_rslt_path = work_dir + '/temp.out_rslt_path.txt'
    out_eval_path = work_dir + '/temp.out_eval_path.txt'
    if test_model is None:
        test_model = model
    # ---- Load trained weights here.------
    assert os.path.exists(work_dir)
    #
    iter_nums, net_name = list_models(work_dir, marker=marker)
    saved_iter_num = iter_nums[-1]
    pretrained_model = work_dir + '/%s_%s_%s.pth.tar' % (
        net_name, marker, saved_iter_num) # select maximum iter number.
    print('[pretrained_model] ', pretrained_model)
    checkpoint = torch.load(pretrained_model) # load weights here.
    # strip any DataParallel 'module.' prefixes before loading
    _state_dict = patch_saved_DataParallel_state_dict(checkpoint['state_dict'])
    test_model.load_state_dict(_state_dict)

    # switch to eval mode
    test_model.eval()

    gLoss_redu = reducer_group(*watch_targets)
    gPred_redu = reducer_group(*['quat'])

    pre_time = time.time()
    it = -1
    epoch = -1
    #
    keys = dataset_test.keys
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=_cfg.TEST.BATCH_SIZE * nr_GPUs, shuffle=False,
                                              num_workers=opt.num_workers * nr_GPUs, pin_memory=opt.pin_memory,
                                              sampler=None)
    with torch.no_grad():
        pbar = tqdm(test_loader)
        for _i_, sample_batched in enumerate(pbar):
            pbar.set_description("[work_dir] %s " % _short_work_dir)
            it += 1
            # Note: Tensor.cuda() Returns a copy of this object in CUDA memory.
            label = torch.autograd.Variable(sample_batched['label'].cuda(non_blocking=True))
            data = torch.autograd.Variable(sample_batched['data'].cuda(non_blocking=True))
            # formulate GT dict
            _gt_targets = test_model.gt_targets if hasattr(test_model, 'gt_targets') else test_model.targets
            GT = edict()
            for tgt in _gt_targets:
                GT[tgt] = torch.autograd.Variable(sample_batched[tgt].cuda(non_blocking=True))
            # compute Pred output
            Prob = test_model(data, label)
            # compute Loss for each target and formulate Loss dictionary.
            Loss = test_model.compute_loss(Prob, GT)
            total_loss = 0
            for tgt in watch_targets:
                total_loss += Loss[tgt]
            # predict target angles value
            Pred = test_model.compute_pred(Prob)
            gLoss_redu.collect(Loss) # pass in dict of all loss (loss_a, loss_e, loss_t).
            gPred_redu.collect(Pred, squeeze=False)
            # print loss info
            cur_time = time.time()
            time_consume = cur_time - pre_time
            pre_time = cur_time
            print('\r %s [test-iter] %5d / %5d ---------[time_consume] %.2f' % (
                strftime("%Y-%m-%d %H:%M:%S", gmtime()), it, len(test_loader), time_consume))
            for trgt in watch_targets:
                _loss = Loss[trgt].data.cpu().numpy().copy()
                print('   %-15s loss=%.3f' % (trgt, _loss))
                # NaN loss means the weights diverged -- abort immediately
                if np.isnan(_loss):
                    print("[Warning] Weights explode!  Stop training ... ")
                    exit(-1)
            # pbar.set_description("[work_dir] %s " % os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir))+1:])
            # print ("\r[work_dir] %s      \r" % os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir))+1:],end='')
            sys.stdout.flush()

    pred_quats = gPred_redu.reduce()['quat']

    # -- Write result to file  (Format: # {obj_id}   {a}  {e}  {t} )
    txtTbl = TxtTable('{obj_id:<20s}   {a:>6.4f}  {b:>6.4f}  {c:>6.4f} {d:>6.4f}')
    rslt_lines = [txtTbl.getHeader()]
    for _k, _quat in zip(keys, pred_quats):
        _a, _b, _c, _d = _quat
        rslt_line = txtTbl.format(_k, _a, _b, _c, _d)
        rslt_lines.append(rslt_line)
    rslt_lines = '\n'.join(rslt_lines)
    Open(out_rslt_path, 'w').write(rslt_lines)
    #
    print('[out_rslt_path]', out_rslt_path)

    # -- Do evaluation ('MedError', 'Acc@theta')
    from numpy_db import npy_table
    rc_tbl = npy_table(dataset_test.recs)
    #
    summary_str = eval_cates(out_rslt_path, rc_tbl, cates=opt.cates,
                             theta_levels_str='pi/6  pi/12  pi/24') # ['aeroplane','boat','car'])
    Open(out_eval_path, 'w').write(summary_str)
    print(summary_str)

    # last row of the eval table holds the aggregate metrics
    reca = TxtTable().load_as_recarr(out_eval_path, fields=['MedError', 'Acc@pi/6', 'Acc@pi/12', 'Acc@pi/24'])
    return reca[-1]
def train(nr_disp=5000):
    """Main training loop.

    Runs `opt.nr_epoch` epochs over `train_loader`, evaluating on the test set
    every `opt.test_step_epoch` epochs and snapshotting model/optimizer state
    every `opt.snapshot_step_epoch` epochs.

    Args:
        nr_disp: total number of loss/accuracy display points over the whole
            run; used to derive `disp_interval` (iterations between printouts).

    NOTE(review): relies on module-level globals defined elsewhere in this file
    (opt, work_dir, model, optimizer, train_loader, dataset_train, dataset_test,
    watch_targets, batch_size, start_it, start_epoch, _short_work_dir, logprint,
    save_checkpoint, compute_geo_dists) — confirm against full file.
    """
    # Start with a clean tensorboard log directory.
    os.system('rm -rf %s ' % (work_dir + "/logs"))
    logger = SummaryWriter(work_dir + "/logs")
    nr_epoch = opt.nr_epoch
    nr_iter = opt.nr_epoch * (len(dataset_train) / batch_size)  # 130800
    # based on iter
    disp_interval = int(nr_iter / nr_disp)
    pre_time = time.time()
    it = start_it - 1  # -1
    epoch = start_epoch - 1  # -1
    #
    while epoch < nr_epoch:
        epoch += 1
        # Do test first
        if epoch % opt.test_step_epoch == 0:
            mederr, acc6, acc12, acc24 = test(dataset_test, work_dir, model)
            logger.add_scalars('acc/test',
                               {'MedError': mederr, 'Acc@pi/6': acc6, 'Acc@pi/12': acc12, 'Acc@pi/24': acc24},
                               epoch + 1)
        # switch to train mode
        model.train()
        lr = adjust_learning_rate_by_epoch(optimizer, epoch, nr_epoch)  # opt.base_lr
        pbar = tqdm(train_loader)
        for _i_, sample_batched in enumerate(pbar):
            pbar.set_description("[work_dir] %s B=%s " % (_short_work_dir, batch_size))
            rec_inds = sample_batched['idx'].numpy()
            #
            it += 1
            label = torch.autograd.Variable(sample_batched['label'].cuda(non_blocking=True))
            data = torch.autograd.Variable(sample_batched['data'].cuda(non_blocking=True))
            # formulate GT dict
            # `gt_targets` (if present) lists the keys the loss needs from the batch.
            _gt_targets = model.gt_targets if hasattr(model, 'gt_targets') else model.targets
            GT = edict()
            for tgt in _gt_targets:
                GT[tgt] = torch.autograd.Variable(sample_batched[tgt].cuda(non_blocking=True))
            # compute Pred output
            Prob = model(data, label)
            # compute Loss for each target and formulate Loss dictionary.
            Loss = model.compute_loss(Prob, GT)
            total_loss = 0
            for tgt in watch_targets:
                total_loss += Loss[tgt]  # * loss_weight
            # compute gradient and do SGD step
            optimizer.zero_grad()  # Clears the gradients of all optimized Variable s.
            total_loss.backward()
            optimizer.step()
            logger.add_scalars('loss_iter', Loss, it + 1)
            # logger.add_scalar('grad_norm/fc7', fc7_gradNorm, it+1)
            # print loss info
            if it % disp_interval == 0:  # or (it+1)==len(dataset_train)/batch_size:
                cur_time = time.time()
                time_consume = cur_time - pre_time
                pre_time = cur_time
                logprint(
                    '%s [epoch] %3d/%3d [iter] %5d -----------------------------------[time_consume] %.2f lr=%.8f'
                    % (strftime("%Y-%m-%d %H:%M:%S", gmtime()), epoch + 1, nr_epoch, it + 1, time_consume, lr))
                for tgt in watch_targets:
                    _loss = Loss[tgt].data.cpu().numpy().copy()
                    logprint(' %-15s loss=%.3f' % (tgt, _loss))
                    if np.isnan(_loss):
                        # Abort the whole process on NaN loss — training cannot recover.
                        print("[Warning] Weights explode! Stop training ... ")
                        exit(-1)
                # Compute Acc@theta on the current minibatch (training accuracy probe).
                recs = dataset_train.recs[rec_inds]
                Pred = model.compute_pred(Prob)
                geo_dists = compute_geo_dists(Pred['quat'], recs.so3.quaternion)
                MedError = np.median(geo_dists) / np.pi * 180.
                theta_levels = odict(zip(['pi/6', 'pi/12', 'pi/24'], [np.pi / 6, np.pi / 12, np.pi / 24]))
                # # {'pi/6':np.pi/6, 'pi/12':np.pi/12, 'pi/24':np.pi/24})
                Acc_at_ts = odict([(tname, sum(geo_dists < tvalue) / float(len(geo_dists))) for tname, tvalue in
                                   theta_levels.items()])
                logger.add_scalars('acc/train', Acc_at_ts, it + 1)
                acc_str = ' '.join(['[%s] %3.1f%%' % (k, Acc_at_ts[k] * 100) for k, v in theta_levels.items()])
                logprint(' Acc@{ %s } ' % acc_str)
                # pbar.set_description("[work_dir] %s B=%s \r" % (os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir))+1:], batch_size))
                # print ("\r[work_dir] %s B=%s \r" % (os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir))+1:], batch_size), end='')
                sys.stdout.flush()
        #
        #
        if (epoch + 1) % opt.snapshot_step_epoch == 0:
            save_checkpoint({
                'it_and_epoch': (it, epoch),
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, filename=os.path.join(work_dir, 'train_epoch_%s.pth.tar' % (epoch + 1)))
    logger.close()
if __name__ == '__main__':
    # Entry point: either evaluate only, or train then prune intermediate snapshots.
    if opt.test_only:
        # NOTE(review): called with 2 args here vs 3 at the in-training call site —
        # presumably `test()` has a default for its model argument; confirm.
        test(dataset_test, work_dir)
    else:
        train()
        # Remove intermediate 'epoch' checkpoints after training finishes.
        rm_models(work_dir, marker='epoch')
| 20,512 | 39.700397 | 146 | py |
RPMG | RPMG-main/ModelNet_Img/S3.3D_Rotation/agent.py | from utils import TrainClock, KSchedule
import os
import torch
import torch.optim as optim
import torch.nn as nn
from tensorboardX import SummaryWriter
import sys
BASEPATH = os.path.dirname(__file__)
sys.path.append(os.path.join(BASEPATH, '..', '..', 'utils'))
import tools
import rpmg
from networks import get_network
def get_agent(config):
    """Factory: build the trainer/agent for the given experiment config."""
    return MyAgent(config)
class MyAgent(object):
    """Base trainer that provides common training behavior.

    Owns the network, optimizer, k-schedule (for RPMG) and tensorboard writer,
    and implements the per-step train/val logic for all rotation
    representations (RPMG / 9D / 6D / 4D / axis-angle / euler / 10D).
    All customized trainers should subclass this class.
    """

    def __init__(self, config):
        self.config = config
        self.clock = TrainClock()
        self.k_schedule = KSchedule(config.k_init, config.k_safe, config.max_iters)
        self.net = get_network(config).cuda()
        self.optimizer = optim.Adam(self.net.parameters(), config.lr)
        self.criterion = torch.nn.MSELoss(reduction='sum')
        self.writer = SummaryWriter(log_dir=self.config.log_dir)
        rpmg.logger_init(self.writer)

    def adjust_learning_rate_by_epoch(self, optimizer, cur_epoch, max_epoch):
        """Sets the learning rate to the initial LR decayed by 10 every _N_ epochs.

        Returns the learning rate that was applied to all param groups.
        """
        _N_ = max_epoch // 3  # adjust learning rate 3 times over training.
        lr = self.config.lr * (0.1 ** (max(cur_epoch, 0) // _N_))  # 300))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        return lr

    def save_ckpt(self, name=None):
        """Save checkpoint (clock + model + optimizer) during training for future restore."""
        if name is None:
            save_path = os.path.join(self.config.model_dir, "ckpt_epoch{}.pth".format(self.clock.epoch))
            print("Saving checkpoint epoch {}...".format(self.clock.epoch))
        else:
            save_path = os.path.join(self.config.model_dir, "{}.pth".format(name))
        if isinstance(self.net, nn.DataParallel):
            model_state_dict = self.net.module.cpu().state_dict()
        else:
            model_state_dict = self.net.cpu().state_dict()
        torch.save({
            'clock': self.clock.make_checkpoint(),
            'model_state_dict': model_state_dict,
            'optimizer_state_dict': self.optimizer.state_dict(),
        }, save_path)
        # state_dict() above was taken on CPU; move the net back to GPU.
        self.net.cuda()

    def load_ckpt(self, name=None):
        """Load checkpoint from a saved checkpoint ('latest' or an epoch number)."""
        name = name if name == 'latest' else "ckpt_epoch{}".format(name)
        load_path = os.path.join(self.config.model_dir, "{}.pth".format(name))
        if not os.path.exists(load_path):
            raise ValueError("Checkpoint {} not exists.".format(load_path))
        checkpoint = torch.load(load_path)
        print("Loading checkpoint from {} ...".format(load_path))
        if isinstance(self.net, nn.DataParallel):
            self.net.module.load_state_dict(checkpoint['model_state_dict'])
        else:
            self.net.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.clock.restore_checkpoint(checkpoint['clock'])

    def forward(self, data):
        """Run one forward pass and compute the mode-specific rotation loss.

        Args:
            data: dict with 'img' (input batch) and 'rot_mat' (b, 3, 3) GT rotations.

        Returns:
            (raw network output, scalar loss, per-sample geodesic error in degrees).
        """
        img = data.get('img').cuda()
        gt = data.get('rot_mat').cuda()  # (b, 3, 3)
        pred = self.net(img)  # (b, 9)
        if 'RPMG' in self.config.mode:
            if self.config.is_train:
                k = self.k_schedule.get_k(self.clock.iteration)
            else:
                # k is only used in training backward
                k = 0
            pred_orth = rpmg.RPMG.apply(pred, k, 0.01, gt, self.clock.iteration)
            loss = self.criterion(pred_orth, gt)
        elif '9D' in self.config.mode:
            pred_orth = tools.symmetric_orthogonalization(pred)
            if self.config.mode == '9D_SVD':
                loss = self.criterion(pred_orth, gt)
            elif self.config.mode == '9D_inf':
                loss = self.criterion(pred, gt.flatten(1))
            else:
                raise NotImplementedError
        elif '6D' in self.config.mode:
            pred_orth = tools.compute_rotation_matrix_from_ortho6d(pred)
            if self.config.mode == '6D_GM':
                loss = self.criterion(pred_orth, gt)
            elif self.config.mode == '6D_inf':
                # Bug fix: torch.cat takes a *sequence* of tensors; the original
                # passed the two column tensors positionally, which raises at runtime.
                # 6D GT = first two columns of the rotation matrix, concatenated.
                gt_6d = torch.cat((gt[:, :, 0], gt[:, :, 1]), 1)
                loss = self.criterion(pred, gt_6d)
            else:
                raise NotImplementedError
        elif '4D' in self.config.mode:
            pred_orth = tools.compute_rotation_matrix_from_quaternion(pred)
            if self.config.mode == '4D_norm':
                loss = self.criterion(pred_orth, gt)
            elif self.config.mode == '4D_inf':
                gt_q = tools.compute_quaternions_from_rotation_matrices(gt)  # (b, 4)
                loss = self.criterion(pred, gt_q)
            else:
                raise NotImplementedError
        elif 'axis_angle' in self.config.mode:
            pred_orth = tools.compute_rotation_matrix_from_axisAngle(pred)
            loss = self.criterion(pred_orth, gt)
        elif 'euler' in self.config.mode:
            pred_orth = tools.compute_rotation_matrix_from_euler(pred)
            loss = self.criterion(pred_orth, gt)
        elif '10D' in self.config.mode:
            pred_orth = tools.compute_rotation_matrix_from_10d(pred)
            loss = self.criterion(pred_orth, gt)
        else:
            raise NotImplementedError
        err_deg = torch.rad2deg(tools.compute_geodesic_distance_from_two_matrices(pred_orth, gt))  # batch
        return pred, loss, err_deg

    def train_func(self, data):
        """One step of training (forward + backward + optimizer step)."""
        self.net.train()
        pred, loss, err_deg = self.forward(data)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return pred, loss, err_deg

    def val_func(self, data):
        """One step of validation (no gradient tracking)."""
        self.net.eval()
        with torch.no_grad():
            pred, loss, err_deg = self.forward(data)
        return pred, loss, err_deg
if __name__ == '__main__':
    # Quick sanity print of the step learning-rate schedule used above:
    # lr starts at 1e-3 and is divided by 10 three times over the run.
    total_epochs = 1000
    decay_every = total_epochs // 3  # adjust learning rate 3 times.
    for ep in range(1000):
        lr = 1e-3 * (0.1 ** (max(ep, 0) // decay_every))  # 300))
        if ep % 10 == 0:
            print(f'epoch {ep}: {lr}')
| 6,269 | 37.703704 | 106 | py |
RPMG | RPMG-main/ModelNet_Img/S3.3D_Rotation/regQuatNet/regQuatNet.py | # coding: utf8
"""
@Author : Shuai Liao
"""
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
import torch
import torch.nn.functional as F
from basic.common import rdict
import numpy as np
from easydict import EasyDict as edict
from collections import OrderedDict as odict
from itertools import product
from pytorch_util.netutil.common_v2.trunk_alexnet_bvlc import AlexNet_Trunk
from pytorch_util.netutil.common_v2.trunk_vgg import VGG16_Trunk
from pytorch_util.netutil.common_v2.trunk_resnet import ResNet101_Trunk, ResNet50_Trunk
net_arch2Trunk = dict(
alexnet=AlexNet_Trunk,
vgg16=VGG16_Trunk,
resnet101=ResNet101_Trunk,
resnet50=ResNet50_Trunk
)
from pytorch_util.libtrain import copy_weights, init_weights_by_filling
from pytorch_util.torch_v4_feature import LocalResponseNorm # *
from pytorch_util.torch_3rd_layers import Maskout
from pytorch_util.torch_3rd_funcs import norm2unit, exp_Normalization
from basic.common import env, add_path
from lib.helper import *
loss_balance = 4.
def cls_pred(output, topk=(1,)):
    """Return the indices of the top-`max(topk)` scoring classes per row.

    Args:
        output: (batch, n_classes) score tensor.
        topk: tuple of k values; only the maximum is used.

    Returns:
        LongTensor of shape (batch, max(topk)) with class indices sorted by
        descending score.
    """
    maxk = max(topk)
    # (removed unused `batch_size` local)
    _, pred = output.topk(maxk, 1, True, True)
    return pred
def reg2d_pred2tgt(pr_sin, pr_cos):
    """Recover the angle (radians, in (-pi, pi]) from predicted sin/cos pairs."""
    return torch.atan2(pr_sin, pr_cos)
class _BaseReg_Net(nn.Module):
    """Common base for the quaternion regression heads below: a backbone trunk
    (alexnet/vgg16/resnet50/resnet101) plus a shared head-builder."""
    #
    @staticmethod
    def head_seq(in_size, reg_n_D, nr_cate=12, nr_fc8=334, init_weights=True):  # in_size=4096
        # Two-layer MLP head producing nr_cate * reg_n_D outputs (one reg_n_D
        # vector per category; the caller masks out the one for the GT category).
        seq = nn.Sequential(
            nn.Linear(in_size, nr_fc8),  # Fc8
            nn.ReLU(inplace=True),
            # nn.Dropout(),
            nn.Linear(nr_fc8, nr_cate * reg_n_D),  # Prob
        )
        if init_weights:
            init_weights_by_filling(seq, gaussian_std=0.005, kaiming_normal=True)  # fill weight with gaussian filler
        return seq

    """BVLC alexnet architecture (Note: slightly different from pytorch implementation.)"""

    def __init__(self, nr_cate=12, net_arch='alexnet', init_weights=True):  # AlexNet_Trunk
        super(_BaseReg_Net, self).__init__()
        _Trunk = net_arch2Trunk[net_arch]
        self.trunk = _Trunk(init_weights=init_weights)
        self.nr_cate = nr_cate
        # Trunk feature width: 2048 for resnet trunks, 4096 otherwise (fc7).
        self.top_size = 4096 if not self.trunk.net_arch.startswith('resnet') else 2048

    # NOTE(review): 'forword' looks like a typo for 'forward'. Since every
    # subclass defines its own forward(), nn.Module.__call__ never dispatches
    # to this stub — it is effectively dead code.
    def forword(self, x, label):
        raise NotImplementedError
# ---------------------------------------------------------------------[reg_Direct]
class reg_Direct_Net(_BaseReg_Net):  # No L2 norm at all,
    """ No any L2 normalization to guarantee prediction is on n-sphere, smooth l1 loss is used. """
    """BVLC alexnet architecture (Note: slightly different from pytorch implementation.)"""

    def __init__(self, nr_cate=12, net_arch='alexnet', init_weights=True):
        _BaseReg_Net.__init__(self, nr_cate=nr_cate, net_arch=net_arch, init_weights=init_weights)
        self.nr_cate = nr_cate
        self.reg_n_D = 4
        # -- Head architecture
        # Note: for quaternion, there's only one regression head (instead of 3 Euler angles (a,e,t)).
        # Thus, nr_fc8=996 (see design.py)
        self.head_quat = self.head_seq(self.top_size, self.reg_n_D, nr_cate=nr_cate, nr_fc8=996,
                                       init_weights=init_weights)
        # for maskout specific category
        self.maskout = Maskout(nr_cate=nr_cate)
        # loss module
        self.loss_handler = Smooth_L1_Loss_Handler()
        self.targets = ['quat']

    def forward(self, x, label):
        """label shape (batchsize, ) """
        x = self.trunk(x)  # Forward Conv and Fc6,Fc7
        #
        batchsize = x.size(0)  # .split(1, dim=1)
        # Note: quat(a,b,c,d) is on a 4d sphere and (x^2+y^2=1)
        # Predict a 4-vector per category, then keep only the GT category's one.
        x_quat = self.maskout(self.head_quat(x).view(batchsize, self.nr_cate, self.reg_n_D), label)
        # -- Normalize coordinate to a unit
        # x_quat = norm2unit(x_quat) #, nr_cate=self.nr_cate)
        Prob = edict(quat=x_quat)
        return Prob

    def compute_loss(self, Prob, GT):
        # Smooth-L1 between the raw (unnormalized) prediction and the GT quaternion.
        Loss = self.loss_handler.compute_loss(self.targets, Prob, GT)
        return Loss

    @staticmethod
    def compute_pred(Prob):
        x_quat = Prob['quat']
        # -- Normalize coordinate to a unit
        x_quat = norm2unit(x_quat)  # Note: here we do l2 normalization. Just to make predicted quaternion a unit norm.
        #
        batchsize = x_quat.size(0)
        # Get cpu data.
        batch_data = x_quat.data.cpu().numpy().copy()
        assert batch_data.shape == (batchsize, 4), batch_data.shape
        Pred = edict(quat=batch_data)
        return Pred
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------[reg_Sflat]
class reg_Sflat_Net(_BaseReg_Net):
    """ L2 normalization activation, with cosine proximity loss. """
    """BVLC alexnet architecture (Note: slightly different from pytorch implementation.)"""

    def __init__(self, nr_cate=12, net_arch='alexnet', init_weights=True):
        _BaseReg_Net.__init__(self, nr_cate=nr_cate, net_arch=net_arch, init_weights=init_weights)
        self.nr_cate = nr_cate
        self.reg_n_D = 4
        # -- Head architecture
        # Note: for quaternion, there's only one regression head (instead of 3 (for a,e,t)).
        # Thus, nr_fc8=996 (see design.py)
        self.head_quat = self.head_seq(self.top_size, self.reg_n_D, nr_cate=nr_cate, nr_fc8=996,
                                       init_weights=init_weights)
        # for maskout a,e,t
        self.maskout = Maskout(nr_cate=nr_cate)
        # loss module
        self.loss_handler = Cos_Proximity_Loss_Handler()
        self.targets = ['quat']

    def forward(self, x, label):
        """label shape (batchsize, ) """
        x = self.trunk(x)
        #
        batchsize = x.size(0)
        # Note: quat(a,b,c,d) is on a 4d sphere and (x^2+y^2=1)
        x_quat = self.maskout(self.head_quat(x).view(batchsize, self.nr_cate, self.reg_n_D), label)
        # -- Normalize coordinate to a unit
        # Unlike reg_Direct_Net, the prediction IS l2-normalized before the loss.
        x_quat = norm2unit(x_quat)  # , nr_cate=self.nr_cate)
        Prob = edict(quat=x_quat)
        return Prob

    def compute_loss(self, Prob, GT):
        # Cosine-proximity loss between unit prediction and GT quaternion.
        Loss = self.loss_handler.compute_loss(self.targets, Prob, GT)
        return Loss

    @staticmethod
    def compute_pred(Prob):
        x_quat = Prob['quat']
        batchsize = x_quat.size(0)
        # Get cpu data.
        batch_data = x_quat.data.cpu().numpy().copy()
        assert batch_data.shape == (batchsize, 4), batch_data.shape
        Pred = edict(quat=batch_data)
        return Pred
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------[reg_Sexp]
class reg_Sexp_Net(_BaseReg_Net):
    """ Spherical exponential activation + Sign classification, with cosine proximity loss """
    """BVLC alexnet architecture (Note: slightly different from pytorch implementation.)"""

    def __init__(self, nr_cate=12, net_arch='alexnet', init_weights=True):
        _BaseReg_Net.__init__(self, nr_cate=nr_cate, net_arch=net_arch, init_weights=init_weights)
        self.nr_cate = nr_cate
        self.reg_n_D = 4
        # Note: for a quaternion q=(a,b,c,d), we always ensure a>0, that this cos(theta/2)>0 --> theta in [0,pi]
        # Thus only b,c,d need sign prediction.
        dim_need_sign = 3
        _signs = list(product(*([(-1, 1)] * dim_need_sign)))  # [(-1, -1, -1), (-1, -1, 1), ..., (1, 1, 1)], with len=8
        self.signs = [(1,) + x for x in _signs]  # [(1, -1, -1, -1), (1, -1, -1, 1), ..., (1, 1, 1, 1)], with len=8
        # Bidirectional mapping between a sign pattern tuple and its class label.
        self.signs2label = odict(zip(self.signs, range(len(self.signs))))
        self.label2signs = Variable(torch.FloatTensor(self.signs)).cuda()  # make it as a Variable
        # -- Head architecture
        # Note: for quaternion, there's only one regression head (instead of 3 (for a,e,t)).
        # Thus, nr_fc8=996 (see design.py)
        self.head_sqrdprob_quat = self.head_seq(self.top_size, self.reg_n_D, nr_cate=nr_cate, nr_fc8=996,
                                                init_weights=init_weights)
        # each of 3 quaternion complex component can be + or -, that totally 2**3 possible sign categories.
        self.head_signcate_quat = self.head_seq(self.top_size, len(self.signs), nr_cate=nr_cate, nr_fc8=996,
                                                init_weights=init_weights)
        # for abs branch
        self.maskout = Maskout(nr_cate=nr_cate)
        self.softmax = nn.Softmax(dim=1).cuda()
        # for sgc branch
        self.maskout_sgc = Maskout(nr_cate=nr_cate)  # make a new layer to maskout sign classification only.
        # loss module
        self.loss_handler_abs_quat = Cos_Proximity_Loss_Handler()  # Neg_Dot_Loss_Handler() # Cos_Proximity_Loss_Handler() #
        self.loss_handler_sgc_quat = Cross_Entropy_Loss_Handler()
        self.targets = ['abs_quat', 'sgc_quat']
        self.gt_targets = ['quat']

    def forward(self, x, label):
        """label shape (batchsize, ) """
        x = self.trunk(x)  # Forward Conv and Fc6,Fc7
        #
        batchsize = x.size(0)
        # Note: squared probability
        x_sqr_quat = self.maskout(self.head_sqrdprob_quat(x).view(batchsize, self.nr_cate, self.reg_n_D),
                                  label)  # ========>>>>> Maskout output (B,4) hook gradient.
        # -- Exp and Normalize coordinate to a unit
        # softmax == spherical exponential: outputs sum to 1, so sqrt lies on the unit sphere.
        x_sqr_quat = self.softmax(x_sqr_quat)  # , nr_cate=self.nr_cate)
        # sign category head (totally 2^4=16 category)
        x_sgc_quat = self.maskout_sgc(self.head_signcate_quat(x).view(batchsize, self.nr_cate, len(self.signs)), label)
        Prob = edict(abs_quat=torch.sqrt(x_sqr_quat), sgc_quat=x_sgc_quat)
        return Prob

    def compute_loss(self, Prob, GT):
        # First get sign label from GT
        # == Formulate absolute value of quaternion
        GT_abs_quat = torch.abs(GT.quat)
        # == Formulate signs label of quaternion
        GT_sign_quat = torch.sign(GT.quat)
        GT_sign_quat[GT_sign_quat == 0] = 1  # make sign of '0' as 1
        signs_tuples = [tuple(x) for x in GT_sign_quat.data.cpu().numpy().astype(np.int32).tolist()]
        for signs_tuple in signs_tuples:  # q and -q gives the same rotation.
            assert signs_tuple[0] > 0, "Need GT to be all positive on first dim of quaternion: %s" % GT  # assert all quaternion first dim is positive.
        # signs label
        GT_sgc_quat = Variable(torch.LongTensor([self.signs2label[signs_tuple] for signs_tuple in signs_tuples]))
        if GT.quat.is_cuda:
            GT_sgc_quat = GT_sgc_quat.cuda()
        # here just because compute_loss need a same key from Prob and GT,
        # so we just give a fake name to GT.sqr_quat as '_GT.logsqr_quat'.
        _GT = edict(abs_quat=GT_abs_quat, sgc_quat=GT_sgc_quat)
        Loss_abs_quat = self.loss_handler_abs_quat.compute_loss(['abs_quat'], Prob, _GT)
        Loss_sgc_quat = self.loss_handler_sgc_quat.compute_loss(['sgc_quat'], Prob, _GT)
        # To add loss weights here.
        Loss = edict(abs_quat=Loss_abs_quat['abs_quat'] * 10,  # / 5.
                     sgc_quat=Loss_sgc_quat['sgc_quat'], )
        return Loss

    def compute_pred(self, Prob):
        # Combine the magnitude branch with the predicted sign pattern.
        x_abs_quat = Prob['abs_quat']  # torch.sqrt(torch.exp(Prob['logsqr_quat']))
        x_sgc_quat = Prob['sgc_quat']
        batchsize = x_abs_quat.size(0)
        #
        sign_ind = cls_pred(x_sgc_quat, topk=(1,)).data.view(-1, )
        item_inds = torch.from_numpy(np.arange(batchsize)).cuda()
        _label_shape = self.label2signs.size()
        # Look up each sample's sign pattern from its predicted sign class.
        x_sign_quat = self.label2signs.expand(batchsize, *_label_shape)[item_inds, sign_ind]
        x_quat = x_abs_quat * x_sign_quat
        # Get cpu data.
        batch_quat = x_quat.data.cpu().numpy().copy()
        batchsize = x_quat.size(0)
        assert batch_quat.shape == (batchsize, 4), batch_quat.shape
        #
        Pred = edict(quat=batch_quat)
        return Pred
# ---------------------------------------------------------------------
# ----------------------------------------------------------------------------------
if __name__ == '__main__':
    # NOTE(review): `reg2D_Net` is not defined in this file — this smoke test
    # looks stale (it would raise NameError) and presumably predates the
    # quaternion heads above. TODO: confirm and update to one of the
    # reg_*_Net classes.
    model = reg2D_Net().copy_weights()
    # import numpy as np
    dummy_batch_data = np.zeros((2, 3, 227, 227), dtype=np.float32)
    dummy_batch_label = np.zeros((2, 1), dtype=np.int64)
    dummy_batch_data = torch.autograd.Variable(torch.from_numpy(dummy_batch_data))
    dummy_batch_label = torch.autograd.Variable(torch.from_numpy(dummy_batch_label))
    Pred = model(dummy_batch_data, dummy_batch_label)
    # print (Prob.a)
| 12,708 | 38.715625 | 143 | py |
RPMG | RPMG-main/ModelNet_Img/S3.3D_Rotation/lib/helper.py | # coding: utf8
"""
@Author : Shuai Liao
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from easydict import EasyDict as edict
from collections import OrderedDict as odict
from itertools import product
#
from collections import OrderedDict
class Cross_Entropy_Loss_Handler:
    """Applies nn.CrossEntropyLoss independently to each prediction target."""

    def __init__(self):
        self.cross_entropy_loss = nn.CrossEntropyLoss().cuda()

    # interface function
    def compute_loss(self, tgts, Pred, GT):
        """Return an edict mapping each name in `tgts` (e.g. ['a', 'e', 't'])
        to the cross-entropy between Pred[name] and GT[name]."""
        losses = edict()
        for name in tgts:
            losses[name] = self.cross_entropy_loss(Pred[name], GT[name])
        return losses
class Neg_Dot_Loss_Handler:
    """Negative dot-product loss: batch mean of -<GT[tgt], Pred[tgt]>."""

    # Bug fix: this was misspelled `__init_`, so it was never invoked as the
    # initializer (object.__init__ ran instead). Harmless here since the body
    # is empty, but corrected for clarity/consistency with the other handlers.
    def __init__(self):
        pass

    def compute_loss(self, tgts, Pred, GT):
        """ tgts: list of target names
            GT  : dict of ground truth for each target
            Pred: dict of prediction for each target
        """
        Loss = edict()
        for tgt in tgts:
            # Bug fixed on 22 Aug 2018: torch.dot only accepts 1-dim tensors,
            # so the row-wise dot is done as an elementwise product + sum.
            Loss[tgt] = torch.mean(-torch.sum(GT[tgt] * Pred[tgt], dim=1))
        return Loss
class Cos_Proximity_Loss_Handler:
    """Cosine-proximity loss: batch mean of (1 - cosine_similarity) per target."""

    def __init__(self):
        # self.period_l1_loss = PeriodL1Loss(period=period).cuda()
        self.cos_sim = nn.CosineSimilarity().cuda()

    # interface function
    def compute_loss(self, tgts, Pred, GT):
        """Return an edict mapping each name in `tgts` (here expected to be
        ['quat']) to mean(1 - cos(Pred[name], GT[name])); the 1- shift keeps
        the loss non-negative."""
        losses = edict()
        for name in tgts:
            losses[name] = torch.mean(1 - self.cos_sim(Pred[name], GT[name]))
        return losses
class Smooth_L1_Loss_Handler:
    """Applies nn.SmoothL1Loss independently to each prediction target."""

    def __init__(self):
        self.smooth_l1_loss = nn.SmoothL1Loss().cuda()

    def compute_loss(self, tgts, Pred, GT):
        """Return an edict mapping each name in `tgts` (e.g. ['a', 'e', 't'])
        to SmoothL1(Pred[name], GT[name]). Prediction goes first, GT second."""
        losses = edict()
        for name in tgts:
            losses[name] = self.smooth_l1_loss(Pred[name], GT[name])
        return losses
| 2,518 | 30.4875 | 118 | py |
RPMG | RPMG-main/ModelNet_Img/S3.3D_Rotation/lib/datasets/Dataset_Base.py | """
@Author : Shuai Liao
"""
import os, sys
import numpy as np
from math import ceil, floor, pi
import torch
from torch.utils.data import Dataset, DataLoader
from collections import OrderedDict as odict
import cv2
path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(os.path.join(path, 'pylibs'))
from basic.common import add_path, env, rdict, cv2_wait, cv2_putText, is_py3
if is_py3:
import pickle
else:
import cPickle as pickle
from lmdb_util import ImageData_lmdb
from numpy_db import npy_table, npy_db, dtype_summary, reorder_dtype
this_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = this_dir + '/../../../dataset' # where the dataset directory is.
assert os.path.exists(base_dir)

# ModelNet10 / ModelNet40 category name lists; list order defines the label ids.
cate10 = ['bathtub', 'bed', 'chair', 'desk', 'dresser', 'monitor', 'night_stand', 'sofa', 'table', 'toilet']
cate40 = ['airplane', 'bathtub', 'bed', 'bench', 'bookshelf', 'bottle', 'bowl',
          'car', 'chair', 'cone', 'cup', 'curtain', 'desk', 'door', 'dresser',
          'flower_pot', 'glass_box', 'guitar', 'keyboard', 'lamp', 'laptop',
          'mantel', 'monitor', 'night_stand', 'person', 'piano', 'plant',
          'radio', 'range_hood', 'sink', 'sofa', 'stairs', 'stool',
          'table', 'tent', 'toilet', 'tv_stand', 'vase', 'wardrobe', 'xbox']

## Net configurations that are independent of task
# Per-backbone settings: train/test batch sizes, input resolution, BGR pixel
# means (Caffe convention) and the RNG seed.
netcfg = rdict(  # configuration for alexnet
    alexnet=rdict(TRAIN=rdict(BATCH_SIZE=200),
                  TEST=rdict(BATCH_SIZE=200),
                  INPUT_SHAPE=(227, 227),  # resize_shape
                  PIXEL_MEANS=np.array([[[102.9801, 115.9465, 122.7717]]]),
                  RNG_SEED=3, ),  # ignore_label=-1,
    # configuration for vgg
    vgg16=rdict(TRAIN=rdict(BATCH_SIZE=40),  # 64 20
                TEST=rdict(BATCH_SIZE=20),
                INPUT_SHAPE=(224, 224),
                PIXEL_MEANS=np.array([[[102.9801, 115.9465, 122.7717]]]),
                RNG_SEED=3, ),
    # configuration for resnet50
    resnet50=rdict(TRAIN=rdict(BATCH_SIZE=100),  # 128
                   TEST=rdict(BATCH_SIZE=64),
                   INPUT_SHAPE=(224, 224),
                   PIXEL_MEANS=np.array([[[102.9801, 115.9465, 122.7717]]]),
                   RNG_SEED=3, ),
    # configuration for resnet101
    resnet101=rdict(TRAIN=rdict(BATCH_SIZE=64),
                    TEST=rdict(BATCH_SIZE=20),
                    INPUT_SHAPE=(224, 224),
                    PIXEL_MEANS=np.array([[[102.9801, 115.9465, 122.7717]]]),
                    RNG_SEED=3, ),
    # configuration for resnet152
    resnet152=rdict(TRAIN=rdict(BATCH_SIZE=32),
                    TEST=rdict(BATCH_SIZE=10),
                    INPUT_SHAPE=(224, 224),
                    PIXEL_MEANS=np.array([[[102.9801, 115.9465, 122.7717]]]),
                    RNG_SEED=3, ),
)
def get_anno(db_path, category_target):  # target=''
    """Load per-view pose annotations for one category from an lmdb directory.

    Reads `viewID2quat.pkl` / `viewID2euler.pkl` from `db_path` and builds a
    recarray of img_view_anno records for all views, then returns only the
    (keys, records) whose category equals `category_target`.

    Returns:
        (keys_cate, rcs_cate): np.ndarray of view ids and the matching
        recarray of annotation records.
    """
    # TO Move to generation of data db.
    viewID2quat = pickle.load(open(os.path.join(db_path, 'viewID2quat.pkl'), 'rb'), encoding='latin1')
    viewID2euler = pickle.load(open(os.path.join(db_path, 'viewID2euler.pkl'), 'rb'), encoding='latin1')
    keys = np.array(list(viewID2quat.keys()))
    idx_target = []
    add_path(this_dir)
    from db_type import img_view_anno
    rcs = np.zeros((len(viewID2quat.keys()),), dtype=img_view_anno).view(np.recarray)
    for i, (key, quat) in enumerate(viewID2quat.items()):
        rc = rcs[i]
        rc.img_id = key  # bathtub_0107.v001
        cad_id, viewId = key.split('.')
        category = cad_id[:cad_id.rfind('_')]
        if category == category_target:
            idx_target.append(i)
        rc.category = category
        rc.cad_id = cad_id
        rc.so3.quaternion = quat if quat[0] > 0 else -quat  # q and -q give the same rotation matrix.
        # Make sure all q[0]>0, that is rotation angle in [0,pi]
        rc.so3.euler = viewID2euler[key]
    rcs_cate = rcs[idx_target]
    keys_cate = keys[idx_target]
    return keys_cate, rcs_cate
class Dataset_Base(Dataset):
    """Base dataset over the ModelNet10-SO3 lmdb renders.

    Loads images from an ImageData_lmdb and pose annotations via get_anno();
    subclasses implement __getitem__ to build the per-sample dict.
    """
    # Maps collection name -> lmdb database directory name.
    collection2dbname = \
        dict(train='train_100V.Rawjpg.lmdb',  # 'train_20V.Rawjpg.lmdb'
             test='test_20V.Rawjpg.lmdb',
             )

    def __init__(self, category, collection='train', net_arch='alexnet', sampling=1.0, sample_inds=None):
        """
        Args:
            category: ModelNet category to load (e.g. 'chair').
            collection: 'train' or 'test'.
            net_arch: backbone key into netcfg (controls input shape / batch size).
            sampling: fraction of the data to randomly subsample (0 < sampling <= 1).
            sample_inds: explicit indices to keep; ignored when sampling < 1.
        """
        self.net_arch = net_arch
        self.cfg = netcfg[net_arch]
        self.collection = collection
        self.cates = cate10
        #
        self.cate2ind = odict(zip(self.cates, range(len(self.cates))))
        # get im_db
        self.db_path = os.path.join(base_dir, 'ModelNet10-SO3', self.collection2dbname[collection])
        assert self.db_path is not None, '%s is not exist.' % (self.db_path)
        self.datadb = ImageData_lmdb(self.db_path)
        # Get anno
        self.keys, self.recs = get_anno(self.db_path, category)
        assert sampling > 0 and sampling <= 1.0, sampling
        if sampling < 1.0:
            print('Sampling dataset: %s' % sampling)
            _inds = np.arange(len(self.keys))
            sample_inds = np.random.choice(_inds, size=int(len(_inds) * sampling), replace=False)
            sample_inds.sort()
            self.keys, self.recs = [self.keys[x] for x in sample_inds], self.recs[sample_inds]
        elif sample_inds is not None:
            self.keys, self.recs = [self.keys[x] for x in sample_inds], self.recs[sample_inds]
        self.key2ind = dict(zip(self.keys, range(len(self.keys))))
        # self.resize_shape = rsz_shape
        # BGR mean pixel (Caffe convention), same values as cfg.PIXEL_MEANS.
        self.mean_pxl = np.array([102.9801, 115.9465, 122.7717], np.float32)

    #
    def _image2data(self, img, data_normal_type='caffe'):
        """Convert an HWC uint8 image to the network's CHW float input."""
        if self.net_arch == 'alexnet':
            # Pad 224x224 renders to the 227x227 alexnet input by edge replication.
            img = np.pad(img, [(0, 3), (0, 3), (0, 0)], mode='edge')  # (0,0,3,3)
        # caffe-style
        if data_normal_type == 'caffe':
            # Subtract mean pixel
            data = (img - self.mean_pxl).astype(np.float32) / 255.
            # Transpose
            data = data.transpose((2, 0, 1))  # H,W,C -> C,H,W
        elif data_normal_type == 'pytorch':
            # -# img = cv2.cvtColor( img, cv2.COLOR_GRAY2RGB )
            # -# if self.transform is not None:
            # -#     img = self.transform(img) # return (3,224,224)
            raise NotImplementedError
        else:
            raise NotImplementedError
        return data

    def _get_image(self, rc):
        # Fetch the raw image for this record from lmdb and preprocess it.
        img_id = rc.img_id
        img = self._image2data(self.datadb[img_id])
        return img  # if not flip else cv2.flip( img, 1 )

    def __len__(self):
        return len(self.recs)

    def __getitem__(self, idx):
        rcobj = self.recs[idx]
        cate = rcobj.category
        obj_id = rcobj.obj_id
        image_id = rcobj.src_img.image_id
        """ To implement construction of sample dictionary.
            To get image data: call 'self.roiloader(rcobj)'
        """
        print('This is an interface method, and you need to implement it in inherited class.')
        raise NotImplementedError

    def get_recs(self, query_keys):
        """Return the annotation records for the given view-id keys."""
        inds = [self.key2ind[k] for k in query_keys]
        return self.recs[inds]

    # interface method
    def _vis_minibatch(self, sample_batched):
        """Visualize a mini-batch for debugging."""
        for i, (idx, label, quat, data) in enumerate(zip(sample_batched['idx'],  # note: these are tensors
                                                         sample_batched['label'],
                                                         sample_batched['quat'],
                                                         sample_batched['data'])):
            rc = self.recs[idx]
            # print idx
            # Undo the mean subtraction so the image is viewable.
            im = data.numpy().transpose((1, 2, 0)).copy()
            im += self.cfg.PIXEL_MEANS
            im = im.astype(np.uint8)  # xmin, ymax
            a, b, c, d = quat
            cv2_putText(im, (0, 20), rc.category, bgcolor=(255, 255, 255))
            text = '%.1f %.1f %.1f %.1f' % (a, b, c, d)
            cv2_putText(im, (0, 40), text, bgcolor=(255, 255, 255))
            cv2.imshow('im', im)
            cv2_wait()
        # pass
class Dataset_Example(Dataset_Base):
    """Minimal reference implementation of Dataset_Base.__getitem__."""

    def __getitem__(self, idx):
        record = self.recs[idx]
        # Sample: index, integer category label, GT quaternion, preprocessed image.
        return dict(idx=idx,
                    label=self.cate2ind[record.category],
                    quat=record.so3.quaternion,
                    data=self._get_image(record))
if __name__ == '__main__':
    # Visual smoke test: iterate a small dataloader and show each minibatch.
    def test_dataloader():
        # NOTE(review): Dataset_Base.__init__ requires a positional `category`
        # argument which is not passed here — this call would raise TypeError;
        # looks stale. TODO: confirm and pass a category (e.g. 'chair').
        dataset = Dataset_Example(collection='test', sampling=0.2)
        #
        dataloader = DataLoader(dataset, batch_size=4,
                                shuffle=False, num_workers=1)
        for i_batch, sample_batched in enumerate(dataloader):
            dataset._vis_minibatch(sample_batched)

    test_dataloader()
| 8,875 | 38.802691 | 108 | py |
RPMG | RPMG-main/ModelNet_Img/S3.3D_Rotation/lib/datasets/dataset_regQuatNet.py | """
@Author : Shuai Liao
"""
import numpy as np
from Dataset_Base import Dataset_Base, netcfg
import torch
from torch.utils.data import Dataset, DataLoader
import cv2
from basic.common import add_path, env, rdict, cv2_wait, cv2_putText
# ===============================================================
def pred2angle(a, e, t):
    """Convert (azimuth, elevation, tilt) from radians to degrees.

    Azimuth is wrapped into [0, 360); elevation and tilt are returned unwrapped.
    """
    azimuth = (a * 180. / np.pi) % 360
    elevation = e * 180. / np.pi
    tilt = t * 180. / np.pi
    return azimuth, elevation, tilt
def pred2angle_shift45(a, e, t):
    """Like pred2angle, but subtracts the 45-degree azimuth shift applied at
    training time before wrapping azimuth into [0, 360)."""
    # shift 45 degree back
    azimuth = (a * 180. / np.pi - 45) % 360
    elevation = e * 180. / np.pi
    tilt = t * 180. / np.pi
    return azimuth, elevation, tilt
class Dataset_regQuatNet(Dataset_Base):
    """Dataset for the quaternion-regression networks: each sample carries the
    record index, integer category label, GT quaternion and preprocessed image."""

    def __init__(self, *args, **kwargs):
        super(Dataset_regQuatNet, self).__init__(*args, **kwargs)

    def __getitem__(self, idx):
        record = self.recs[idx]
        return dict(idx=idx,
                    label=self.cate2ind[record.category],
                    quat=record.so3.quaternion,
                    data=self._get_image(record))
# build class alias
# All three regression heads (Direct / Sexp / Sflat) consume identical samples,
# so they share the same dataset implementation under different names.
Dataset_reg_Direct = Dataset_regQuatNet
Dataset_reg_Sexp = Dataset_regQuatNet
Dataset_reg_Sflat = Dataset_regQuatNet
if __name__ == '__main__':
    np.random.seed(3)

    # Smoke test: dump the sampled key list to a file, then visualize batches.
    def test_dataloader(collection='test', sampling=0.2):
        # NOTE(review): Dataset_Base.__init__ requires a positional `category`
        # argument which is not passed here — this call would raise TypeError;
        # looks stale. TODO: confirm and pass a category.
        dataset = Dataset_reg_Sexp(collection=collection, sampling=sampling)
        print(len(dataset.keys))
        anno_path = dataset.db_path
        sampling_file = anno_path + '/%s_sampling%.2f.txt' % (collection, sampling)
        with open(sampling_file, 'w') as f:
            f.write('\n'.join(dataset.keys))
        print('sampling_file:', sampling_file)
        #
        dataloader = DataLoader(dataset, batch_size=50,
                                shuffle=False, num_workers=1, sampler=None)
        for i_batch, sample_batched in enumerate(dataloader):
            print(sample_batched)
            dataset._vis_minibatch(sample_batched)

    test_dataloader()
| 1,914 | 27.58209 | 83 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/lmdb_util/imagedata_lmdb.py | """
@Author : Shuai Liao
"""
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from basic.common import add_path, env, Timer, Checkpoint, cv2_wait, is_py3
from basic.util import load_yaml, dump_yaml
import cv2
import numpy as np
import lmdb
from pprint import pprint
if is_py3:
import pickle
else:
import cPickle as pickle
import random
'''Prepare data to be store in lmdb.
Return key, value (serialized bytes data)
'''
class Data_Handler:
    """Interface for (de)serializing one image record for lmdb storage.

    pack() maps (imgID, imgpath) -> (key bytes, value bytes);
    unpack() maps stored value bytes back to an image / array.
    """

    def pack(self, imgID, imgpath):
        # Bug fix: `raise NotImplemented` raises TypeError (NotImplemented is
        # a sentinel value, not an exception class). Subclasses must override.
        raise NotImplementedError

    def unpack(self, key):
        raise NotImplementedError
class Handle_Npyarr(Data_Handler):
    """Stores images as pickled numpy arrays (key = utf8 imgID, value = ndarray pickle)."""

    def pack(self, imgID, imgpath):
        img = cv2.imread(imgpath)
        # Sanity check: raises if the read failed (None) or image is not HWC.
        h, w, c = img.shape
        #
        key = imgID.encode('utf8')  # ascii
        # Bug fix: ndarray.dumps() was deprecated and removed in modern NumPy;
        # it was defined as pickle.dumps(arr, protocol=2), used verbatim here.
        value = pickle.dumps(img, protocol=2)
        return key, value

    def pack_from_npyarr(self, imgID, npyarr):
        #
        key = imgID.encode('utf8')  # ascii
        value = pickle.dumps(npyarr, protocol=2)  # equivalent to the removed ndarray.dumps()
        return key, value

    def unpack(self, data_bytes):
        # Bug fix: np.loads was removed from NumPy; it was an alias for
        # pickle.loads, so the replacement is byte-compatible with old records.
        if is_py3:
            # latin1 lets py3 unpickle arrays that were pickled under py2.
            return pickle.loads(data_bytes, encoding='latin1')
        else:
            return pickle.loads(data_bytes)
class Handle_Rawjpg(Data_Handler):
    """Stores images as raw JPEG bytes; decodes with cv2.imdecode on read."""

    def __init__(self, always_load_color=True):
        # Decode flag used by unpack(): force 3-channel BGR, or keep as stored.
        if always_load_color:
            self.cv2_IMREAD_FLAG = cv2.IMREAD_COLOR
        else:
            self.cv2_IMREAD_FLAG = cv2.IMREAD_UNCHANGED

    def pack(self, imgID, imgpath):
        # The file's raw bytes are stored unchanged (no re-encode).
        rawbytes = open(imgpath, 'rb').read()
        #
        key = imgID.encode('utf8')  # ascii
        value = rawbytes
        return key, value

    def pack_from_npyarr(self, imgID, npyarr):
        # JPEG-encode an in-memory array with cv2's default quality.
        retval, buf = cv2.imencode('.jpg', npyarr)
        assert retval
        # Bug fix: ndarray.tostring() was deprecated and removed (NumPy 2.0);
        # tobytes() is the byte-identical replacement.
        rawbytes = buf.tobytes()
        #
        key = imgID.encode('utf8')  # ascii
        value = rawbytes
        return key, value

    def unpack(self, data_bytes):
        data_npyarr = np.frombuffer(data_bytes, np.uint8)
        img = cv2.imdecode(data_npyarr,
                           self.cv2_IMREAD_FLAG)  # cv2.IMREAD_UNCHANGED) # IMREAD_COLOR) # CV_LOAD_IMAGE_COLOR)
        return img
class Handle_Rawebp(Data_Handler):
    """Webp format for storage.
         In comparison to the lossless compression of PNG,
         or lossy compression of JPEG, Webp has noticeably advantage.
         QUALITY from 0 to 100 (the higher is the better). Default value is 95.
    """

    def __init__(self, always_load_color=True, QUALITY=95):
        # Decode flag used by unpack(); QUALITY controls the lossy encoder.
        if always_load_color:
            self.cv2_IMREAD_FLAG = cv2.IMREAD_COLOR
        else:
            self.cv2_IMREAD_FLAG = cv2.IMREAD_UNCHANGED
        self.QUALITY = QUALITY

    def pack(self, imgID, imgpath):
        """Store the on-disk file's bytes verbatim (assumed already WebP)."""
        rawbytes = open(imgpath, 'rb').read()
        #
        key = imgID.encode('utf8')  # ascii
        value = rawbytes
        return key, value

    def pack_from_npyarr(self, imgID, npyarr):
        """WebP-encode an in-memory array at self.QUALITY before storage."""
        retval, buf = cv2.imencode('.webp', npyarr, [cv2.IMWRITE_WEBP_QUALITY, self.QUALITY])
        assert retval == True
        # BUGFIX: ndarray.tostring() was removed in NumPy 2.0; tobytes() returns
        # the identical byte string.
        rawbytes = buf.tobytes()
        #
        key = imgID.encode('utf8')  # ascii
        value = rawbytes
        return key, value

    def unpack(self, data_bytes):
        """Decode stored WebP bytes into a numpy image."""
        data_npyarr = np.frombuffer(data_bytes, np.uint8)
        img = cv2.imdecode(data_npyarr,
                           self.cv2_IMREAD_FLAG)  # cv2.IMREAD_UNCHANGED)  # IMREAD_COLOR) # CV_LOAD_IMAGE_COLOR)
        return img
class Handle_Rawpng(Data_Handler):
    '''
        cv::IMWRITE_PNG_COMPRESSION
        For PNG, it can be the compression level from 0 to 9. A higher value means a smaller size and longer compression time.
        If specified, strategy is changed to IMWRITE_PNG_STRATEGY_DEFAULT (Z_DEFAULT_STRATEGY). Default value is 1 (best speed setting).

        config yaml:
            {   png_dtype  : xx,   # uint8 | uint16
                npy_dtype  : xx,   # float32
                min        : xx,
                max        : xx,
            }
    '''

    def __init__(self, always_load_color=True, remap=None):
        """e.g.
             remap=dict(png_dtype='uint16',npy_dtype='float32', min=[0,0], max=[1,1] )
        """
        if always_load_color:
            self.cv2_IMREAD_FLAG = cv2.IMREAD_COLOR
        else:
            self.cv2_IMREAD_FLAG = cv2.IMREAD_UNCHANGED
        # BUGFIX: always define remap/c -- previously they were only set when a
        # remap dict was given, so a plain Handle_Rawpng() crashed with
        # AttributeError in pack()/unpack().
        self.remap = None
        self.c = 0
        if remap is not None:
            self.cv2_IMREAD_FLAG = cv2.IMREAD_UNCHANGED  # if remap is need, always load unchanged.
            #
            if hasattr(remap['min'], '__len__'):
                assert len(remap['min']) == len(remap['max'])  # channel of pixels (Can be 1,2,3,4)
                self.c = len(remap['min'])
            else:
                self.c = 0
            assert remap['png_dtype'] in ['uint8', 'uint16'], remap['png_dtype']
            self.png_PXL_MAX = dict(uint8=255, uint16=65535)[remap['png_dtype']]
            # NOTE(review): eval on config-provided dtype strings -- the yaml
            # source must be trusted (validated above against a whitelist).
            remap['npy_dtype'] = eval('np.' + remap['npy_dtype'])  # make it as np.dtype
            remap['png_dtype'] = eval('np.' + remap['png_dtype'])  # make it as np.dtype
            remap['min'] = np.array(remap['min'], dtype=remap['npy_dtype'])
            remap['max'] = np.array(remap['max'], dtype=remap['npy_dtype'])
            self.remap = remap

    def pack(self, imgID, imgpath):
        """Store PNG file bytes verbatim, or re-encode through the remap path."""
        rawbytes = open(imgpath, 'rb').read()
        if self.remap is not None:
            # BUGFIX: np.frombuffer needs an explicit uint8 dtype here; the old
            # default (float64) made cv2.imdecode reject the buffer.
            img = cv2.imdecode(np.frombuffer(rawbytes, np.uint8), self.cv2_IMREAD_FLAG)
            return self.pack_from_npyarr(imgID, img)
        #
        key = imgID.encode('utf8')  # ascii
        value = rawbytes
        return key, value

    def pack_from_npyarr(self, imgID, npyarr):
        """PNG-encode an array; with remap, scale [min,max] floats into the full
        uint8/uint16 pixel range first (2-channel data is zero-padded to 3)."""
        if self.remap is not None:
            # convert to a pixel in (min,max) -> (0,1.0)
            assert npyarr.dtype == self.remap['npy_dtype']
            if self.c > 0:
                assert len(npyarr.shape) == 3 and npyarr.shape[2] == self.c
            npyarr = (npyarr - self.remap['min']) / (self.remap['max'] - self.remap['min'])  # map to [0.0,1.0]
            npyarr = (npyarr * self.png_PXL_MAX).astype(
                self.remap['png_dtype'])  # map to [0,255] or [0,65535]  # uint8 | uint16
            h, w = npyarr.shape[:2]
            if self.c == 2:  # pad 3rd channel with 0
                _npyarr = np.zeros((h, w, 3), dtype=npyarr.dtype)
                _npyarr[:, :, :self.c] = npyarr
                npyarr = _npyarr
        retval, buf = cv2.imencode('.png', npyarr)
        assert retval == True
        # BUGFIX: ndarray.tostring() was removed in NumPy 2.0; tobytes() is the
        # drop-in replacement.
        rawbytes = buf.tobytes()
        #
        key = imgID.encode('utf8')  # ascii
        value = rawbytes
        return key, value

    def unpack(self, data_bytes):
        """Decode stored PNG bytes; with remap, map pixels back to [min,max]."""
        data_npyarr = np.frombuffer(data_bytes, np.uint8)
        img = cv2.imdecode(data_npyarr,
                           self.cv2_IMREAD_FLAG)  # cv2.IMREAD_UNCHANGED)  # cv2.IMREAD_COLOR) # CV_LOAD_IMAGE_COLOR)
        if self.remap is not None:
            if self.c > 0:
                img = (img[:, :, :self.c] / self.png_PXL_MAX).astype(self.remap['npy_dtype'])  # map to [0.0,1.0]
            else:  # single channel image
                img = (img / self.png_PXL_MAX).astype(self.remap['npy_dtype'])  # map to [0.0,1.0]
            img = img * (self.remap['max'] - self.remap['min']) + self.remap[
                'min']  # map to [min,max]   # * 65535  # uint16
            assert img.dtype == self.remap['npy_dtype']  # [TODO] TO REMOVE
        return img
def _bytes(key):
if isinstance(key, np.string_) or isinstance(key, np.bytes_):
return key.tobytes()
elif isinstance(key, str):
return key.encode('utf8')
elif isinstance(key, bytes):
return key
else:
print("Unknown type of key: ", type(key))
raise NotImplementedError
# def resize2max(img, h=None, w=None):
# assert not (h is None and w is None)
def pad_as_squared_img(img, Side=150):
    """Resize `img` so its longer side equals `Side`, then center it on a white
    (255) square canvas of that side length.

    Side=None or Side<=0 keeps the original resolution (only padding is done).
    Grayscale input is returned with an explicit trailing channel axis.
    """
    rows, cols = img.shape[:2]
    longest = max(rows, cols)
    factor = 1.0 if (Side is None or Side <= 0) else Side / float(longest)
    img = cv2.resize(img, None, fx=factor, fy=factor, interpolation=cv2.INTER_AREA)  # INTER_LINEAR)
    # Ensure a channel axis so the padding logic below is shape-uniform.
    if img.ndim == 2:
        img = img[:, :, np.newaxis]
    rows, cols, chans = img.shape
    longest = max(rows, cols)
    canvas = np.full((longest, longest, chans), 255, dtype=img.dtype)
    if cols < longest:
        offset = (longest - cols) // 2
        canvas[:, offset:offset + cols, :] = img
    else:
        offset = (longest - rows) // 2
        canvas[offset:offset + rows, :, :] = img
    return canvas
class ImageData_lmdb:
    """lmdb-backed key -> image store.

    The db_path suffix selects the value (de)serializer:
    ``.Rawjpg.lmdb`` (JPEG bytes), ``.Rawpng.lmdb`` (PNG bytes, optional value
    remap via remap.yml), ``.Npyarr.lmdb`` (pickled arrays), ``.Rawebp.lmdb``
    (WebP bytes). Modes: 'r' read-only, 'w' create (wipes any existing db!),
    'a+' append to an existing db.
    """

    def __init__(self, db_path, mode='r', map_N=30000, max_readers=256, always_load_color=True, silent=False,
                 **kwargs):  # map_N=250000
        """ [kwargs examples]:
                remap=dict(png_dtype='uint16',npy_dtype='float32', min=[0,0], max=[1,1] )   # for png handler of opt-flow (2-channel float32 img)
        """
        self.db_path = os.path.abspath(db_path.rstrip("/"))  # remove last '/' if there's any.
        # assert os.path.exists(self.db_path)
        self.mode = mode
        if self.mode == 'w':
            # NOTE(review): 'w' deletes any existing db via an unescaped shell
            # command -- db_path must be trusted.
            os.system('rm -rf %s' % self.db_path)  # overwrite    if overwrite and os.path.exists(self.db_path):
        elif self.mode in ['r', 'a+']:
            assert os.path.exists(self.db_path), "[Path not exists] %s" % self.db_path
        else:
            raise NotImplementedError
        # self.lmdb_env = lmdb.open(self.db_path, map_size=self.map_size)
        if self.mode == 'r':
            # map_size: upper bound on db size, sized as map_N raw 256x256x3 float images.
            self.map_size = (map_N * 256 * 256 * 3 * 4)
            self.lmdb_env = lmdb.open(self.db_path, map_size=self.map_size, max_readers=max_readers, readahead=True,
                                      readonly=True, lock=False)  #
        else:
            # Write modes reserve 10x headroom to avoid lmdb MapFullError while filling.
            self.map_size = (map_N * 256 * 256 * 3 * 4) * 10
            self.lmdb_env = lmdb.open(self.db_path, map_size=self.map_size, max_readers=max_readers)  # lock=True
        # Select the bytes (pack/unpack) handler from the db_path suffix.
        if self.db_path.endswith('.Rawjpg.lmdb'):
            if not silent: print("[Using] Handle_Rawjpg")
            self.handle = Handle_Rawjpg(always_load_color=always_load_color)  # bytes data handler (pack/unpack)
        elif self.db_path.endswith('.Rawpng.lmdb'):
            if not silent: print("[Using] Handle_Rawpng")
            yamlfile = os.path.join(db_path, 'remap.yml')
            # NOTE(review): this branch tests 'a' while the rest of the class
            # uses 'a+' -- in 'a+' mode the else-branch re-dumps remap.yml;
            # verify that is intended.
            if self.mode in ['r', 'a']:
                ''' e.g. {dtype: 32FC2, min : 0.0, max : 1.0} '''
                remap = load_yaml(yamlfile) if os.path.exists(yamlfile) else None
                print('---------> remap yaml: ', remap)
            else:  # write mode
                remap = kwargs.get('remap', None)
                print("Write png with remap: %s" % remap)
                dump_yaml(remap, yamlfile)
            self.handle = Handle_Rawpng(always_load_color=always_load_color,
                                        remap=remap)  # bytes data handler (pack/unpack)
        elif self.db_path.endswith('.Npyarr.lmdb'):
            if not silent: print("[Using] Handle_Npyarr")
            self.handle = Handle_Npyarr()
        elif self.db_path.endswith('.Rawebp.lmdb'):
            if not silent: print("[Using] Handle_Rawebp")
            if self.mode in ['w', 'a']:
                QUALITY = kwargs.get('QUALITY', 95)
                print("Compress QUALITY: ", QUALITY)
                self.handle = Handle_Rawebp(QUALITY=QUALITY)
            else:
                self.handle = Handle_Rawebp()
        else:
            print('Unrecognized imagedata_lmdb extension:\n[db_path] %s' % self.db_path)
            raise NotImplementedError
        if not silent: print(self)
        # print(self.len)
        """ --- patch for rename keys ---
        In lmdb, "keys are always lexicographically sorted".
        This prevent us to shuffle the storage order of images, which is necessary when the training dataset size is really large, e.g ImageNet (45G~120G).
        Pre-shuffling image order favor image data loader in training code, as it can read sequentially along the physical storage.
        To do this, we re-name all image keys to '0000XXXXX'  ('%09d' % image_id) format as it would be sorted by lmdb (same trick in caffe).
        So when imgId2dbId.pkl, we need to map the actual image_id to db_id for retrieve a data.
        """
        if os.path.exists(os.path.join(self.db_path, 'imgId2dbId.pkl')):
            assert self.mode not in ['w', 'a+'], 'Not implement renamed key '
            self.key_renamed = True
            self.imgId2dbId = pickle.load(open(os.path.join(self.db_path, 'imgId2dbId.pkl'), 'rb'))  # OrderedDict
        else:
            self.key_renamed = False

    def __str__(self):
        # Human-readable summary: db path plus lmdb stat() counters.
        s = "[Path] %s \n" % self.db_path
        for k, v in self.lmdb_env.stat().items():
            s += '%20s %-10s\n' % (k, v)
        return s

    @property
    def len(self):
        # Number of stored entries.
        return self.lmdb_env.stat()['entries']

    @property
    def keys(self):
        # All image ids. Renamed-key dbs take ids from the imgId2dbId mapping;
        # otherwise ids are read from (and cached to) keys.txt in the db dir.
        if self.key_renamed:
            return self.imgId2dbId.keys()
        else:
            key_cache_file = os.path.join(self.db_path, 'keys.txt')
            if not os.path.exists(key_cache_file):
                print("Building up keys cache ...")
                with self.lmdb_env.begin() as txn:
                    keys = [key.decode('utf-8') for key, _ in txn.cursor()]  # very slow!!!
                    # Note: .decode('utf-8') is to decode the bytes object to produce a string.  (needed for py3)
                with open(key_cache_file, 'w') as f:
                    f.write('\n'.join(keys))
            else:
                return [x.strip() for x in open(key_cache_file).readlines()]
        return keys

    def __getitem__(self, key):
        ''' key is usually imgID, normally we need key as bytes.'''
        with self.lmdb_env.begin() as txn:
            if self.key_renamed:
                key = self.imgId2dbId[key]
            key = _bytes(key)
            raw_data = txn.get(key)  # (key.tobytes()) #(b'00000000')
            assert raw_data is not None
            img = self.handle.unpack(raw_data)
        return img

    # new added.
    def __setitem__(self, key, npyarr):
        ''' key is usually imgID, normally we need key as bytes.'''
        assert self.mode in ['w', 'a+'], "Not open in write mode: %s " % self.mode
        key, value = self.handle.pack_from_npyarr(key, npyarr)
        with self.lmdb_env.begin(write=True) as txn:
            txn.put(key, value)

    def put(self, imgID, imgpath):
        # Store the image file at imgpath under key imgID (handler-encoded).
        key, value = self.handle.pack(imgID, imgpath)
        with self.lmdb_env.begin(write=True) as txn:
            txn.put(key, value)

    def get(self, key):
        """ return raw bytes buf."""
        with self.lmdb_env.begin() as txn:
            if self.key_renamed:
                key = self.imgId2dbId[key]
            key = _bytes(key)
            raw_data = txn.get(key)
            assert raw_data is not None
        return raw_data

    def vis(self, nr_row=5, nr_col=6, side=150, randshow=False, is_depth_img=False):  # nr_per_row=6,
        """Display db contents as tiled pages of nr_row x nr_col square cells
        of `side` pixels; blocks on a key press between pages.

        NOTE(review): the `if True:` below is leftover from a removed
        `with ... as txn:` context; trailing partial rows/pages appear to never
        be shown -- confirm before relying on full coverage.
        """
        # Iter by keys
        keys = self.keys
        if randshow:
            random.shuffle(keys)
        itkeys = iter(keys)
        if True:
            # with self.lmdb_env.begin() as txn:
            row_images = []
            rows = []
            # for key, raw_data in txn.cursor():
            #     assert raw_data is not None
            #     pad_img = pad_as_squared_img(self.handle.unpack(raw_data), Side=side)
            # Iter by keys
            for key in itkeys:
                next_img = self[key]
                if is_depth_img:  # [TODO] TO REMOVE this case.
                    _max, _min = next_img.max(), next_img.min()
                    next_img = (next_img.astype(np.float32) - _min) / (_max - _min)  # normalize [min,max] to [0,1]
                elif self.db_path.endswith('.Rawpng.lmdb') and self.handle.remap is not None:
                    _max, _min = self.handle.remap['max'], self.handle.remap['min']
                    next_img = (next_img - _min) / (_max - _min)
                pad_img = pad_as_squared_img(next_img, Side=side)
                if len(row_images) < nr_col:
                    row_images.append(pad_img)
                else:
                    cat_imgs = np.concatenate(row_images, axis=1)
                    row_images = [pad_img]  # reset row_images
                    if len(rows) < nr_row:
                        rows.append(cat_imgs)
                    else:
                        cat_alls = np.concatenate(rows, axis=0)
                        cv2.imshow('images', cat_alls)
                        cv2_wait()
                        rows = [cat_imgs]  # reset rows

    def compute_mean_std(self, sample_k=None):
        """Two-pass per-channel pixel mean/std over (a sample of) the db.

        sample_k: if given, compute over a fixed (seed 0) random sample of keys.
        Returns (pxl_mean, pxl_std) in [0,255]-scale units.
        """
        import random
        random.seed(0)
        # Iter by keys
        keys = self.keys
        if sample_k is not None:
            keys = random.sample(keys, sample_k)
        # C1/C2 are scaling constants keeping the float32 accumulators in range.
        C1 = 1e7
        cnt = 0
        sum_pxl = np.zeros((3,), np.float32)
        for i, key in enumerate(keys):
            data = self[key].astype(np.float32) / C1  # 255.
            if len(data.shape) == 3:
                h, w, c = data.shape
            else:
                (h, w), c = data.shape, 1
            # print '--',np.sum(data.reshape(-1, c), axis=0)
            sum_pxl += np.sum(data.reshape(-1, c), axis=0)
            cnt += h * w
            if i % 1000 == 0:
                print('\r [mean] %s / %s ' % (i, len(keys))),
                sys.stdout.flush()
        pxl_mean = sum_pxl / cnt * C1  # [0,255]
        # NOTE(review): the freshly computed pxl_mean is immediately overwritten
        # by this hardcoded constant (looks like leftover debug code), so the
        # std below is always taken against the fixed mean -- verify intent.
        pxl_mean = np.array([2.19471788e-05, 2.19471788e-05, 2.19471788e-05], np.float32) * C1
        C2 = 1e8
        cnt = 0
        sum_var = np.zeros((3,), np.float32)
        for i, key in enumerate(keys):
            data = self[key].astype(np.float32)  # / C  # 255.
            if len(data.shape) == 3:
                h, w, c = data.shape
            else:
                (h, w), c = data.shape, 1
            sum_var += np.sum(((data.reshape(-1, c) - pxl_mean) ** 2) / C2, axis=0)
            cnt += h * w
            if i % 1000 == 0:
                print('\r [std] %s / %s ' % (i, len(keys))),
                sys.stdout.flush()
        pxl_std = np.sqrt(sum_var / (cnt - 1) * C2)
        print('pxl_mean: ', pxl_mean)  # [2.19471788e-05 2.19471788e-05 2.19471788e-05]
        print('pxl_std: ', pxl_std)
        return pxl_mean, pxl_std
def test_handle():
    """Manual round-trip check of a Data_Handler: pack from file and from array,
    display each decode on screen. Depends on a hardcoded local image path."""
    handler = Handle_Rawpng()
    imgID = '1507795919477055'
    imgpath = '/Users/shine/Pictures/1507795919477055.png'
    #
    key, payload = handler.pack(imgID, imgpath)
    #
    cv2.imshow('image-pack', handler.unpack(payload))
    cv2_wait()

    decoded = cv2.imread(imgpath)
    key, payload = handler.pack_from_npyarr(imgID, decoded)
    #
    cv2.imshow('image-pack_from_npyarr', handler.unpack(payload))
    cv2_wait()
    print(len(payload))
    exit()
def test_lmdb():
    """Manual check: fetch one known key straight from a local Rawjpg lmdb
    (bypassing ImageData_lmdb) and display it."""
    db_env = lmdb.open(env.Home + '/working/cvpr17pose/dataset/PASCAL3D/ImageData.Max500.Rawjpg.lmdb', map_size=10000)
    handler = Handle_Rawjpg()
    imgID = b'n03693474_19496'
    with db_env.begin() as txn:
        payload = txn.get(imgID)  # .tobytes()) #(b'00000000')
        cv2.imshow('image', handler.unpack(payload))
        cv2_wait()
def test_img_lmdb():
    """Manual check of ImageData_lmdb.__getitem__ with a plain str key."""
    imgdb = ImageData_lmdb(env.Home + '/working/cvpr17pose/dataset/PASCAL3D/ImageData.Max500.Rawjpg.lmdb')
    # Keys may also be given as bytes or numpy bytes scalars; str is typical.
    imgID = 'n03693474_19496'  # str
    picture = imgdb[imgID]
    cv2.imshow('image', picture)
    cv2_wait()
def test_read_db1():
    """Manual check: walk every key in a local db and display each image."""
    imgdb = ImageData_lmdb(env.Home + '/working/cvpr17pose/dataset/PASCAL3D/ImageData.Max500.Rawjpg.lmdb')
    print(imgdb.keys[:10])
    for key in imgdb.keys:
        cv2.imshow('img', imgdb[key])
        cv2_wait()
def test_vis_lmdb(db_path):
    """Open a db and show a tiled preview of its contents."""
    ImageData_lmdb(db_path).vis()
def test_compute():
    """Compute per-channel pixel mean/std over a 5000-image sample of a local db."""
    db = ImageData_lmdb(
        env.Home + '/working/3DClassification/dataset/rendDB.cache/ModelNet10/MVCNN_12V.white/train.Rawjpg.lmdb',
        always_load_color=False)
    db.compute_mean_std(sample_k=5000)
    # Reference output:
    #   pxl_mean: [219.47179   219.47179   219.47179]
    #   pxl_std:  [58.2633316  58.2633316  58.2633316]
def show_lmdb():
    """CLI entry point: visualize entries of an ImageData_lmdb as a tiled grid."""
    import argparse
    parser = argparse.ArgumentParser(description='PyTorch Training')
    parser.add_argument('db_path', default='', type=str, metavar='PATH', )
    parser.add_argument('-f', '--format', default='5x6', type=str, help='rows and cols of display.')
    parser.add_argument('-s', '--side', default=150, type=int, help='max side len of each image.')
    parser.add_argument('-r', '--random', action="store_true", default=False, help='random show entries in DB.')
    parser.add_argument('-d', '--depth', action="store_true", default=False,
                        help='show depth image by normalize [min,max] to [0,1].')
    # --keep_last_only
    args = parser.parse_args()
    nr_row, nr_col = map(int, args.format.strip().split('x'))
    imgdb = ImageData_lmdb(args.db_path)
    db_name = os.path.split(args.db_path.rstrip('/'))[1].lower()
    if not args.depth and db_name.find('depth') >= 0:
        # Rawpng dbs with a remap.yml already normalize values on read;
        # everything else that looks like depth data gets an interactive prompt.
        has_remap = args.db_path.endswith('Rawpng.lmdb') and os.path.exists(os.path.join(args.db_path, 'remap.yml'))
        if not has_remap:
            answer = input(
                '\nIs this depth image db? \nConsider visualize option "-d" or "--depth"? Y/[N] ')
            if answer.upper() == 'Y':
                args.depth = True
    imgdb.vis(nr_row, nr_col, args.side, args.random, args.depth)
if __name__ == '__main__':
    # CLI usage: `python imagedata_lmdb.py <db_path> [...]` visualizes a db;
    # with no arguments, fall back to the mean/std computation smoke test.
    if len(sys.argv) > 1:
        show_lmdb()  # sys.argv[1]
    else:
        test_compute()
| 22,562 | 37.503413 | 159 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/torch_3rd_layers.py | import torch
import torch.nn as nn
import numpy as np
from basic.common import is_py3
if is_py3:
from functools import reduce
'''
Define Maskout layer
'''
class Maskout(nn.Module):
    """For each sample in a batch, select the slice of `x` belonging to that
    sample's category label.

    Input x is (batch, nr_cate) or (batch, nr_cate, *rest); label holds one
    category index per sample. Output is (batch, *rest).
    """

    def __init__(self, nr_cate=3):  # ks: kernel_size
        super(Maskout, self).__init__()
        self.nr_cate = nr_cate

    def forward(self, x, label):
        """Return x[i, label[i]] for every sample i in the batch."""
        batch = x.size(0)
        assert x.size(1) == self.nr_cate, "2nd dim of x should be self.nr_cate=%s" % self.nr_cate
        assert label.size(0) == batch  # one label row per sample
        assert label.numel() == batch  # exactly one index per sample overall
        # Row indices 0..batch-1, paired with each sample's category column.
        row_inds = torch.arange(batch, dtype=torch.long, device=x.device)
        cate_ind = label.view(batch)
        assert cate_ind.lt(self.nr_cate).all(), '[Exception] All index in cate_ind should be smaller than nr_cate.'
        out_shape = (batch,) + x.size()[2:]
        return x[row_inds, cate_ind].view(*out_shape)
"""
x.shape:
batch_size * nr_cate * nr_bins
"""
| 1,480 | 31.911111 | 116 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/torch_v4_feature.py | import math
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn.parameter import Parameter
from torch.nn.functional import * # pad, avg_pool2d
# this is already in v4.0, but now I'm using v3.0
def local_response_norm(input, size, alpha=1e-4, beta=0.75, k=1):
    """Applies local response normalization over an input signal composed of
    several input planes, where channels occupy the second dimension.
    Applies normalization across channels.

    See :class:`~torch.nn.LocalResponseNorm` for details.
    """
    # Backport of torch v4's F.local_response_norm for torch v3 environments.
    dim = input.dim()
    if dim < 3:
        raise ValueError('Expected 3D or higher dimensionality \
                input (got {} dimensions)'.format(dim))
    # div accumulates the windowed mean of squared activations over channels.
    div = input.mul(input).unsqueeze(1)
    if dim == 3:
        # (N, C, L): pad the channel axis, then average with a (size, 1) window.
        div = pad(div, (0, 0, size // 2, (size - 1) // 2))
        div = avg_pool2d(div, (size, 1), stride=1).squeeze(1)
    else:
        # (N, C, ...): flatten trailing dims and pool with a (size, 1, 1) window.
        sizes = input.size()
        div = div.view(sizes[0], 1, sizes[1], sizes[2], -1)
        div = pad(div, (0, 0, 0, 0, size // 2, (size - 1) // 2))
        div = avg_pool3d(div, (size, 1, 1), stride=1).squeeze(1)
        div = div.view(sizes)
    # Denominator: (k + alpha * local mean of squares) ** beta.
    div = div.mul(alpha).add(k).pow(beta)
    return input / div
class LocalResponseNorm(Module):
    """Module wrapper around the module-level `local_response_norm` backport.

    The full usage description lives in the __init__ docstring below (kept
    there to preserve the original layout of this torch v4 backport).
    """

    def __init__(self, size, alpha=1e-4, beta=0.75, k=1):
        r"""Applies local response normalization over an input signal composed
        of several input planes, where channels occupy the second dimension.
        Applies normalization across channels.

        .. math::

            `b_{c} = a_{c}\left(k + \frac{\alpha}{n}
            \sum_{c'=\max(0, c-n/2)}^{\min(N-1,c+n/2)}a_{c'}^2\right)^{-\beta}`

        Args:
            size: amount of neighbouring channels used for normalization
            alpha: multiplicative factor. Default: 0.0001
            beta: exponent. Default: 0.75
            k: additive factor. Default: 1

        Shape:
            - Input: :math:`(N, C, ...)`
            - Output: :math:`(N, C, ...)` (same shape as input)
        Examples::
            >>> lrn = nn.LocalResponseNorm(2)
            >>> signal_2d = autograd.Variable(torch.randn(32, 5, 24, 24))
            >>> signal_4d = autograd.Variable(torch.randn(16, 5, 7, 7, 7, 7))
            >>> output_2d = lrn(signal_2d)
            >>> output_4d = lrn(signal_4d)
        """
        super(LocalResponseNorm, self).__init__()
        self.size = size
        self.alpha = alpha
        self.beta = beta
        self.k = k

    def forward(self, input):
        # return F.local_response_norm(input, self.size, self.alpha, self.beta, self.k)
        # Delegate to the module-level backport defined in this file.
        return local_response_norm(input, self.size, self.alpha, self.beta, self.k)

    def __repr__(self):
        # e.g. "LocalResponseNorm(5, alpha=0.0001, beta=0.75, k=1)"
        return self.__class__.__name__ + '(' \
               + str(self.size) \
               + ', alpha=' + str(self.alpha) \
               + ', beta=' + str(self.beta) \
               + ', k=' + str(self.k) + ')'
| 2,964 | 36.531646 | 87 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/torch_3rd_funcs.py | import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
np.set_printoptions(precision=5, suppress=True)
def norm2unit(vecs, dim=1, p=2):
""" vecs is of 2D: (batchsize x nr_feat) or (batchsize x nr_feat x restDims)
We normalize it to a unit vector here.
For example, since mu is the coordinate on the circle (x,y) or sphere (x,y,z),
we want to make it a unit norm.
"""
vsize = vecs.size()
batchsize, nr_feat = vsize[0], vsize[1]
## check bad input
if hasattr(vecs, 'data'): # vecs is Variable
check_inf = (torch.abs(vecs.data) == float('inf')) # It's weird that Variable cannot compare with float('inf')
else: # vecs is Tensor
check_inf = (torch.abs(vecs) == float('inf')) # infinite can be '-inf' and 'inf'
check_nan = (vecs != vecs) # https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/4
if check_inf.any() or check_nan.any():
print(vecs)
print('[Exception] some input values are either "nan" or "inf" ! (from norm2unit)')
exit()
# Trick for numeric stability (norm won't becomes 0)
# vecs = vecs + torch.sign(vecs)*1e-4 # Warning: sign(0) --> 0
signs = torch.sign(vecs)
signs[signs == 0] = 1
vecs = vecs + signs * 1e-4
# print mu
# Just for debugging in case
vecs_in = vecs.clone()
# Compute norm.
# Note the detach(), that is essential for the gradients to work correctly.
# We want the norm to be treated as a constant while dividing the Tensor with it.
# norm = torch.norm(vecs, p, dim, keepdim=True).detach() # here p=2 just means l2 norm Warning: L1 norm is the sum of absolute values.
norm = ((vecs ** p).sum(dim=dim, keepdim=True) ** (
1. / p)).detach() # Warning: if p=1, this line doesn't use absolute values as L1 norm.
# Warning: In pytorch doc of "torch.norm()", the formula is not correct.
# It should be sqrt[p]{|x_1|^p + |x_2|^p + ... + |x_N|^p}
# (https://pytorch.org/docs/stable/torch.html#torch.norm)
# print vecs[:5,:]
# print norm[:5,:]
# import torch.nn.functional as F # normalize # .normalize(input, p=2, dim=1, eps=1e-12, out=None)
# print F.normalize(vecs[:5,:], p=p, dim=1)
# exit()
# norm = norm.view(batchsize,1,*vsize[2:])
# Check if any norm are close to 0
# Should happen anymore since we have done "vecs = vecs + torch.sign(vecs)*1e-4"
check_bad_norm = torch.abs(norm).lt(1e-4)
if check_bad_norm.any():
print(check_bad_norm)
print('[Exception] some norm close to 0. (from norm2unit)')
exit()
# do normalize by division.
vecs = vecs.div(norm)
# vecs = vecs.div(norm.expand_as(vecs))
recomputed_norm = torch.sum((vecs ** p), dim=dim)
check_mu = torch.abs(recomputed_norm - 1.0).lt(1e-6)
if not check_mu.all():
np.set_printoptions(threshold=np.inf)
# print mu.data.cpu().numpy()
# print (mu**2).sum().eq(1).data.cpu().numpy()
print(norm.data.cpu().numpy())
print(torch.cat([vecs_in, vecs, norm, recomputed_norm.view(batchsize, 1)], dim=dim)[~check_mu,
:].data.cpu().numpy())
print('[Exception] normalization has problem. (from norm2unit)')
exit()
# assert (mu**2).sum().eq(1).all(), '[Exception] normalization has problem.'
return vecs
def _check_inf_nan(vecs):
# check if vecs contains inf
if hasattr(vecs, 'data'): # vecs is Variable
check_inf = (torch.abs(vecs.data) == float('inf')) # It's weird that Variable cannot compare with float('inf')
else: # vecs is Tensor
check_inf = (torch.abs(vecs) == float('inf')) # infinite can be '-inf' and 'inf'
# check if vecs contains nan
check_nan = (vecs != vecs) # https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/4
if check_inf.any() or check_nan.any():
print(vecs)
print('[Exception] some input values are either "nan" or "inf" ! (from norm2unit)')
exit()
# return check_inf.any() or check_nan.any()
def _check_bad_norm(norm_vecs):
# Check if any norm are close to 0
# Should happen anymore since we have done "vecs = vecs + torch.sign(vecs)*1e-4"
check_bad_norm = torch.abs(norm_vecs).lt(1e-4)
if check_bad_norm.any():
print(check_bad_norm)
print('[Exception] some norm close to 0. (from norm2unit)')
exit()
def exp_Normalization(vecs, l_n=2, debug=True): # when l1, same as softmax
""" vecs is of 2D: (batchsize x nr_feat)
We first apply exponential, and then normalize it to a unit vector here.
Compute:
x_i / ( sum( x_j**l_n ) ) ** (1/l_n)
e.g.: L1: x_i / sum( x_j )
L2: x_i / sqrt( sum( x_j**2 ) )
Note: signs of input vecs is lost. (since this is exponential always return positive number.)
"""
batchsize, nr_feat = vecs.size()
# check if input are all valid number.
_check_inf_nan(vecs)
# apply exponential
vecs = torch.exp(vecs)
# check again
_check_inf_nan(vecs)
# Trick for numeric stability (norm won't becomes 0)
vecs = vecs + torch.sign(vecs) * 1e-4
# Just for debugging in case
if debug:
_vecs_in_ = vecs.clone()
# Compute norm.
norm = torch.norm(vecs, p=l_n, dim=1).view(batchsize, 1) # here p=2 just means l2 norm
# Note the detach(), that is essential for the gradients to work correctly.
# We want the norm to be treated as a constant while dividing the Tensor with it.
if hasattr(vecs, 'data'): # vecs is Variable
norm = norm.detach()
if debug: _check_bad_norm(norm)
# do normalize by division.
vecs = vecs.div(norm.expand_as(vecs))
if debug:
check_unit = torch.abs(torch.sum((vecs ** l_n), dim=1) - 1.0).lt(1e-6)
if not check_unit.all():
print(torch.cat([_vecs_in_, vecs, norm], dim=1).data.cpu().numpy())
print('[Exception] normalization has problem. (from exp_Normalization)')
exit()
return vecs
def as_numpy(x):
x = x.data if isinstance(x, Variable) else x
if x.is_cuda:
return x.cpu().numpy()
else:
return x.numpy()
if __name__ == '__main__':
# ---- Generate input vecs -----
batchsize = 3
# vecs = torch.Tensor(batchsize, 3).uniform_(0, 1)
vecs = torch.randn(batchsize, 10)
vecs = torch.autograd.Variable(vecs)
# --------------
print("[Input]", vecs)
expNorm = exp_Normalization(vecs.clone(), l_n=1, debug=True)
# print "[ExpL1]", expNorm
print("[LogExpL1]", torch.log(expNorm))
# -------------- Calling pytorch build function.
import torch.nn.functional as F
# print "[F.softmax]", F.softmax(vecs.clone(), dim=1, _stacklevel=3)
print("[F.log_softmax]", F.log_softmax(vecs.clone(), dim=1)) # , _stacklevel=5
| 6,932 | 36.679348 | 140 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/netutil/common_v2/trunk_alexnet_bvlc.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
from basic.common import rdict
import numpy as np
from easydict import EasyDict as edict
import math
from pytorch_util.libtrain import copy_weights, init_weights_by_filling
from pytorch_util.torch_v4_feature import LocalResponseNorm # *
from pytorch_util.torch_3rd_layers import Maskout
from pytorch_util.torch_3rd_funcs import norm2unit
__all__ = ['AlexNet_Trunk'] # , 'alexnet']
# nr_cate = 3
''' Name_in_caffe
features.0.weight --> features.0.weight conv1
features.0.bias --> features.0.bias
features.3.weight --> features.3.weight conv2
features.3.bias --> features.3.bias
features.6.weight --> features.6.weight conv3
features.6.bias --> features.6.bias
features.8.weight --> features.8.weight conv4
features.8.bias --> features.8.bias
features.10.weight --> features.10.weight conv5
features.10.bias --> features.10.bias
classifier.1.weight --> classifier.1.weight fc6
classifier.1.bias --> classifier.1.bias
classifier.4.weight --> classifier.4.weight fc7
classifier.4.bias --> classifier.4.bias
classifier.6.weight --> classifier.6.weight fc8
classifier.6.bias --> classifier.6.bias
'''
class AlexNet_Trunk(nn.Module):
    """BVLC/caffe-style AlexNet trunk (conv1-5 with LRN + grouped convs, then
    fc6/fc7): maps a (batch, 3, 227, 227) image batch to 4096-d fc7 features.
    Task-specific heads are attached by users of this class."""

    def __init__(self, init_weights=True):
        # init_weights: True (legacy) or 'caffemodel' -> copy pretrained caffe
        # weights; None -> empirical filling; 'torchmodel' is not implemented.
        super(AlexNet_Trunk, self).__init__()
        self.net_arch = 'alexnet'
        # -- Trunk architecture
        # -- Convolutional layers
        self.Convs = nn.Sequential(
            nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=2),
            # [ 0] conv1   Conv1     TO CHECK: alexnet is 96 instead of 64 here.
            nn.ReLU(inplace=True),  # [ 1] relu1
            nn.MaxPool2d(kernel_size=3, stride=2),  # [ 2] pool1
            LocalResponseNorm(5, alpha=1e-4, beta=0.75, k=1),  # [ 3] norm1
            nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2),  # [ 4] conv2   Conv2
            nn.ReLU(inplace=True),  # [ 5] relu2
            nn.MaxPool2d(kernel_size=3, stride=2),  # [ 6] pool2
            LocalResponseNorm(5, alpha=1e-4, beta=0.75, k=1),  # [ 7] norm2
            nn.Conv2d(256, 384, kernel_size=3, padding=1),  # [ 8] conv3   Conv3
            nn.ReLU(inplace=True),  # [ 9] relu3
            nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2),  # [10] conv4   Conv4
            nn.ReLU(inplace=True),  # [11] relu4
            nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2),  # [12] conv5   Conv5
            nn.ReLU(inplace=True),  # [13] relu5
            nn.MaxPool2d(kernel_size=3, stride=2),  # [14] pool5
        )
        # -- Fully connected layers
        self.Fcs = nn.Sequential(
            nn.Linear(256 * 6 * 6, 4096),  # [0] fc6     Fc6
            nn.ReLU(inplace=True),  # [1] relu6
            nn.Dropout(),
            # [2] drop6   TO CHECK: alexnet Dropout should follow after Fc
            nn.Linear(4096, 4096),  # [3] fc7     Fc7
            nn.ReLU(inplace=True),  # [4] relu7
            nn.Dropout(),  # [5] drop7
        )
        if init_weights == True:  # for legacy
            self.init_weights(pretrained='caffemodel')
        elif isinstance(init_weights, str) or init_weights is None:
            self.init_weights(pretrained=init_weights)
        else:
            raise NotImplementedError

    # @Interface
    def forward(self, x):  # In-replace forward_trunk
        """ x is input image data.
            x is of shape: (batchsize, 3, 227, 227)
        """
        # forward convolutional layers
        x = self.Convs(x)
        #
        # forward fully connected layers
        batchsize = x.size(0)  # .split(1, dim=1)
        x = x.view(batchsize, 256 * 6 * 6)
        x = self.Fcs(x)
        #
        return x

    def init_weights(self, pretrained='caffemodel'):
        """ Two ways to init weights:
             1) by copying pretrained weights.
             2) by filling empirical weights. (e.g. gaussian, xavier, uniform, constant, bilinear).
        """
        if pretrained is None:
            print('initialize weights by filling (Fc:gaussian, Conv:kaiming_normal).')
            init_weights_by_filling(self)
        elif pretrained == 'caffemodel':
            print("Initializing weights by copying (pretrained caffe weights).")
            # Maps caffe layer names to the corresponding module paths above.
            src2dsts = dict(conv1='Convs.0', conv2='Convs.4', conv3='Convs.8',
                            conv4='Convs.10', conv5='Convs.12', fc6='Fcs.0', fc7='Fcs.3')
            copy_weights(self.state_dict(), 'caffemodel.alexnet', src2dsts=src2dsts)
        elif pretrained == 'torchmodel':
            raise NotImplementedError
        else:
            raise NotImplementedError
        return self

    # def _init_weights_by_filling(self):  # _initialize_weights(self):
    #     for m in self.modules():
    #         if isinstance(m, nn.Conv2d):
    #             n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
    #             m.weight.data.normal_(0, math.sqrt(2. / n))
    #             if m.bias is not None:
    #                 m.bias.data.zero_()
    #         elif isinstance(m, nn.BatchNorm2d):
    #             m.weight.data.fill_(1)
    #             m.bias.data.zero_()
    #         elif isinstance(m, nn.Linear):
    #             m.weight.data.normal_(0, 0.01)
    #             m.bias.data.zero_()

    def fix_conv1_conv2(self):
        # Freeze conv1/conv2 (modules Convs[0..7]) by disabling their gradients.
        # Fix the layers before conv3:
        for layer in range(8):
            for p in self.Convs[layer].parameters():
                p.requires_grad = False
class Test_AlexNet(nn.Module):
    """Smoke-test head on top of AlexNet_Trunk: per-category predictions of a
    point on S^2 (3-d) and on S^1 (2-d), masked by the ground-truth label and
    normalized to unit vectors."""

    def __init__(self, nr_cate=3, _Trunk=AlexNet_Trunk):
        super(Test_AlexNet, self).__init__()
        self.truck = _Trunk(init_weights=True)
        self.nr_cate = nr_cate
        self.maskout = Maskout(nr_cate=nr_cate)
        # BUGFIX: these heads used to be constructed inside forward(), which
        # re-initialized them with fresh random weights on every call and hid
        # them from the optimizer / state_dict. Build and register them once.
        # -- Head architecture
        self.head_s2 = nn.Sequential(
            nn.Linear(4096, 84),
            nn.ReLU(inplace=True),
            # nn.Dropout(),
            nn.Linear(84, self.nr_cate * 3),  # 252=3*3
        )
        self.head_s1 = nn.Sequential(
            nn.Linear(4096, 84),
            nn.ReLU(inplace=True),
            # nn.Dropout(),
            nn.Linear(84, self.nr_cate * 2),
        )

    def forward(self, x, label):
        """x: (B, 3, 227, 227) images; label: (B, 1) category ids.
        Returns an edict with unit-norm predictions s2 (B, 3) and s1 (B, 2)."""
        x = self.truck(x)
        batchsize = x.size(0)
        # Per-category head outputs; maskout keeps each sample's labeled row.
        # Note: s1(x,y) is on a circle and (x^2+y^2=1)
        x_s2 = self.maskout(self.head_s2(x).view(batchsize, self.nr_cate, 3), label)
        x_s1 = self.maskout(self.head_s1(x).view(batchsize, self.nr_cate, 2), label)
        # -- Normalize coordinate to a unit
        x_s2 = norm2unit(x_s2)  # , nr_cate=self.nr_cate)
        x_s1 = norm2unit(x_s1)  # , nr_cate=self.nr_cate)
        Pred = edict(s2=x_s2, s1=x_s1)
        return Pred
if __name__ == '__main__':
    # Smoke test: run a zero batch through the model and print the S^2 head.
    model = Test_AlexNet()
    # import numpy as np
    # AlexNet trunk expects 227x227 RGB input.
    dummy_batch_data = np.zeros((2, 3, 227, 227), dtype=np.float32)
    dummy_batch_label = np.zeros((2, 1), dtype=np.int64)
    # Variable is a legacy (pre-0.4) wrapper; plain tensors work identically today.
    dummy_batch_data = torch.autograd.Variable(torch.from_numpy(dummy_batch_data))
    dummy_batch_label = torch.autograd.Variable(torch.from_numpy(dummy_batch_label))
    Pred = model(dummy_batch_data, dummy_batch_label)
    print(Pred.s2)
| 7,390 | 38.736559 | 99 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/netutil/common_v2/trunk_inception.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
__all__ = ['_Inception3_Trunk'] # ['Inception3', 'inception_v3']
model_urls = {
# Inception v3 ported from TensorFlow
'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
}
# def inception_v3(pretrained=False, **kwargs):
# r"""Inception v3 model architecture from
# `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
#
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# if pretrained:
# if 'transform_input' not in kwargs:
# kwargs['transform_input'] = True
# model = Inception3(**kwargs)
# model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google']))
# return model
#
# return Inception3(**kwargs)
class _Inception3_Trunk(nn.Module):
    """Inception-v3 trunk, adapted from the torchvision reference model.

    NOTE(review): __init__ calls self.init_weights() and self.sub_seq(),
    neither of which is defined in this class — presumably supplied by a
    mixin or sibling trunk module; confirm before instantiating.
    """

    # def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
    def __init__(self, aux_logits=True, transform_input=False,
                 init_weights=True, start=None, end=None, num_classes=1000):
        """Build the inception-v3 layer stack.

        Args:
            aux_logits: if True, attach the auxiliary classifier head.
            transform_input: re-normalize the input to ImageNet statistics
                inside forward().
            init_weights: if truthy, copy pretrained caffe weights.
            start, end: layer names bounding the truncated sub-sequence.
            num_classes: output size of the fc / aux heads. Bug fix: the
                original body used `num_classes` without it being defined
                anywhere (NameError on construction); it is appended as a
                trailing keyword so existing callers are unaffected.
        """
        super(_Inception3_Trunk, self).__init__()
        self.net_arch = 'inception_v3'  # @Shine: added
        #
        self.aux_logits = aux_logits
        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = InceptionAux(768, num_classes)
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        self.fc = nn.Linear(2048, num_classes)
        # Truncated-normal init for conv/linear weights. A module may
        # override the spread via a `stddev` attribute (see InceptionAux).
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.numel()))
                values = values.view(m.weight.size())
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # ---------------------------------------------------------
        self._layer_names = [name for name, module in self.named_children()]
        #
        if init_weights:
            self.init_weights(pretrained='caffemodel')
        # build a short cut by sequence
        self.truck_seq = self.sub_seq(start=start, end=end)  # 'conv1', 'pool5'
        # ---------------------------------------------------------

    def forward(self, x):
        """Full inception-v3 pass.

        Returns logits, or (logits, aux_logits) in training mode when
        aux_logits is enabled.
        """
        if self.transform_input:
            # Undo [-1, 1]-style scaling and apply ImageNet mean/std per channel.
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(x)
        # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x)
        # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x)
        # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x)
        # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x)
        # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 35 x 35 x 192
        x = self.Mixed_5b(x)
        # 35 x 35 x 256
        x = self.Mixed_5c(x)
        # 35 x 35 x 288
        x = self.Mixed_5d(x)
        # 35 x 35 x 288
        x = self.Mixed_6a(x)
        # 17 x 17 x 768
        x = self.Mixed_6b(x)
        # 17 x 17 x 768
        x = self.Mixed_6c(x)
        # 17 x 17 x 768
        x = self.Mixed_6d(x)
        # 17 x 17 x 768
        x = self.Mixed_6e(x)
        # 17 x 17 x 768
        if self.training and self.aux_logits:
            aux = self.AuxLogits(x)
        # 17 x 17 x 768
        x = self.Mixed_7a(x)
        # 8 x 8 x 1280
        x = self.Mixed_7b(x)
        # 8 x 8 x 2048
        x = self.Mixed_7c(x)
        # 8 x 8 x 2048
        x = F.avg_pool2d(x, kernel_size=8)
        # 1 x 1 x 2048
        x = F.dropout(x, training=self.training)
        # 1 x 1 x 2048
        x = x.view(x.size(0), -1)
        # 2048
        x = self.fc(x)
        # 1000 (num_classes)
        if self.training and self.aux_logits:
            return x, aux
        return x
class InceptionA(nn.Module):
    """Inception block with 1x1, 5x5, double-3x3 and pooled 1x1 branches."""

    def __init__(self, in_channels, pool_features):
        super(InceptionA, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
        self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)

    def forward(self, x):
        """Concatenate all four parallel branches along the channel dim."""
        b1 = self.branch1x1(x)
        b2 = self.branch5x5_2(self.branch5x5_1(x))
        b3 = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        b4 = self.branch_pool(F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))
        return torch.cat([b1, b2, b3, b4], 1)
class InceptionB(nn.Module):
    """Stride-2 grid-reduction block (two conv branches + max-pool)."""

    def __init__(self, in_channels):
        super(InceptionB, self).__init__()
        self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)

    def forward(self, x):
        """Concatenate both conv branches with a stride-2 max-pool of the input."""
        single = self.branch3x3(x)
        double = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        pooled = F.max_pool2d(x, kernel_size=3, stride=2)
        return torch.cat([single, double, pooled], 1)
class InceptionC(nn.Module):
    """Inception block using factorized 7x7 convolutions (1x7 / 7x1 pairs)."""

    def __init__(self, in_channels, channels_7x7):
        super(InceptionC, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
        c7 = channels_7x7
        self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        """Concatenate the 1x1, single-7x7, double-7x7 and pooled branches."""
        b1 = self.branch1x1(x)
        b7 = self.branch7x7_3(self.branch7x7_2(self.branch7x7_1(x)))
        bd = x
        for layer in (self.branch7x7dbl_1, self.branch7x7dbl_2, self.branch7x7dbl_3,
                      self.branch7x7dbl_4, self.branch7x7dbl_5):
            bd = layer(bd)
        bp = self.branch_pool(F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))
        return torch.cat([b1, b7, bd, bp], 1)
class InceptionD(nn.Module):
    """Stride-2 grid-reduction block with a factorized 7x7-then-3x3 branch."""

    def __init__(self, in_channels):
        super(InceptionD, self).__init__()
        self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
        self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)

    def forward(self, x):
        """Concatenate both stride-2 conv branches with a stride-2 max-pool."""
        b33 = self.branch3x3_2(self.branch3x3_1(x))
        b773 = x
        for layer in (self.branch7x7x3_1, self.branch7x7x3_2,
                      self.branch7x7x3_3, self.branch7x7x3_4):
            b773 = layer(b773)
        pooled = F.max_pool2d(x, kernel_size=3, stride=2)
        return torch.cat([b33, b773, pooled], 1)
class InceptionE(nn.Module):
    """Widest inception block: branches themselves split into 1x3/3x1 pairs."""

    def __init__(self, in_channels):
        super(InceptionE, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
        self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
        self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        """Concatenate the four branches; the two 3x3 branches fan out into 1x3/3x1 sub-branches."""
        b1 = self.branch1x1(x)
        stem = self.branch3x3_1(x)
        b3 = torch.cat([self.branch3x3_2a(stem), self.branch3x3_2b(stem)], 1)
        dbl_stem = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        bdbl = torch.cat([self.branch3x3dbl_3a(dbl_stem),
                          self.branch3x3dbl_3b(dbl_stem)], 1)
        bp = self.branch_pool(F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))
        return torch.cat([b1, b3, bdbl, bp], 1)
class InceptionAux(nn.Module):
    """Auxiliary classifier head applied to a 17x17x`in_channels` feature map."""

    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
        self.conv1 = BasicConv2d(128, 768, kernel_size=5)
        # `stddev` is read by the trunk's truncated-normal weight init loop.
        self.conv1.stddev = 0.01
        self.fc = nn.Linear(768, num_classes)
        self.fc.stddev = 0.001

    def forward(self, x):
        """Pool to 5x5, apply both convs, flatten, and classify."""
        pooled = F.avg_pool2d(x, kernel_size=5, stride=3)   # 17x17 -> 5x5
        feats = self.conv1(self.conv0(pooled))              # -> 1x1x768
        flat = feats.view(feats.size(0), -1)                # -> 768
        return self.fc(flat)                                # -> num_classes
class BasicConv2d(nn.Module):
    """Conv2d (no bias) -> BatchNorm (eps=0.001) -> ReLU, the basic inception unit."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x):
        """Apply convolution, batch-norm, then an in-place ReLU."""
        return F.relu(self.bn(self.conv(x)), inplace=True)
| 12,691 | 36.439528 | 103 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/netutil/common_v2/trunk_vgg.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
from collections import OrderedDict as odict
from pytorch_util.torch_v4_feature import LocalResponseNorm # *
from pytorch_util.libtrain import copy_weights, init_weights_by_filling
__all__ = ['_VGG_Trunk', 'VGGM_Trunk', 'VGG16_Trunk', 'VGG19_Trunk'] # , 'alexnet']
# __all__ = [
# 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
# 'vgg19_bn', 'vgg19',
# ]
# model_urls = {
# 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
# 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
# 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
# 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
# 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
# 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
# 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
# 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
# }
def make_layers(cfg, batch_norm=False):
    """Build a VGG feature stack from a config list.

    Each int entry adds a 3x3 conv (+ optional BatchNorm) followed by ReLU;
    the string 'M' adds a 2x2 max-pool. Input starts at 3 channels.
    """
    layers, in_channels = [], 3
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(in_channels, v, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(v))
        layers.append(nn.ReLU(inplace=True))
        in_channels = v
    return nn.Sequential(*layers)
# @Shine: [ref] https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/vggm.py
# https://gist.github.com/ksimonyan/f194575702fae63b2829#file-vgg_cnn_m_deploy-prototxt
def make_layers_vggm():
    """Build the VGG-M convolutional stack (conv1..conv5 with pools).

    Mirrors the Caffe VGG_CNN_M deploy prototxt; the LRN layers use k=2
    (unlike alexnet-style LRN, whose default is k=1).
    """
    layers = [
        nn.Conv2d(3, 96, (7, 7), (2, 2)),
        nn.ReLU(inplace=True),
        LocalResponseNorm(5, 0.0005, 0.75, 2),
        nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True),
        nn.Conv2d(96, 256, (5, 5), (2, 2), (1, 1)),
        nn.ReLU(inplace=True),
        LocalResponseNorm(5, 0.0005, 0.75, 2),
        nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True),
        nn.Conv2d(256, 512, (3, 3), (1, 1), (1, 1)),
        nn.ReLU(inplace=True),
        nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1)),
        nn.ReLU(inplace=True),
        nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1)),
        nn.ReLU(inplace=True),
        nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True),
    ]
    return nn.Sequential(*layers)
# VGG layer configurations: ints are conv output-channel counts, 'M' marks
# a 2x2 max-pool. 'A'=vgg11, 'B'=vgg13, 'D'=vgg16, 'E'=vgg19.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
# Only the two configurations with pretrained caffe weights are exposed by name.
type2cfg = dict(vgg16=cfg['D'], vgg19=cfg['E'])
def get_caffeNaming(cfg_list, learnable_only=True):
    """Derive caffe-style layer names from a VGG cfg list.

    Returns conv{block}_{idx} (plus relu/pool names when learnable_only is
    False) followed by the fully connected layer names. Requires exactly
    five pooling entries ('M') in `cfg_list`.
    """
    all_names = []
    # ---------- Convolution layers ----------
    # Every block ends with a max-pooling entry; locate the five pool markers.
    maxpool_inds = [i for i, entry in enumerate(cfg_list) if entry == "M"]
    assert len(maxpool_inds) == 5
    end = 0
    for blk, pool_ind in enumerate(maxpool_inds):
        block_id = blk + 1
        start, end = end, pool_ind + 1
        block_cfg = cfg_list[start:end]
        block_names = []
        for conv_idx in range(len(block_cfg) - 1):
            conv_id = conv_idx + 1
            block_names.append('conv{}_{}'.format(block_id, conv_id))
            if not learnable_only:
                block_names.append('relu{}_{}'.format(block_id, conv_id))
        if not learnable_only:
            block_names.append('pool{}'.format(block_id))
        all_names += block_names
        print(block_names)
        print('--------------------------')
    # ---------- Fully connected layers ----------
    if learnable_only:
        all_names += ['fc6', 'fc7']
    else:
        all_names += ['fc6', 'relu6', 'drop6',
                      'fc7', 'relu7', 'drop7', ]
    return all_names
# print get_caffeNaming(type2cfg['vgg16'])
# exit()
def get_caffeSrc2Dst(net_arch='vgg16'):
    # @Interface
    """Return an ordered mapping: caffe layer name -> torch parameter path.

    Supported: 'vgg16', 'vgg19', 'vggm'. The batch-norm variants have no
    pretrained caffe weights and raise NotImplementedError.
    """
    if net_arch == 'vgg16':
        return odict(conv1_1='features.0', conv1_2='features.2',
                     conv2_1='features.5', conv2_2='features.7',
                     conv3_1='features.10', conv3_2='features.12', conv3_3='features.14',
                     conv4_1='features.17', conv4_2='features.19', conv4_3='features.21',
                     conv5_1='features.24', conv5_2='features.26', conv5_3='features.28',
                     fc6='classifier.0', fc7='classifier.3')
    if net_arch == 'vgg19':
        return odict(conv1_1='features.0', conv1_2='features.2',
                     conv2_1='features.5', conv2_2='features.7',
                     conv3_1='features.10', conv3_2='features.12', conv3_3='features.14', conv3_4='features.16',
                     conv4_1='features.19', conv4_2='features.21', conv4_3='features.23', conv4_4='features.25',
                     conv5_1='features.28', conv5_2='features.30', conv5_3='features.32', conv5_4='features.34',
                     fc6='classifier.0', fc7='classifier.3')
    if net_arch in ['vgg16_bn', 'vgg19_bn']:
        print('No pretrained model for vgg16_bn, vgg19_bn.')
        raise NotImplementedError
    if net_arch == 'vggm':
        return odict(conv1='features.0',
                     conv2='features.4',
                     conv3='features.8',
                     conv4='features.10',
                     conv5='features.12',
                     fc6='classifier.0',
                     fc7='classifier.3', )
    raise NotImplementedError
'''
'D': vgg16 'E': vgg19
------------------------------------- -------------------------------------
conv1_1 64 [ 0] conv1_1 64 [ 0]
relu1_1 [ 1] relu1_1 [ 1]
conv1_2 64 [ 2] conv1_2 64 [ 2]
relu1_2 [ 3] relu1_2 [ 3]
pool1 'M' (max pooling) [ 4] pool1 'M' (max pooling) [ 4]
------------------------------------- -------------------------------------
conv2_1 128 [ 5] conv2_1 128 [ 5]
relu2_1 [ 6] relu2_1 [ 6]
conv2_2 128 [ 7] conv2_2 128 [ 7]
relu2_2 [ 8] relu2_2 [ 8]
pool2 'M' (max pooling)[ 9] pool2 'M' (max pooling)[ 9]
------------------------------------- -------------------------------------
conv3_1 256 [10] conv3_1 256 [10]
relu3_1 [11] relu3_1 [11]
conv3_2 256 [12] conv3_2 256 [12]
relu3_2 [13] relu3_2 [13]
conv3_3 256 [14] conv3_3 256 [14]
relu3_3 [15] relu3_3 [15]
pool3 'M' (max pooling)[16] conv3_4 256 [16]
------------------------------------- relu3_4 [17]
conv4_1 512 [17] pool3 'M' (max pooling)[18]
relu4_1 [18] -------------------------------------
conv4_2 512 [19] conv4_1 512 [19]
relu4_2 [20] relu4_1 [20]
conv4_3 512 [21] conv4_2 512 [21]
relu4_3 [22] relu4_2 [22]
pool4 'M' (max pooling)[23] conv4_3 512 [23]
------------------------------------- relu4_3 [24]
conv5_1 512 [24] conv4_4 512 [25]
relu5_1 [25] relu4_4 [26]
conv5_2 512 [26] pool4 'M' (max pooling)[27]
relu5_2 [27] -------------------------------------
conv5_3 512 [28] conv5_1 512 [28]
relu5_3 [29] relu5_1 [29]
pool5 'M' (max pooling)[30] conv5_2 512 [30]
------------------------------------- relu5_2 [31]
conv5_3 512 [32]
relu5_3 [33]
conv5_4 512 [34]
relu5_4 [35]
pool5 'M' (max pooling)[36]
-------------------------------------
'''
class _VGG_Trunk(nn.Module):
    """VGG trunk: conv features plus fc6/fc7, without the final classifier.

    Supports 'vggm', 'vgg16' and 'vgg19'. Weight initialization can copy
    pretrained caffe weights or fill empirically (see init_weights).
    """

    def __init__(self, vgg_type='vgg16', init_weights=True):
        # init_weights accepts: True (legacy: copy caffe weights, vgg16/19
        # only), a pretrained-source string, or None (empirical filling).
        super(_VGG_Trunk, self).__init__()
        self.net_arch = vgg_type
        if vgg_type == 'vggm':
            # Convolutional layers (VGG-M layout)
            self.features = make_layers_vggm()
            # Fully connected layers
            self.classifier = nn.Sequential(
                nn.Linear(512 * 6 * 6, 4096),  # [0] Note: 512 x 6 x 6 for vgg-m
                nn.ReLU(True),  # [1]
                nn.Dropout(),  # [2]
                nn.Linear(4096, 4096),  # [3]
                nn.ReLU(True),  # [4]
                nn.Dropout(),  # [5]
                # nn.Linear(4096, num_classes), # @Shine prob removed
            )
        else:
            # Convolutional layers for vgg16, vgg19 (and *_bn cfgs)
            self.features = make_layers(type2cfg[vgg_type])
            # Fully connected layers
            self.classifier = nn.Sequential(
                nn.Linear(512 * 7 * 7, 4096),  # [0]
                nn.ReLU(True),  # [1]
                nn.Dropout(),  # [2]
                nn.Linear(4096, 4096),  # [3]
                nn.ReLU(True),  # [4]
                nn.Dropout(),  # [5]
                # nn.Linear(4096, num_classes), # @Shine prob removed
            )
        # if init_weights:
        #     self.init_weights(pretrained='caffemodel' if (self.net_arch in ['vgg16', 'vgg19']) else None)
        # NOTE(review): init_weights=True asserts vgg16/vgg19 below, so a
        # vggm trunk built with the default will fail — pass None or an
        # explicit pretrained string for vggm.
        if init_weights == True: # for legacy
            assert self.net_arch in ['vgg16', 'vgg19']
            self.init_weights(pretrained='caffemodel')
        elif isinstance(init_weights, str) or init_weights is None:
            self.init_weights(pretrained=init_weights)
        else:
            raise NotImplementedError
        # [TODO] build a short cut by sequence: e.g.
        # self.truck_seq = self.sub_seq(start='conv1', end='conv5')
    # [TODO] To implement (see trunk_resnet.py)
    """
    def sub_seq(self, start=None, end=None): # 'conv1', 'pool5'
        # select sub-sequence from trunk (by creating a shot cut).
        assert start is None or start in self._layer_names, '[Error] %s is not in %s' % (start, self._layer_names)
        assert end is None or end in self._layer_names, '[Error] %s is not in %s' % (end , self._layer_names)
        start_ind = self._layer_names.index(start) if (start is not None) else 0
        end_ind = self._layer_names.index(end) if (end is not None) else len(self._layer_names)-1
        assert start_ind<=end_ind
        self.selected_layer_name = self._layer_names[start_ind:end_ind+1]
        print("Selected sub-sequence: %s" % self.selected_layer_name)
        _seq = nn.Sequential(*[self.__getattr__(x) for x in self.selected_layer_name])
        return _seq # (self.conv1, self.bn1, self.relu, self.maxpool, self.layer1,self.layer2,self.layer3 )
    """

    # @Interface
    def forward(self, x): # In-replace forward_trunk
        """Run the trunk on an image batch of shape (batchsize, 3, 224, 224)."""
        # forward convolutional layers
        x = self.features(x)
        # forward fully connected layers
        batchsize = x.size(0) # .split(1, dim=1)
        x = x.view(batchsize, -1)
        x = self.classifier(x)
        #
        return x

    def init_weights(self, pretrained='caffemodel'):
        """ Two ways to init weights:
            1) by copying pretrained weights.
            2) by filling empirical weights. (e.g. gaussian, xavier, uniform, constant, bilinear).
        """
        if pretrained is None:
            print('initialize weights by filling (Fc:gaussian, Conv:kaiming_normal).')
            init_weights_by_filling(self)
        elif pretrained == 'caffemodel':
            print("Initializing weights by copying (pretrained caffe weights).")
            src2dsts = get_caffeSrc2Dst(self.net_arch)
            copy_weights(self.state_dict(), 'caffemodel.%s' % (self.net_arch), src2dsts=src2dsts)
        elif pretrained == 'torchmodel':
            raise NotImplementedError
        else:
            raise NotImplementedError
        return self

    def fix_conv1_conv2(self):
        # Freeze the layers before conv3.
        # NOTE(review): indices assume the vgg16/vgg19 layout; in vggm's
        # feature stack conv3 sits at features[8] — confirm before using.
        for layer in range(10):
            for p in self.features[layer].parameters():
                p.requires_grad = False
class VGGM_Trunk(_VGG_Trunk):
    """VGG-M trunk convenience wrapper."""

    def __init__(self, init_weights=True):
        # Bug fix: init_weights was accepted but silently dropped, so the
        # caller's choice never reached _VGG_Trunk. Forward it.
        super(VGGM_Trunk, self).__init__(vgg_type='vggm', init_weights=init_weights)
class VGG16_Trunk(_VGG_Trunk):
    """VGG-16 trunk convenience wrapper."""

    def __init__(self, init_weights=True):
        # Bug fix: init_weights was accepted but silently dropped, so the
        # caller's choice never reached _VGG_Trunk. Forward it.
        super(VGG16_Trunk, self).__init__(vgg_type='vgg16', init_weights=init_weights)
class VGG19_Trunk(_VGG_Trunk):
    """VGG-19 trunk convenience wrapper."""

    def __init__(self, init_weights=True):
        # Bug fix: init_weights was accepted but silently dropped, so the
        # caller's choice never reached _VGG_Trunk. Forward it.
        super(VGG19_Trunk, self).__init__(vgg_type='vgg19', init_weights=init_weights)
# -# def vgg11(pretrained=False, **kwargs):
# -# """VGG 11-layer model (configuration "A")
# -#
# -# Args:
# -# pretrained (bool): If True, returns a model pre-trained on ImageNet
# -# """
# -# if pretrained:
# -# kwargs['init_weights'] = False
# -# model = VGG_Trunk(make_layers(cfg['A']), **kwargs)
# -# if pretrained:
# -# model.load_state_dict(model_zoo.load_url(model_urls['vgg11']))
# -# return model
# -#
# -#
# -# def vgg11_bn(pretrained=False, **kwargs):
# -# """VGG 11-layer model (configuration "A") with batch normalization
# -#
# -# Args:
# -# pretrained (bool): If True, returns a model pre-trained on ImageNet
# -# """
# -# if pretrained:
# -# kwargs['init_weights'] = False
# -# model = VGG_Trunk(make_layers(cfg['A'], batch_norm=True), **kwargs)
# -# if pretrained:
# -# model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
# -# return model
# -#
# -#
# -# def vgg13(pretrained=False, **kwargs):
# -# """VGG 13-layer model (configuration "B")
# -#
# -# Args:
# -# pretrained (bool): If True, returns a model pre-trained on ImageNet
# -# """
# -# if pretrained:
# -# kwargs['init_weights'] = False
# -# model = VGG_Trunk(make_layers(cfg['B']), **kwargs)
# -# if pretrained:
# -# model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))
# -# return model
# -#
# -#
# -# def vgg13_bn(pretrained=False, **kwargs):
# -# """VGG 13-layer model (configuration "B") with batch normalization
# -#
# -# Args:
# -# pretrained (bool): If True, returns a model pre-trained on ImageNet
# -# """
# -# if pretrained:
# -# kwargs['init_weights'] = False
# -# model = VGG_Trunk(make_layers(cfg['B'], batch_norm=True), **kwargs)
# -# if pretrained:
# -# model.load_state_dict(model_zoo.load_url(model_urls['vgg13_bn']))
# -# return model
# def vgg16(pretrained=False, **kwargs):
# """VGG 16-layer model (configuration "D")
#
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# if pretrained:
# kwargs['init_weights'] = False
# model = VGG_Trunk(make_layers(cfg['D']), **kwargs)
# if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
# return model
#
#
# def vgg16_bn(pretrained=False, **kwargs):
# """VGG 16-layer model (configuration "D") with batch normalization
#
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# if pretrained:
# kwargs['init_weights'] = False
# model = VGG_Trunk(make_layers(cfg['D'], batch_norm=True), **kwargs)
# if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))
# return model
#
#
# def vgg19(pretrained=False, **kwargs):
# """VGG 19-layer model (configuration "E")
#
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# if pretrained:
# kwargs['init_weights'] = False
# model = VGG_Trunk(make_layers(cfg['E']), **kwargs)
# if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['vgg19']))
# return model
#
#
# def vgg19_bn(pretrained=False, **kwargs):
# """VGG 19-layer model (configuration 'E') with batch normalization
#
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# if pretrained:
# kwargs['init_weights'] = False
# model = VGG_Trunk(make_layers(cfg['E'], batch_norm=True), **kwargs)
# if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn']))
# return model
#
# import torch.nn as nn
# import torch.utils.model_zoo as model_zoo
import torch
import numpy as np
from easydict import EasyDict as edict
# import math
from pytorch_util.torch_3rd_layers import Maskout
from pytorch_util.torch_3rd_funcs import norm2unit
class Test_Net(nn.Module):
    """Smoke-test head on top of a VGG trunk.

    For the category selected by `label`, predicts a point on S^2 and a
    point on S^1 (each normalized to unit length).
    """

    def __init__(self, nr_cate=3, _Trunk=VGG16_Trunk):
        super(Test_Net, self).__init__()
        self.truck = _Trunk()  # .copy_weights()
        self.nr_cate = nr_cate
        self.maskout = Maskout(nr_cate=nr_cate)
        # Bug fix: the heads were previously constructed inside forward(),
        # which re-initialized them with fresh random weights on every call
        # (so they could never be trained). Build them once here instead.
        self.head_s2 = nn.Sequential(
            nn.Linear(4096, 84),
            nn.ReLU(inplace=True),
            # nn.Dropout(),
            nn.Linear(84, self.nr_cate * 3),
        )
        self.head_s1 = nn.Sequential(
            nn.Linear(4096, 84),
            nn.ReLU(inplace=True),
            # nn.Dropout(),
            nn.Linear(84, self.nr_cate * 2),
        )

    def forward(self, x, label):
        """Return edict(s2=..., s1=...) of unit-normalized per-category predictions."""
        x = self.truck(x)
        batchsize = x.size(0)
        # Note: s1(x,y) is on a circle (x^2 + y^2 = 1).
        # Maskout selects the row matching each sample's category label.
        x_s2 = self.maskout(self.head_s2(x).view(batchsize, self.nr_cate, 3), label)
        x_s1 = self.maskout(self.head_s1(x).view(batchsize, self.nr_cate, 2), label)
        # -- Normalize coordinates onto the unit sphere/circle.
        x_s2 = norm2unit(x_s2)
        x_s1 = norm2unit(x_s1)
        return edict(s2=x_s2, s1=x_s1)
if __name__ == '__main__':
    # Smoke test: build the head on the VGG-M trunk and print its structure.
    model = Test_Net(_Trunk=VGGM_Trunk)  # .copy_weights() # pretrained=True
    # import numpy as np
    # VGG trunks expect 224x224 RGB input.
    dummy_batch_data = np.zeros((2, 3, 224, 224), dtype=np.float32)
    dummy_batch_label = np.zeros((2, 1), dtype=np.int64)
    # Variable is a legacy (pre-0.4) wrapper; plain tensors work identically today.
    dummy_batch_data = torch.autograd.Variable(torch.from_numpy(dummy_batch_data))
    dummy_batch_label = torch.autograd.Variable(torch.from_numpy(dummy_batch_label))
    print(model)
    exit()
    # Unreachable below the exit() above — kept for manual experimentation.
    Pred = model(dummy_batch_data, dummy_batch_label)
    print(Pred.s2)
| 20,743 | 41.859504 | 114 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/netutil/common_v2/__init__.py | # from trunck_alexnet_bvlc import AlexNet_Trunk
# from trunck_alexnet_pytorch import AlexNet_Trunk
| 102 | 33.333333 | 50 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/netutil/common_v2/trunk_resnet.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch
from pytorch_util.libtrain import init_weights_by_filling, cfg as pretrained_cfg
# __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
__all__ = ['_ResNet_Trunk'] # , 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
# def get_func_xNorm(xNorm='batch', affine=True):
def _nn_xNorm(num_channels, xNorm='BN', **kwargs): # kwargs: BN: {affine=False}, LN: {elementwise_affine=True}
# ---By Shuai-- num_features = num_channels ?
""" E.g.
Fixed BN: xNorm='BN', affine=False
"""
# print " _nn_xNorm: ", xNorm, kwargs
if xNorm == 'BN':
return nn.BatchNorm2d(num_channels, **kwargs) # affine=affine)
elif xNorm == 'GN':
return nn.GroupNorm(num_groups=32, num_channels=num_channels, **kwargs) # affine=affine) # num_channels,
elif xNorm == 'IN':
return nn.InstanceNorm2d(num_channels, **kwargs) # affine=False) # default is affine=False
elif xNorm == 'LN':
# TODO how to calculate normalized_shape?
# return nn.LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True)
raise NotImplementedError
class nnAdd(nn.Module):
    """Element-wise addition wrapped as a module (keeps the residual merge explicit)."""

    def forward(self, x0, x1):
        """Return the element-wise sum of the two inputs."""
        total = x0 + x1
        return total
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding=1 and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two 3x3 conv-norm stages plus a residual shortcut (resnet-18/34 block)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, xNorm='BN', xNorm_kwargs=None):
        """Bug fix: xNorm_kwargs previously defaulted to a shared mutable {};
        it now defaults to None and is replaced with a fresh dict."""
        super(BasicBlock, self).__init__()
        if xNorm_kwargs is None:
            xNorm_kwargs = {}
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = _nn_xNorm(planes, xNorm=xNorm, **xNorm_kwargs)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = _nn_xNorm(planes, xNorm=xNorm, **xNorm_kwargs)
        self.downsample = downsample
        self.stride = stride
        # ---By Shuai-- residual addition kept as an explicit module
        self.merge = nnAdd()

    def forward(self, x):
        """Apply conv1-bn1-relu, conv2-bn2, add the (possibly downsampled) shortcut, relu."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = self.merge(out, shortcut)
        return self.relu(out)
class Bottleneck(nn.Module):
    """Resnet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4) + shortcut."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, xNorm='BN', xNorm_kwargs=None):
        """Bug fix: xNorm_kwargs previously defaulted to a shared mutable {};
        it now defaults to None and is replaced with a fresh dict."""
        super(Bottleneck, self).__init__()
        if xNorm_kwargs is None:
            xNorm_kwargs = {}
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        # Note: pytorch 'BatchNorm2d' includes both BatchNorm and Scale.
        self.bn1 = _nn_xNorm(planes, xNorm=xNorm, **xNorm_kwargs)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = _nn_xNorm(planes, xNorm=xNorm, **xNorm_kwargs)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = _nn_xNorm(planes * 4, xNorm=xNorm, **xNorm_kwargs)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # ---By Shuai-- residual addition kept as an explicit module
        self.merge = nnAdd()

    def forward(self, x):
        """Apply the three conv-norm stages, add the (possibly downsampled) shortcut, relu."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = self.merge(out, shortcut)
        return self.relu(out)
# Map resnet variant name -> (residual block class, blocks per stage).
type2blockFunc_layerCfgs = dict(
    resnet18=(BasicBlock, [2, 2, 2, 2]),
    resnet34=(BasicBlock, [3, 4, 6, 3]),
    resnet50=(Bottleneck, [3, 4, 6, 3]),
    resnet101=(Bottleneck, [3, 4, 23, 3]),
    resnet152=(Bottleneck, [3, 8, 36, 3]),
)
class _ResNet_Trunk(nn.Module):
    """ResNet backbone trunk: conv1 .. pool5, with the fc1000 head removed.

    A sub-sequence of the trunk can be selected via `start`/`end` layer names
    (e.g. start='conv1', end='layer4'); forward() then runs only that slice.

    Args:
        res_type: key of type2blockFunc_layerCfgs ('resnet18' .. 'resnet152').
        pretrained: True/'torchmodel' (torchvision weights), 'caffemodel',
            or False/None (random fill init).
        start, end: names of first/last layer of the selected sub-sequence.
        xNorm, xNorm_kwargs: normalization layer spec forwarded to _nn_xNorm.
    """

    def __init__(self, res_type='resnet101', pretrained='torchmodel',
                 # pretrained = {True/False, caffemodel, torchmodel}
                 start=None, end=None, xNorm='BN', xNorm_kwargs=None):
        self.inplanes = 64
        super(_ResNet_Trunk, self).__init__()
        # Bug fix: avoid a shared mutable dict default.
        if xNorm_kwargs is None:
            xNorm_kwargs = {}
        self.net_arch = res_type
        blockFunc, layerCfgs = type2blockFunc_layerCfgs[res_type]
        # ---------------------------------------------------------------[net definition]
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)  # conv1
        self.bn1 = _nn_xNorm(64, xNorm=xNorm, **xNorm_kwargs)  # bn_conv1 + scale_conv1
        self.relu = nn.ReLU(inplace=True)  # conv1_relu
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # pool1
        # Bug fix: these four calls used to hard-code xNorm='BN', xNorm_kwargs={},
        # silently ignoring the constructor arguments; pass them through instead.
        self.layer1 = self._make_layer(blockFunc, 64, layerCfgs[0], stride=1,
                                       xNorm=xNorm, xNorm_kwargs=xNorm_kwargs)  # res2{a,b,c}
        self.layer2 = self._make_layer(blockFunc, 128, layerCfgs[1], stride=2,
                                       xNorm=xNorm, xNorm_kwargs=xNorm_kwargs)  # res3{a,b1-b3}
        self.layer3 = self._make_layer(blockFunc, 256, layerCfgs[2], stride=2,
                                       xNorm=xNorm, xNorm_kwargs=xNorm_kwargs)  # res4{a,b1-b22}
        self.layer4 = self._make_layer(blockFunc, 512, layerCfgs[3], stride=2,
                                       xNorm=xNorm, xNorm_kwargs=xNorm_kwargs)  # res5{a,b,c}
        self.pool5 = nn.AvgPool2d(7, stride=1)  # used to be self.avgpool; fc1000 removed
        # ---------------------------------------------------------------
        self._layer_names = [name for name, module in self.named_children()]
        # same as: ['conv1', 'bn1', 'relu', 'maxpool',
        #           'layer1', 'layer2', 'layer3', 'layer4', 'pool5']
        # Normalize legacy boolean spellings of `pretrained`.
        if pretrained == True:
            pretrained = 'torchmodel'
        if pretrained == False:
            pretrained = None
        assert pretrained in [None, 'caffemodel', 'torchmodel'], "Unknown pretrained: %s" % pretrained
        print('[_ResNet_Trunk] init_weights:', pretrained)
        self.init_weights(pretrained=pretrained)
        # build a short cut by sequence
        self.truck_seq = self.sub_seq(start=start, end=end)  # 'conv1', 'pool5'

    def sub_seq(self, start=None, end=None):
        """Return an nn.Sequential over the trunk layers from `start` to `end`
        (inclusive); None means first/last layer respectively."""
        assert start is None or start in self._layer_names, '[Error] %s is not in %s' % (start, self._layer_names)
        assert end is None or end in self._layer_names, '[Error] %s is not in %s' % (end, self._layer_names)
        start_ind = self._layer_names.index(start) if (start is not None) else 0
        end_ind = self._layer_names.index(end) if (end is not None) else len(self._layer_names) - 1
        assert start_ind <= end_ind
        self.selected_layer_name = self._layer_names[start_ind:end_ind + 1]
        print("Selected sub-sequence: %s" % self.selected_layer_name)
        _seq = nn.Sequential(*[self.__getattr__(x) for x in self.selected_layer_name])
        return _seq

    def _make_layer(self, block, planes, blocks, stride, xNorm, xNorm_kwargs):
        """Build one ResNet stage of `blocks` residual blocks; the first block
        may carry a downsample module (stride / channel change) on its shortcut."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                _nn_xNorm(planes * block.expansion, xNorm=xNorm, **xNorm_kwargs)
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, xNorm=xNorm, xNorm_kwargs=xNorm_kwargs))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, xNorm=xNorm, xNorm_kwargs=xNorm_kwargs))
        return nn.Sequential(*layers)

    # @Interface
    def forward(self, x):
        """x: image batch of shape (batchsize, 3, 224, 224).

        Returns a (batchsize, 2048) feature vector when the selected slice ends
        at pool5, otherwise the raw feature map of the last selected layer."""
        assert x.size()[1:] == (3, 224, 224), "resnet need (3,224,224) input data, whereas %s is received." % str(
            tuple(x.size()[1:]))
        if self.selected_layer_name[-1] == 'pool5':
            return self.truck_seq(x).view(x.size(0), -1)  # view as (batchsize, 2048)
        else:
            return self.truck_seq(x)

    def init_weights(self, pretrained='caffemodel'):
        """Initialize weights either by filling (pretrained=None) or by copying
        from a caffe ('caffemodel') / torchvision ('torchmodel') checkpoint."""
        if pretrained is None:
            print('initialize weights by filling (Fc:gaussian, Conv:kaiming_normal).')
            init_weights_by_filling(self, silent=False)
        elif pretrained == 'caffemodel':
            model_path = pretrained_cfg.caffemodel.resnet101.model
            print("Loading caffe pretrained weights from %s" % (model_path))
            state_dict = torch.load(model_path)
            # initialize weights by copying (only keys that exist in this trunk)
            self.load_state_dict({k: v for k, v in state_dict.items() if k in self.state_dict()})
        elif pretrained == 'torchmodel':
            print("Loading pytorch pretrained weights from %s" % (model_urls[self.net_arch]))
            state_dict = model_zoo.load_url(model_urls[self.net_arch], progress=True)
            self.load_state_dict(state_dict, strict=False)
        else:
            raise NotImplementedError
        return self
class ResNet18_Trunk(_ResNet_Trunk):
    """Preset trunk: resnet18."""

    def __init__(self, **kwargs):
        super(ResNet18_Trunk, self).__init__(res_type='resnet18', **kwargs)


class ResNet34_Trunk(_ResNet_Trunk):
    """Preset trunk: resnet34."""

    def __init__(self, **kwargs):
        super(ResNet34_Trunk, self).__init__(res_type='resnet34', **kwargs)


class ResNet50_Trunk(_ResNet_Trunk):
    """Preset trunk: resnet50."""

    def __init__(self, **kwargs):
        super(ResNet50_Trunk, self).__init__(res_type='resnet50', **kwargs)


class ResNet101_Trunk(_ResNet_Trunk):
    """Preset trunk: resnet101."""

    def __init__(self, **kwargs):
        super(ResNet101_Trunk, self).__init__(res_type='resnet101', **kwargs)


class ResNet152_Trunk(_ResNet_Trunk):
    """Preset trunk: resnet152."""

    def __init__(self, **kwargs):
        super(ResNet152_Trunk, self).__init__(res_type='resnet152', **kwargs)
import numpy as np
from easydict import EasyDict as edict
from pytorch_util.torch_v4_feature import LocalResponseNorm # *
from pytorch_util.torch_3rd_layers import Maskout
from pytorch_util.torch_3rd_funcs import norm2unit
class Test_Net(nn.Module):
    """Demo model: ResNet trunk plus two small regression heads.

    head_s2 predicts a 3-vector and head_s1 a 2-vector per category; the
    category label selects one row via Maskout and the result is normalized
    onto the unit sphere / circle."""

    def __init__(self, nr_cate=3, _Trunk=ResNet101_Trunk):
        super(Test_Net, self).__init__()
        self.truck = _Trunk()  # or _Trunk(end='pool5')
        self.nr_cate = nr_cate

        # -- Head architecture: both heads share the same 2048 -> 84 -> out shape.
        def _make_head(out_per_cate):
            return nn.Sequential(
                nn.Linear(2048, 84),
                nn.ReLU(inplace=True),
                nn.Linear(84, self.nr_cate * out_per_cate),
            )

        self.head_s2 = _make_head(3)
        self.head_s1 = _make_head(2)
        self.maskout = Maskout(nr_cate=nr_cate)
        init_weights_by_filling(self.head_s2)
        init_weights_by_filling(self.head_s1)

    def forward(self, x, label):
        # forward truck, then flatten to (batchsize, 2048)
        feat = self.truck(x)
        bs = feat.size(0)
        feat = feat.view(bs, -1)
        # Note: s1(x,y) is on a circle and (x^2+y^2=1)
        raw_s2 = self.head_s2(feat).view(bs, self.nr_cate, 3)
        raw_s1 = self.head_s1(feat).view(bs, self.nr_cate, 2)
        # -- Mask out the labelled category, then normalize coordinate to a unit
        sel_s2 = norm2unit(self.maskout(raw_s2, label))
        sel_s1 = norm2unit(self.maskout(raw_s1, label))
        return edict(s2=sel_s2, s1=sel_s1)
# Smoke test: build the demo model and push one dummy batch through it.
if __name__ == '__main__':
    model = Test_Net()  # .copy_weights() # pretrained=True
    # import numpy as np
    dummy_batch_data = np.zeros((2, 3, 224, 224), dtype=np.float32)
    dummy_batch_label = np.zeros((2, 1), dtype=np.int64)
    # NOTE(review): torch.autograd.Variable is deprecated on modern torch; plain tensors suffice.
    dummy_batch_data = torch.autograd.Variable(torch.from_numpy(dummy_batch_data))
    dummy_batch_label = torch.autograd.Variable(torch.from_numpy(dummy_batch_label))
    Pred = model(dummy_batch_data, dummy_batch_label)
    print(Pred.s2)
| 13,777 | 40.251497 | 125 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/netutil/common_v2/trunk_alexnet_pytorch.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
from basic.common import rdict
import numpy as np
from easydict import EasyDict as edict
from pytorch_util.libtrain import copy_weights
from pytorch_util.torch_3rd_layers import Maskout
from pytorch_util.torch_3rd_funcs import norm2unit
__all__ = ['AlexNet_Trunk'] # , 'alexnet']
# nr_cate = 3
''' Name_in_caffe
features.0.weight --> features.0.weight conv1
features.0.bias --> features.0.bias
features.3.weight --> features.3.weight conv2
features.3.bias --> features.3.bias
features.6.weight --> features.6.weight conv3
features.6.bias --> features.6.bias
features.8.weight --> features.8.weight conv4
features.8.bias --> features.8.bias
features.10.weight --> features.10.weight conv5
features.10.bias --> features.10.bias
classifier.1.weight --> classifier.1.weight fc6
classifier.1.bias --> classifier.1.bias
classifier.4.weight --> classifier.4.weight fc7
classifier.4.bias --> classifier.4.bias
classifier.6.weight --> classifier.6.weight fc8
classifier.6.bias --> classifier.6.bias
'''
class AlexNet_Trunk(nn.Module):
    """AlexNet backbone (torchvision layout) with the final 1000-way classifier
    removed, so forward() returns fc7 features of shape (batchsize, 4096).

    Caffe name mapping: features[0]=conv1, [3]=conv2, [6]=conv3, [8]=conv4,
    [10]=conv5; classifier[1]=fc6, classifier[4]=fc7 (see module docstring).
    """

    def __init__(self):
        super(AlexNet_Trunk, self).__init__()
        # -- Trunk architecture
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            # [ 0] Conv1  Note: bvlc alexnet is 96 instead of 64 here.
            nn.ReLU(inplace=True),                           # [ 1]
            nn.MaxPool2d(kernel_size=3, stride=2),           # [ 2]
            nn.Conv2d(64, 192, kernel_size=5, padding=2),    # [ 3] Conv2
            nn.ReLU(inplace=True),                           # [ 4]
            nn.MaxPool2d(kernel_size=3, stride=2),           # [ 5]
            nn.Conv2d(192, 384, kernel_size=3, padding=1),   # [ 6] Conv3
            nn.ReLU(inplace=True),                           # [ 7]
            nn.Conv2d(384, 256, kernel_size=3, padding=1),   # [ 8] Conv4
            nn.ReLU(inplace=True),                           # [ 9]
            nn.Conv2d(256, 256, kernel_size=3, padding=1),   # [10] Conv5
            nn.ReLU(inplace=True),                           # [11]
            nn.MaxPool2d(kernel_size=3, stride=2),           # [12]
        )
        self.classifier = nn.Sequential(
            nn.Dropout(),                    # [0]  Note: bvlc alexnet Dropout should follow after Fc
            nn.Linear(256 * 6 * 6, 4096),    # [1] Fc6
            nn.ReLU(inplace=True),           # [2]
            nn.Dropout(),                    # [3]
            nn.Linear(4096, 4096),           # [4] Fc7
            nn.ReLU(inplace=True),           # [5]
            # nn.Linear(4096, 1000) (Prob) removed.
        )

    # @Interface
    def forward_Conv(self, x):
        """Convolutional part only: image -> pool5 feature map (B, 256, 6, 6)."""
        return self.features(x)

    # @Interface
    def forward_Fc(self, x):
        """Fully-connected part only: pool5 features (flattened) -> fc7 (B, 4096)."""
        x = x.view(x.size(0), -1)
        return self.classifier(x)

    # @Interface
    def forward(self, x):
        """x: image batch of shape (batchsize, 3, 227, 227) -> (batchsize, 4096).

        Delegates to forward_Conv / forward_Fc so the pipeline is defined once.
        """
        return self.forward_Fc(self.forward_Conv(x))

    def init_weights(self):
        """Copy pretrained torchvision alexnet weights into this trunk.

        Bug fix: this was decorated @staticmethod while taking (and using)
        `self`, so `trunk.init_weights()` raised TypeError; it is now a
        regular instance method. `AlexNet_Trunk.init_weights(trunk)` still
        works as before.
        """
        copy_weights(self.state_dict(), 'torchmodel.alexnet', strict=False)
        return self
class Test_AlexNet(nn.Module):
    """Demo pose heads on top of AlexNet_Trunk.

    head_s2 predicts a 3-vector and head_s1 a 2-vector per category; the label
    selects one row via Maskout and the result is normalized to unit length.

    Bug fixes vs. the original:
    - the trunk was initialized via `_Trunk().copy_weights()`, a method that
      does not exist on AlexNet_Trunk (its loader is `init_weights`);
    - head_s2 / head_s1 were constructed inside forward(), so fresh,
      unregistered (and never-trained) layers were instantiated on every call.
    """

    def __init__(self, nr_cate=3, _Trunk=AlexNet_Trunk):
        super(Test_AlexNet, self).__init__()
        # Class-level call keeps compatibility with both the (buggy) staticmethod
        # and the plain-method version of init_weights.
        self.truck = _Trunk.init_weights(_Trunk())
        self.nr_cate = nr_cate
        # -- Head architecture (built once, registered as submodules)
        self.head_s2 = nn.Sequential(
            nn.Linear(4096, 84),
            nn.ReLU(inplace=True),
            # nn.Dropout(),
            nn.Linear(84, self.nr_cate * 3),  # 252=3*3
        )
        self.head_s1 = nn.Sequential(
            nn.Linear(4096, 84),
            nn.ReLU(inplace=True),
            # nn.Dropout(),
            nn.Linear(84, self.nr_cate * 2),
        )
        self.maskout = Maskout(nr_cate=nr_cate)

    def forward(self, x, label):
        x = self.truck(x)
        batchsize = x.size(0)
        # Note: s1(x,y) is on a circle and (x^2+y^2=1)
        x_s2 = self.maskout(self.head_s2(x).view(batchsize, self.nr_cate, 3), label)
        x_s1 = self.maskout(self.head_s1(x).view(batchsize, self.nr_cate, 2), label)
        # -- Normalize coordinate to a unit
        x_s2 = norm2unit(x_s2)
        x_s1 = norm2unit(x_s1)
        Pred = edict(s2=x_s2, s1=x_s1)
        return Pred
# Smoke test: build the demo head model and push one dummy batch through it.
if __name__ == '__main__':
    model = Test_AlexNet()  # .copy_weights() # pretrained=True
    # import numpy as np
    dummy_batch_data = np.zeros((2, 3, 227, 227), dtype=np.float32)
    dummy_batch_label = np.zeros((2, 1), dtype=np.int64)
    # NOTE(review): torch.autograd.Variable is deprecated on modern torch; plain tensors suffice.
    dummy_batch_data = torch.autograd.Variable(torch.from_numpy(dummy_batch_data))
    dummy_batch_label = torch.autograd.Variable(torch.from_numpy(dummy_batch_label))
    Pred = model(dummy_batch_data, dummy_batch_label)
    print(Pred.s2)
| 5,746 | 35.144654 | 91 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/libtrain/reducer_v2.py | import numpy as np
import torch
'''
Class hierarchy:
net_reducer
// || \\
// || \\
// || \\
// || \\
reducer_group(a) reducer_group(e) reducer_group(t)
/ | \
/ | \
reducer(gt) reducer(pred) reducer(loss)
'''
class reducer:
    """Accumulates per-batch arrays (numpy or torch) for one target and
    concatenates them on demand.

    An optional post_func(batch, **func_kwargs) is applied to every batch right
    after collection; with as_numpy=True, reduce() converts a torch result to a
    numpy array."""

    def __init__(self, post_func=None, as_numpy=True, **func_kwargs):
        self.batch_data_list = []   # collected batches
        self.nr_batch = 0           # how many batches were collected
        self.blob_handler = (post_func, func_kwargs)
        self.as_numpy = as_numpy

    def squeeze(self, arr, dim=None):
        """Type-dispatched squeeze for numpy arrays and torch tensors."""
        if isinstance(arr, np.ndarray):
            return np.squeeze(arr, axis=dim)
        if isinstance(arr, torch.Tensor):
            # torch.squeeze rejects an explicit dim=None, so branch on it.
            return torch.squeeze(arr, dim=dim) if dim is not None else torch.squeeze(arr)
        print("Unknown array type: ", type(arr))
        raise NotImplementedError

    def cat(self, list_arr, dim=0):
        """Type-dispatched concatenation of a list/tuple of batches."""
        assert isinstance(list_arr, (list, tuple)), type(list_arr)
        head = list_arr[0]
        if isinstance(head, np.ndarray):
            return np.concatenate(list_arr, axis=dim)
        if isinstance(head, torch.Tensor):
            return torch.cat(list_arr, dim=dim)
        print("Unknown array type: ", type(head))
        raise NotImplementedError

    def reset(self):
        """Drop everything collected so far."""
        self.batch_data_list = []
        self.nr_batch = 0

    def resume(self, pre_batch_data):
        """Restart collection seeded with previously reduced data."""
        self.batch_data_list = [pre_batch_data]

    def collect(self, batch_data, squeeze=True):
        """Append one batch (optionally squeezed and post-processed); returns it."""
        post_func, func_kwargs = self.blob_handler
        if isinstance(batch_data, torch.Tensor):
            batch_data = batch_data.data
        if squeeze:
            batch_data = self.squeeze(batch_data)
        if post_func is not None:
            batch_data = post_func(batch_data, **func_kwargs)
        if squeeze:
            batch_data = self.squeeze(batch_data)
        if batch_data.shape == ():
            # Guard: squeezing a single-element batch yields a 0-d array; keep it 1-d.
            batch_data = batch_data.reshape((-1,))
        self.batch_data_list.append(batch_data)
        self.nr_batch += 1
        return batch_data

    def reduce(self, reset=False):
        """Concatenate all collected batches along dim 0 (numpy out if as_numpy)."""
        assert len(self.batch_data_list) > 0, "[Exception] No data to reduce."
        merged = self.cat(self.batch_data_list, dim=0)
        if reset:
            self.reset()
        if isinstance(merged, torch.Tensor) and self.as_numpy:
            if merged.is_cuda:
                merged = merged.data.cpu()
            return merged.numpy().copy()
        return merged
class reducer_group:
    """One reducer per target name (e.g. gt / pred / loss), all sharing the
    same post-processing configuration."""

    def __init__(self, target_names, post_func=None, as_numpy=True, **func_kwargs):
        self.names = target_names
        self.name2reducer = {
            tgt: reducer(post_func=post_func, as_numpy=as_numpy, **func_kwargs)
            for tgt in self.names
        }

    def reset(self):
        """Clear every member reducer."""
        for tgt in self.names:
            self.name2reducer[tgt].reset()

    def resume(self, name2pre_batch_data):
        """Seed each member reducer with previously reduced data."""
        for tgt in self.names:
            self.name2reducer[tgt].resume(name2pre_batch_data[tgt])

    def collect(self, tgts_dict, squeeze=True):
        """Feed one batch per target; returns the (post-processed) batch per name."""
        return {
            tgt: self.name2reducer[tgt].collect(arr, squeeze=squeeze)
            for tgt, arr in tgts_dict.items()
        }

    def reduce(self, reset=False):
        """Concatenate collected batches for every target; read-only unless reset=True."""
        name2data = {tgt: red.reduce() for tgt, red in self.name2reducer.items()}
        if reset:
            self.reset()
        return name2data
'''
class reducer_group:
def __init__(self, **name2varName):
self.name2varName = name2varName # name is gt, pred, scr, loss
self.name2reducer = {}
for name, varName in self.name2varName.items():
if isinstance(varName, tuple):
if len(varName)==2:
varName, post_func = varName
func_kwargs = dict() # No func kwargs.
elif len(varName)==3:
varName, post_func, func_kwargs = varName
else:
print "Don't don't how to unpack: blobname, post_func, func_kwargs: %s " % (str(varName))
raise NotImplementedError
self.name2reducer[name] = reducer(post_func, **func_kwargs)
else: # string, not ppost_func
self.name2reducer[name] = reducer()
def collect(self, tgts_dict):
"""collect add new batch data to list."""
name2batch_data = {}
for name, var in tgts_dict.items():
batch_data = self.name2reducer[name].collect(var)
name2batch_data[name] = batch_data
# just return a copy of batch_data in case needed
return name2batch_data
def reduce(self):
"""reduce only return data, will change anything."""
name2data = {} # edict()
for name, reducer in self.name2reducer.items():
name2data[name] = reducer.reduce()
return name2data
'''
# class net_reducer:
# """A netreducer handel all groups of reducer."""
# def __init__(self, net):
# self.net = net
# self.groups = {}
#
# def add_group(self, gname, **name2blobname):
# self.groups[gname] = reducer_group(self.net, **name2blobname)
#
# def collect(self):
# g_name2g_batch_data = {}
# for g_name, group in self.groups.items():
# g_batch_data = group.collect() # a dictionary
# g_name2g_batch_data[g_name] = g_batch_data
#
# # just return a copy of batch_data in case needed
# return g_name2g_batch_data
#
# def reduce(self):
# group2data = {} # group name to data
# for g_name, group in self.groups.items():
# group2data[g_name] = group.reduce()
# return group2data
#
#
# if __name__=="__main__":
# net = None
#
# rd = reducer(net)
# rd.add('e3')
#
# # g_redu = reducer_group(net)
# # g_redu.add_group("a", gt='e3', pred='prob_e3', scr='prob_e3', loss='loss_e3', acc='acc_e3')
# # g_redu.add_group("e", gt='e2', pred='prob_e2', scr='prob_e2', loss='loss_e2', acc='acc_e2')
# # g_redu.add_group("t", gt='e1', pred='prob_e1', scr='prob_e1', loss='loss_e1', acc='acc_e1')
#
# g_redu = net_reducer(net)
# g_redu.add_group("a", gt='e3', scr='prob_e3', loss='loss_e3', acc='acc_e3')
# g_redu.add_group("e", gt='e2', scr='prob_e2', loss='loss_e2', acc='acc_e2')
# g_redu.add_group("t", gt='e1', scr='prob_e1', loss='loss_e1', acc='acc_e1')
#
#
#
#
#
#
#
# '''
# class gt(reduce):
# def __init__(self, net, blobname):
# Layer.__init__(self, type='scalar')
#
# gt = reduce("e3")
#
# # # for classification
# # name2gt = {} # gt class label
# # name2pred = {} # pred class label
# # name2score = {} # pred scores for all classes
#
# # ------ Prefix -------
# # Accuary: acc
# # Softmax: prob
# # Loss: loss
# #
# # group_type2blobname
# group("a", gt='e3', pred='prob_e3', scr='prob_e3', loss='loss_e3', acc='acc_e3')
# reduce_dict = { # Softmax output Softmax output SoftmaxLoss Accurary
# "a": oedict(gt='e3', pred='prob_e3', scr='prob_e3', loss='loss_e3', acc='acc_e3'), # np.argmax, axis=-1
# "e": oedict(gt='e2', pred='prob_e2', scr='prob_e2', loss='loss_e2', acc='acc_e2'),
# "t": oedict(gt='e1', pred='prob_e1', scr='prob_e1', loss='loss_e1', acc='acc_e1'),
# }
#
# # reduce_group = { # Softmax output Softmax output SoftmaxLoss Accurary
# # "a": oedict(gt='e3', pred=(np.argmax,'prob_e3'), scr='prob_e3', loss='loss_e3', acc='acc_e3'), # np.argmax, axis=-1
# # "e": oedict(gt='e2', pred=(np.argmax,'prob_e2'), scr='prob_e2', loss='loss_e2', acc='acc_e2'),
# # "t": oedict(gt='e1', pred=(np.argmax,'prob_e1'), scr='prob_e1', loss='loss_e1', acc='acc_e1'),
# # }
#
# from basic.common import RefObj as rdict
#
# class extractor:
# """Extract data from blob"""
# if __init__(self, blobname, )
#
#
# class batch_reducer:
# """Collect and reduce the results from each batch and concatenate them into one.
# Reduce type:
# - scalar
# - vector (softmax or simply feature.)
# - vector with argmax.
# Type name:
# gt, pred, scr, loss, acc
# It results in:
# name2gt, name2pred, name2scr, name2loss, name2acc (if exists)
# """
# scalar=set(['gt', 'pred', 'loss','acc'])
# vector=set(['scr'])
#
# def __init__(self, netblobs, **kwargs):
# # self.reduce_dict =
# self.name2gt = {}
#
# def collect(self, post_func=None, **func_kwargs):
# for key, tgt in kwargs:
# name2tgt = 'name2%s' % key
# if key in ['gt','pred','scr','loss','acc']:
# if not hasattr(self, name2tgt ):
# self.__setattr__(name2tgt) = {} # create dict.
#
# if key in scalar:
# assert netblobs[tgt].data.shape==1
# elif key in vector:
# assert netblobs[tgt].data.shape>1
# else: raise NotImplementedError
#
# if post_func is None:
# batch_data = netblobs[tgt].data.copy()
# else:
# batch_data = post_func( netblobs[tgt].data.copy(), func_kwargs)
# self.__getattr__(name2tgt).setdefault(tgt, []).append( batch_data )
# else:
# print "[Error] Don't know how to reduce type: %s " % key
# raise NotImplementedError
#
# def reduce(self)
# for key, tgt in kwargs:
# name2tgt = 'name2%s' % key
# if key in ['gt','pred','scr','loss','acc']:
# # if not hasattr(self, name2tgt ):
# # self.__setattr__(name2tgt) = {} # create dict.
# concated = np.concatenate( self.__getattr__(name2tgt), axis=0)
# self.__setattr__(name2tgt, concated )
#
# if key=='gt':
# self.name2gt.setdefault(tgt, []).append( netblobs[key].data.copy() )
# elif key=='pred': # reduce from Softmax by applying argmax
# self.name2gt.setdefault(tgt, []).append( netblobs[key].data.copy() )
# elif key=='scr': # reduce from Softmax
# self.name2gt.setdefault(tgt, []).append( netblobs[key].data.copy() )
# elif key=='loss': # reduce from Loss layer output.
# self.name2gt.setdefault(tgt, []).append( netblobs[key].data.copy() )
# elif key=='acc': # reduce from Accuray layer.
# self.name2gt.setdefault(tgt, []).append( netblobs[key].data.copy() )
# else:
# print "Don't know how to reduce type: %s " % key
# raise NotImplementedError
# def
# '''
#
#
| 12,331 | 38.149206 | 136 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/libtrain/tools.py | import torch
from collections import OrderedDict
def get_stripped_DataParallel_state_dict(m, base_name='', newdict=None):
    """Collect m's state_dict with the 'module.' name components introduced by
    nn.DataParallel stripped out.

    The module tree is walked recursively; a DataParallel wrapper is skipped by
    descending directly into its single `.module` child, so its name component
    never appears in the returned keys.

    Args:
        m: root torch.nn.Module (possibly containing DataParallel wrappers).
        base_name: dotted prefix accumulated during recursion.
        newdict: accumulator used by the recursion.
            Bug fix: this used to default to a module-level OrderedDict(),
            a shared mutable default that accumulated entries across calls.

    Returns:
        OrderedDict mapping stripped parameter/buffer names to tensors.
    """
    if newdict is None:
        newdict = OrderedDict()
    children = list(m.named_children())
    if children:
        # Non-leaf: recurse, skipping DataParallel wrappers.
        if isinstance(m, torch.nn.DataParallel):
            assert len(children) == 1, "DataParallel module should only have one child, namely, m.module"
            get_stripped_DataParallel_state_dict(m.module, base_name, newdict)
        else:
            for _name, _module in children:
                new_base_name = base_name + '.' + _name if base_name != '' else _name
                get_stripped_DataParallel_state_dict(_module, new_base_name, newdict)
        return newdict
    # Leaf module: copy its own parameters/buffers under the accumulated prefix.
    assert not isinstance(m, torch.nn.DataParallel), \
        'Leaf Node cannot be "torch.nn.DataParallel" (since no children ==> no *.module )'
    for k, v in m.state_dict().items():
        # Bug fix: a top-level leaf (base_name == '') used to produce keys with
        # a spurious leading dot ('.weight'); mirror the non-leaf naming rule.
        new_k = base_name + '.' + k if base_name != '' else k
        newdict[new_k] = v
    return newdict
def patch_saved_DataParallel_state_dict(old_odict):
    """Rename the keys of a saved state_dict by dropping the 'module' name
    components that nn.DataParallel inserts
    (e.g. 'features.module.0.weight' -> 'features.0.weight').

    Bug fix: the old implementation used k.replace('module.', ''), which also
    mangled keys merely *containing* that substring (e.g. 'submodule.x' became
    'subx'). Only whole dotted components equal to 'module' are removed now.

    Returns a new OrderedDict; the input is not modified.
    """
    print(
        "[Warning]\tNot recommend to use for unknown modules!\n\t\t\tUnless you're sure 'module.' is called by DataParallel.")
    assert isinstance(old_odict, OrderedDict)
    new_odict = OrderedDict()
    for k, v in old_odict.items():
        new_k = '.'.join(part for part in k.split('.') if part != 'module')
        if new_k != k:
            print('\t[renaming] %-40s --> %-40s' % (k, new_k))
        new_odict[new_k] = v
    return new_odict
def test0():
    """Smoke-check that both helpers undo DataParallel's key renaming on alexnet."""
    from torchvision.models import alexnet
    net = alexnet()
    reference_state = net.state_dict().copy()
    print(reference_state.keys())
    # Test 1: wrap only the feature extractor in DataParallel.
    net.features = torch.nn.DataParallel(net.features)
    # Test 2 (whole-model wrap):
    # net = torch.nn.DataParallel(net)
    print(net.state_dict().keys())
    print('-------------------[patch_state_dict]')
    patched = patch_saved_DataParallel_state_dict(net.state_dict())
    print(patched.keys())
    print('-------------------')
    assert reference_state.keys() == patched.keys()
    print('-------------------[get_stripped_state_dict]')
    stripped = get_stripped_DataParallel_state_dict(net)
    print(stripped.keys())
    print('-------------------')
    assert reference_state.keys() == stripped.keys()
    # Values must survive the round trip unchanged, not just the key names.
    for key in reference_state.keys():
        assert torch.equal(reference_state[key], stripped[key])
# Run the DataParallel state_dict round-trip smoke test when executed directly.
if __name__ == '__main__':
    test0()
| 3,106 | 34.712644 | 136 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/libtrain/yaml_netconf.py | import os, sys
# from cafferas import list_models, copy_weights
from basic.common import add_path
from basic.util import parse_yaml, dump_yaml, load_yaml
# import yaml
# import oyaml as yaml # oyaml is a drop-in replacement for PyYAML which preserves dict ordering.
from pprint import pprint
# from ordered_easydict import Ordered_EasyDict as oedict # my module
import json
# def parse_yaml(conf_yaml_str):
# # from easydict import EasyDict as edict
# print ('---------------Parsing yaml------------------[oyaml]')
# _opt = yaml.load(conf_yaml_str) #
# return _opt # as Ordered_Dict
# # opt = oedict(_opt)
# # return opt
#
# def dump_yaml(odict, filepath=None):
# _opt = yaml.dump(odict) #
# if filepath is not None:
# with open(filepath, 'w') as f:
# f.write(_opt)
# for checking the loaded yaml conf.
def print_odict(odict):
    """Pretty-print a dict as indented JSON (sanity check for a loaded yaml conf)."""
    rendered = json.dumps(odict, indent=2, separators=(',', ': '))
    print(rendered)
from importlib import import_module as _import_module
def import_module(info):
    """Import the module named by info['from'].

    If info['path'] is present, that directory is added to sys.path first
    (via add_path). If info['import'] lists attribute names, the list of
    those attributes is returned; otherwise the module itself is returned.
    """
    print("[import_module] ", info['from'])
    if 'path' in info:
        add_path(info['path'])
        print(' add_path: ', info['path'])
    mod = _import_module(info['from'])
    if 'import' not in info:
        return mod
    return [getattr(mod, attr) for attr in info['import']]
def import_module_v2(info):
    """Like import_module, but info['import'] is a mapping {name: kwargs}.

    name -> None yields the attribute itself; name -> dict yields the
    (attribute, kwargs) pair. Lookup failures are reported and skipped
    rather than raised.
    """
    print("[import_module] ", info['from'])
    if 'path' in info:
        add_path(info['path'])
        print(' add_path: ', info['path'])
    mod = _import_module(info['from'])
    if 'import' not in info:
        return mod
    comps = []
    for attr_name, attr_kwargs in info['import'].items():
        try:
            attr = getattr(mod, attr_name)
            if attr_kwargs is None:        # plain variable / callable
                comps.append(attr)
            else:                          # callable bundled with its kwargs
                comps.append((attr, attr_kwargs))
        except Exception as inst:
            print('\n[Exception] %s' % inst)
            pprint(dict(info))
    return comps
| 2,463 | 30.189873 | 98 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/libtrain/reducer.py | import numpy as np
import torch
'''
Class hierarchy:
net_reducer
// || \\
// || \\
// || \\
// || \\
reducer_group(a) reducer_group(e) reducer_group(t)
/ | \
/ | \
reducer(gt) reducer(pred) reducer(loss)
'''
class reducer:
    """Bind one blob to a reducer for collection of batch data.

    Batches (numpy arrays, or pytorch tensors which are converted to numpy on
    collection) are appended via collect(); reduce() concatenates everything
    collected so far into one numpy array. An optional post_func is applied to
    each batch right after collection.
    """

    def __init__(self, post_func=None, **func_kwargs):
        self.batch_data_list = []  # collected batch arrays
        self.nr_batch = 0          # number of batches collected
        self.blob_handler = (post_func, func_kwargs)

    def reset(self):
        """Drop everything collected so far."""
        self.batch_data_list = []
        self.nr_batch = 0

    def resume(self, pre_batch_data):
        """Restart collection seeded with previously reduced data."""
        self.batch_data_list = [pre_batch_data]

    def collect(self, batch_data, squeeze=True):
        """Append one batch (converted to numpy, optionally squeezed and
        post-processed); returns the stored batch."""
        post_func, func_kwargs = self.blob_handler
        if not isinstance(batch_data, np.ndarray):  # still a pytorch tensor
            batch_data = batch_data.data.cpu().numpy().copy()
        if squeeze:
            batch_data = np.squeeze(batch_data)
        if post_func is not None:
            batch_data = post_func(batch_data, **func_kwargs)
        if squeeze:
            batch_data = np.squeeze(batch_data)
        if batch_data.shape == ():
            # Guard: squeezing a single-value batch yields a 0-d array; keep it 1-d.
            batch_data = batch_data.reshape((-1,))
        self.batch_data_list.append(batch_data)
        self.nr_batch += 1
        return batch_data

    def reduce(self, reset=False):
        """Concatenate all collected batches into one numpy array.

        Bug fix: the old assert message interpolated self.blobname, an
        attribute this class never defines, so a failing assert raised
        AttributeError instead of a readable AssertionError.
        """
        assert len(self.batch_data_list) > 0, "[Exception] No data to reduce."
        batchdata_shape = self.batch_data_list[0].shape
        if len(batchdata_shape) == 0:  # scalar per batch: loss / accuracy blobs
            concated_data = np.vstack(self.batch_data_list).reshape((-1,))
        else:
            concated_data = np.concatenate(self.batch_data_list, axis=0)
        if reset:
            self.reset()
        return concated_data
class reducer_group:
    """A named bundle of reducers, one per target (gt / pred / scr / loss ...).

    Each entry in *names is either a plain string, or a tuple
    (name, post_func) / (name, post_func, func_kwargs) attaching a
    post-processing hook to that target's reducer.

    NOTE(review): reset()/resume() iterate self.names, which may still contain
    the raw tuples, while name2reducer is keyed by the unpacked name string —
    looks like those two methods would KeyError for tuple entries; confirm
    against callers.
    """

    def __init__(self, *names):
        self.names = names
        self.name2reducer = {}
        for entry in self.names:
            if isinstance(entry, tuple):
                if len(entry) == 2:
                    key, post_func = entry
                    kwargs = dict()  # No func kwargs.
                elif len(entry) == 3:
                    key, post_func, kwargs = entry
                else:
                    print("Don't don't how to unpack: blobname, post_func, func_kwargs: %s " % (str(entry)))
                    raise NotImplementedError
                self.name2reducer[key] = reducer(post_func, **kwargs)
            else:  # plain string, no post_func
                self.name2reducer[entry] = reducer()

    def reset(self):
        """Clear every member reducer."""
        for entry in self.names:
            self.name2reducer[entry].reset()

    def resume(self, name2pre_batch_data):
        """Seed each member reducer with previously reduced data."""
        for entry in self.names:
            self.name2reducer[entry].resume(name2pre_batch_data[entry])

    def collect(self, tgts_dict, squeeze=True):
        """Feed one batch per target; returns the (post-processed) batch per name."""
        return {
            key: self.name2reducer[key].collect(arr, squeeze=squeeze)
            for key, arr in tgts_dict.items()
        }

    def reduce(self, reset=False):
        """Concatenate collected batches for every target; read-only unless reset=True."""
        name2data = {key: red.reduce() for key, red in self.name2reducer.items()}
        if reset:
            self.reset()
        return name2data
| 4,395 | 37.561404 | 107 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.