repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
probdet | probdet-master/src/core/evaluation_tools/scoring_rules.py | import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def sigmoid_compute_cls_scores(input_matches, valid_idxs):
    """
    Computes proper scoring rules for multilabel classification results provided by retinanet.

    Args:
        input_matches (dict): dictionary containing input matches
        valid_idxs (tensor): a tensor containing valid element idxs for per-class computation

    Returns:
        output_dict (dict): dictionary containing ignorance and brier score
            means (floats), or None values when there is nothing to score.
    """
    output_dict = {}
    num_forecasts = input_matches['predicted_cls_probs'][valid_idxs].shape[0]

    # RetinaNet uses a multilabel (per-class sigmoid) formulation, so each
    # forecast is scored as a binary foreground-vs-background problem using
    # the predicted probability of the ground-truth category.
    gt_category_probs = input_matches['predicted_score_of_gt_category'][valid_idxs]

    # No valid forecasts: report empty results.
    if gt_category_probs.shape[0] == 0:
        output_dict['ignorance_score_mean'] = None
        output_dict['brier_score_mean'] = None
        return output_dict

    # Binary probability vectors [p, 1 - p] for every forecast.
    binary_probs = torch.stack(
        [gt_category_probs, 1.0 - gt_category_probs], dim=1)
    # The target is always [1, 0]: the gt category is the positive class.
    binary_targets = torch.stack(
        [torch.ones(num_forecasts),
         torch.zeros(num_forecasts)], dim=1).to(device)

    # Ignorance score: mean negative log-likelihood of the correct category.
    nll_per_forecast = (-binary_targets * torch.log(binary_probs)).sum(1)
    output_dict['ignorance_score_mean'] = nll_per_forecast.mean().to(device).tolist()

    # Classification Brier (probability) score.
    brier_per_forecast = ((binary_probs - binary_targets) ** 2).sum(1)
    output_dict['brier_score_mean'] = brier_per_forecast.mean().to(device).tolist()
    return output_dict
def softmax_compute_cls_scores(input_matches, valid_idxs):
    """
    Computes proper scoring rules for multiclass classification results provided by faster_rcnn.

    Args:
        input_matches (dict): dictionary containing input matches
        valid_idxs (tensor): a tensor containing valid element idxs for per-class computation

    Returns:
        output_dict (dict): dictionary containing ignorance and brier score
            means (floats), or None values when there is nothing to score.
    """
    output_dict = {}
    class_probs = input_matches['predicted_cls_probs'][valid_idxs]

    # Build one-hot targets from the gt categories when available; otherwise
    # assume the background (last) category is the correct one.
    if 'gt_cat_idxs' in input_matches.keys():
        one_hot_targets = torch.nn.functional.one_hot(
            input_matches['gt_cat_idxs'][valid_idxs].type(torch.LongTensor),
            input_matches['predicted_cls_probs'][valid_idxs].shape[-1]).to(device)
    else:
        one_hot_targets = torch.zeros_like(class_probs).to(device)
        one_hot_targets[:, -1] = 1.0

    # No valid forecasts: report empty results.
    if class_probs.shape[0] == 0:
        output_dict['ignorance_score_mean'] = None
        output_dict['brier_score_mean'] = None
        return output_dict

    # Ignorance score: mean negative log-likelihood of the correct category.
    nll_per_forecast = (-one_hot_targets * torch.log(class_probs)).sum(1)
    output_dict['ignorance_score_mean'] = nll_per_forecast.mean().to(device).tolist()

    # Classification probability score: multiclass version of the brier score.
    brier_per_forecast = ((class_probs - one_hot_targets) ** 2).sum(1)
    output_dict['brier_score_mean'] = brier_per_forecast.mean().to(device).tolist()
    return output_dict
def compute_reg_scores(input_matches, valid_idxs):
    """
    Computes proper scoring rule for regression results.
    Args:
        input_matches (dict): dictionary containing input matches
        valid_idxs (tensor): a tensor containing valid element idxs for per-class computation
    Returns:
        output_dict (dict): dictionary containing ignorance and energy scores.
    """
    output_dict = {}
    # Predicted box means/covariances and matched gt boxes for valid elements.
    # Assumes means are Nx4 and covariances Nx4x4 — TODO confirm with caller.
    predicted_box_means = input_matches['predicted_box_means'][valid_idxs]
    predicted_box_covars = input_matches['predicted_box_covariances'][valid_idxs]
    gt_box_means = input_matches['gt_box_means'][valid_idxs]

    # If no valid idxs, do not perform computation
    if predicted_box_means.shape[0] == 0:
        output_dict.update({'ignorance_score_mean': None,
                            'mean_squared_error': None,
                            'energy_score_mean': None})
        return output_dict

    # Compute negative log likelihood
    # Note: Juggling between CPU and GPU is due to magma library unresolvable issue, where cuda illegal memory access
    # error is returned arbitrarily depending on the state of the GPU. This is only a problem for the
    # torch.distributions code.
    # Pytorch unresolved issue from 2019:
    # https://github.com/pytorch/pytorch/issues/21819
    # The 1e-2 * I term regularizes the predicted covariance so the internal
    # Cholesky factorization does not fail on near-singular matrices.
    predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
        predicted_box_means.to('cpu'),
        predicted_box_covars.to('cpu') +
        1e-2 *
        torch.eye(
            predicted_box_covars.shape[2]).to('cpu'))
    # Manually move the distribution's internal tensors (including the private
    # _unbroadcasted_scale_tril) to the target device, since the distribution
    # itself had to be constructed on CPU (see note above).
    predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
        device)
    predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
        device)
    predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
        device)
    predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
        device)
    predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
        device)

    # Compute negative log probability
    negative_log_prob = - \
        predicted_multivariate_normal_dists.log_prob(gt_box_means)
    negative_log_prob_mean = negative_log_prob.mean()
    output_dict.update({'ignorance_score_mean': negative_log_prob_mean.to(
        device).tolist()})

    # Compute mean square error
    mean_squared_error = ((predicted_box_means - gt_box_means)**2).mean()
    output_dict.update(
        {'mean_squared_error': mean_squared_error.to(device).tolist()})

    # Energy Score.
    # Monte-Carlo estimate from 1001 samples -> 1000 consecutive sample pairs:
    # ES ~= mean ||X - y|| - 0.5 * mean ||X - X'||.
    sample_set = predicted_multivariate_normal_dists.sample((1001,)).to(device)
    sample_set_1 = sample_set[:-1]
    sample_set_2 = sample_set[1:]
    energy_score = torch.norm(
        (sample_set_1 - gt_box_means),
        dim=2).mean(0) - 0.5 * torch.norm(
        (sample_set_1 - sample_set_2),
        dim=2).mean(0)
    energy_score_mean = energy_score.mean()
    output_dict.update(
        {'energy_score_mean': energy_score_mean.to(device).tolist()})
    return output_dict
def compute_reg_scores_fn(false_negatives, valid_idxs):
    """
    Computes proper scoring rule for regression false positive.
    Args:
        false_negatives (dict): dictionary containing false_negatives
        valid_idxs (tensor): a tensor containing valid element idxs for per-class computation
    Returns:
        output_dict (dict): dictionary containing false positives ignorance and energy scores.
    """
    output_dict = {}
    predicted_box_means = false_negatives['predicted_box_means'][valid_idxs]
    predicted_box_covars = false_negatives['predicted_box_covariances'][valid_idxs]

    # If no valid idxs, do not perform computation.
    # Fixed: this check now runs BEFORE building the distribution (consistent
    # with compute_reg_scores), so empty inputs no longer construct a
    # MultivariateNormal from zero-sized tensors.
    if predicted_box_means.shape[0] == 0:
        output_dict.update({'total_entropy_mean': None,
                            'fp_energy_score_mean': None})
        return output_dict

    # Build the distribution on CPU and move its internals to the device
    # manually: workaround for an unresolved magma/CUDA issue in
    # torch.distributions (https://github.com/pytorch/pytorch/issues/21819).
    # The 1e-2 * I term regularizes near-singular covariances.
    predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
        predicted_box_means.to('cpu'),
        predicted_box_covars.to('cpu') +
        1e-2 * torch.eye(predicted_box_covars.shape[2]).to('cpu'))
    predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
        device)
    predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
        device)
    predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
        device)
    predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
        device)
    predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
        device)

    # Mean differential entropy of the false-positive predictive
    # distributions: how uncertain the detector was about these boxes.
    fp_entropy = predicted_multivariate_normal_dists.entropy()
    fp_entropy_mean = fp_entropy.mean()
    output_dict.update({'total_entropy_mean': fp_entropy_mean.to(
        device).tolist()})

    # Energy Score. With no ground truth available, only the self-distance
    # term mean ||X - X'|| is estimated from 1000 consecutive sample pairs.
    sample_set = predicted_multivariate_normal_dists.sample((1001,)).to(device)
    sample_set_1 = sample_set[:-1]
    sample_set_2 = sample_set[1:]
    fp_energy_score = torch.norm((sample_set_1 - sample_set_2), dim=2).mean(0)
    fp_energy_score_mean = fp_energy_score.mean()
    output_dict.update({'fp_energy_score_mean': fp_energy_score_mean.to(
        device).tolist()})
    return output_dict
| 9,408 | 40.632743 | 133 | py |
probdet | probdet-master/src/core/evaluation_tools/evaluation_utils.py | import numpy as np
import os
import tqdm
import torch
import ujson as json
from collections import defaultdict
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.structures import Boxes, pairwise_iou
# Project imports
from core.datasets import metadata
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def eval_predictions_preprocess(
        predicted_instances,
        min_allowed_score=0.0,
        is_odd=False):
    """
    Groups COCO-format predicted instances by image id as float32 tensors.

    Args:
        predicted_instances (list): list of prediction dicts with 'image_id',
            'category_id', 'bbox' (XYWH), 'bbox_covar' and 'cls_prob'.
        min_allowed_score (float): predictions whose max class probability is
            below this threshold are dropped.
        is_odd (bool): when True (out-of-distribution evaluation), keep
            predictions even if their category_id is undefined (-1).

    Returns:
        dict: per-image 'predicted_boxes' (XYXY), 'predicted_cls_probs' and
            'predicted_covar_mats' tensors, keyed by image id.
    """
    # Per-image accumulators keyed by image id.
    predicted_boxes = defaultdict(torch.Tensor)
    predicted_cls_probs = defaultdict(torch.Tensor)
    predicted_covar_mats = defaultdict(torch.Tensor)

    # Maps an XYWH covariance to an XYXY covariance (x2 = x1 + w, y2 = y1 + h).
    # Loop-invariant, so built once.
    transformation_mat = np.array([[1.0, 0, 0, 0],
                                   [0, 1.0, 0, 0],
                                   [1.0, 0, 1.0, 0],
                                   [0, 1.0, 0.0, 1.0]])

    for instance in predicted_instances:
        # Drop the trailing background entry of 81-way (COCO) probability
        # vectors before thresholding.
        if len(instance['cls_prob']) == 81:
            cls_prob = instance['cls_prob'][:-1]
        else:
            cls_prob = instance['cls_prob']

        # Remove predictions with undefined category_id. This is used when the
        # training and inference datasets come from different data such as
        # COCO-->VOC or COCO-->OpenImages. Only happens if not ODD dataset,
        # else all detections will be removed.
        if is_odd:
            skip_test = np.array(cls_prob).max(0) < min_allowed_score
        else:
            skip_test = (instance['category_id'] == -1) or (
                np.array(cls_prob).max(0) < min_allowed_score)
        if skip_test:
            continue

        image_id = instance['image_id']

        # Convert the XYWH box to XYXY corners.
        bbox = instance['bbox']
        corners = np.array([bbox[0],
                            bbox[1],
                            bbox[0] + bbox[2],
                            bbox[1] + bbox[3]])
        predicted_boxes[image_id] = torch.cat(
            (predicted_boxes[image_id].to(device),
             torch.as_tensor([corners], dtype=torch.float32).to(device)))

        # Note: the stored probabilities are the full, untrimmed 'cls_prob'.
        predicted_cls_probs[image_id] = torch.cat(
            (predicted_cls_probs[image_id].to(device),
             torch.as_tensor([instance['cls_prob']], dtype=torch.float32).to(device)))

        # Transform the covariance into XYXY coordinates.
        cov_pred = np.matmul(
            np.matmul(
                transformation_mat,
                np.array(instance['bbox_covar'])),
            transformation_mat.T).tolist()
        predicted_covar_mats[image_id] = torch.cat(
            (predicted_covar_mats[image_id].to(device),
             torch.as_tensor([cov_pred], dtype=torch.float32).to(device)))

    return dict({'predicted_boxes': predicted_boxes,
                 'predicted_cls_probs': predicted_cls_probs,
                 'predicted_covar_mats': predicted_covar_mats})
def eval_gt_preprocess(gt_instances):
    """
    Groups COCO-format ground-truth annotations by image id as float32 tensors.

    Args:
        gt_instances (list): non-empty list of annotation dicts, each with
            'image_id', 'bbox' (XYWH), 'category_id' and, optionally,
            'is_truncated'/'is_occluded' flags.

    Returns:
        dict: per-image 'gt_boxes' (XYXY) and 'gt_cat_idxs' tensors, plus
            'gt_is_truncated'/'gt_is_occluded' when the annotations carry them
            (presence is decided from the first annotation).
    """
    gt_boxes, gt_cat_idxs, gt_is_truncated, gt_is_occluded = defaultdict(
        torch.Tensor), defaultdict(
        torch.Tensor), defaultdict(
        torch.Tensor), defaultdict(
        torch.Tensor)

    for gt_instance in gt_instances:
        # Convert the XYWH box to XYXY corners.
        box_inds = gt_instance['bbox']
        box_inds = np.array([box_inds[0],
                             box_inds[1],
                             box_inds[0] + box_inds[2],
                             box_inds[1] + box_inds[3]])

        # Fixed: use .to(device) instead of .cuda() so pre-processing also
        # works on CPU-only machines (device falls back to "cpu").
        gt_boxes[gt_instance['image_id']] = torch.cat(
            (gt_boxes[gt_instance['image_id']].to(device),
             torch.as_tensor([box_inds], dtype=torch.float32).to(device)))
        gt_cat_idxs[gt_instance['image_id']] = torch.cat(
            (gt_cat_idxs[gt_instance['image_id']].to(device),
             torch.as_tensor([[gt_instance['category_id']]], dtype=torch.float32).to(device)))

        if 'is_truncated' in gt_instance.keys():
            gt_is_truncated[gt_instance['image_id']] = torch.cat(
                (gt_is_truncated[gt_instance['image_id']].to(device),
                 torch.as_tensor([gt_instance['is_truncated']], dtype=torch.float32).to(device)))
            gt_is_occluded[gt_instance['image_id']] = torch.cat(
                (gt_is_occluded[gt_instance['image_id']].to(device),
                 torch.as_tensor([gt_instance['is_occluded']], dtype=torch.float32).to(device)))

    if 'is_truncated' in gt_instances[0].keys():
        return dict({'gt_boxes': gt_boxes,
                     'gt_cat_idxs': gt_cat_idxs,
                     'gt_is_truncated': gt_is_truncated,
                     'gt_is_occluded': gt_is_occluded})
    else:
        return dict({'gt_boxes': gt_boxes,
                     'gt_cat_idxs': gt_cat_idxs})
def get_matched_results(
        cfg,
        inference_output_dir,
        iou_min=0.1,
        iou_correct=0.7,
        min_allowed_score=0.0):
    """
    Loads cached prediction/ground-truth matching results, or computes and
    caches them when no cache file exists.

    Args:
        cfg: detectron2 config node for the current run.
        inference_output_dir (str): directory holding inference output and caches.
        iou_min (float): lower IOU bound used for matching.
        iou_correct (float): IOU threshold above which a match is a true positive.
        min_allowed_score (float): minimum score for a prediction to be kept.

    Returns:
        dict: matching results as produced by match_predictions_to_groundtruth.
    """
    # Cache file name encodes all matching parameters.
    cache_file = os.path.join(
        inference_output_dir,
        "matched_results_{}_{}_{}.format_placeholder".replace(
            "format_placeholder", "pth").format(
            iou_min, iou_correct, min_allowed_score))
    try:
        matched_results = torch.load(cache_file, map_location=device)
        return matched_results
    except FileNotFoundError:
        predicted_dict, gt_dict = get_per_frame_preprocessed_instances(
            cfg, inference_output_dir, min_allowed_score)

        # Truncation/occlusion tensors are optional; pass None when absent.
        matched_results = match_predictions_to_groundtruth(
            predicted_dict['predicted_boxes'],
            predicted_dict['predicted_cls_probs'],
            predicted_dict['predicted_covar_mats'],
            gt_dict['gt_boxes'],
            gt_dict['gt_cat_idxs'],
            iou_min,
            iou_correct,
            is_truncated=gt_dict.get('gt_is_truncated'),
            is_occluded=gt_dict.get('gt_is_occluded'))

        torch.save(matched_results, cache_file)
        return matched_results
def get_per_frame_preprocessed_instances(
        cfg, inference_output_dir, min_allowed_score=0.0):
    """
    Loads (or builds and caches) pre-processed ground-truth and prediction
    tensors for the configured test dataset.

    Args:
        cfg: detectron2 config node; cfg.ACTUAL_TEST_DATASET selects the dataset.
        inference_output_dir (str): directory with 'coco_instances_results.json'.
        min_allowed_score (float): minimum score for a prediction to be kept.

    Returns:
        tuple: (preprocessed_predicted_instances, preprocessed_gt_instances).
    """
    prediction_file_name = os.path.join(
        inference_output_dir,
        'coco_instances_results.json')
    meta_catalog = MetadataCatalog.get(cfg.ACTUAL_TEST_DATASET)

    # GT cache lives next to the dataset's annotation file.
    gt_cache = os.path.join(
        os.path.split(meta_catalog.json_file)[0],
        "preprocessed_gt_instances.pth")

    # Process GT
    print("Began pre-processing ground truth annotations...")
    try:
        preprocessed_gt_instances = torch.load(gt_cache, map_location=device)
    except FileNotFoundError:
        with open(meta_catalog.json_file, 'r') as gt_file:
            gt_info = json.load(gt_file)
        preprocessed_gt_instances = eval_gt_preprocess(gt_info['annotations'])
        torch.save(preprocessed_gt_instances, gt_cache)
    print("Done!")

    # Prediction cache encodes the score threshold.
    pred_cache = os.path.join(
        inference_output_dir,
        "preprocessed_predicted_instances_{}.pth".format(min_allowed_score))

    print("Began pre-processing predicted instances...")
    try:
        preprocessed_predicted_instances = torch.load(
            pred_cache, map_location=device)
    except FileNotFoundError:
        with open(prediction_file_name, 'r') as pred_file:
            predicted_instances = json.load(pred_file)
        preprocessed_predicted_instances = eval_predictions_preprocess(
            predicted_instances, min_allowed_score)
        torch.save(preprocessed_predicted_instances, pred_cache)
    print("Done!")

    return preprocessed_predicted_instances, preprocessed_gt_instances
def match_predictions_to_groundtruth(predicted_box_means,
                                     predicted_cls_probs,
                                     predicted_box_covariances,
                                     gt_box_means,
                                     gt_cat_idxs,
                                     iou_min=0.1,
                                     iou_correct=0.7,
                                     is_truncated=None,
                                     is_occluded=None):
    """
    Matches predicted detections to ground truth and partitions them into
    true positives, duplicates, localization errors, false positives and
    false negatives, accumulated across all images.

    Matching rules per image:
      * IOU <= iou_min with every gt           -> false positive
      * IOU >= iou_correct with a gt           -> true positive (highest-score
        detection per gt) or duplicate (remaining detections on that gt)
      * iou_min < IOU < 0.5, not already used  -> localization error
      * gt with no detection assigned          -> false negative

    Args:
        predicted_box_means (dict): image_id -> Nx4 tensor of predicted boxes.
        predicted_cls_probs (dict): image_id -> NxK tensor of class probabilities.
        predicted_box_covariances (dict): image_id -> Nx4x4 covariance tensors.
        gt_box_means (dict): image_id -> Mx4 tensor of ground-truth boxes.
        gt_cat_idxs (dict): image_id -> Mx1 tensor of gt category indices.
        iou_min (float): IOU at or below which a detection is a false positive.
        iou_correct (float): IOU at or above which a detection is a TP/duplicate.
        is_truncated (dict or None): optional image_id -> per-gt truncation flags.
        is_occluded (dict or None): optional image_id -> per-gt occlusion flags.

    Returns:
        dict: {'true_positives', 'localization_errors', 'duplicates',
               'false_positives', 'false_negatives'}, each a dict of tensors
               concatenated over all images.
    """
    # Flag to know if truncation and occlusion should be saved:
    trunc_occ_flag = is_truncated is not None and is_occluded is not None

    # Accumulators; entries are concatenated row-wise as images are processed.
    true_positives = dict(
        {
            'predicted_box_means': torch.Tensor().to(device),
            'predicted_box_covariances': torch.Tensor().to(device),
            'predicted_cls_probs': torch.Tensor().to(device),
            'gt_box_means': torch.Tensor().to(device),
            'gt_cat_idxs': torch.Tensor().to(device),
            'iou_with_ground_truth': torch.Tensor().to(device),
            'is_truncated': torch.Tensor().to(device),
            'is_occluded': torch.Tensor().to(device)})
    localization_errors = dict(
        {
            'predicted_box_means': torch.Tensor().to(device),
            'predicted_box_covariances': torch.Tensor().to(device),
            'predicted_cls_probs': torch.Tensor().to(device),
            'gt_box_means': torch.Tensor().to(device),
            'gt_cat_idxs': torch.Tensor().to(device),
            'iou_with_ground_truth': torch.Tensor().to(device),
            'is_truncated': torch.Tensor().to(device),
            'is_occluded': torch.Tensor().to(device)})
    duplicates = dict({'predicted_box_means': torch.Tensor().to(device),
                       'predicted_box_covariances': torch.Tensor().to(device),
                       'predicted_cls_probs': torch.Tensor().to(device),
                       'gt_box_means': torch.Tensor().to(device),
                       'gt_cat_idxs': torch.Tensor().to(device),
                       'iou_with_ground_truth': torch.Tensor().to(device),
                       'is_truncated': torch.Tensor().to(device),
                       'is_occluded': torch.Tensor().to(device)})
    false_positives = dict({'predicted_box_means': torch.Tensor().to(device),
                            'predicted_box_covariances': torch.Tensor().to(device),
                            'predicted_cls_probs': torch.Tensor().to(device)})
    false_negatives = dict({'gt_box_means': torch.Tensor().to(device),
                            'gt_cat_idxs': torch.Tensor().to(device),
                            'is_truncated': torch.Tensor().to(device),
                            'is_occluded': torch.Tensor().to(device)
                            })

    with tqdm.tqdm(total=len(predicted_box_means)) as pbar:
        for key in predicted_box_means.keys():
            pbar.update(1)

            # Check if gt available, if not all detections go to false
            # positives
            if key not in gt_box_means.keys():
                false_positives['predicted_box_means'] = torch.cat(
                    (false_positives['predicted_box_means'], predicted_box_means[key]))
                false_positives['predicted_cls_probs'] = torch.cat(
                    (false_positives['predicted_cls_probs'], predicted_cls_probs[key]))
                false_positives['predicted_box_covariances'] = torch.cat(
                    (false_positives['predicted_box_covariances'], predicted_box_covariances[key]))
                continue

            # Compute iou between gt boxes and all predicted boxes in frame
            frame_gt_boxes = Boxes(gt_box_means[key])
            frame_predicted_boxes = Boxes(predicted_box_means[key])
            num_predictions_in_frame = frame_predicted_boxes.tensor.shape[0]

            # match_iou is (num_gt x num_predictions).
            match_iou = pairwise_iou(frame_gt_boxes, frame_predicted_boxes)

            # False positives are detections that have an iou < match iou with
            # any ground truth object.
            false_positive_idxs = (match_iou <= iou_min).all(0)
            false_positives['predicted_box_means'] = torch.cat(
                (false_positives['predicted_box_means'],
                 predicted_box_means[key][false_positive_idxs]))
            false_positives['predicted_cls_probs'] = torch.cat(
                (false_positives['predicted_cls_probs'],
                 predicted_cls_probs[key][false_positive_idxs]))
            false_positives['predicted_box_covariances'] = torch.cat(
                (false_positives['predicted_box_covariances'],
                 predicted_box_covariances[key][false_positive_idxs]))
            num_fp_in_frame = false_positive_idxs.sum(0)

            # True positives are any detections with match iou > iou correct. We need to separate these detections to
            # True positive and duplicate set. The true positive detection is the detection assigned the highest score
            # by the neural network.
            true_positive_idxs = torch.nonzero(
                match_iou >= iou_correct, as_tuple=False)

            # Setup tensors to allow assignment of detections only once.
            processed_gt = torch.tensor([]).type(torch.LongTensor).to(device)
            predictions_idxs_processed = torch.tensor(
                []).type(torch.LongTensor).to(device)

            for i in torch.arange(frame_gt_boxes.tensor.shape[0]):
                # Check if true positive has been previously assigned to a ground truth box and remove it if this is
                # the case. Very rare occurrence but need to handle it
                # nevertheless.
                prediction_idxs = true_positive_idxs[true_positive_idxs[:, 0] == i][:, 1]
                non_valid_idxs = torch.nonzero(
                    predictions_idxs_processed[..., None] == prediction_idxs, as_tuple=False)
                if non_valid_idxs.shape[0] > 0:
                    # Mark already-consumed predictions with -1 and drop them.
                    prediction_idxs[non_valid_idxs[:, 1]] = -1
                    prediction_idxs = prediction_idxs[prediction_idxs != -1]

                if prediction_idxs.shape[0] > 0:
                    # If there is a prediction attached to gt, count it as
                    # processed.
                    processed_gt = torch.cat(
                        (processed_gt, i.unsqueeze(0).to(
                            processed_gt.device)))
                    predictions_idxs_processed = torch.cat(
                        (predictions_idxs_processed, prediction_idxs))

                    # Rank the candidate detections for this gt by their
                    # maximum class probability.
                    current_matches_predicted_cls_probs = predicted_cls_probs[key][prediction_idxs]
                    max_score, _ = torch.max(
                        current_matches_predicted_cls_probs, 1)
                    _, max_idxs = max_score.topk(max_score.shape[0])
                    if max_idxs.shape[0] > 1:
                        max_idx = max_idxs[0]
                        duplicate_idxs = max_idxs[1:]
                    else:
                        max_idx = max_idxs
                        duplicate_idxs = torch.empty(0).to(device)

                    current_matches_predicted_box_means = predicted_box_means[key][prediction_idxs]
                    current_matches_predicted_box_covariances = predicted_box_covariances[
                        key][prediction_idxs]

                    # Highest scoring detection goes to true positives
                    true_positives['predicted_box_means'] = torch.cat(
                        (true_positives['predicted_box_means'],
                         current_matches_predicted_box_means[max_idx:max_idx + 1, :]))
                    true_positives['predicted_cls_probs'] = torch.cat(
                        (true_positives['predicted_cls_probs'],
                         current_matches_predicted_cls_probs[max_idx:max_idx + 1, :]))
                    true_positives['predicted_box_covariances'] = torch.cat(
                        (true_positives['predicted_box_covariances'],
                         current_matches_predicted_box_covariances[max_idx:max_idx + 1, :]))
                    true_positives['gt_box_means'] = torch.cat(
                        (true_positives['gt_box_means'], gt_box_means[key][i:i + 1, :]))
                    true_positives['gt_cat_idxs'] = torch.cat(
                        (true_positives['gt_cat_idxs'], gt_cat_idxs[key][i:i + 1, :]))
                    if trunc_occ_flag:
                        true_positives['is_truncated'] = torch.cat(
                            (true_positives['is_truncated'], is_truncated[key][i:i + 1]))
                        true_positives['is_occluded'] = torch.cat(
                            (true_positives['is_occluded'], is_occluded[key][i:i + 1]))
                    true_positives['iou_with_ground_truth'] = torch.cat(
                        (true_positives['iou_with_ground_truth'], match_iou[i, prediction_idxs][max_idx:max_idx + 1]))

                    # Lower scoring redundant detections go to duplicates
                    if duplicate_idxs.shape[0] > 1:
                        duplicates['predicted_box_means'] = torch.cat(
                            (duplicates['predicted_box_means'], current_matches_predicted_box_means[duplicate_idxs, :]))
                        duplicates['predicted_cls_probs'] = torch.cat(
                            (duplicates['predicted_cls_probs'], current_matches_predicted_cls_probs[duplicate_idxs, :]))
                        duplicates['predicted_box_covariances'] = torch.cat(
                            (duplicates['predicted_box_covariances'],
                             current_matches_predicted_box_covariances[duplicate_idxs, :]))
                        # The same gt row is repeated once per duplicate.
                        duplicates['gt_box_means'] = torch.cat(
                            (duplicates['gt_box_means'], gt_box_means[key][np.repeat(i, duplicate_idxs.shape[0]), :]))
                        duplicates['gt_cat_idxs'] = torch.cat(
                            (duplicates['gt_cat_idxs'], gt_cat_idxs[key][np.repeat(i, duplicate_idxs.shape[0]), :]))
                        if trunc_occ_flag:
                            duplicates['is_truncated'] = torch.cat(
                                (duplicates['is_truncated'], is_truncated[key][np.repeat(i, duplicate_idxs.shape[0])]))
                            duplicates['is_occluded'] = torch.cat(
                                (duplicates['is_occluded'], is_occluded[key][np.repeat(i, duplicate_idxs.shape[0])]))
                        duplicates['iou_with_ground_truth'] = torch.cat(
                            (duplicates['iou_with_ground_truth'],
                             match_iou[i, prediction_idxs][duplicate_idxs]))

                    elif duplicate_idxs.shape[0] == 1:
                        # Special case when only one duplicate exists, required to
                        # index properly for torch.cat
                        duplicates['predicted_box_means'] = torch.cat(
                            (duplicates['predicted_box_means'],
                             current_matches_predicted_box_means[duplicate_idxs:duplicate_idxs + 1, :]))
                        duplicates['predicted_cls_probs'] = torch.cat(
                            (duplicates['predicted_cls_probs'],
                             current_matches_predicted_cls_probs[duplicate_idxs:duplicate_idxs + 1, :]))
                        duplicates['predicted_box_covariances'] = torch.cat(
                            (duplicates['predicted_box_covariances'],
                             current_matches_predicted_box_covariances[duplicate_idxs:duplicate_idxs + 1, :]))
                        duplicates['gt_box_means'] = torch.cat(
                            (duplicates['gt_box_means'], gt_box_means[key][i:i + 1, :]))
                        duplicates['gt_cat_idxs'] = torch.cat(
                            (duplicates['gt_cat_idxs'], gt_cat_idxs[key][i:i + 1, :]))
                        if trunc_occ_flag:
                            duplicates['is_truncated'] = torch.cat(
                                (duplicates['is_truncated'], is_truncated[key][i:i + 1]))
                            duplicates['is_occluded'] = torch.cat(
                                (duplicates['is_occluded'], is_occluded[key][i:i + 1]))
                        duplicates['iou_with_ground_truth'] = torch.cat(
                            (duplicates['iou_with_ground_truth'],
                             match_iou[i, prediction_idxs][duplicate_idxs:duplicate_idxs + 1]))

            num_tp_dup_in_frame = predictions_idxs_processed.shape[0]

            # Process localization errors. Localization errors are detections with iou < 0.5 with any ground truth.
            # Mask out processed true positives/duplicates so they are not
            # re-associated with another gt
            # ToDo Localization Errors and False Positives are constant, do not change. We could generate them only
            # once.
            match_iou[:, true_positive_idxs[:, 1]] *= 0.0
            localization_errors_idxs = torch.nonzero(
                (match_iou > iou_min) & (
                    match_iou < 0.5), as_tuple=False)

            # Setup tensors to allow assignment of detections only once.
            processed_localization_errors = torch.tensor(
                []).type(torch.LongTensor).to(device)

            for localization_error_idx in localization_errors_idxs[:, 1]:
                # If localization error has been processed, skip iteration.
                if (processed_localization_errors ==
                        localization_error_idx).any():
                    continue

                # For every localization error, assign the ground truth with
                # highest IOU.
                gt_loc_error_idxs = localization_errors_idxs[localization_errors_idxs[:, 1]
                                                             == localization_error_idx]
                ious_with_gts = match_iou[gt_loc_error_idxs[:,
                                                            0], gt_loc_error_idxs[:, 1]]
                gt_loc_error_idxs = gt_loc_error_idxs[:, 0]

                # Choose the gt with the largest IOU with localization error
                if gt_loc_error_idxs.shape[0] > 1:
                    sorted_idxs = ious_with_gts.sort(
                        descending=True)[1]
                    gt_loc_error_idxs = gt_loc_error_idxs[sorted_idxs[0]:sorted_idxs[0] + 1]

                # This gt received a (badly localized) detection; it is not a
                # false negative.
                processed_gt = torch.cat((processed_gt,
                                          gt_loc_error_idxs))

                localization_errors['predicted_box_means'] = torch.cat(
                    (localization_errors['predicted_box_means'],
                     predicted_box_means[key][localization_error_idx:localization_error_idx + 1, :]))
                localization_errors['predicted_cls_probs'] = torch.cat(
                    (localization_errors['predicted_cls_probs'],
                     predicted_cls_probs[key][localization_error_idx:localization_error_idx + 1, :]))
                localization_errors['predicted_box_covariances'] = torch.cat(
                    (localization_errors['predicted_box_covariances'],
                     predicted_box_covariances[key][localization_error_idx:localization_error_idx + 1, :]))
                localization_errors['gt_box_means'] = torch.cat(
                    (localization_errors['gt_box_means'], gt_box_means[key][gt_loc_error_idxs:gt_loc_error_idxs + 1, :]))
                localization_errors['gt_cat_idxs'] = torch.cat(
                    (localization_errors['gt_cat_idxs'], gt_cat_idxs[key][gt_loc_error_idxs:gt_loc_error_idxs + 1]))
                if trunc_occ_flag:
                    localization_errors['is_truncated'] = torch.cat(
                        (localization_errors['is_truncated'], is_truncated[key][gt_loc_error_idxs:gt_loc_error_idxs + 1]))
                    localization_errors['is_occluded'] = torch.cat(
                        (localization_errors['is_occluded'], is_occluded[key][gt_loc_error_idxs:gt_loc_error_idxs + 1]))
                localization_errors['iou_with_ground_truth'] = torch.cat(
                    (localization_errors['iou_with_ground_truth'],
                     match_iou[gt_loc_error_idxs, localization_error_idx:localization_error_idx + 1]))

                # Append processed localization errors
                processed_localization_errors = torch.cat(
                    (processed_localization_errors, localization_error_idx.unsqueeze(0)))

            # Assert that the total number of processed predictions do not exceed the number of predictions in frame.
            num_loc_errors_in_frame = processed_localization_errors.shape[0]
            num_processed_predictions = num_loc_errors_in_frame + \
                num_fp_in_frame + num_tp_dup_in_frame
            # At the limit where iou_correct=0.5, equality holds.
            assert (num_processed_predictions <= num_predictions_in_frame)

            # Get false negative ground truth, which are fully missed.
            # These can be found by looking for GT instances not processed.
            processed_gt = processed_gt.unique()
            false_negative_idxs = torch.ones(frame_gt_boxes.tensor.shape[0])
            false_negative_idxs[processed_gt] = 0
            false_negative_idxs = false_negative_idxs.type(torch.bool)

            false_negatives['gt_box_means'] = torch.cat(
                (false_negatives['gt_box_means'],
                 gt_box_means[key][false_negative_idxs]))
            false_negatives['gt_cat_idxs'] = torch.cat(
                (false_negatives['gt_cat_idxs'],
                 gt_cat_idxs[key][false_negative_idxs]))
            if trunc_occ_flag:
                false_negatives['is_truncated'] = torch.cat(
                    (false_negatives['is_truncated'],
                     is_truncated[key][false_negative_idxs]))
                false_negatives['is_occluded'] = torch.cat(
                    (false_negatives['is_occluded'],
                     is_occluded[key][false_negative_idxs]))

    matched_results = dict()
    matched_results.update({"true_positives": true_positives,
                            "localization_errors": localization_errors,
                            "duplicates": duplicates,
                            "false_positives": false_positives,
                            "false_negatives": false_negatives})
    return matched_results
def get_train_contiguous_id_to_test_thing_dataset_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id):
    """
    Builds a mapping from training contiguous category ids to test dataset
    category ids.

    Args:
        cfg: detectron2 config node; cfg.DATASETS.TRAIN[0] names the training set.
        args: parsed arguments; args.test_dataset names the inference dataset.
        train_thing_dataset_id_to_contiguous_id (dict): train dataset id -> contiguous id.
        test_thing_dataset_id_to_contiguous_id (dict): test dataset id -> contiguous id.

    Returns:
        dict: training contiguous id -> test dataset category id.

    Raises:
        ValueError: if the training/inference dataset pair is unsupported.
    """
    # If both dicts are equal or if we are performing out of distribution
    # detection, just flip the test dict.
    if train_thing_dataset_id_to_contiguous_id == test_thing_dataset_id_to_contiguous_id:
        cat_mapping_dict = dict(
            (v, k) for k, v in test_thing_dataset_id_to_contiguous_id.items())
    else:
        # If not equal, three situations: 1) BDD to KITTI, 2) COCO to PASCAL,
        # or 3) COCO to OpenImages
        cat_mapping_dict = dict(
            (v, k) for k, v in test_thing_dataset_id_to_contiguous_id.items())
        # Fixed: this is a single if/elif/else chain (the original restarted
        # the chain at the 'openimages' check), and the unsupported case now
        # actually raises the ValueError instead of constructing and
        # discarding it (which previously led to a NameError below).
        if 'voc' in args.test_dataset and 'coco' in cfg.DATASETS.TRAIN[0]:
            dataset_mapping_dict = dict(
                (v, k) for k, v in metadata.COCO_TO_VOC_CONTIGUOUS_ID.items())
        elif 'openimages' in args.test_dataset and 'coco' in cfg.DATASETS.TRAIN[0]:
            dataset_mapping_dict = dict(
                (v, k) for k, v in metadata.COCO_TO_OPENIMAGES_CONTIGUOUS_ID.items())
        elif 'kitti' in args.test_dataset and 'bdd' in cfg.DATASETS.TRAIN[0]:
            dataset_mapping_dict = dict(
                (v, k) for k, v in metadata.BDD_TO_KITTI_CONTIGUOUS_ID.items())
        else:
            raise ValueError(
                'Cannot generate category mapping dictionary. Please check if training and inference datasets are compatible.')
        cat_mapping_dict = dict(
            (dataset_mapping_dict[k], v) for k, v in cat_mapping_dict.items())
    return cat_mapping_dict
def get_test_thing_dataset_id_to_train_contiguous_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id):
    """
    Builds the inverse of get_train_contiguous_id_to_test_thing_dataset_id_dict:
    a mapping from test dataset category ids to training contiguous ids.
    """
    forward_mapping = get_train_contiguous_id_to_test_thing_dataset_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id)
    # Invert the forward mapping.
    return dict((test_id, train_id)
                for train_id, test_id in forward_mapping.items())
| 29,261 | 49.364888 | 139 | py |
probdet | probdet-master/src/core/visualization_tools/results_processing_tools.py | import glob
import itertools
import numpy as np
import os
import pickle
import torch
from collections import defaultdict
# Project imports
from core.setup import setup_config, setup_arg_parser
from probabilistic_inference.inference_utils import get_inference_output_dir
def get_clean_results_dict(config_names,
                           configs_list,
                           inference_configs_list):
    """Aggregate pickled probabilistic scoring results into one flat dict.

    For every (method name, config, inference config) triple, loads the
    'probabilistic_scoring_res_averaged_*.pkl' file (or the '*_odd_*.pkl'
    variant for the out-of-distribution split) written by a previous
    inference run, and appends its metrics into a dict-of-lists keyed first
    by detection category ('True Positives', 'Duplicates',
    'Localization Errors', 'False Positives') and then by column name —
    a layout convenient for building a pandas DataFrame.

    Args:
        config_names (list): human-readable method names (become the
            'Method Name' column values).
        configs_list (list): paths to the model config files.
        inference_configs_list (list): paths to the inference config files.

    Returns:
        defaultdict: res_dict_clean[detection_category][column] -> list.
    """
    # Level 0 is coco validation set with no corruption, level 10 is open
    # images, level 11 is open images ood
    image_corruption_levels = [0, 1, 3, 5, 10, 11]
    test_dataset_coco = "coco_2017_custom_val"
    test_dataset_open_images = "openimages_val"
    test_dataset_open_images_odd = "openimages_odd_val"
    arg_parser = setup_arg_parser()
    args = arg_parser.parse_args()
    # Initiate dataframe dict
    res_dict_clean = defaultdict(lambda: defaultdict(list))
    for config_name, config, inference_config_name in zip(
            config_names, configs_list, inference_configs_list):
        # Setup config. NOTE: the same `args` namespace is mutated on every
        # iteration, so statement order below is significant.
        args.config_file = config
        args.inference_config = inference_config_name
        args.test_dataset = test_dataset_coco
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
        cfg.defrost()
        # Read coco dataset results
        cfg.ACTUAL_TEST_DATASET = args.test_dataset
        for image_corruption_level in image_corruption_levels:
            # Build path to gt instances and inference output
            args.image_corruption_level = image_corruption_level
            # Translate numeric level to the human-readable label used as
            # the 'Image Corruption Level' column value.
            if image_corruption_level == 0:
                image_corruption_level = 'Val'
            elif image_corruption_level == 10:
                image_corruption_level = 'OpenIm'
            elif image_corruption_level == 11:
                image_corruption_level = 'OpenIm OOD'
            else:
                image_corruption_level = 'C' + str(image_corruption_level)
            if 'OpenIm' not in image_corruption_level:
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        'probabilistic_scoring_res_averaged_*.pkl'))[0]
            else:
                # OpenImages results live under corruption level 0 of their
                # own dataset name; pick the OOD pickle for the OOD split.
                args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images if image_corruption_level == 'OpenIm' else test_dataset_open_images_odd
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                prob_dict_name = 'probabilistic_scoring_res_averaged_*.pkl' if image_corruption_level == 'OpenIm' else 'probabilistic_scoring_res_odd_*.pkl'
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        prob_dict_name))[0]
            with open(dictionary_file_name, "rb") as pickle_file:
                res_dict = pickle.load(pickle_file)
            if image_corruption_level != 'OpenIm OOD':
                # True Positives Results
                res_dict_clean['True Positives']['Negative Log Likelihood (Classification)'].extend(
                    res_dict['true_positives_cls_analysis']['ignorance_score_mean'])
                res_dict_clean['True Positives']['Brier Score'].extend(
                    res_dict['true_positives_cls_analysis']['brier_score_mean'])
                res_dict_clean['True Positives']['Negative Log Likelihood (Regression)'].extend(
                    res_dict['true_positives_reg_analysis']['ignorance_score_mean'])
                res_dict_clean['True Positives']['Mean Squared Error'].extend(
                    res_dict['true_positives_reg_analysis']['mean_squared_error'])
                res_dict_clean['True Positives']['Energy Score'].extend(
                    res_dict['true_positives_reg_analysis']['energy_score_mean'])
                res_dict_clean['True Positives']['Image Corruption Level'].extend(
                    [image_corruption_level] *
                    res_dict['true_positives_reg_analysis']['energy_score_mean'].shape[0])
                res_dict_clean['True Positives']['Method Name'].extend(
                    [config_name] * res_dict['true_positives_reg_analysis']['energy_score_mean'].shape[0])
                # Duplicates Results
                res_dict_clean['Duplicates']['Negative Log Likelihood (Classification)'].extend(
                    res_dict['duplicates_cls_analysis']['ignorance_score_mean'])
                res_dict_clean['Duplicates']['Brier Score'].extend(
                    res_dict['duplicates_cls_analysis']['brier_score_mean'])
                res_dict_clean['Duplicates']['Negative Log Likelihood (Regression)'].extend(
                    res_dict['duplicates_reg_analysis']['ignorance_score_mean'])
                res_dict_clean['Duplicates']['Mean Squared Error'].extend(
                    res_dict['duplicates_reg_analysis']['mean_squared_error'])
                res_dict_clean['Duplicates']['Energy Score'].extend(
                    res_dict['duplicates_reg_analysis']['energy_score_mean'])
                res_dict_clean['Duplicates']['Image Corruption Level'].extend(
                    [image_corruption_level] *
                    res_dict['duplicates_reg_analysis']['energy_score_mean'].shape[0])
                res_dict_clean['Duplicates']['Method Name'].extend(
                    [config_name] * res_dict['duplicates_reg_analysis']['energy_score_mean'].shape[0])
                # Localization Error Results
                res_dict_clean['Localization Errors']['Negative Log Likelihood (Classification)'].extend(
                    res_dict['localization_errors_cls_analysis']['ignorance_score_mean'])
                res_dict_clean['Localization Errors']['Brier Score'].extend(
                    res_dict['localization_errors_cls_analysis']['brier_score_mean'])
                res_dict_clean['Localization Errors']['Negative Log Likelihood (Regression)'].extend(
                    res_dict['localization_errors_reg_analysis']['ignorance_score_mean'])
                res_dict_clean['Localization Errors']['Mean Squared Error'].extend(
                    res_dict['localization_errors_reg_analysis']['mean_squared_error'])
                res_dict_clean['Localization Errors']['Energy Score'].extend(
                    res_dict['localization_errors_reg_analysis']['energy_score_mean'])
                res_dict_clean['Localization Errors']['Image Corruption Level'].extend(
                    [image_corruption_level] *
                    res_dict['localization_errors_reg_analysis']['energy_score_mean'].shape[0])
                res_dict_clean['Localization Errors']['Method Name'].extend(
                    [config_name] *
                    res_dict['localization_errors_reg_analysis']['energy_score_mean'].shape[0])
                # False Positives Results. No regression NLL/MSE here since
                # false positives have no matched ground truth box.
                res_dict_clean['False Positives']['Negative Log Likelihood (Classification)'].extend(
                    res_dict['false_positives_cls_analysis']['ignorance_score_mean'])
                res_dict_clean['False Positives']['Brier Score'].extend(
                    res_dict['false_positives_cls_analysis']['brier_score_mean'])
                res_dict_clean['False Positives']['Entropy'].extend(
                    res_dict['false_positives_reg_analysis']['total_entropy_mean'])
                res_dict_clean['False Positives']['Image Corruption Level'].extend(
                    [image_corruption_level] *
                    res_dict['false_positives_reg_analysis']['total_entropy_mean'].shape[0])
                res_dict_clean['False Positives']['Method Name'].extend(
                    [config_name] *
                    res_dict['false_positives_reg_analysis']['total_entropy_mean'].shape[0])
            else:
                # OOD split: every detection is a false positive by
                # construction, and the pickle stores scalar means, hence
                # append instead of extend.
                res_dict_clean['False Positives']['Negative Log Likelihood (Classification)'].append(
                    res_dict['ignorance_score_mean'])
                res_dict_clean['False Positives']['Brier Score'].append(
                    res_dict['brier_score_mean'])
                res_dict_clean['False Positives']['Entropy'].append(
                    res_dict['total_entropy_mean'])
                res_dict_clean['False Positives']['Image Corruption Level'].append(
                    image_corruption_level)
                res_dict_clean['False Positives']['Method Name'].append(
                    config_name)
    return res_dict_clean
def get_mAP_results(config_names,
                    configs_list,
                    inference_configs_list):
    """Collect mAP numbers for each method over corruption levels.

    Reads the first value of the 'mAP_res.txt' file produced by a previous
    evaluation run in each inference output directory and converts it to a
    percentage.

    Args:
        config_names (list): human-readable method names.
        configs_list (list): paths to the model config files.
        inference_configs_list (list): paths to the inference config files.

    Returns:
        defaultdict: columns 'Method Name', 'Image Corruption Level', 'mAP'.
    """
    # Level 0 is coco validation set with no corruption, level 10 is open
    # images, level 11 is open images ood
    image_corruption_levels = [0, 1, 2, 3, 4, 5, 10]
    test_dataset_coco = "coco_2017_custom_val"
    test_dataset_open_images = "openimages_val"
    arg_parser = setup_arg_parser()
    args = arg_parser.parse_args()
    # Initiate dataframe dict
    mAP_results = defaultdict(list)
    for config_name, config, inference_config_name in zip(
            config_names, configs_list, inference_configs_list):
        # Setup config. NOTE: the same `args` namespace is mutated per
        # iteration; order of assignments matters.
        args.config_file = config
        args.inference_config = inference_config_name
        args.test_dataset = test_dataset_coco
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
        cfg.defrost()
        # Read coco dataset results
        cfg.ACTUAL_TEST_DATASET = args.test_dataset
        for image_corruption_level in image_corruption_levels:
            # Build path to gt instances and inference output
            args.image_corruption_level = image_corruption_level
            if image_corruption_level == 0:
                image_corruption_level = 'Val'
            elif image_corruption_level == 10:
                image_corruption_level = 'OpenIm'
            else:
                image_corruption_level = 'C' + str(image_corruption_level)
            if 'OpenIm' not in image_corruption_level:
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
            else:
                # OpenImages results are stored under corruption level 0 of
                # the OpenImages dataset name.
                args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
            text_file_name = glob.glob(
                os.path.join(
                    inference_output_dir,
                    'mAP_res.txt'))[0]
            with open(text_file_name, "r") as f:
                # File looks like a printed list, e.g. "[0.123, ...]";
                # strip the brackets and take the first entry.
                mAP = f.read().strip('][\n').split(', ')[0]
                mAP = float(mAP) * 100
            mAP_results['Method Name'].append(config_name)
            mAP_results['Image Corruption Level'].append(
                image_corruption_level)
            mAP_results['mAP'].append(mAP)
    return mAP_results
def get_matched_results_dicts(config_names,
                              configs_list,
                              inference_configs_list,
                              iou_min=0.1,
                              iou_correct=0.5):
    """Load matched detection results and compute per-detection metrics.

    For each method and corruption level, loads the precomputed matched
    results (true positives / localization errors / duplicates / false
    positives) or, for the OOD split, the raw preprocessed predictions, then
    computes per-detection regression entropy, NLL, MSE, energy score and
    categorical entropy on GPU.

    Args:
        config_names (list): human-readable method names.
        configs_list (list): paths to the model config files.
        inference_configs_list (list): paths to the inference config files.
        iou_min (float): minimum IOU used when the matched results were
            generated; selects the file name to load.
        iou_correct (float): IOU threshold for a correct match; selects the
            file name to load.

    Returns:
        defaultdict: res_dict_clean[method][corruption_level][metric] -> list.

    NOTE(review): requires CUDA (tensors are loaded with
    map_location='cuda' and moved to 'cuda' throughout) — confirm before
    running on a CPU-only machine. Also, `np.NaN` is used as a sentinel for
    "no ground truth"; the `np.NaN` alias was removed in NumPy 2.0 (`np.nan`
    is the portable spelling).
    """
    # Level 0 is coco validation set with no corruption, level 10 is open
    # images, level 11 is open images ood
    image_corruption_levels = [0, 10, 11]
    test_dataset_coco = "coco_2017_custom_val"
    test_dataset_open_images = "openimages_val"
    test_dataset_open_images_odd = "openimages_odd_val"
    arg_parser = setup_arg_parser()
    args = arg_parser.parse_args()
    # Initiate dataframe dict
    res_dict_clean = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    for config_name, config, inference_config_name in zip(
            config_names, configs_list, inference_configs_list):
        # Setup config
        args.config_file = config
        args.inference_config = inference_config_name
        args.test_dataset = test_dataset_coco
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
        cfg.defrost()
        # Read coco dataset results
        cfg.ACTUAL_TEST_DATASET = args.test_dataset
        for image_corruption_level in image_corruption_levels:
            # Build path to gt instances and inference output
            args.image_corruption_level = image_corruption_level
            if image_corruption_level == 0:
                image_corruption_level = 'Val'
            elif image_corruption_level == 10:
                image_corruption_level = 'OpenIm'
            elif image_corruption_level == 11:
                image_corruption_level = 'OpenIm OOD'
            else:
                image_corruption_level = 'C' + str(image_corruption_level)
            if 'OpenIm' not in image_corruption_level:
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                # Get matched results by either generating them or loading from
                # file.
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        "matched_results_{}_{}_*.pth".format(
                            iou_min,
                            iou_correct)))[0]
                matched_results = torch.load(
                    dictionary_file_name, map_location='cuda')
            elif image_corruption_level == 'OpenIm':
                # OpenImages in-distribution: same matched-results format,
                # stored under corruption level 0 of the OpenImages dataset.
                args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images if image_corruption_level == 'OpenIm' else test_dataset_open_images_odd
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        "matched_results_{}_{}_*.pth".format(
                            iou_min,
                            iou_correct)))[0]
                matched_results = torch.load(
                    dictionary_file_name, map_location='cuda')
            else:
                # OOD split: no matching against ground truth exists, so
                # build a flat results dict from the raw preprocessed
                # per-image predictions instead.
                args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images if image_corruption_level == 'OpenIm' else test_dataset_open_images_odd
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        "preprocessed_predicted_instances_odd_*.pth"))[0]
                preprocessed_predicted_instances = torch.load(
                    dictionary_file_name, map_location='cuda')
                predicted_boxes = preprocessed_predicted_instances['predicted_boxes']
                predicted_cov_mats = preprocessed_predicted_instances['predicted_covar_mats']
                predicted_cls_probs = preprocessed_predicted_instances['predicted_cls_probs']
                # Flatten the per-image dicts into single tensors.
                predicted_boxes = list(itertools.chain.from_iterable(
                    [predicted_boxes[key] for key in predicted_boxes.keys()]))
                predicted_cov_mats = list(itertools.chain.from_iterable(
                    [predicted_cov_mats[key] for key in predicted_cov_mats.keys()]))
                predicted_cls_probs = list(itertools.chain.from_iterable(
                    [predicted_cls_probs[key] for key in predicted_cls_probs.keys()]))
                predicted_boxes = torch.stack(
                    predicted_boxes, 1).transpose(
                    0, 1)
                predicted_cov_mats = torch.stack(
                    predicted_cov_mats, 1).transpose(0, 1)
                predicted_cls_probs = torch.stack(
                    predicted_cls_probs,
                    1).transpose(
                    0,
                    1)
                matched_results = {
                    'predicted_box_means': predicted_boxes,
                    'predicted_box_covariances': predicted_cov_mats,
                    'predicted_cls_probs': predicted_cls_probs}
            if image_corruption_level != 'OpenIm OOD':
                # Concatenate all detection categories; false positives get
                # NaN ground-truth boxes since none exist for them.
                all_results_means = torch.cat(
                    (matched_results['true_positives']['predicted_box_means'],
                     matched_results['localization_errors']['predicted_box_means'],
                     matched_results['duplicates']['predicted_box_means'],
                     matched_results['false_positives']['predicted_box_means']))
                all_results_covs = torch.cat(
                    (matched_results['true_positives']['predicted_box_covariances'],
                     matched_results['localization_errors']['predicted_box_covariances'],
                     matched_results['duplicates']['predicted_box_covariances'],
                     matched_results['false_positives']['predicted_box_covariances']))
                all_gt_means = torch.cat(
                    (matched_results['true_positives']['gt_box_means'],
                     matched_results['localization_errors']['gt_box_means'],
                     matched_results['duplicates']['gt_box_means'],
                     matched_results['false_positives']['predicted_box_means']*np.NaN))
                # Build the distribution on CPU (with a small diagonal jitter
                # for numerical stability), then push its cached attributes
                # to GPU one by one.
                predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
                    all_results_means.to('cpu'),
                    all_results_covs.to('cpu') +
                    1e-2 *
                    torch.eye(all_results_covs.shape[2]).to('cpu'))
                predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
                    'cuda')
                predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
                    'cuda')
                predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
                    'cuda')
                all_entropy = predicted_multivariate_normal_dists.entropy()
                all_log_prob = -predicted_multivariate_normal_dists.log_prob(all_gt_means)
                # Energy Score. Monte-Carlo estimate from 3 samples —
                # NOTE(review): a very coarse estimate; confirm this matches
                # the sample count used elsewhere in the project.
                sample_set = predicted_multivariate_normal_dists.sample((3,)).to('cuda')
                sample_set_1 = sample_set[:-1]
                sample_set_2 = sample_set[1:]
                energy_score = torch.norm(
                    (sample_set_1 - all_gt_means),
                    dim=2).mean(0) - 0.5 * torch.norm(
                    (sample_set_1 - sample_set_2),
                    dim=2).mean(0)
                mse_loss = torch.nn.MSELoss(reduction='none')
                mse = mse_loss(all_gt_means, all_results_means).mean(1)
                res_dict_clean[config_name][image_corruption_level]['Entropy'].extend(
                    all_entropy.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['MSE'].extend(
                    mse.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['NLL'].extend(
                    all_log_prob.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['ED'].extend(
                    energy_score.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['IOU With GT'].extend(torch.cat(
                    (matched_results['true_positives']['iou_with_ground_truth'],
                     matched_results['localization_errors']['iou_with_ground_truth'][:, 0],
                     matched_results['duplicates']['iou_with_ground_truth'],
                     torch.zeros(
                         matched_results['false_positives']['predicted_box_means'].shape[0]).to('cuda')*np.NaN)).cpu().numpy())
                # Entropy of false-positive box distributions only.
                predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
                    matched_results['false_positives']['predicted_box_means'].to('cpu'),
                    matched_results['false_positives']['predicted_box_covariances'].to('cpu') +
                    1e-2 *
                    torch.eye(matched_results['false_positives']['predicted_box_covariances'].shape[2]).to('cpu'))
                predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
                    'cuda')
                predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
                    'cuda')
                predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
                    'cuda')
                FP_Entropy = predicted_multivariate_normal_dists.entropy()
                res_dict_clean[config_name][image_corruption_level]['FP_Entropy'].extend(
                    FP_Entropy.cpu().numpy())
                predicted_cat_dists_fp = matched_results['false_positives']['predicted_cls_probs']
                # 80 columns => RetinaNet-style multilabel sigmoid scores:
                # use a Bernoulli on the background probability; otherwise
                # treat the vector as a proper categorical distribution.
                if predicted_cat_dists_fp.shape[1] == 80:
                    predicted_cat_dists_fp, _ = predicted_cat_dists_fp.max(dim=1)
                    predicted_cat_dists_fp = 1-predicted_cat_dists_fp
                    predicted_categorical_dists = torch.distributions.Bernoulli(
                        probs=predicted_cat_dists_fp)
                else:
                    predicted_categorical_dists = torch.distributions.Categorical(
                        probs=matched_results['false_positives']['predicted_cls_probs'])
                all_pred_ent = predicted_categorical_dists.entropy()
                res_dict_clean[config_name][image_corruption_level]['Cat_Entropy'].extend(
                    all_pred_ent.cpu().numpy())
                # Truncation/occlusion flags exist only for OpenImages;
                # elsewhere NaN placeholders keep the columns aligned.
                if image_corruption_level == 'OpenIm':
                    res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(
                        torch.cat(
                            (matched_results['true_positives']['is_truncated'],
                             matched_results['localization_errors']['is_truncated'],
                             matched_results['duplicates']['is_truncated'],
                             torch.full((
                                 matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
                    res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(
                        torch.cat(
                            (matched_results['true_positives']['is_occluded'],
                             matched_results['localization_errors']['is_occluded'],
                             matched_results['duplicates']['is_occluded'],
                             torch.full((
                                 matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
                else:
                    res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(
                        torch.cat(
                            (torch.full((
                                matched_results['true_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN,
                                torch.full((
                                    matched_results['localization_errors']['predicted_box_means'].shape[0],), -1,
                                    dtype=torch.float32).to('cuda'),
                                torch.full((
                                    matched_results['duplicates']['predicted_box_means'].shape[0],), -1,
                                    dtype=torch.float32).to('cuda'),
                                torch.full((
                                    matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
                    res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(
                        torch.cat(
                            (torch.full((
                                matched_results['true_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN,
                                torch.full((
                                    matched_results['localization_errors']['predicted_box_means'].shape[0],), -1,
                                    dtype=torch.float32).to('cuda')*np.NaN,
                                torch.full((
                                    matched_results['duplicates']['predicted_box_means'].shape[0],), -1,
                                    dtype=torch.float32).to('cuda')*np.NaN,
                                torch.full((
                                    matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
            else:
                # OOD split: every detection is treated as a false positive.
                predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
                    matched_results['predicted_box_means'].to('cpu'),
                    matched_results['predicted_box_covariances'].to('cpu') +
                    1e-2 *
                    torch.eye(matched_results['predicted_box_covariances'].shape[2]).to('cpu'))
                predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
                    'cuda')
                predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
                    'cuda')
                predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
                    'cuda')
                all_entropy = predicted_multivariate_normal_dists.entropy()
                res_dict_clean[config_name][image_corruption_level]['FP_Entropy'].extend(
                    all_entropy.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['IOU With GT'].extend(torch.zeros(
                    matched_results['predicted_box_means'].shape[0]).cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(torch.full((
                    matched_results['predicted_box_means'].shape[0],), -1, dtype=torch.float32).cpu().numpy()*np.NaN)
                res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(torch.full((
                    matched_results['predicted_box_means'].shape[0],), -1, dtype=torch.float32).cpu().numpy()*np.NaN)
                all_results_cat = matched_results['predicted_cls_probs']
                if all_results_cat.shape[1] == 80:
                    predicted_cat_dists_fp, _ = all_results_cat.max(dim=1)
                    predicted_cat_dists_fp = 1-predicted_cat_dists_fp
                    predicted_categorical_dists = torch.distributions.Bernoulli(
                        probs=predicted_cat_dists_fp)
                else:
                    predicted_categorical_dists = torch.distributions.Categorical(
                        probs=all_results_cat)
                all_pred_ent = predicted_categorical_dists.entropy()
                res_dict_clean[config_name][image_corruption_level]['Cat_Entropy'].extend(
                    all_pred_ent.cpu().numpy())
    return res_dict_clean
def mean_reject_outliers(x, outlierConstant=1.5):
    """Mean of *x* after rejecting IQR-based outliers (Tukey's fences).

    Values outside [Q1 - c*IQR, Q3 + c*IQR], with c = outlierConstant,
    are discarded before averaging.

    Args:
        x (array-like): input values; NaN entries are ignored.
        outlierConstant (float): multiplier for the inter-quartile range.

    Returns:
        float: mean of the inlier values, or NaN when no inliers remain.
    """
    a = np.asarray(x, dtype=float)
    # nanpercentile keeps the fences meaningful when the input contains
    # NaNs (np.percentile would return NaN and reject everything); it is
    # identical to np.percentile on NaN-free input.
    upper_quartile = np.nanpercentile(a, 75)
    lower_quartile = np.nanpercentile(a, 25)
    iqr_margin = (upper_quartile - lower_quartile) * outlierConstant
    lower_bound = lower_quartile - iqr_margin
    upper_bound = upper_quartile + iqr_margin
    # NaN compares False on both sides, so NaNs are dropped here as well.
    inliers = a[(a >= lower_bound) & (a <= upper_bound)]
    if inliers.size == 0:
        # Avoid the "mean of empty slice" RuntimeWarning.
        return np.nan
    return np.nanmean(inliers)
| 30,031 | 53.703097 | 161 | py |
probdet | probdet-master/src/probabilistic_inference/probabilistic_retinanet_predictor.py | import numpy as np
import torch
# Detectron Imports
from detectron2.layers import batched_nms, cat
from detectron2.structures import Boxes, Instances, pairwise_iou
# Project Imports
from probabilistic_inference import inference_utils
from probabilistic_inference.inference_core import ProbabilisticPredictor
from probabilistic_modeling.modeling_utils import covariance_output_to_cholesky, clamp_log_variance
class RetinaNetProbabilisticPredictor(ProbabilisticPredictor):
    def __init__(self, cfg):
        """Initialize the RetinaNet probabilistic predictor.

        Args:
            cfg (CfgNode): detectron2-style configuration node.
        """
        super().__init__(cfg)
        # Create transform used to map sampled regression deltas back to box
        # corners; shares the RPN regression weights from the config.
        self.sample_box2box_transform = inference_utils.SampleBox2BoxTransform(
            self.cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
    def retinanet_probabilistic_inference(
            self,
            input_im,
            outputs=None,
            ensemble_inference=False,
            outputs_list=None):
        """
        General RetinaNet probabilistic anchor-wise inference. Preliminary inference step for many post-processing
        based inference methods such as standard_nms, output_statistics, and bayes_od.
        Args:
            input_im (list): an input im list generated from dataset handler.
            outputs (list): outputs from model.forward. Will be computed internally if not provided.
            ensemble_inference (bool): True if ensembles are used for inference. If set to true, outputs_list must be externally provided.
            outputs_list (list): List of model() outputs, usually generated from ensembles of models.
        Returns:
            all_predicted_boxes,
            all_predicted_boxes_covariance (Tensor): Nx4x4 vectors used
            all_predicted_prob (Tensor): Nx1 scores which represent max of all_pred_prob_vectors. For usage in NMS and mAP computation.
            all_classes_idxs (Tensor): Nx1 Class ids to be used for NMS.
            all_predicted_prob_vectors (Tensor): NxK tensor where K is the number of classes.
        """
        # Epistemic path is taken when multiple stochastic forward passes
        # (MC dropout) or an ensemble supply several outputs to be averaged;
        # an externally-provided `outputs` bypasses it.
        is_epistemic = ((self.mc_dropout_enabled and self.num_mc_dropout_runs > 1)
                        or ensemble_inference) and outputs is None
        if is_epistemic:
            if self.mc_dropout_enabled and self.num_mc_dropout_runs > 1:
                # Run MC dropout: the model returns feature maps for all runs
                # concatenated; re-split them into one dict per run.
                outputs_list = self.model(
                    input_im,
                    return_anchorwise_output=True,
                    num_mc_dropout_runs=self.num_mc_dropout_runs)
                n_fms = len(self.model.in_features)
                outputs_list = [{key: value[i * n_fms:(i + 1) * n_fms] if value is not None else value for key,
                                 value in outputs_list.items()} for i in range(self.num_mc_dropout_runs)]
            # Anchors are identical across runs; take them from the first.
            outputs = {'anchors': outputs_list[0]['anchors']}
            # Compute box classification and classification variance means
            box_cls = [output['box_cls'] for output in outputs_list]
            box_cls_mean = box_cls[0]
            for i in range(len(box_cls) - 1):
                box_cls_mean = [box_cls_mean[j] + box_cls[i][j]
                                for j in range(len(box_cls_mean))]
            box_cls_mean = [
                box_cls_f_map /
                len(box_cls) for box_cls_f_map in box_cls_mean]
            outputs.update({'box_cls': box_cls_mean})
            if outputs_list[0]['box_cls_var'] is not None:
                # Average per-run classification variances feature-map-wise.
                box_cls_var = [output['box_cls_var']
                               for output in outputs_list]
                box_cls_var_mean = box_cls_var[0]
                for i in range(len(box_cls_var) - 1):
                    box_cls_var_mean = [
                        box_cls_var_mean[j] +
                        box_cls_var[i][j] for j in range(
                            len(box_cls_var_mean))]
                box_cls_var_mean = [
                    box_cls_var_f_map /
                    len(box_cls_var) for box_cls_var_f_map in box_cls_var_mean]
            else:
                box_cls_var_mean = None
            outputs.update({'box_cls_var': box_cls_var_mean})
            # Compute box regression epistemic variance and mean, and aleatoric
            # variance mean
            box_delta_list = [output['box_delta']
                              for output in outputs_list]
            box_delta_mean = box_delta_list[0]
            for i in range(len(box_delta_list) - 1):
                box_delta_mean = [
                    box_delta_mean[j] +
                    box_delta_list[i][j] for j in range(
                        len(box_delta_mean))]
            box_delta_mean = [
                box_delta_f_map /
                len(box_delta_list) for box_delta_f_map in box_delta_mean]
            outputs.update({'box_delta': box_delta_mean})
            if outputs_list[0]['box_reg_var'] is not None:
                box_reg_var = [output['box_reg_var']
                               for output in outputs_list]
                box_reg_var_mean = box_reg_var[0]
                for i in range(len(box_reg_var) - 1):
                    box_reg_var_mean = [
                        box_reg_var_mean[j] +
                        box_reg_var[i][j] for j in range(
                            len(box_reg_var_mean))]
                box_reg_var_mean = [
                    box_delta_f_map /
                    len(box_reg_var) for box_delta_f_map in box_reg_var_mean]
            else:
                box_reg_var_mean = None
            outputs.update({'box_reg_var': box_reg_var_mean})
        elif outputs is None:
            # Single deterministic forward pass.
            outputs = self.model(input_im, return_anchorwise_output=True)
        # Per-feature-map accumulators; concatenated at the end.
        all_anchors = []
        all_predicted_deltas = []
        all_predicted_boxes_cholesky = []
        all_predicted_prob = []
        all_classes_idxs = []
        all_predicted_prob_vectors = []
        all_predicted_boxes_epistemic_covar = []
        for i, anchors in enumerate(outputs['anchors']):
            box_cls = outputs['box_cls'][i][0]
            box_delta = outputs['box_delta'][i][0]
            # If classification aleatoric uncertainty available, perform
            # monte-carlo sampling to generate logits.
            if outputs['box_cls_var'] is not None:
                box_cls_var = outputs['box_cls_var'][i][0]
                # box_cls_var holds log-variances: exp() recovers variance,
                # sqrt() the standard deviation for the Normal.
                box_cls_dists = torch.distributions.normal.Normal(
                    box_cls, scale=torch.sqrt(torch.exp(box_cls_var)))
                box_cls = box_cls_dists.rsample(
                    (self.model.cls_var_num_samples,))
                box_cls = torch.mean(box_cls.sigmoid_(), 0)
            else:
                box_cls = box_cls.sigmoid_()
            # Keep top k top scoring indices only.
            num_topk = min(self.model.test_topk_candidates, box_delta.size(0))
            predicted_prob, classes_idxs = torch.max(box_cls, 1)
            predicted_prob, topk_idxs = predicted_prob.topk(num_topk)
            # filter out the proposals with low confidence score
            keep_idxs = predicted_prob > self.model.test_score_thresh
            predicted_prob = predicted_prob[keep_idxs]
            topk_idxs = topk_idxs[keep_idxs]
            anchor_idxs = topk_idxs
            classes_idxs = classes_idxs[topk_idxs]
            box_delta = box_delta[anchor_idxs]
            anchors = anchors[anchor_idxs]
            cholesky_decomp = None
            if outputs['box_reg_var'] is not None:
                box_reg_var = outputs['box_reg_var'][i][0][anchor_idxs]
                box_reg_var = clamp_log_variance(box_reg_var)
                # Construct cholesky decomposition using diagonal vars
                cholesky_decomp = covariance_output_to_cholesky(box_reg_var)
            # In case dropout is enabled, we need to compute aleatoric
            # covariance matrix and add it here:
            box_reg_epistemic_covar = None
            if is_epistemic:
                # Compute epistemic box covariance matrix
                box_delta_list_i = [
                    self.model.box2box_transform.apply_deltas(
                        box_delta_i[i][0][anchor_idxs],
                        anchors.tensor) for box_delta_i in box_delta_list]
                _, box_reg_epistemic_covar = inference_utils.compute_mean_covariance_torch(
                    box_delta_list_i)
            all_predicted_deltas.append(box_delta)
            all_predicted_boxes_cholesky.append(cholesky_decomp)
            all_anchors.append(anchors.tensor)
            all_predicted_prob.append(predicted_prob)
            all_predicted_prob_vectors.append(box_cls[anchor_idxs])
            all_classes_idxs.append(classes_idxs)
            all_predicted_boxes_epistemic_covar.append(box_reg_epistemic_covar)
        box_delta = cat(all_predicted_deltas)
        anchors = cat(all_anchors)
        if isinstance(all_predicted_boxes_cholesky[0], torch.Tensor):
            # Generate multivariate samples to be used for monte-carlo simulation. We can afford much more samples
            # here since the matrix dimensions are much smaller and therefore
            # have much less memory footprint. Keep 100 or less to maintain
            # reasonable runtime speed.
            cholesky_decomp = cat(all_predicted_boxes_cholesky)
            multivariate_normal_samples = torch.distributions.MultivariateNormal(
                box_delta, scale_tril=cholesky_decomp)
            # Define monte-carlo samples
            distributions_samples = multivariate_normal_samples.rsample(
                (1000,))
            distributions_samples = torch.transpose(
                torch.transpose(distributions_samples, 0, 1), 1, 2)
            samples_anchors = torch.repeat_interleave(
                anchors.unsqueeze(2), 1000, dim=2)
            # Transform samples from deltas to boxes
            t_dist_samples = self.sample_box2box_transform.apply_samples_deltas(
                distributions_samples, samples_anchors)
            # Compute samples mean and covariance matrices.
            all_predicted_boxes, all_predicted_boxes_covariance = inference_utils.compute_mean_covariance_torch(
                t_dist_samples)
            if isinstance(
                    all_predicted_boxes_epistemic_covar[0],
                    torch.Tensor):
                # Total covariance = aleatoric (sampled) + epistemic.
                epistemic_covar_mats = cat(
                    all_predicted_boxes_epistemic_covar)
                all_predicted_boxes_covariance += epistemic_covar_mats
        else:
            # This handles the case where no aleatoric uncertainty is available
            if is_epistemic:
                all_predicted_boxes_covariance = cat(
                    all_predicted_boxes_epistemic_covar)
            else:
                all_predicted_boxes_covariance = []
            # predict boxes
            all_predicted_boxes = self.model.box2box_transform.apply_deltas(
                box_delta, anchors)
        return all_predicted_boxes, all_predicted_boxes_covariance, cat(
            all_predicted_prob), cat(all_classes_idxs), cat(all_predicted_prob_vectors)
def post_processing_standard_nms(self, input_im):
"""
This function produces results using standard non-maximum suppression. The function takes into
account any probabilistic modeling method when computing the results. It can combine aleatoric uncertainty
from heteroscedastic regression and epistemic uncertainty from monte-carlo dropout for both classification and
regression results.
Args:
input_im (list): an input im list generated from dataset handler.
Returns:
result (instances): object instances
"""
outputs = self.retinanet_probabilistic_inference(input_im)
return inference_utils.general_standard_nms_postprocessing(
input_im, outputs, self.model.test_nms_thresh, self.model.max_detections_per_image)
def post_processing_output_statistics(self, input_im):
"""
This function produces box covariance matrices using anchor statistics. Uses the fact that multiple anchors are
regressed to the same spatial location for clustering and extraction of box covariance matrix.
Args:
input_im (list): an input im list generated from dataset handler.
Returns:
result (instances): object instances
"""
outputs = self.retinanet_probabilistic_inference(input_im)
return inference_utils.general_output_statistics_postprocessing(
input_im,
outputs,
self.model.test_nms_thresh,
self.model.max_detections_per_image,
self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD)
    def post_processing_mc_dropout_ensembles(self, input_im):
        """
        This function produces results using multiple runs of MC dropout, through fusion before or after
        the non-maximum suppression step.
        Args:
            input_im (list): an input im list generated from dataset handler.
        Returns:
            result (instances): object instances
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            # Pre-NMS fusion: averaging across MC runs happens inside
            # retinanet_probabilistic_inference, so the standard pipeline
            # handles everything.
            return self.post_processing_standard_nms(input_im)
        else:
            # Post-NMS fusion: run NMS per MC-dropout pass, then merge the
            # per-run detections as a black-box ensemble.
            outputs_dict = self.model(
                input_im,
                return_anchorwise_output=False,
                num_mc_dropout_runs=self.num_mc_dropout_runs)
            n_fms = len(self.model.in_features)
            # Outputs of all runs come concatenated along the feature-map
            # axis; split them back into one dict per run.
            outputs_list = [{key: value[i * n_fms:(i + 1) * n_fms] if value is not None else value for key,
                             value in outputs_dict.items()} for i in range(self.num_mc_dropout_runs)]
            # Merge results:
            results = [
                inference_utils.general_standard_nms_postprocessing(
                    input_im,
                    self.retinanet_probabilistic_inference(
                        input_im,
                        outputs=outputs),
                    self.model.test_nms_thresh,
                    self.model.max_detections_per_image) for outputs in outputs_list]
            # Append per-ensemble outputs after NMS has been performed.
            ensemble_pred_box_list = [
                result.pred_boxes.tensor for result in results]
            ensemble_pred_prob_vectors_list = [
                result.pred_cls_probs for result in results]
            ensembles_class_idxs_list = [
                result.pred_classes for result in results]
            ensembles_pred_box_covariance_list = [
                result.pred_boxes_covariance for result in results]
            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.model.test_nms_thresh,
                self.model.max_detections_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)
    def post_processing_ensembles(self, input_im, model_dict):
        """
        This function produces results using multiple runs of independently trained models, through fusion before or after
        the non-maximum suppression step.

        Args:
            input_im (list): an input im list generated from dataset handler.
            model_dict (dict): dictionary containing list of models comprising the ensemble.

        Returns:
            result (instances): object instances
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            # Pre-NMS fusion: collect anchor-wise outputs from every ensemble
            # member and fuse them inside the probabilistic inference step.
            outputs_list = []

            for model in model_dict:
                outputs = model(input_im, return_anchorwise_output=True)
                outputs_list.append(outputs)

            outputs = self.retinanet_probabilistic_inference(
                input_im, ensemble_inference=True, outputs_list=outputs_list)

            return inference_utils.general_standard_nms_postprocessing(
                input_im, outputs, self.model.test_nms_thresh, self.model.max_detections_per_image)
        else:
            # Post-NMS fusion: run standard NMS inference per ensemble member.
            # NOTE(review): self.model is rebound to each member here and is
            # left pointing at the last one afterwards — confirm callers do
            # not rely on the original self.model after this call.
            outputs_list = []

            for model in model_dict:
                self.model = model
                outputs_list.append(
                    self.post_processing_standard_nms(input_im))

            # Merge results:
            ensemble_pred_box_list = []
            ensemble_pred_prob_vectors_list = []
            ensembles_class_idxs_list = []
            ensembles_pred_box_covariance_list = []
            for results in outputs_list:
                # Append per-ensemble outputs after NMS has been performed.
                ensemble_pred_box_list.append(results.pred_boxes.tensor)
                ensemble_pred_prob_vectors_list.append(results.pred_cls_probs)
                ensembles_class_idxs_list.append(results.pred_classes)
                ensembles_pred_box_covariance_list.append(
                    results.pred_boxes_covariance)

            # Fuse the per-member detections with black-box ensemble
            # clustering.
            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.model.test_nms_thresh,
                self.model.max_detections_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)
    def post_processing_bayes_od(self, input_im):
        """
        This function produces results using forms of bayesian inference instead of NMS for both category and box results.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        box_merge_mode = self.cfg.PROBABILISTIC_INFERENCE.BAYES_OD.BOX_MERGE_MODE
        cls_merge_mode = self.cfg.PROBABILISTIC_INFERENCE.BAYES_OD.CLS_MERGE_MODE

        outputs = self.retinanet_probabilistic_inference(input_im)

        predicted_boxes, predicted_boxes_covariance, predicted_prob, classes_idxs, predicted_prob_vectors = outputs

        # Standard NMS only selects cluster centers here; detections with IOU
        # above the affinity threshold w.r.t. a center form its cluster.
        keep = batched_nms(
            predicted_boxes,
            predicted_prob,
            classes_idxs,
            self.model.test_nms_thresh)

        keep = keep[: self.model.max_detections_per_image]

        match_quality_matrix = pairwise_iou(
            Boxes(predicted_boxes), Boxes(predicted_boxes))

        box_clusters_inds = match_quality_matrix[keep, :]
        box_clusters_inds = box_clusters_inds > self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD

        # Compute mean and covariance for every cluster.
        predicted_prob_vectors_list = []
        predicted_boxes_list = []
        predicted_boxes_covariance_list = []

        predicted_prob_vectors_centers = predicted_prob_vectors[keep]
        for box_cluster, predicted_prob_vectors_center in zip(
                box_clusters_inds, predicted_prob_vectors_centers):

            cluster_categorical_params = predicted_prob_vectors[box_cluster]
            # Only the argmax categories are used below; the max scores
            # themselves (center_binary_score, cluster_binary_scores) are
            # not consumed.
            center_binary_score, center_cat_idx = torch.max(
                predicted_prob_vectors_center, 0)
            cluster_binary_scores, cat_idx = cluster_categorical_params.max(
                1)
            # Cluster members predicting the same category as the center.
            class_similarity_idx = cat_idx == center_cat_idx

            if cls_merge_mode == 'bayesian_inference':
                # NOTE(review): probability vectors are averaged over ALL
                # cluster members, while the R-CNN variant of this method
                # first filters them by class_similarity_idx — confirm this
                # asymmetry is intended.
                predicted_prob_vectors_list.append(
                    cluster_categorical_params.mean(0).unsqueeze(0))
            else:
                predicted_prob_vectors_list.append(
                    predicted_prob_vectors_center.unsqueeze(0))

            # Switch to numpy as torch.inverse is too slow.
            cluster_means = predicted_boxes[box_cluster,
                                            :][class_similarity_idx].cpu().numpy()

            cluster_covs = predicted_boxes_covariance[box_cluster, :][class_similarity_idx].cpu(
            ).numpy()

            # Fuse same-class cluster boxes into a single gaussian estimate.
            predicted_box, predicted_box_covariance = inference_utils.bounding_box_bayesian_inference(
                cluster_means, cluster_covs, box_merge_mode)
            predicted_boxes_list.append(
                torch.from_numpy(np.squeeze(predicted_box)))
            predicted_boxes_covariance_list.append(
                torch.from_numpy(predicted_box_covariance))

        # Switch back to cuda for the remainder of the inference process.
        result = Instances(
            (input_im[0]['image'].shape[1],
             input_im[0]['image'].shape[2]))

        if len(predicted_boxes_list) > 0:
            if cls_merge_mode == 'bayesian_inference':
                predicted_prob_vectors = torch.cat(
                    predicted_prob_vectors_list, 0)
                predicted_prob, classes_idxs = torch.max(
                    predicted_prob_vectors, 1)
            elif cls_merge_mode == 'max_score':
                predicted_prob_vectors = predicted_prob_vectors[keep]
                predicted_prob = predicted_prob[keep]
                classes_idxs = classes_idxs[keep]

            result.pred_boxes = Boxes(
                torch.stack(
                    predicted_boxes_list,
                    0).to(self.model.device))
            result.scores = predicted_prob
            result.pred_classes = classes_idxs
            result.pred_cls_probs = predicted_prob_vectors
            result.pred_boxes_covariance = torch.stack(
                predicted_boxes_covariance_list, 0).to(self.model.device)
        else:
            # No clusters formed: emit zero-score fields with shapes matching
            # the raw (unfused) detections.
            result.pred_boxes = Boxes(predicted_boxes)
            result.scores = torch.zeros(
                predicted_boxes.shape[0]).to(
                self.model.device)
            result.pred_classes = classes_idxs
            result.pred_cls_probs = predicted_prob_vectors
            result.pred_boxes_covariance = torch.empty(
                (predicted_boxes.shape + (4,))).to(self.model.device)
        return result
| 22,131 | 44.445585 | 138 | py |
probdet | probdet-master/src/probabilistic_inference/probabilistic_rcnn_predictor.py | import numpy as np
import torch
# Detectron Imports
from detectron2.layers import batched_nms
from detectron2.structures import Boxes, Instances, pairwise_iou
# Project Imports
from probabilistic_inference import inference_utils
from probabilistic_inference.inference_core import ProbabilisticPredictor
from probabilistic_modeling.modeling_utils import covariance_output_to_cholesky, clamp_log_variance
class GeneralizedRcnnProbabilisticPredictor(ProbabilisticPredictor):
    """Probabilistic inference predictor for generalized R-CNN models."""

    def __init__(self, cfg):
        super().__init__(cfg)

        # Cache test-time thresholds from the underlying box predictor.
        box_predictor = self.model.roi_heads.box_predictor
        self.test_score_thres = box_predictor.test_score_thresh
        self.test_nms_thresh = box_predictor.test_nms_thresh
        self.test_topk_per_image = box_predictor.test_topk_per_image

        # Transform used to decode sampled box deltas into boxes.
        self.sample_box2box_transform = inference_utils.SampleBox2BoxTransform(
            self.cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)

        # Put the proposal generator in eval mode when MC dropout is enabled.
        if self.mc_dropout_enabled:
            self.model.proposal_generator.eval()
    def generalized_rcnn_probabilistic_inference(self,
                                                 input_im,
                                                 outputs=None,
                                                 ensemble_inference=False,
                                                 outputs_list=None):
        """
        General probabilistic proposal-wise inference for generalized R-CNN.
        Preliminary inference step for many post-processing based inference
        methods such as standard_nms, output_statistics, and bayes_od.

        Args:
            input_im (list): an input im list generated from dataset handler.
            outputs (list): outputs from model.forward(). will be computed internally if not provided.
            ensemble_inference (bool): True if ensembles are used for inference. If set to true, outputs_list must be externally provided.
            outputs_list (list): List of model() outputs, usually generated from ensembles of models.

        Returns:
            all_predicted_boxes,
            all_predicted_boxes_covariance (Tensor): Nx4x4 vectors used
            all_predicted_prob (Tensor): Nx1 scores which represent max of all_pred_prob_vectors. For usage in NMS and mAP computation.
            all_classes_idxs (Tensor): Nx1 Class ids to be used for NMS.
            all_predicted_prob_vectors (Tensor): NxK tensor where K is the number of classes.
        """
        # Epistemic uncertainty sources: multiple MC-dropout runs, or an
        # externally provided ensemble. Either way, per-run outputs are fused
        # into one outputs dict before the shared decoding path below.
        is_epistemic = ((self.mc_dropout_enabled and self.num_mc_dropout_runs > 1)
                        or ensemble_inference) and outputs is None
        if is_epistemic:
            if self.mc_dropout_enabled and self.num_mc_dropout_runs > 1:
                outputs_list = self.model(
                    input_im,
                    return_anchorwise_output=True,
                    num_mc_dropout_runs=self.num_mc_dropout_runs)

            # Gather per-run head outputs.
            proposals_list = [outputs['proposals']
                              for outputs in outputs_list]
            box_delta_list = [outputs['box_delta']
                              for outputs in outputs_list]
            box_cls_list = [outputs['box_cls'] for outputs in outputs_list]
            box_reg_var_list = [outputs['box_reg_var']
                                for outputs in outputs_list]
            box_cls_var_list = [outputs['box_cls_var']
                                for outputs in outputs_list]
            outputs = dict()

            # Concatenate proposals from all runs into the first run's
            # Instances object so that row i of every fused tensor refers to
            # the same proposal.
            proposals_all = proposals_list[0].proposal_boxes.tensor
            for i in torch.arange(1, len(outputs_list)):
                proposals_all = torch.cat(
                    (proposals_all, proposals_list[i].proposal_boxes.tensor), 0)
            proposals_list[0].proposal_boxes.tensor = proposals_all
            outputs['proposals'] = proposals_list[0]

            box_delta = torch.cat(box_delta_list, 0)
            box_cls = torch.cat(box_cls_list, 0)
            outputs['box_delta'] = box_delta
            outputs['box_cls'] = box_cls

            # Variance heads are optional; present only for probabilistic
            # loss configurations.
            if box_reg_var_list[0] is not None:
                box_reg_var = torch.cat(box_reg_var_list, 0)
            else:
                box_reg_var = None
            outputs['box_reg_var'] = box_reg_var

            if box_cls_var_list[0] is not None:
                box_cls_var = torch.cat(box_cls_var_list, 0)
            else:
                box_cls_var = None
            outputs['box_cls_var'] = box_cls_var
        elif outputs is None:
            # Single deterministic forward pass.
            outputs = self.model(input_im,
                                 return_anchorwise_output=True)

        proposals = outputs['proposals']
        box_cls = outputs['box_cls']
        box_delta = outputs['box_delta']

        if self.model.cls_var_loss == 'evidential':
            # Evidential classification: logits parameterize Dirichlet
            # concentration parameters; probabilities are their normalization.
            box_dir_alphas = inference_utils.get_dir_alphas(box_cls)
            # NOTE(review): no-op self-assignment kept as-is.
            box_dir_alphas = box_dir_alphas
            box_cls = box_dir_alphas / box_dir_alphas.sum(1, keepdim=True)
        else:
            if outputs['box_cls_var'] is not None:
                # Classification aleatoric uncertainty: sample logits from a
                # gaussian with the predicted log-variance and average the
                # softmax over the samples.
                box_cls_var = outputs['box_cls_var']
                box_cls_dists = torch.distributions.normal.Normal(
                    box_cls, scale=torch.sqrt(torch.exp(box_cls_var)))
                box_cls = box_cls_dists.rsample(
                    (self.model.cls_var_num_samples,))
                box_cls = torch.nn.functional.softmax(box_cls, dim=-1)
                box_cls = box_cls.mean(0)
            else:
                box_cls = torch.nn.functional.softmax(box_cls, dim=-1)

        # Remove background category
        scores = box_cls[:, :-1]

        num_bbox_reg_classes = box_delta.shape[1] // 4
        box_delta = box_delta.reshape(-1, 4)
        box_delta = box_delta.view(-1, num_bbox_reg_classes, 4)

        # Keep only (proposal, class) entries above the score threshold.
        filter_mask = scores > self.test_score_thres

        filter_inds = filter_mask.nonzero(as_tuple=False)

        if num_bbox_reg_classes == 1:
            # Class-agnostic regression: one delta per proposal.
            box_delta = box_delta[filter_inds[:, 0], 0]
        else:
            box_delta = box_delta[filter_mask]

        scores = scores[filter_mask]

        proposal_boxes = proposals.proposal_boxes.tensor[filter_inds[:, 0]]

        if outputs['box_reg_var'] is not None:
            # Regression uncertainty available: propagate the predicted delta
            # distribution to box-corner space via monte-carlo sampling.
            box_reg_var = outputs['box_reg_var']
            box_reg_var = box_reg_var.reshape(-1, self.model.bbox_cov_dims)
            box_reg_var = box_reg_var.view(-1,
                                           num_bbox_reg_classes,
                                           self.model.bbox_cov_dims)
            if num_bbox_reg_classes == 1:
                box_reg_var = box_reg_var[filter_inds[:, 0], 0]
            else:
                box_reg_var = box_reg_var[filter_mask]

            # Reconstruct cholesky decomposition of box covariance
            # matrix
            diag_vars = clamp_log_variance(box_reg_var)
            cholesky_decomp = covariance_output_to_cholesky(diag_vars)

            # Generate multivariate samples to be used for monte-carlo simulation. We can afford much more samples
            # here since the matrix dimensions are much smaller and therefore
            # have much less memory footprint. Keep 100 or less to maintain
            # reasonable runtime speed.
            multivariate_normal_samples = torch.distributions.MultivariateNormal(
                box_delta, scale_tril=cholesky_decomp)

            # Define monte-carlo samples
            distributions_samples = multivariate_normal_samples.rsample(
                (1000,))
            distributions_samples = torch.transpose(
                torch.transpose(distributions_samples, 0, 1), 1, 2)
            samples_proposals = torch.repeat_interleave(
                proposal_boxes.unsqueeze(2), 1000, dim=2)

            # Transform samples from deltas to boxes
            t_dist_samples = self.sample_box2box_transform.apply_samples_deltas(
                distributions_samples, samples_proposals)

            # Compute samples mean and covariance matrices.
            boxes, boxes_covars = inference_utils.compute_mean_covariance_torch(
                t_dist_samples)
        else:
            # predict boxes deterministically; no covariance available.
            boxes = self.model.roi_heads.box_predictor.box2box_transform.apply_deltas(
                box_delta, proposal_boxes)
            boxes_covars = []

        # filter_inds[:, 1] are the class indices of the surviving entries;
        # box_cls rows are gathered per surviving proposal.
        return boxes, boxes_covars, scores, filter_inds[:,
                                                        1], box_cls[filter_inds[:, 0]]
def post_processing_standard_nms(self, input_im):
"""
This function produces results using standard non-maximum suppression. The function takes into
account any probabilistic modeling method when computing the results.
Args:
input_im (list): an input im list generated from dataset handler.
Returns:
result (instances): object instances
"""
outputs = self.generalized_rcnn_probabilistic_inference(input_im)
return inference_utils.general_standard_nms_postprocessing(
input_im, outputs, self.test_nms_thresh, self.test_topk_per_image)
def post_processing_output_statistics(self, input_im):
"""
This function produces results using anchor statistics.
Args:
input_im (list): an input im list generated from dataset handler.
Returns:
result (instances): object instances
"""
outputs = self.generalized_rcnn_probabilistic_inference(input_im)
return inference_utils.general_output_statistics_postprocessing(
input_im,
outputs,
self.test_nms_thresh,
self.test_topk_per_image,
self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD)
    def post_processing_mc_dropout_ensembles(self, input_im):
        """
        This function produces results using monte-carlo dropout ensembles.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            # In generalized rcnn models, association cannot be achieved on an anchor level when using
            # dropout as anchor order might shift. To overcome this problem, the anchor statistics function
            # is used to perform the association and to fuse covariance
            # results.
            return self.post_processing_output_statistics(input_im)
        else:
            # Post-NMS fusion: the model returns one outputs dict per MC run.
            outputs_list = self.model(
                input_im,
                return_anchorwise_output=False,
                num_mc_dropout_runs=self.num_mc_dropout_runs)

            # Merge results: standard NMS post-processing per MC run.
            results = [
                inference_utils.general_standard_nms_postprocessing(
                    input_im,
                    self.generalized_rcnn_probabilistic_inference(
                        input_im,
                        outputs=outputs),
                    self.test_nms_thresh,
                    self.test_topk_per_image) for outputs in outputs_list]

            # Append per-ensemble outputs after NMS has been performed.
            ensemble_pred_box_list = [
                result.pred_boxes.tensor for result in results]
            ensemble_pred_prob_vectors_list = [
                result.pred_cls_probs for result in results]
            ensembles_class_idxs_list = [
                result.pred_classes for result in results]
            ensembles_pred_box_covariance_list = [
                result.pred_boxes_covariance for result in results]

            # Fuse per-run detections; background class handling is enabled
            # via is_generalized_rcnn.
            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)
    def post_processing_ensembles(self, input_im, model_dict):
        """
        This function produces results using multiple independently trained models,
        through fusion before or after the non-maximum suppression step.

        Args:
            input_im (list): an input im list generated from dataset handler.
            model_dict (dict): dictionary containing list of models comprising the ensemble.

        Returns:
            result (instances): object instances
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            # Pre-NMS fusion: collect anchor-wise outputs from every ensemble
            # member and fuse them inside the probabilistic inference step.
            outputs_list = []
            for model in model_dict:
                outputs = model(input_im, return_anchorwise_output=True)
                outputs_list.append(outputs)

            outputs = self.generalized_rcnn_probabilistic_inference(
                input_im, ensemble_inference=True, outputs_list=outputs_list)

            return inference_utils.general_output_statistics_postprocessing(
                input_im,
                outputs,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD)
        else:
            # Post-NMS fusion: run standard NMS inference per ensemble member.
            # NOTE(review): self.model is rebound to each member here and is
            # left pointing at the last one afterwards — confirm callers do
            # not rely on the original self.model after this call.
            outputs_list = []
            for model in model_dict:
                self.model = model
                outputs_list.append(
                    self.post_processing_standard_nms(input_im))

            # Merge results:
            ensemble_pred_box_list = []
            ensemble_pred_prob_vectors_list = []
            ensembles_class_idxs_list = []
            ensembles_pred_box_covariance_list = []
            for results in outputs_list:
                # Append per-ensemble outputs after NMS has been performed.
                ensemble_pred_box_list.append(results.pred_boxes.tensor)
                ensemble_pred_prob_vectors_list.append(results.pred_cls_probs)
                ensembles_class_idxs_list.append(results.pred_classes)
                ensembles_pred_box_covariance_list.append(
                    results.pred_boxes_covariance)

            # Fuse per-member detections; background class handling is
            # enabled via is_generalized_rcnn.
            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)
    def post_processing_bayes_od(self, input_im):
        """
        This function produces results using forms of bayesian inference instead of NMS for both category
        and box results.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        box_merge_mode = self.cfg.PROBABILISTIC_INFERENCE.BAYES_OD.BOX_MERGE_MODE
        cls_merge_mode = self.cfg.PROBABILISTIC_INFERENCE.BAYES_OD.CLS_MERGE_MODE

        outputs = self.generalized_rcnn_probabilistic_inference(input_im)

        predicted_boxes, predicted_boxes_covariance, predicted_prob, classes_idxs, predicted_prob_vectors = outputs

        # Standard NMS only selects cluster centers here; detections with IOU
        # above the affinity threshold w.r.t. a center form its cluster.
        keep = batched_nms(
            predicted_boxes,
            predicted_prob,
            classes_idxs,
            self.test_nms_thresh)

        keep = keep[: self.test_topk_per_image]

        match_quality_matrix = pairwise_iou(
            Boxes(predicted_boxes), Boxes(predicted_boxes))

        box_clusters_inds = match_quality_matrix[keep, :]
        box_clusters_inds = box_clusters_inds > self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD

        # Compute mean and covariance for every cluster.
        predicted_boxes_list = []
        predicted_boxes_covariance_list = []
        predicted_prob_vectors_list = []

        predicted_prob_vectors_centers = predicted_prob_vectors[keep]
        for box_cluster, predicted_prob_vectors_center in zip(
                box_clusters_inds, predicted_prob_vectors_centers):

            # Ignore background categories provided by detectron2 inference
            cluster_categorical_params = predicted_prob_vectors[box_cluster]
            _, center_cat_idx = torch.max(predicted_prob_vectors_center, 0)
            _, cat_idx = cluster_categorical_params.max(1)
            # Cluster members predicting the same category as the center.
            class_similarity_idx = cat_idx == center_cat_idx

            if cls_merge_mode == 'bayesian_inference':
                # Average probability vectors over same-class cluster members.
                cluster_categorical_params = cluster_categorical_params[class_similarity_idx]
                predicted_prob_vectors_list.append(
                    cluster_categorical_params.mean(0).unsqueeze(0))
            else:
                predicted_prob_vectors_list.append(
                    predicted_prob_vectors_center.unsqueeze(0))

            # Switch to numpy as torch.inverse is too slow.
            cluster_means = predicted_boxes[box_cluster,
                                            :][class_similarity_idx].cpu().numpy()

            cluster_covs = predicted_boxes_covariance[box_cluster, :][class_similarity_idx].cpu(
            ).numpy()

            # Fuse same-class cluster boxes into a single gaussian estimate.
            predicted_box, predicted_box_covariance = inference_utils.bounding_box_bayesian_inference(
                cluster_means, cluster_covs, box_merge_mode)
            predicted_boxes_list.append(
                torch.from_numpy(np.squeeze(predicted_box)))
            predicted_boxes_covariance_list.append(
                torch.from_numpy(predicted_box_covariance))

        # Switch back to cuda for the remainder of the inference process.
        result = Instances(
            (input_im[0]['image'].shape[1],
             input_im[0]['image'].shape[2]))

        if len(predicted_boxes_list) > 0:
            if cls_merge_mode == 'bayesian_inference':
                predicted_prob_vectors = torch.cat(
                    predicted_prob_vectors_list, 0)
                # Drop the trailing background column before taking the max.
                predicted_prob, classes_idxs = torch.max(
                    predicted_prob_vectors[:, :-1], 1)
            elif cls_merge_mode == 'max_score':
                predicted_prob_vectors = predicted_prob_vectors[keep]
                predicted_prob = predicted_prob[keep]
                classes_idxs = classes_idxs[keep]

            result.pred_boxes = Boxes(
                torch.stack(
                    predicted_boxes_list,
                    0).to(self.model.device))
            result.scores = predicted_prob
            result.pred_classes = classes_idxs
            result.pred_cls_probs = predicted_prob_vectors
            result.pred_boxes_covariance = torch.stack(
                predicted_boxes_covariance_list, 0).to(self.model.device)
        else:
            # No clusters formed: emit zero-score fields with shapes matching
            # the raw (unfused) detections.
            result.pred_boxes = Boxes(predicted_boxes)
            result.scores = torch.zeros(
                predicted_boxes.shape[0]).to(
                self.model.device)
            result.pred_classes = classes_idxs
            result.pred_cls_probs = predicted_prob_vectors
            result.pred_boxes_covariance = torch.empty(
                (predicted_boxes.shape + (4,))).to(self.model.device)
        return result
| 19,086 | 43.491841 | 138 | py |
probdet | probdet-master/src/probabilistic_inference/inference_utils.py | import numpy as np
import os
import torch
from PIL import Image
# Detectron imports
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.layers import batched_nms
from detectron2.structures import BoxMode, Boxes, Instances, pairwise_iou
# Project imports
from probabilistic_inference.image_corruptions import corruption_dict, corruption_tuple
from probabilistic_inference.probabilistic_retinanet_predictor import RetinaNetProbabilisticPredictor
from probabilistic_inference.probabilistic_rcnn_predictor import GeneralizedRcnnProbabilisticPredictor
from probabilistic_inference.probabilistic_detr_predictor import DetrProbabilisticPredictor
# Run inference tensors on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def build_predictor(cfg):
    """
    Builds probabilistic predictor according to architecture in config file.

    Args:
        cfg (CfgNode): detectron2 configuration node.

    Returns:
        Instance of the correct predictor.
    """
    # Dispatch table mapping meta-architecture names to predictor classes.
    predictor_map = {
        'ProbabilisticRetinaNet': RetinaNetProbabilisticPredictor,
        'ProbabilisticGeneralizedRCNN': GeneralizedRcnnProbabilisticPredictor,
        'ProbabilisticDetr': DetrProbabilisticPredictor}

    meta_architecture = cfg.MODEL.META_ARCHITECTURE
    if meta_architecture not in predictor_map:
        raise ValueError(
            'Invalid meta-architecture {}.'.format(meta_architecture))
    return predictor_map[meta_architecture](cfg)
def general_standard_nms_postprocessing(input_im,
                                        outputs,
                                        nms_threshold=0.5,
                                        max_detections_per_image=100):
    """
    Standard class-aware non-maximum suppression post-processing.

    Args:
        input_im (list): an input im list generated from dataset handler.
        outputs (list): output list form model specific inference function
        nms_threshold (float): non-maximum suppression threshold
        max_detections_per_image (int): maximum allowed number of detections per image.

    Returns:
        result (Instances): final results after nms
    """
    boxes, boxes_covariance, scores, class_idxs, prob_vectors = outputs

    # Class-aware NMS, then truncate to the per-image detection budget.
    keep_idxs = batched_nms(
        boxes,
        scores,
        class_idxs,
        nms_threshold)[: max_detections_per_image]

    # Keep highest scoring results.
    image_height, image_width = input_im[0]['image'].shape[1:3]
    result = Instances((image_height, image_width))
    result.pred_boxes = Boxes(boxes[keep_idxs])
    result.scores = scores[keep_idxs]
    result.pred_classes = class_idxs[keep_idxs]
    result.pred_cls_probs = prob_vectors[keep_idxs]

    if isinstance(boxes_covariance, torch.Tensor):
        result.pred_boxes_covariance = boxes_covariance[keep_idxs]
    else:
        # Classical inference provides no covariance matrix; fill with zeros
        # so downstream code can always rely on the field being present.
        result.pred_boxes_covariance = torch.zeros(
            boxes[keep_idxs].shape + (4,)).to(device)
    return result
def general_output_statistics_postprocessing(input_im,
                                             outputs,
                                             nms_threshold=0.5,
                                             max_detections_per_image=100,
                                             affinity_threshold=0.7):
    """
    Fuses raw detections into per-cluster mean/covariance statistics, using
    NMS winners as cluster centers.

    Args:
        input_im (list): an input im list generated from dataset handler.
        outputs (list): output list form model specific inference function
        nms_threshold (float): non-maximum suppression threshold between 0-1
        max_detections_per_image (int): maximum allowed number of detections per image.
        affinity_threshold (float): cluster affinity threshold between 0-1

    Returns:
        result (Instances): final results after nms
    """
    predicted_boxes, predicted_boxes_covariance, predicted_prob, classes_idxs, predicted_prob_vectors = outputs

    # Get pairwise iou matrix
    match_quality_matrix = pairwise_iou(
        Boxes(predicted_boxes), Boxes(predicted_boxes))

    # Get cluster centers using standard nms. Much faster than sequential
    # clustering.
    keep = batched_nms(
        predicted_boxes,
        predicted_prob,
        classes_idxs,
        nms_threshold)

    keep = keep[: max_detections_per_image]

    # A detection joins a center's cluster if its IOU with the center exceeds
    # the affinity threshold.
    clusters_inds = match_quality_matrix[keep, :]
    clusters_inds = clusters_inds > affinity_threshold

    # Compute mean and covariance for every cluster.
    predicted_prob_vectors_list = []
    predicted_boxes_list = []
    predicted_boxes_covariance_list = []
    for cluster_idxs, center_idx in zip(
            clusters_inds, keep):

        if cluster_idxs.sum(0) >= 2:
            # Make sure to only select cluster members of same class as center
            cluster_center_classes_idx = classes_idxs[center_idx]
            cluster_classes_idxs = classes_idxs[cluster_idxs]
            class_similarity_idxs = cluster_classes_idxs == cluster_center_classes_idx

            # Grab cluster
            box_cluster = predicted_boxes[cluster_idxs,
                                          :][class_similarity_idxs, :]

            # Sample (epistemic) covariance of the cluster member boxes.
            cluster_mean = box_cluster.mean(0)

            residuals = (box_cluster - cluster_mean).unsqueeze(2)
            cluster_covariance = torch.sum(torch.matmul(residuals, torch.transpose(
                residuals, 2, 1)), 0) / max((box_cluster.shape[0] - 1), 1.0)

            # Assume final result as mean and covariance of gaussian mixture of cluster members if
            # covariance is provided by neural network.
            if predicted_boxes_covariance is not None:
                if len(predicted_boxes_covariance) > 0:
                    cluster_covariance = cluster_covariance + \
                        predicted_boxes_covariance[cluster_idxs, :][class_similarity_idxs, :].mean(0)

            # Compute average over cluster probabilities
            cluster_probs_vector = predicted_prob_vectors[cluster_idxs, :][class_similarity_idxs, :].mean(
                0)
        else:
            # Singleton cluster: keep the center's own outputs; use a small
            # identity covariance when none is predicted by the network.
            cluster_mean = predicted_boxes[center_idx]
            cluster_probs_vector = predicted_prob_vectors[center_idx]
            cluster_covariance = 1e-4 * torch.eye(4, 4).to(device)
            if predicted_boxes_covariance is not None:
                if len(predicted_boxes_covariance) > 0:
                    cluster_covariance = predicted_boxes_covariance[center_idx]

        predicted_boxes_list.append(cluster_mean)
        predicted_boxes_covariance_list.append(cluster_covariance)
        predicted_prob_vectors_list.append(cluster_probs_vector)

    result = Instances(
        (input_im[0]['image'].shape[1],
         input_im[0]['image'].shape[2]))

    if len(predicted_boxes_list) > 0:
        # We do not average the probability vectors for this post processing method. Averaging results in
        # very low mAP due to mixing with low scoring detection instances.
        result.pred_boxes = Boxes(torch.stack(predicted_boxes_list, 0))
        predicted_prob_vectors = torch.stack(predicted_prob_vectors_list, 0)
        predicted_prob, classes_idxs = torch.max(
            predicted_prob_vectors, 1)
        result.scores = predicted_prob
        result.pred_classes = classes_idxs
        result.pred_cls_probs = predicted_prob_vectors
        result.pred_boxes_covariance = torch.stack(
            predicted_boxes_covariance_list, 0)
    else:
        # No clusters formed: emit zero-score fields with shapes matching the
        # raw (unfused) detections.
        result.pred_boxes = Boxes(predicted_boxes)
        result.scores = torch.zeros(predicted_boxes.shape[0]).to(device)
        result.pred_classes = classes_idxs
        result.pred_cls_probs = predicted_prob_vectors
        result.pred_boxes_covariance = torch.empty(
            (predicted_boxes.shape + (4,))).to(device)
    return result
def general_black_box_ensembles_post_processing(
        input_im,
        ensemble_pred_box_list,
        ensembles_class_idxs_list,
        ensemble_pred_prob_vectors_list,
        ensembles_pred_box_covariance_list,
        nms_threshold=0.5,
        max_detections_per_image=100,
        affinity_threshold=0.7,
        is_generalized_rcnn=False,
        merging_method='mixture_of_gaussians'):
    """
    Fuses per-ensemble-member detections (post-NMS) by sequentially clustering
    same-class, high-IOU boxes and merging each cluster.

    Args:
        input_im (list): an input im list generated from dataset handler.
        ensemble_pred_box_list (list): predicted box list
        ensembles_class_idxs_list (list): predicted classes list
        ensemble_pred_prob_vectors_list (list): predicted probability vector list
        ensembles_pred_box_covariance_list (list): predicted covariance matrices
        nms_threshold (float): non-maximum suppression threshold between 0-1
        max_detections_per_image (int): Number of maximum allowable detections per image.
        affinity_threshold (float): cluster affinity threshold between 0-1
        is_generalized_rcnn (bool): used to handle category selection by removing background class.
        merging_method (str): default is gaussian mixture model. use 'bayesian_inference' to perform gaussian inference
            similar to bayesod.

    Returns:
        result (Instances): final results after nms
    """
    # Flatten all ensemble members' detections into single tensors.
    predicted_boxes = torch.cat(ensemble_pred_box_list, 0)
    predicted_boxes_covariance = torch.cat(
        ensembles_pred_box_covariance_list, 0)
    predicted_prob_vectors = torch.cat(
        ensemble_pred_prob_vectors_list, 0)
    predicted_class_idxs = torch.cat(ensembles_class_idxs_list, 0)

    # Compute iou between all output boxes and each other output box.
    match_quality_matrix = pairwise_iou(
        Boxes(predicted_boxes), Boxes(predicted_boxes))

    # Perform basic sequential clustering.
    clusters = []
    for i in range(match_quality_matrix.shape[0]):
        # Check if current box is already a member of any previous cluster.
        if i != 0:
            all_clusters = torch.cat(clusters, 0)
            if (all_clusters == i).any():
                continue
        # Only add if boxes have the same category.
        cluster_membership_test = (match_quality_matrix[i,
                                                        :] >= affinity_threshold) & (
            predicted_class_idxs == predicted_class_idxs[i])
        inds = torch.where(cluster_membership_test)
        clusters.extend(inds)

    # Compute mean and covariance for every cluster.
    predicted_boxes_list = []
    predicted_boxes_covariance_list = []
    predicted_prob_vectors_list = []

    # Compute cluster mean and covariance matrices.
    for cluster in clusters:
        box_cluster = predicted_boxes[cluster]
        box_cluster_covariance = predicted_boxes_covariance[cluster]
        if box_cluster.shape[0] >= 2:
            if merging_method == 'mixture_of_gaussians':
                cluster_mean = box_cluster.mean(0)

                # Compute epistemic covariance
                residuals = (box_cluster - cluster_mean).unsqueeze(2)
                predicted_covariance = torch.sum(torch.matmul(residuals, torch.transpose(
                    residuals, 2, 1)), 0) / (box_cluster.shape[0] - 1)

                # Add epistemic covariance
                predicted_covariance = predicted_covariance + \
                    box_cluster_covariance.mean(0)

                predicted_boxes_list.append(cluster_mean)
                predicted_boxes_covariance_list.append(predicted_covariance)
                predicted_prob_vectors_list.append(
                    predicted_prob_vectors[cluster].mean(0))
            else:
                # Gaussian bayesian fusion performed in numpy (torch.inverse
                # is slow); results are moved back to the torch device after.
                cluster_mean, predicted_covariance = bounding_box_bayesian_inference(box_cluster.cpu(
                ).numpy(), box_cluster_covariance.cpu().numpy(), box_merge_mode='bayesian_inference')
                cluster_mean = torch.as_tensor(cluster_mean).to(device)
                predicted_covariance = torch.as_tensor(
                    predicted_covariance).to(device)

                predicted_boxes_list.append(cluster_mean)
                predicted_boxes_covariance_list.append(predicted_covariance)
                predicted_prob_vectors_list.append(
                    predicted_prob_vectors[cluster].mean(0))
        else:
            # Singleton cluster: keep the single member's outputs as-is.
            predicted_boxes_list.append(predicted_boxes[cluster].mean(0))
            predicted_boxes_covariance_list.append(
                predicted_boxes_covariance[cluster].mean(0))
            predicted_prob_vectors_list.append(
                predicted_prob_vectors[cluster].mean(0))

    result = Instances(
        (input_im[0]['image'].shape[1],
         input_im[0]['image'].shape[2]))

    if len(predicted_boxes_list) > 0:
        predicted_prob_vectors = torch.stack(predicted_prob_vectors_list, 0)
        # Remove background class if generalized rcnn
        if is_generalized_rcnn:
            predicted_prob_vectors_no_bkg = predicted_prob_vectors[:, :-1]
        else:
            predicted_prob_vectors_no_bkg = predicted_prob_vectors
        predicted_prob, classes_idxs = torch.max(
            predicted_prob_vectors_no_bkg, 1)
        predicted_boxes = torch.stack(predicted_boxes_list, 0)

        # We want to keep the maximum allowed boxes per image to be consistent
        # with the rest of the methods. However, just sorting by score or uncertainty will lead to a lot of
        # redundant detections so we have to use one more NMS step.
        keep = batched_nms(
            predicted_boxes,
            predicted_prob,
            classes_idxs,
            nms_threshold)
        keep = keep[:max_detections_per_image]

        result.pred_boxes = Boxes(predicted_boxes[keep])
        result.scores = predicted_prob[keep]
        result.pred_classes = classes_idxs[keep]
        result.pred_cls_probs = predicted_prob_vectors[keep]
        result.pred_boxes_covariance = torch.stack(
            predicted_boxes_covariance_list, 0)[keep]
    else:
        # No clusters formed: emit zero-score fields with shapes matching the
        # raw (unfused) detections.
        result.pred_boxes = Boxes(predicted_boxes)
        result.scores = torch.zeros(predicted_boxes.shape[0]).to(device)
        result.pred_classes = predicted_class_idxs
        result.pred_cls_probs = predicted_prob_vectors
        result.pred_boxes_covariance = torch.empty(
            (predicted_boxes.shape + (4,))).to(device)
    return result
def bounding_box_bayesian_inference(cluster_means,
                                    cluster_covs,
                                    box_merge_mode):
    """
    Fuse a cluster of box estimates into a single mean and covariance.

    Args:
        cluster_means (nd array): per-member box means, shape (K, 4).
        cluster_covs (nd array): per-member box covariance matrices, shape (K, 4, 4).
        box_merge_mode (str): 'bayesian_inference' for precision-weighted
            fusion, or 'covariance_intersection' for fast CI fusion.

    Returns:
        final_mean (nd array): fused box mean.
        final_cov (nd array): fused box covariance matrix.
    """
    precisions = np.linalg.inv(cluster_covs)
    total_precision = precisions.sum(0)

    if box_merge_mode == 'bayesian_inference':
        # Standard Bayesian fusion: precisions add, means are
        # precision-weighted and renormalized by the fused covariance.
        final_cov = np.linalg.inv(total_precision)
        weighted_mean_sum = np.matmul(
            precisions, np.expand_dims(cluster_means, 2)).sum(0)
        final_mean = np.squeeze(final_cov @ weighted_mean_sum)
    elif box_merge_mode == 'covariance_intersection':
        # Fast covariance-intersection weights computed from determinants of
        # each member's precision, the total precision, and the leave-one-out
        # precisions.
        leave_one_out_precisions = total_precision - precisions
        det_each = np.linalg.det(precisions)
        det_total = np.linalg.det(total_precision)
        det_leave_one_out = np.linalg.det(leave_one_out_precisions)

        omegas = (det_total - det_leave_one_out + det_each) / (
            precisions.shape[0] * det_total +
            (det_each - det_leave_one_out).sum(0))

        weighted_precisions = np.expand_dims(omegas, (1, 2)) * precisions
        final_cov = np.linalg.inv(weighted_precisions.sum(0))
        final_mean = np.squeeze(
            final_cov @ np.matmul(
                weighted_precisions,
                np.expand_dims(cluster_means, 2)).sum(0))
    return final_mean, final_cov
def compute_mean_covariance_torch(input_samples):
    """
    Efficiently compute the sample mean and sample covariance in pytorch.

    Args:
        input_samples (list | Tensor): either a list of M stochastic
            monte-carlo sampling runs, each an N x k tensor, or a single
            N x k x M tensor with the sample dimension last.

    Returns:
        predicted_mean (Tensor): N x k tensor containing the sample mean.
        predicted_covariance (Tensor): N x k x k tensor containing the
            (unbiased, divide-by-M-1) sample covariance matrices.
    """
    if isinstance(input_samples, torch.Tensor):
        samples = input_samples
        num_samples = samples.shape[2]
    else:
        num_samples = len(input_samples)
        samples = torch.stack(input_samples, 2)

    # Sample mean, keeping the sample dim so it broadcasts against samples.
    sample_mean = torch.mean(samples, 2, keepdim=True)

    # Rearrange deviations to (N, M, k, 1) so a batched matmul produces one
    # outer product per sample, then sum over samples.
    deviations = (samples - sample_mean).unsqueeze(1).transpose(1, 3)
    outer_products = torch.matmul(deviations, deviations.transpose(3, 2))
    sample_covariance = outer_products.sum(1) / (num_samples - 1)

    return sample_mean.squeeze(2), sample_covariance
def probabilistic_detector_postprocess(
        results,
        output_height,
        output_width):
    """
    Resize the output instances and scale the estimated covariance matrices.

    The input images are often resized when entering an object detector.
    As a result, we often need the outputs of the detector in a different
    resolution from its inputs.

    Args:
        results (Instances): the raw outputs from the probabilistic detector.
            `results.image_size` contains the input image resolution the
            detector sees. This object might be modified in-place.
        output_height (int): the desired output height.
        output_width (int): the desired output width.

    Returns:
        results (Instances): instances updated with rescaled boxes and
            covariance matrices, with empty boxes filtered out.
    """
    # Per-axis scale factors from the detector's input resolution to the
    # requested output resolution.
    scale_x, scale_y = (output_width /
                        results.image_size[1], output_height /
                        results.image_size[0])
    # Re-wrap the fields in a new Instances carrying the output resolution.
    results = Instances((output_height, output_width), **results.get_fields())

    output_boxes = results.pred_boxes
    # Scale bounding boxes (in-place on the Boxes tensor).
    output_boxes.scale(scale_x, scale_y)
    output_boxes.clip(results.image_size)

    # Drop instances whose boxes collapsed to zero area after clipping.
    results = results[output_boxes.nonempty()]

    # Scale covariance matrices
    if results.has("pred_boxes_covariance"):
        # Add small value to make sure covariance matrix is well conditioned
        output_boxes_covariance = results.pred_boxes_covariance + 1e-4 * \
            torch.eye(results.pred_boxes_covariance.shape[2]).to(device)

        # Covariance of a linearly scaled box is S * Sigma * S^T with S the
        # diagonal (scale_x, scale_y, scale_x, scale_y) matrix.
        # NOTE(review): relies on the module-level `device` global.
        scale_mat = torch.diag_embed(
            torch.as_tensor(
                (scale_x,
                 scale_y,
                 scale_x,
                 scale_y))).to(device).unsqueeze(0)
        scale_mat = torch.repeat_interleave(
            scale_mat, output_boxes_covariance.shape[0], 0)
        output_boxes_covariance = torch.matmul(
            torch.matmul(
                scale_mat,
                output_boxes_covariance),
            torch.transpose(scale_mat, 2, 1))
        results.pred_boxes_covariance = output_boxes_covariance
    return results
def covar_xyxy_to_xywh(output_boxes_covariance):
    """
    Convert box covariance matrices from the top-left/bottom-right corner
    (x1, y1, x2, y2) parameterization to the top-left corner plus
    width-height (x1, y1, w, h) parameterization.

    Each covariance is transformed as J * Sigma * J^T, where J is the
    Jacobian of the xyxy -> xywh linear map.

    Args:
        output_boxes_covariance (Tensor): N x 4 x 4 covariance matrices in
            corner parameterization.

    Returns:
        Tensor: N x 4 x 4 transformed covariance matrices.
    """
    # Jacobian of (x1, y1, x2, y2) -> (x1, y1, x2 - x1, y2 - y1).
    jacobian = torch.as_tensor([[1.0, 0, 0, 0],
                                [0, 1.0, 0, 0],
                                [-1.0, 0, 1.0, 0],
                                [0, -1.0, 0, 1.0]]).to(device).unsqueeze(0)
    jacobian = torch.repeat_interleave(
        jacobian, output_boxes_covariance.shape[0], 0)

    transformed = torch.matmul(jacobian, output_boxes_covariance)
    transformed = torch.matmul(transformed, torch.transpose(jacobian, 2, 1))
    return transformed
def instances_to_json(instances, img_id, cat_mapping_dict=None):
    """
    Dump an "Instances" object to a COCO-format json that's used for evaluation.

    Args:
        instances (Instances): detectron2 instances
        img_id (int): the image id
        cat_mapping_dict (dict, optional): dictionary to map between raw
            category id from net and dataset id. Very important if performing
            inference on a different dataset than that used for training.
            If None, raw category ids are kept unchanged.

    Returns:
        list[dict]: list of json annotations in COCO format.
    """
    num_instance = len(instances)
    if num_instance == 0:
        return []

    boxes = instances.pred_boxes.tensor.cpu().numpy()
    boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
    boxes = boxes.tolist()
    scores = instances.scores.cpu().tolist()
    classes = instances.pred_classes.cpu().tolist()

    # Map raw network category ids to dataset ids; unmapped classes become -1
    # and are skipped below. Bug fix: the previous code dereferenced
    # cat_mapping_dict unconditionally, crashing on the documented None default.
    if cat_mapping_dict is not None:
        classes = [cat_mapping_dict.get(class_i, -1) for class_i in classes]

    pred_cls_probs = instances.pred_cls_probs.cpu().tolist()

    if instances.has("pred_boxes_covariance"):
        pred_boxes_covariance = covar_xyxy_to_xywh(
            instances.pred_boxes_covariance).cpu().tolist()
    else:
        # Bug fix: previously a single empty list was indexed per instance
        # below, raising IndexError whenever covariances were absent. Emit an
        # empty covariance entry per instance instead.
        pred_boxes_covariance = [[] for _ in range(num_instance)]

    results = []
    for k in range(num_instance):
        if classes[k] != -1:
            result = {
                "image_id": img_id,
                "category_id": classes[k],
                "bbox": boxes[k],
                "score": scores[k],
                "cls_prob": pred_cls_probs[k],
                "bbox_covar": pred_boxes_covariance[k]
            }
            results.append(result)
    return results
class SampleBox2BoxTransform(Box2BoxTransform):
    """
    Extension of Box2BoxTransform that applies deltas to stacks of box
    samples, i.e. tensors carrying an extra trailing sample dimension.
    """

    def apply_samples_deltas(self, deltas, boxes):
        """
        Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.

        Args:
            deltas (Tensor): transformation deltas of shape (N, k*4, S), where k >= 1.
                deltas[i] represents k potentially different class-specific
                box transformations for the single box boxes[i].
            boxes (Tensor): boxes to transform, of shape (N, 4, S)

        Returns:
            Tensor: transformed boxes in (x1, y1, x2, y2) form, same shape
                as `deltas`.
        """
        boxes = boxes.to(deltas.dtype)

        box_widths = boxes[:, 2, :] - boxes[:, 0, :]
        box_heights = boxes[:, 3, :] - boxes[:, 1, :]
        center_x = boxes[:, 0, :] + 0.5 * box_widths
        center_y = boxes[:, 1, :] + 0.5 * box_heights

        wx, wy, ww, wh = self.weights
        dx = deltas[:, 0::4, :] / wx
        dy = deltas[:, 1::4, :] / wy
        # Clamp scale deltas before exponentiation so torch.exp() cannot
        # overflow.
        dw = torch.clamp(deltas[:, 2::4, :] / ww, max=self.scale_clamp)
        dh = torch.clamp(deltas[:, 3::4, :] / wh, max=self.scale_clamp)

        new_center_x = dx * box_widths[:, None] + center_x[:, None]
        new_center_y = dy * box_heights[:, None] + center_y[:, None]
        new_w = torch.exp(dw) * box_widths[:, None]
        new_h = torch.exp(dh) * box_heights[:, None]

        pred_boxes = torch.zeros_like(deltas)
        pred_boxes[:, 0::4, :] = new_center_x - 0.5 * new_w  # x1
        pred_boxes[:, 1::4, :] = new_center_y - 0.5 * new_h  # y1
        pred_boxes[:, 2::4, :] = new_center_x + 0.5 * new_w  # x2
        pred_boxes[:, 3::4, :] = new_center_y + 0.5 * new_h  # y2
        return pred_boxes
def corrupt(x, severity=1, corruption_name=None, corruption_number=None):
    """
    Apply an image corruption to `x`.

    :param x: image to corrupt; a 224x224x3 numpy array in [0, 255]
    :param severity: strength with which to corrupt x; an integer in [0, 5]
    :param corruption_name: specifies which corruption function to call;
    must be one of 'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
                    'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
                    'brightness', 'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression',
                    'speckle_noise', 'gaussian_blur', 'spatter', 'saturate';
                    the last four are validation functions
    :param corruption_number: the position of the corruption_name in the above list;
    an integer in [0, 18]; useful for easy looping; 15, 16, 17, 18 are validation corruption numbers
    :return: the image x corrupted by a corruption function at the given severity; same shape as input
    """
    # Resolve the corruption function first; name takes precedence over number.
    if corruption_name is not None:
        corruption_fn = corruption_dict[corruption_name]
    elif corruption_number is not None:
        corruption_fn = corruption_tuple[corruption_number]
    else:
        raise ValueError(
            "Either corruption_name or corruption_number must be passed")

    x_corrupted = corruption_fn(Image.fromarray(x), severity)

    # Corruption functions must not change the image dimensions.
    if x_corrupted.shape != x.shape:
        raise AssertionError("Output image not same size as input image!")

    return np.uint8(x_corrupted)
def get_dir_alphas(pred_class_logits):
    """
    Compute Dirichlet concentration parameters (alphas) from class logits.

    Bug fix: the previous implementation used the in-place ``torch.relu_``,
    which silently mutated the caller's logits tensor. The out-of-place
    ``torch.relu`` leaves the input untouched.

    Args:
        pred_class_logits (Tensor): raw classification logits.

    Returns:
        Tensor: Dirichlet alphas, elementwise ``relu(logits) + 1.0`` (>= 1).
    """
    return torch.relu(pred_class_logits) + 1.0
def get_inference_output_dir(output_dir_name,
                             test_dataset_name,
                             inference_config_name,
                             image_corruption_level):
    """
    Build the output directory path for inference results.

    Args:
        output_dir_name (str): root output directory.
        test_dataset_name (str): name of the test dataset.
        inference_config_name (str): path to the inference config file; only
            its base name without extension is used.
        image_corruption_level (int): image corruption severity level.

    Returns:
        str: path of the form
            <output_dir>/inference/<dataset>/<config_stem>/corruption_level_<level>
    """
    # os.path.splitext handles any extension length; the previous hard-coded
    # [:-5] slice silently assumed a 5-character ".yaml" suffix.
    config_stem = os.path.splitext(
        os.path.split(inference_config_name)[-1])[0]
    return os.path.join(
        output_dir_name,
        'inference',
        test_dataset_name,
        config_stem,
        "corruption_level_" + str(image_corruption_level))
| 25,812 | 38.896445 | 120 | py |
probdet | probdet-master/src/probabilistic_inference/probabilistic_detr_predictor.py | import numpy as np
import torch
import torch.nn.functional as F
# DETR imports
from detr.util.box_ops import box_cxcywh_to_xyxy
# Detectron Imports
from detectron2.structures import Boxes
# Project Imports
from probabilistic_inference import inference_utils
from probabilistic_inference.inference_core import ProbabilisticPredictor
from probabilistic_modeling.modeling_utils import covariance_output_to_cholesky, clamp_log_variance
class DetrProbabilisticPredictor(ProbabilisticPredictor):
    """
    Probabilistic inference wrapper for the DETR architecture.

    DETR performs no NMS internally; the NMS-related attributes below exist
    only so ensemble post-processing utilities shared with the detectron2
    meta-architectures can be reused.
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        # These are mock variables to be compatible with probabilistic detectron library. No NMS is performed for DETR.
        # Only needed for ensemble methods
        self.test_nms_thresh = 0.5
        self.test_topk_per_image = self.model.detr.num_queries

    def detr_probabilistic_inference(self,
                                     input_im):
        """
        Run a single probabilistic forward pass of DETR.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            tuple: (predicted_boxes, predicted_boxes_covariance,
                predicted_prob, classes_idxs, predicted_prob_vectors), with
                boxes rescaled to the inference image size.
        """
        outputs = self.model(input_im,
                             return_raw_results=True,
                             is_mc_dropout=self.mc_dropout_enabled)

        image_width = input_im[0]['image'].shape[2]
        image_height = input_im[0]['image'].shape[1]

        # Handle logits and classes
        predicted_logits = outputs['pred_logits'][0]
        if 'pred_logits_var' in outputs.keys():
            # Loss-attenuation head: sample logits from the predicted Gaussian
            # and average the resulting softmax probability vectors.
            predicted_logits_var = outputs['pred_logits_var'][0]
            box_cls_dists = torch.distributions.normal.Normal(
                predicted_logits, scale=torch.sqrt(
                    torch.exp(predicted_logits_var)))
            predicted_logits = box_cls_dists.rsample(
                (self.model.cls_var_num_samples,))
            predicted_prob_vectors = F.softmax(predicted_logits, dim=-1)
            predicted_prob_vectors = predicted_prob_vectors.mean(0)
        else:
            predicted_prob_vectors = F.softmax(predicted_logits, dim=-1)

        # Background is the last category; exclude it from the max.
        predicted_prob, classes_idxs = predicted_prob_vectors[:, :-1].max(-1)

        # Handle boxes and covariance matrices
        predicted_boxes = outputs['pred_boxes'][0]

        # Rescale boxes to inference image size (not COCO original size)
        pred_boxes = Boxes(box_cxcywh_to_xyxy(predicted_boxes))
        pred_boxes.scale(scale_x=image_width, scale_y=image_height)
        predicted_boxes = pred_boxes.tensor

        # Transform and rescale covariance matrices to match the box rescale.
        if 'pred_boxes_cov' in outputs.keys():
            predicted_boxes_covariance = covariance_output_to_cholesky(
                outputs['pred_boxes_cov'][0])
            predicted_boxes_covariance = torch.matmul(
                predicted_boxes_covariance, predicted_boxes_covariance.transpose(
                    1, 2))
            # Jacobian of the (cx, cy, w, h) -> (x1, y1, x2, y2) transform.
            transform_mat = torch.tensor([[[1.0, 0.0, -0.5, 0.0],
                                           [0.0, 1.0, 0.0, -0.5],
                                           [1.0, 0.0, 0.5, 0.0],
                                           [0.0, 1.0, 0.0, 0.5]]]).to(self.model.device)
            predicted_boxes_covariance = torch.matmul(
                torch.matmul(
                    transform_mat,
                    predicted_boxes_covariance),
                transform_mat.transpose(
                    1,
                    2))
            # Scale covariance as S * Sigma * S^T, mirroring the box scaling.
            scale_mat = torch.diag_embed(
                torch.as_tensor(
                    (image_width,
                     image_height,
                     image_width,
                     image_height),
                    dtype=torch.float32)).to(
                self.model.device).unsqueeze(0)
            predicted_boxes_covariance = torch.matmul(
                torch.matmul(
                    scale_mat,
                    predicted_boxes_covariance),
                torch.transpose(scale_mat, 2, 1))
        else:
            predicted_boxes_covariance = []

        return predicted_boxes, predicted_boxes_covariance, predicted_prob, classes_idxs, predicted_prob_vectors

    def post_processing_standard_nms(self, input_im):
        """
        This function produces results using standard non-maximum suppression. The function takes into
        account any probabilistic modeling method when computing the results.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        outputs = self.detr_probabilistic_inference(input_im)
        return inference_utils.general_standard_nms_postprocessing(
            input_im, outputs)

    def post_processing_output_statistics(self, input_im):
        """
        Output statistics does not make much sense for DETR architecture. There is some redundancy due to forced 100
        detections per image, but cluster sizes would be too small for meaningful estimates. Might implement it later
        on.
        """
        # Dead `pass` after raise removed (was unreachable).
        raise NotImplementedError

    def post_processing_mc_dropout_ensembles(self, input_im):
        """
        Produce results by merging the outputs of multiple MC-dropout forward
        passes of the same model (black-box ensembling after per-run NMS).
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            raise NotImplementedError
        else:
            # Merge results:
            results = [
                inference_utils.general_standard_nms_postprocessing(
                    input_im,
                    self.detr_probabilistic_inference(input_im),
                    self.test_nms_thresh,
                    self.test_topk_per_image) for _ in range(
                    self.num_mc_dropout_runs)]

            # Append per-ensemble outputs after NMS has been performed.
            ensemble_pred_box_list = [
                result.pred_boxes.tensor for result in results]
            ensemble_pred_prob_vectors_list = [
                result.pred_cls_probs for result in results]
            ensembles_class_idxs_list = [
                result.pred_classes for result in results]
            ensembles_pred_box_covariance_list = [
                result.pred_boxes_covariance for result in results]

            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)

    def post_processing_ensembles(self, input_im, model_dict):
        """
        Produce results by merging the outputs of several independently
        trained models (black-box ensembling after per-model NMS).
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            raise NotImplementedError
        else:
            outputs_list = []
            for model in model_dict:
                self.model = model
                outputs_list.append(
                    self.post_processing_standard_nms(input_im))

            # Merge results:
            ensemble_pred_box_list = []
            ensemble_pred_prob_vectors_list = []
            ensembles_class_idxs_list = []
            ensembles_pred_box_covariance_list = []
            for results in outputs_list:
                # Append per-ensemble outputs after NMS has been performed.
                ensemble_pred_box_list.append(results.pred_boxes.tensor)
                ensemble_pred_prob_vectors_list.append(results.pred_cls_probs)
                ensembles_class_idxs_list.append(results.pred_classes)
                ensembles_pred_box_covariance_list.append(
                    results.pred_boxes_covariance)

            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)

    def post_processing_bayes_od(self, input_im):
        """
        Since there is no NMS step in DETR, bayesod is not implemented. Although possible to add NMS
        and implement it later on.
        """
        # Dead `pass` after raise removed (was unreachable).
        raise NotImplementedError
| 8,454 | 41.275 | 119 | py |
probdet | probdet-master/src/probabilistic_modeling/probabilistic_retinanet.py | import logging
import math
from typing import List
import torch
from fvcore.nn import sigmoid_focal_loss_jit, smooth_l1_loss
from torch import nn, distributions
# Detectron Imports
from detectron2.layers import ShapeSpec, cat
from detectron2.utils.events import get_event_storage
from detectron2.modeling.anchor_generator import build_anchor_generator
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.retinanet import RetinaNet, RetinaNetHead, permute_to_N_HWA_K
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.structures import Boxes
# Project Imports
from probabilistic_modeling.modeling_utils import covariance_output_to_cholesky, clamp_log_variance, get_probabilistic_loss_weight
@META_ARCH_REGISTRY.register()
class ProbabilisticRetinaNet(RetinaNet):
    """
    Probabilistic RetinaNet: extends detectron2's RetinaNet with optional
    classification-logit variance and box-covariance estimation heads, and
    the corresponding probabilistic losses (loss attenuation, NLL, second
    moment matching, energy score).
    """

    def __init__(self, cfg):
        super().__init__(cfg)

        # Parse configs
        self.cls_var_loss = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME
        self.compute_cls_var = self.cls_var_loss != 'none'
        self.cls_var_num_samples = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES

        self.bbox_cov_loss = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME
        self.compute_bbox_cov = self.bbox_cov_loss != 'none'
        self.bbox_cov_num_samples = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES

        self.bbox_cov_type = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE
        if self.bbox_cov_type == 'diagonal':
            # Diagonal covariance matrix has N elements
            self.bbox_cov_dims = 4
        else:
            # Number of elements required to describe an NxN covariance matrix is
            # computed as: (N * (N + 1)) / 2
            self.bbox_cov_dims = 10

        self.dropout_rate = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
        self.use_dropout = self.dropout_rate != 0.0

        self.current_step = 0
        # NOTE(review): loss annealing uses the second LR-decay step as its
        # horizon — assumes SOLVER.STEPS has at least two entries; confirm.
        self.annealing_step = cfg.SOLVER.STEPS[1]

        # Define custom probabilistic head
        backbone_shape = self.backbone.output_shape()
        feature_shapes = [backbone_shape[f] for f in self.head_in_features]
        self.head = ProbabilisticRetinaNetHead(
            cfg,
            self.use_dropout,
            self.dropout_rate,
            self.compute_cls_var,
            self.compute_bbox_cov,
            self.bbox_cov_dims,
            feature_shapes)

        # Send to device
        self.to(self.device)

    def forward(
            self,
            batched_inputs,
            return_anchorwise_output=False,
            num_mc_dropout_runs=-1):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:
                * image: Tensor, image in (C, H, W) format.
                * instances: Instances
                Other information that's included in the original dicts, such as:
                * "height", "width" (int): the output resolution of the model, used in inference.
                  See :meth:`postprocess` for details.
            return_anchorwise_output (bool): returns raw output for probabilistic inference
            num_mc_dropout_runs (int): perform efficient monte-carlo dropout runs by running only the head and
            not full neural network.

        Returns:
            dict[str: Tensor]:
                mapping from a named loss to a tensor storing the loss. Used during training only.
        """
        # Preprocess image
        images = self.preprocess_image(batched_inputs)

        # Extract features and generate anchors
        features = self.backbone(images.tensor)
        features = [features[f] for f in self.head_in_features]
        anchors = self.anchor_generator(features)

        # MC_Dropout inference forward: replicate anchors/features so a
        # single head pass yields num_mc_dropout_runs stochastic outputs.
        if num_mc_dropout_runs > 1:
            anchors = anchors * num_mc_dropout_runs
            features = features * num_mc_dropout_runs
            output_dict = self.produce_raw_output(anchors, features)
            return output_dict

        # Regular inference forward
        if return_anchorwise_output:
            return self.produce_raw_output(anchors, features)

        # Training and validation forward
        pred_logits, pred_anchor_deltas, pred_logits_vars, pred_anchor_deltas_vars = self.head(
            features)
        # Transpose the Hi*Wi*A dimension to the middle:
        pred_logits = [
            permute_to_N_HWA_K(
                x, self.num_classes) for x in pred_logits]
        pred_anchor_deltas = [
            permute_to_N_HWA_K(
                x, 4) for x in pred_anchor_deltas]

        if pred_logits_vars is not None:
            pred_logits_vars = [
                permute_to_N_HWA_K(
                    x, self.num_classes) for x in pred_logits_vars]
        if pred_anchor_deltas_vars is not None:
            pred_anchor_deltas_vars = [permute_to_N_HWA_K(
                x, self.bbox_cov_dims) for x in pred_anchor_deltas_vars]

        if self.training:
            assert "instances" in batched_inputs[0], "Instance annotations are missing in training!"
            gt_instances = [
                x["instances"].to(
                    self.device) for x in batched_inputs]

            gt_classes, gt_boxes = self.label_anchors(
                anchors, gt_instances)

            # Anchors are cached on the module; presumably consumed by
            # downstream probabilistic utilities — verify against callers.
            self.anchors = torch.cat(
                [Boxes.cat(anchors).tensor for i in range(len(gt_instances))], 0)

            # Loss is computed based on what values are to be estimated by the neural
            # network
            losses = self.losses(
                anchors,
                gt_classes,
                gt_boxes,
                pred_logits,
                pred_anchor_deltas,
                pred_logits_vars,
                pred_anchor_deltas_vars)

            # Step counter drives the probabilistic loss annealing schedule.
            self.current_step += 1

            if self.vis_period > 0:
                storage = get_event_storage()
                if storage.iter % self.vis_period == 0:
                    results = self.inference(
                        anchors, pred_logits, pred_anchor_deltas, images.image_sizes)
                    self.visualize_training(batched_inputs, results)
            return losses
        else:
            results = self.inference(
                anchors,
                pred_logits,
                pred_anchor_deltas,
                images.image_sizes)
            processed_results = []
            for results_per_image, input_per_image, image_size in zip(
                    results, batched_inputs, images.image_sizes
            ):
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])
                r = detector_postprocess(results_per_image, height, width)
                processed_results.append({"instances": r})
            return processed_results

    def losses(
            self,
            anchors,
            gt_classes,
            gt_boxes,
            pred_class_logits,
            pred_anchor_deltas,
            pred_class_logits_var=None,
            pred_bbox_cov=None):
        """
        Args:
            For `gt_classes` and `gt_anchors_deltas` parameters, see
                :meth:`RetinaNet.get_ground_truth`.
            Their shapes are (N, R) and (N, R, 4), respectively, where R is
            the total number of anchors across levels, i.e. sum(Hi x Wi x A)
            For `pred_class_logits`, `pred_anchor_deltas`, `pred_class_logits_var` and `pred_bbox_cov`, see
                :meth:`RetinaNetHead.forward`.

        Returns:
            dict[str: Tensor]:
                mapping from a named loss to a scalar tensor
                storing the loss. Used during training only. The dict keys are:
                "loss_cls" and "loss_box_reg"
        """
        num_images = len(gt_classes)
        gt_labels = torch.stack(gt_classes)  # (N, R)
        anchors = type(anchors[0]).cat(anchors).tensor  # (R, 4)
        gt_anchor_deltas = [
            self.box2box_transform.get_deltas(
                anchors, k) for k in gt_boxes]
        gt_anchor_deltas = torch.stack(gt_anchor_deltas)  # (N, R, 4)

        # valid = not ignored; pos = matched to a foreground ground truth.
        valid_mask = gt_labels >= 0
        pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes)
        num_pos_anchors = pos_mask.sum().item()
        get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images)
        # EMA of the positive-anchor count used to normalize both losses.
        self.loss_normalizer = self.loss_normalizer_momentum * self.loss_normalizer + \
            (1 - self.loss_normalizer_momentum) * max(num_pos_anchors, 1)

        # classification and regression loss

        # Shapes:
        # (N x R, K) for class_logits and class_logits_var.
        # (N x R, 4), (N x R x 10) for pred_anchor_deltas and pred_class_bbox_cov respectively.

        # Transform per-feature layer lists to a single tensor
        pred_class_logits = cat(pred_class_logits, dim=1)
        pred_anchor_deltas = cat(pred_anchor_deltas, dim=1)

        if pred_class_logits_var is not None:
            pred_class_logits_var = cat(
                pred_class_logits_var, dim=1)

        if pred_bbox_cov is not None:
            pred_bbox_cov = cat(
                pred_bbox_cov, dim=1)

        gt_classes_target = torch.nn.functional.one_hot(
            gt_labels[valid_mask],
            num_classes=self.num_classes +
            1)[
            :,
            :-
            1].to(
            pred_class_logits[0].dtype)  # no loss for the last (background) class

        # Classification losses
        if self.compute_cls_var:
            # Compute classification variance according to:
            # "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
            if self.cls_var_loss == 'loss_attenuation':
                num_samples = self.cls_var_num_samples

                # Compute standard deviation
                pred_class_logits_var = torch.sqrt(torch.exp(
                    pred_class_logits_var[valid_mask]))

                pred_class_logits = pred_class_logits[valid_mask]

                # Produce normal samples using logits as the mean and the standard deviation computed above
                # Scales with GPU memory. 12 GB ---> 3 Samples per anchor for
                # COCO dataset.
                univariate_normal_dists = distributions.normal.Normal(
                    pred_class_logits, scale=pred_class_logits_var)

                pred_class_stochastic_logits = univariate_normal_dists.rsample(
                    (num_samples,))
                pred_class_stochastic_logits = pred_class_stochastic_logits.view(
                    (pred_class_stochastic_logits.shape[1] * num_samples, pred_class_stochastic_logits.shape[2], -1))
                pred_class_stochastic_logits = pred_class_stochastic_logits.squeeze(
                    2)

                # Produce copies of the target classes to match the number of
                # stochastic samples.
                gt_classes_target = torch.unsqueeze(gt_classes_target, 0)
                gt_classes_target = torch.repeat_interleave(
                    gt_classes_target, num_samples, dim=0).view(
                    (gt_classes_target.shape[1] * num_samples, gt_classes_target.shape[2], -1))
                gt_classes_target = gt_classes_target.squeeze(2)

                # Produce copies of the target classes to form the stochastic
                # focal loss.
                loss_cls = sigmoid_focal_loss_jit(
                    pred_class_stochastic_logits,
                    gt_classes_target,
                    alpha=self.focal_loss_alpha,
                    gamma=self.focal_loss_gamma,
                    reduction="sum",
                ) / (num_samples * max(1, self.loss_normalizer))
            else:
                raise ValueError(
                    'Invalid classification loss name {}.'.format(
                        self.bbox_cov_loss))
        else:
            # Standard loss computation in case one wants to use this code
            # without any probabilistic inference.
            loss_cls = sigmoid_focal_loss_jit(
                pred_class_logits[valid_mask],
                gt_classes_target,
                alpha=self.focal_loss_alpha,
                gamma=self.focal_loss_gamma,
                reduction="sum",
            ) / max(1, self.loss_normalizer)

        # Compute Regression Loss
        pred_anchor_deltas = pred_anchor_deltas[pos_mask]
        gt_anchors_deltas = gt_anchor_deltas[pos_mask]
        if self.compute_bbox_cov:
            # We have to clamp the output variance else probabilistic metrics
            # go to infinity.
            pred_bbox_cov = clamp_log_variance(pred_bbox_cov[pos_mask])
            if self.bbox_cov_loss == 'negative_log_likelihood':
                if self.bbox_cov_type == 'diagonal':
                    # Compute regression variance according to:
                    # "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
                    # This implementation with smooth_l1_loss outperforms using
                    # torch.distribution.multivariate_normal. Losses might have different numerical values
                    # since we do not include constants in this implementation.
                    loss_box_reg = 0.5 * torch.exp(-pred_bbox_cov) * smooth_l1_loss(
                        pred_anchor_deltas,
                        gt_anchors_deltas,
                        beta=self.smooth_l1_beta)
                    # Log-variance regularizer of the attenuated loss.
                    loss_covariance_regularize = 0.5 * pred_bbox_cov
                    loss_box_reg += loss_covariance_regularize

                    # Sum over all elements
                    loss_box_reg = torch.sum(
                        loss_box_reg) / max(1, self.loss_normalizer)
                else:
                    # Multivariate negative log likelihood. Implemented with
                    # pytorch multivariate_normal.log_prob function. Custom implementations fail to finish training
                    # due to NAN loss.

                    # This is the Cholesky decomposition of the covariance matrix. We reconstruct it from 10 estimated
                    # parameters as a lower triangular matrix.
                    forecaster_cholesky = covariance_output_to_cholesky(
                        pred_bbox_cov)

                    # Compute multivariate normal distribution using torch
                    # distribution functions.
                    multivariate_normal_dists = distributions.multivariate_normal.MultivariateNormal(
                        pred_anchor_deltas, scale_tril=forecaster_cholesky)

                    loss_box_reg = - \
                        multivariate_normal_dists.log_prob(gt_anchors_deltas)
                    loss_box_reg = torch.sum(
                        loss_box_reg) / max(1, self.loss_normalizer)

            elif self.bbox_cov_loss == 'second_moment_matching':
                # Compute regression covariance using second moment matching.
                loss_box_reg = smooth_l1_loss(
                    pred_anchor_deltas,
                    gt_anchors_deltas,
                    beta=self.smooth_l1_beta)

                # Compute errors
                errors = (pred_anchor_deltas - gt_anchors_deltas)

                if self.bbox_cov_type == 'diagonal':
                    # Compute second moment matching term.
                    second_moment_matching_term = smooth_l1_loss(
                        torch.exp(pred_bbox_cov), errors ** 2, beta=self.smooth_l1_beta)
                    loss_box_reg += second_moment_matching_term
                    loss_box_reg = torch.sum(
                        loss_box_reg) / max(1, self.loss_normalizer)
                else:
                    # Compute second moment matching term.
                    errors = torch.unsqueeze(errors, 2)
                    gt_error_covar = torch.matmul(
                        errors, torch.transpose(errors, 2, 1))

                    # This is the cholesky decomposition of the covariance matrix. We reconstruct it from 10 estimated
                    # parameters as a lower triangular matrix.
                    forecaster_cholesky = covariance_output_to_cholesky(
                        pred_bbox_cov)

                    predicted_covar = torch.matmul(
                        forecaster_cholesky, torch.transpose(
                            forecaster_cholesky, 2, 1))

                    second_moment_matching_term = smooth_l1_loss(
                        predicted_covar, gt_error_covar, beta=self.smooth_l1_beta, reduction='sum')
                    loss_box_reg = (torch.sum(
                        loss_box_reg) + second_moment_matching_term) / max(1, self.loss_normalizer)

            elif self.bbox_cov_loss == 'energy_loss':
                # Compute regression variance according to energy score loss.
                forecaster_means = pred_anchor_deltas

                # Compute forecaster cholesky. Takes care of diagonal case
                # automatically.
                forecaster_cholesky = covariance_output_to_cholesky(
                    pred_bbox_cov)

                # Define normal distribution samples. To compute energy score,
                # we need i+1 samples.

                # Define per-anchor Distributions
                multivariate_normal_dists = distributions.multivariate_normal.MultivariateNormal(
                    forecaster_means, scale_tril=forecaster_cholesky)

                # Define Monte-Carlo Samples
                distributions_samples = multivariate_normal_dists.rsample(
                    (self.bbox_cov_num_samples + 1,))

                # Two overlapping sample windows give the sample pairs used by
                # the second (sample-vs-sample) energy score term.
                distributions_samples_1 = distributions_samples[0:self.bbox_cov_num_samples, :, :]
                distributions_samples_2 = distributions_samples[1:
                                                                self.bbox_cov_num_samples + 1, :, :]

                # Compute energy score
                gt_anchors_deltas_samples = torch.repeat_interleave(
                    gt_anchors_deltas.unsqueeze(0), self.bbox_cov_num_samples, dim=0)

                energy_score_first_term = 2.0 * smooth_l1_loss(
                    distributions_samples_1,
                    gt_anchors_deltas_samples,
                    beta=self.smooth_l1_beta,
                    reduction="sum") / self.bbox_cov_num_samples  # First term

                energy_score_second_term = - smooth_l1_loss(
                    distributions_samples_1,
                    distributions_samples_2,
                    beta=self.smooth_l1_beta,
                    reduction="sum") / self.bbox_cov_num_samples  # Second term

                # Final Loss
                loss_box_reg = (
                    energy_score_first_term + energy_score_second_term) / max(1, self.loss_normalizer)
            else:
                raise ValueError(
                    'Invalid regression loss name {}.'.format(
                        self.bbox_cov_loss))

            # Perform loss annealing. Essential for reliably training variance estimates using NLL in RetinaNet.
            # For energy score and second moment matching, this is optional.
            standard_regression_loss = smooth_l1_loss(
                pred_anchor_deltas,
                gt_anchors_deltas,
                beta=self.smooth_l1_beta,
                reduction="sum",
            ) / max(1, self.loss_normalizer)

            probabilistic_loss_weight = get_probabilistic_loss_weight(
                self.current_step, self.annealing_step)

            loss_box_reg = (1.0 - probabilistic_loss_weight) * \
                standard_regression_loss + probabilistic_loss_weight * loss_box_reg
        else:
            # Standard regression loss in case no variance is needed to be
            # estimated.
            loss_box_reg = smooth_l1_loss(
                pred_anchor_deltas,
                gt_anchors_deltas,
                beta=self.smooth_l1_beta,
                reduction="sum",
            ) / max(1, self.loss_normalizer)

        return {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg}

    def produce_raw_output(self, anchors, features):
        """
        Given anchors and features, produces raw pre-nms output to be used for custom fusion operations.
        """
        # Perform inference run
        pred_logits, pred_anchor_deltas, pred_logits_vars, pred_anchor_deltas_vars = self.head(
            features)

        # Transpose the Hi*Wi*A dimension to the middle:
        pred_logits = [
            permute_to_N_HWA_K(
                x, self.num_classes) for x in pred_logits]
        pred_anchor_deltas = [
            permute_to_N_HWA_K(
                x, 4) for x in pred_anchor_deltas]

        if pred_logits_vars is not None:
            pred_logits_vars = [
                permute_to_N_HWA_K(
                    x, self.num_classes) for x in pred_logits_vars]
        if pred_anchor_deltas_vars is not None:
            pred_anchor_deltas_vars = [permute_to_N_HWA_K(
                x, self.bbox_cov_dims) for x in pred_anchor_deltas_vars]

        # Create raw output dictionary
        raw_output = {'anchors': anchors}

        # Shapes:
        # (N x R, K) for class_logits and class_logits_var.
        # (N x R, 4), (N x R x 10) for pred_anchor_deltas and pred_class_bbox_cov respectively.
        raw_output.update({'box_cls': pred_logits,
                           'box_delta': pred_anchor_deltas,
                           'box_cls_var': pred_logits_vars,
                           'box_reg_var': pred_anchor_deltas_vars})
        return raw_output
class ProbabilisticRetinaNetHead(RetinaNetHead):
    """
    The head used in ProbabilisticRetinaNet for object class probability estimation, box regression, box covariance estimation.
    It has three subnets for the three tasks, with a common structure but separate parameters.
    """

    def __init__(self,
                 cfg,
                 use_dropout,
                 dropout_rate,
                 compute_cls_var,
                 compute_bbox_cov,
                 bbox_cov_dims,
                 input_shape: List[ShapeSpec]):
        """
        Args:
            cfg: detectron2 config node; RetinaNet settings are read from it.
            use_dropout (bool): if True, append a Dropout layer to both conv
                towers (enables MC-Dropout style sampling).
            dropout_rate (float): drop probability for those Dropout layers.
            compute_cls_var (bool): build an extra conv head that predicts
                classification logit variance.
            compute_bbox_cov (bool): build an extra conv head that predicts
                box covariance parameters.
            bbox_cov_dims (int): covariance parameters per anchor
                (4 for diagonal, 10 for full covariance).
            input_shape (List[ShapeSpec]): shapes of the FPN feature maps.
        """
        super().__init__(cfg, input_shape)

        # Extract config information
        # fmt: off
        in_channels = input_shape[0].channels
        num_classes = cfg.MODEL.RETINANET.NUM_CLASSES
        num_convs = cfg.MODEL.RETINANET.NUM_CONVS
        prior_prob = cfg.MODEL.RETINANET.PRIOR_PROB
        num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors
        # fmt: on
        assert (
            len(set(num_anchors)) == 1
        ), "Using different number of anchors between levels is not currently supported!"
        num_anchors = num_anchors[0]

        self.compute_cls_var = compute_cls_var
        self.compute_bbox_cov = compute_bbox_cov
        self.bbox_cov_dims = bbox_cov_dims

        # For consistency all configs are grabbed from original RetinaNet
        self.use_dropout = use_dropout
        self.dropout_rate = dropout_rate

        # Two parallel conv towers (classification / box regression), each
        # num_convs blocks of 3x3 conv + ReLU at constant channel width.
        cls_subnet = []
        bbox_subnet = []
        for _ in range(num_convs):
            cls_subnet.append(
                nn.Conv2d(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1))
            cls_subnet.append(nn.ReLU())
            bbox_subnet.append(
                nn.Conv2d(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1))
            bbox_subnet.append(nn.ReLU())

        # Dropout at the tower ends is what MC-Dropout sampling relies on.
        if self.use_dropout:
            cls_subnet.append(nn.Dropout(p=self.dropout_rate))
            bbox_subnet.append(nn.Dropout(p=self.dropout_rate))

        self.cls_subnet = nn.Sequential(*cls_subnet)
        self.bbox_subnet = nn.Sequential(*bbox_subnet)
        self.cls_score = nn.Conv2d(
            in_channels,
            num_anchors *
            num_classes,
            kernel_size=3,
            stride=1,
            padding=1)
        self.bbox_pred = nn.Conv2d(
            in_channels,
            num_anchors * 4,
            kernel_size=3,
            stride=1,
            padding=1)

        # Standard RetinaNet conv init: N(0, 0.01) weights, zero bias.
        for modules in [
                self.cls_subnet,
                self.bbox_subnet,
                self.cls_score,
                self.bbox_pred]:
            for layer in modules.modules():
                if isinstance(layer, nn.Conv2d):
                    torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
                    torch.nn.init.constant_(layer.bias, 0)

        # Use prior in model initialization to improve stability
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        torch.nn.init.constant_(self.cls_score.bias, bias_value)

        # Create subnet for classification variance estimation.
        if self.compute_cls_var:
            self.cls_var = nn.Conv2d(
                in_channels,
                num_anchors *
                num_classes,
                kernel_size=3,
                stride=1,
                padding=1)

            # Large negative bias: initial predicted log-variance is tiny.
            for layer in self.cls_var.modules():
                if isinstance(layer, nn.Conv2d):
                    torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
                    torch.nn.init.constant_(layer.bias, -10.0)

        # Create subnet for bounding box covariance estimation.
        if self.compute_bbox_cov:
            self.bbox_cov = nn.Conv2d(
                in_channels,
                num_anchors * self.bbox_cov_dims,
                kernel_size=3,
                stride=1,
                padding=1)

            # Much smaller init std than the mean heads keeps early
            # covariance outputs near zero.
            for layer in self.bbox_cov.modules():
                if isinstance(layer, nn.Conv2d):
                    torch.nn.init.normal_(layer.weight, mean=0, std=0.0001)
                    torch.nn.init.constant_(layer.bias, 0)

    def forward(self, features):
        """
        Arguments:
            features (list[Tensor]): FPN feature map tensors in high to low resolution.
                Each tensor in the list correspond to different feature levels.

        Returns:
            logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).
                The tensor predicts the classification probability
                at each spatial position for each of the A anchors and K object
                classes.
            logits_var (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).
                The tensor predicts the variance of the logits modeled as a univariate
                Gaussian distribution at each spatial position for each of the A anchors and K object
                classes.
            bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi).
                The tensor predicts 4-vector (dx,dy,dw,dh) box
                regression values for every anchor. These values are the
                relative offset between the anchor and the ground truth box.
            bbox_cov (list[Tensor]): #lvl tensors, each has shape (N, Ax4 or Ax10, Hi, Wi).
                The tensor predicts elements of the box
                covariance values for every anchor. The dimensions of the box covarianc
                depends on estimating a full covariance (10) or a diagonal covariance matrix (4).
        """
        logits = []
        bbox_reg = []

        logits_var = []
        bbox_cov = []
        for feature in features:
            logits.append(self.cls_score(self.cls_subnet(feature)))
            bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature)))
            # NOTE: the towers are re-run for the variance heads, so with
            # dropout enabled the variance heads see a different dropout mask
            # than the mean heads — presumably intentional for MC-Dropout;
            # confirm before "optimizing" the duplicate subnet call away.
            if self.compute_cls_var:
                logits_var.append(self.cls_var(self.cls_subnet(feature)))
            if self.compute_bbox_cov:
                bbox_cov.append(self.bbox_cov(self.bbox_subnet(feature)))

        # Fixed 4-slot output: [logits, bbox_reg, logits_var or None,
        # bbox_cov or None]; None marks a head that was not built.
        return_vector = [logits, bbox_reg]
        if self.compute_cls_var:
            return_vector.append(logits_var)
        else:
            return_vector.append(None)
        if self.compute_bbox_cov:
            return_vector.append(bbox_cov)
        else:
            return_vector.append(None)

        return return_vector
| 28,509 | 41.362556 | 130 | py |
probdet | probdet-master/src/probabilistic_modeling/modeling_utils.py | import torch
def covariance_output_to_cholesky(pred_bbox_cov):
    """
    Transforms output to covariance cholesky decomposition.

    Args:
        pred_bbox_cov (kx4 or kx10): Output covariance matrix elements.

    Returns:
        predicted_cov_cholesky (kx4x4): cholesky factor matrix
    """
    # The first four channels are log-variances; their exponentiated square
    # roots form the diagonal of the Cholesky factor.
    std_devs = torch.sqrt(torch.exp(pred_bbox_cov[:, 0:4]))
    cholesky_factor = torch.diag_embed(std_devs)

    # With more than four channels, the remaining six entries fill the strict
    # lower triangle of the 4x4 factor in row-major order.
    if pred_bbox_cov.shape[1] > 4:
        rows, cols = torch.tril_indices(row=4, col=4, offset=-1)
        cholesky_factor[:, rows, cols] = pred_bbox_cov[:, 4:]

    return cholesky_factor
def clamp_log_variance(pred_bbox_cov, clamp_min=-7.0, clamp_max=7.0):
    """
    Tiny function that clamps variance for consistency across all methods.

    Args:
        pred_bbox_cov: (k x 4) or (k x 10) tensor whose first four channels
            hold log-variances.
        clamp_min / clamp_max: clamping range for the log-variances.

    Returns:
        Tensor of the same shape with the first four channels clamped.
    """
    log_vars, off_diagonal = pred_bbox_cov[:, 0:4], pred_bbox_cov[:, 4:]
    # Off-diagonal covariance channels (if any) pass through untouched.
    return torch.cat((log_vars.clamp(clamp_min, clamp_max), off_diagonal), dim=1)
def get_probabilistic_loss_weight(current_step, annealing_step):
    """
    Tiny function to get adaptive probabilistic loss weight for consistency across all methods.

    Training progress (current_step / annealing_step) is clipped to [0, 1]
    and warped exponentially so the probabilistic loss ramps up slowly at
    first and reaches full weight exactly at the annealing step.
    """
    progress = min(1.0, current_step / annealing_step)
    # Exponential ramp: 0.0 at progress 0, exactly 1.0 at progress 1.
    return (100 ** progress - 1.0) / (100.0 - 1.0)
| 1,431 | 32.302326 | 95 | py |
probdet | probdet-master/src/probabilistic_modeling/probabilistic_generalized_rcnn.py | import logging
import numpy as np
import torch
from typing import Dict, List, Union, Optional, Tuple
from torch.nn import functional as F
from torch import nn, distributions
# Detectron imports
import fvcore.nn.weight_init as weight_init
from detectron2.config import configurable
from detectron2.layers import Linear, ShapeSpec, cat, Conv2d, get_norm
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference
from detectron2.modeling.roi_heads.box_head import ROI_BOX_HEAD_REGISTRY
from detectron2.structures import Boxes, Instances, ImageList
from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n
from fvcore.nn import smooth_l1_loss
# Project imports
from probabilistic_inference.inference_utils import get_dir_alphas
from probabilistic_modeling.modeling_utils import get_probabilistic_loss_weight, clamp_log_variance, covariance_output_to_cholesky
device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
@META_ARCH_REGISTRY.register()
class ProbabilisticGeneralizedRCNN(GeneralizedRCNN):
    """
    Probabilistic GeneralizedRCNN class.

    Wraps detectron2's GeneralizedRCNN and replaces the ROI box predictor
    with ProbabilisticFastRCNNOutputLayers so classification-logit variance
    and/or box covariance can be estimated, as selected by the config.
    """

    def __init__(self, cfg):
        super().__init__(cfg)

        # Parse configs
        # A loss name other than 'none' enables the corresponding
        # uncertainty head.
        self.cls_var_loss = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME
        self.compute_cls_var = self.cls_var_loss != 'none'
        self.cls_var_num_samples = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES

        self.bbox_cov_loss = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME
        self.compute_bbox_cov = self.bbox_cov_loss != 'none'
        self.bbox_cov_num_samples = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES

        self.bbox_cov_type = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE
        if self.bbox_cov_type == 'diagonal':
            # Diagonal covariance matrix has N elements
            self.bbox_cov_dims = 4
        else:
            # Number of elements required to describe an NxN covariance matrix is
            # computed as: (N * (N + 1)) / 2
            self.bbox_cov_dims = 10

        self.dropout_rate = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
        self.use_dropout = self.dropout_rate != 0.0

        # -1 means "no MC-Dropout"; overwritten per-call in forward().
        self.num_mc_dropout_runs = -1

        # Optimizer-step counter; drives loss annealing in the box predictor.
        self.current_step = 0

        # Define custom probabilistic head
        self.roi_heads.box_predictor = ProbabilisticFastRCNNOutputLayers(
            cfg,
            self.roi_heads.box_head.output_shape,
            self.compute_cls_var,
            self.cls_var_loss,
            self.cls_var_num_samples,
            self.compute_bbox_cov,
            self.bbox_cov_loss,
            self.bbox_cov_type,
            self.bbox_cov_dims,
            self.bbox_cov_num_samples)

        # Send to device
        self.to(self.device)

    def forward(self,
                batched_inputs,
                return_anchorwise_output=False,
                num_mc_dropout_runs=-1):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:

                * image: Tensor, image in (C, H, W) format.
                * instances (optional): groundtruth :class:`Instances`
                * proposals (optional): :class:`Instances`, precomputed proposals.

                Other information that's included in the original dicts, such as:

                * "height", "width" (int): the output resolution of the model, used in inference.
                  See :meth:`postprocess` for details.
            return_anchorwise_output (bool): returns raw output for probabilistic inference
            num_mc_dropout_runs (int): perform efficient monte-carlo dropout runs by running only the head and
                not full neural network.

        Returns:
            dict[str: Tensor]:
                mapping from a named loss to a tensor storing the loss. Used during training only.
        """
        # Plain inference path (no MC-Dropout): either raw proposal-wise
        # output or fully post-processed detections.
        if not self.training and num_mc_dropout_runs == -1:
            if return_anchorwise_output:
                return self.produce_raw_output(batched_inputs)
            else:
                return self.inference(batched_inputs)
        # MC-Dropout path: repeat the raw-output pass and return every run.
        elif self.training and num_mc_dropout_runs > 1:
            self.num_mc_dropout_runs = num_mc_dropout_runs
            output_list = []
            for i in range(num_mc_dropout_runs):
                output_list.append(self.produce_raw_output(batched_inputs))
            return output_list

        # Standard training path from here on.
        images = self.preprocess_image(batched_inputs)
        if "instances" in batched_inputs[0]:
            gt_instances = [
                x["instances"].to(
                    self.device) for x in batched_inputs]
        elif "targets" in batched_inputs[0]:
            log_first_n(
                logging.WARN,
                "'targets' in the model inputs is now renamed to 'instances'!",
                n=10)
            gt_instances = [x["targets"].to(self.device)
                            for x in batched_inputs]
        else:
            gt_instances = None

        features = self.backbone(images.tensor)

        if self.proposal_generator:
            proposals, proposal_losses = self.proposal_generator(
                images, features, gt_instances)
        else:
            assert "proposals" in batched_inputs[0]
            proposals = [x["proposals"].to(self.device)
                         for x in batched_inputs]
            proposal_losses = {}

        # current_step is forwarded so the probabilistic box predictor can
        # anneal its loss; incremented once per training forward pass.
        _, detector_losses = self.roi_heads(
            images, features, proposals, gt_instances, current_step=self.current_step)
        if self.vis_period > 0:
            storage = get_event_storage()
            if storage.iter % self.vis_period == 0:
                self.visualize_training(batched_inputs, proposals)
        self.current_step += 1

        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)
        return losses

    def produce_raw_output(self, batched_inputs, detected_instances=None):
        """
        Run inference on the given inputs and return proposal-wise output for later postprocessing.

        Args:
            batched_inputs (list[dict]): same as in :meth:`forward`
            detected_instances (None or list[Instances]): if not None, it
                contains an `Instances` object per image. The `Instances`
                object contains "pred_boxes" and "pred_classes" which are
                known boxes in the image.
                The inference will then skip the detection of bounding boxes,
                and only predict other per-ROI outputs.

        Returns:
            same as in :meth:`forward`.
        """
        raw_output = dict()

        images = self.preprocess_image(batched_inputs)
        features = self.backbone(images.tensor)

        if detected_instances is None:
            if self.proposal_generator:
                proposals, _ = self.proposal_generator(images, features, None)
            else:
                assert "proposals" in batched_inputs[0]
                proposals = [
                    x["proposals"].to(
                        self.device) for x in batched_inputs]

            # Create raw output dictionary
            # NOTE(review): only proposals[0] is kept, so this path appears
            # to assume batch size 1 — confirm with callers.
            raw_output.update({'proposals': proposals[0]})
            results, _ = self.roi_heads(
                images, features, proposals, None, produce_raw_output=True, num_mc_dropout_runs=self.num_mc_dropout_runs)
        else:
            detected_instances = [x.to(self.device)
                                  for x in detected_instances]
            results = self.roi_heads.forward_with_given_boxes(
                features, detected_instances)

        # ROI heads in raw mode return the box predictor's 4-tuple.
        box_cls, box_delta, box_cls_var, box_reg_var = results
        raw_output.update({'box_cls': box_cls,
                           'box_delta': box_delta,
                           'box_cls_var': box_cls_var,
                           'box_reg_var': box_reg_var})
        return raw_output
@ROI_HEADS_REGISTRY.register()
class ProbabilisticROIHeads(StandardROIHeads):
    """
    Probabilistic ROI heads, inherit from standard ROI heads so can be used with mask RCNN in theory.
    """

    def __init__(self, cfg, input_shape):
        super(ProbabilisticROIHeads, self).__init__(cfg, input_shape)

        # Per-call state; all three are refreshed at the top of forward().
        self.is_mc_dropout_inference = False
        self.produce_raw_output = False
        self.current_step = 0

    def forward(
        self,
        images: ImageList,
        features: Dict[str, torch.Tensor],
        proposals: List[Instances],
        targets: Optional[List[Instances]] = None,
        num_mc_dropout_runs=-1,
        produce_raw_output=False,
        current_step=0.0,
    ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
        """
        See :class:`ROIHeads.forward`.

        Extra args vs. the base class:
            num_mc_dropout_runs (int): > 1 marks an MC-Dropout inference
                pass, which skips ground-truth handling even in train mode.
            produce_raw_output (bool): return the raw box-predictor output
                instead of post-processed instances.
            current_step: optimizer step, forwarded to the box predictor
                for loss annealing.
        """
        self.is_mc_dropout_inference = num_mc_dropout_runs > 1
        self.produce_raw_output = produce_raw_output
        self.current_step = current_step

        del images
        if self.training and not self.is_mc_dropout_inference:
            assert targets
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets

        if self.training and not self.is_mc_dropout_inference:
            losses = self._forward_box(features, proposals)
            # Usually the original proposals used by the box head are used by the mask, keypoint
            # heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes
            # predicted by the box head.
            losses.update(self._forward_mask(features, proposals))
            losses.update(self._forward_keypoint(features, proposals))
            return proposals, losses
        else:
            pred_instances = self._forward_box(features, proposals)
            # Raw mode: skip the cascaded mask/keypoint prediction entirely.
            if self.produce_raw_output:
                return pred_instances, {}
            # During inference cascaded prediction is used: the mask and keypoints heads are only
            # applied to the top scoring box detections.
            pred_instances = self.forward_with_given_boxes(
                features, pred_instances)
            return pred_instances, {}

    def _forward_box(
        self, features: Dict[str, torch.Tensor], proposals: List[Instances]
    ) -> Union[Dict[str, torch.Tensor], List[Instances]]:
        """
        Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
        the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.

        Args:
            features (dict[str, Tensor]): mapping from feature map names to tensor.
                Same as in :meth:`ROIHeads.forward`.
            proposals (list[Instances]): the per-image object proposals with
                their matching ground truth.
                Each has fields "proposal_boxes", and "objectness_logits",
                "gt_classes", "gt_boxes".

        Returns:
            In training, a dict of losses.
            In inference, a list of `Instances`, the predicted instances.
        """
        features = [features[f] for f in self.in_features]
        box_features = self.box_pooler(
            features, [x.proposal_boxes for x in proposals])
        box_features = self.box_head(box_features)
        predictions = self.box_predictor(box_features)
        del box_features

        # Raw mode: hand back the predictor's (scores, deltas, score_vars,
        # covs) tuple without decoding or NMS.
        if self.produce_raw_output:
            return predictions

        if self.training:
            if self.train_on_pred_boxes:
                with torch.no_grad():
                    pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
                        predictions, proposals)
                    for proposals_per_image, pred_boxes_per_image in zip(
                            proposals, pred_boxes):
                        proposals_per_image.proposal_boxes = Boxes(
                            pred_boxes_per_image)
            # current_step enables loss annealing in the predictor.
            return self.box_predictor.losses(
                predictions, proposals, self.current_step)
        else:
            pred_instances, _ = self.box_predictor.inference(
                predictions, proposals)
            return pred_instances
class ProbabilisticFastRCNNOutputLayers(nn.Module):
"""
Four linear layers for predicting Fast R-CNN outputs:
(1) proposal-to-detection box regression deltas
(2) classification scores
(3) box regression deltas covariance parameters (if needed)
(4) classification logits variance (if needed)
"""
@configurable
def __init__(
    self,
    input_shape,
    *,
    box2box_transform,
    num_classes,
    cls_agnostic_bbox_reg=False,
    smooth_l1_beta=0.0,
    test_score_thresh=0.0,
    test_nms_thresh=0.5,
    test_topk_per_image=100,
    compute_cls_var=False,
    compute_bbox_cov=False,
    bbox_cov_dims=4,
    cls_var_loss='none',
    cls_var_num_samples=10,
    bbox_cov_loss='none',
    bbox_cov_type='diagonal',
    dropout_rate=0.0,
    annealing_step=0,
    bbox_cov_num_samples=1000
):
    """
    NOTE: this interface is experimental.
    Args:
        input_shape (ShapeSpec): shape of the input feature to this module
        box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):
        num_classes (int): number of foreground classes
        cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
        smooth_l1_beta (float): transition point from L1 to L2 loss.
        test_score_thresh (float): threshold to filter predictions results.
        test_nms_thresh (float): NMS threshold for prediction results.
        test_topk_per_image (int): number of top predictions to produce per image.
        compute_cls_var (bool): compute classification variance
        compute_bbox_cov (bool): compute box covariance regression parameters.
        bbox_cov_dims (int): 4 for diagonal covariance, 10 for full covariance.
        cls_var_loss (str): name of classification variance loss.
        cls_var_num_samples (int): number of samples to be used for loss computation. Usually between 10-100.
        bbox_cov_loss (str): name of box covariance loss.
        bbox_cov_type (str): 'diagonal' or 'full'. This is used to train with loss functions that accept both types.
        dropout_rate (float): 0-1, probability of drop.
        annealing_step (int): step used for KL-divergence in evidential loss to fully be functional.
        bbox_cov_num_samples (int): number of Monte-Carlo samples drawn per box for the energy-score loss.
    """
    super().__init__()
    if isinstance(input_shape, int):  # some backward compatibility
        input_shape = ShapeSpec(channels=input_shape)
    input_size = input_shape.channels * \
        (input_shape.width or 1) * (input_shape.height or 1)

    # Probabilistic-modeling configuration.
    self.compute_cls_var = compute_cls_var
    self.compute_bbox_cov = compute_bbox_cov
    self.bbox_cov_dims = bbox_cov_dims
    self.bbox_cov_num_samples = bbox_cov_num_samples

    self.dropout_rate = dropout_rate
    self.use_dropout = self.dropout_rate != 0.0

    self.cls_var_loss = cls_var_loss
    self.cls_var_num_samples = cls_var_num_samples

    self.annealing_step = annealing_step

    self.bbox_cov_loss = bbox_cov_loss
    self.bbox_cov_type = bbox_cov_type

    # The prediction layer for num_classes foreground classes and one background class
    # (hence + 1)
    self.cls_score = Linear(input_size, num_classes + 1)
    # BUGFIX: was `1.0` (a float). nn.Linear requires integer feature counts,
    # so the class-agnostic branch raised a TypeError when building
    # `Linear(input_size, num_bbox_reg_classes * box_dim)`.
    num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
    box_dim = len(box2box_transform.weights)
    self.bbox_pred = Linear(input_size, num_bbox_reg_classes * box_dim)

    nn.init.normal_(self.cls_score.weight, std=0.01)
    nn.init.normal_(self.bbox_pred.weight, std=0.001)
    for l in [self.cls_score, self.bbox_pred]:
        nn.init.constant_(l.bias, 0)

    # Optional uncertainty heads; tiny init std keeps early
    # variance/covariance predictions near zero.
    if self.compute_cls_var:
        self.cls_var = Linear(input_size, num_classes + 1)
        nn.init.normal_(self.cls_var.weight, std=0.0001)
        nn.init.constant_(self.cls_var.bias, 0)

    if self.compute_bbox_cov:
        self.bbox_cov = Linear(
            input_size,
            num_bbox_reg_classes *
            bbox_cov_dims)
        nn.init.normal_(self.bbox_cov.weight, std=0.0001)
        nn.init.constant_(self.bbox_cov.bias, 0)

    self.box2box_transform = box2box_transform
    self.smooth_l1_beta = smooth_l1_beta
    self.test_score_thresh = test_score_thresh
    self.test_nms_thresh = test_nms_thresh
    self.test_topk_per_image = test_topk_per_image
@classmethod
def from_config(cls,
                cfg,
                input_shape,
                compute_cls_var,
                cls_var_loss,
                cls_var_num_samples,
                compute_bbox_cov,
                bbox_cov_loss,
                bbox_cov_type,
                bbox_cov_dims,
                bbox_cov_num_samples):
    """Assemble the @configurable argument dict for __init__ from a detectron2 cfg."""
    args = {
        "input_shape": input_shape,
        "box2box_transform": Box2BoxTransform(
            weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),
    }
    # Standard Fast R-CNN options read straight from the config.
    args.update({
        "num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES,
        "cls_agnostic_bbox_reg": cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,
        "smooth_l1_beta": cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,
        "test_score_thresh": cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,
        "test_nms_thresh": cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
        "test_topk_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
        "dropout_rate": cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE,
        "annealing_step": cfg.SOLVER.STEPS[1],
    })
    # Probabilistic-modeling options are forwarded verbatim from the caller.
    args.update({
        "compute_cls_var": compute_cls_var,
        "cls_var_loss": cls_var_loss,
        "cls_var_num_samples": cls_var_num_samples,
        "compute_bbox_cov": compute_bbox_cov,
        "bbox_cov_dims": bbox_cov_dims,
        "bbox_cov_loss": bbox_cov_loss,
        "bbox_cov_type": bbox_cov_type,
        "bbox_cov_num_samples": bbox_cov_num_samples,
    })
    return args
def forward(self, x):
    """
    Returns:
        Tensor: Nx(K+1) logits for each box
        Tensor: Nx4 or Nx(Kx4) bounding box regression deltas.
        Tensor: Nx(K+1) logits variance for each box.
        Tensor: Nx4(10) or Nx(Kx4(10)) covariance matrix parameters. 4 if diagonal, 10 if full.
    """
    # Flatten pooled features to (N, C) before the linear layers.
    if x.dim() > 2:
        x = torch.flatten(x, start_dim=1)

    scores = self.cls_score(x)
    proposal_deltas = self.bbox_pred(x)

    # Optional uncertainty heads; None signals "not estimated" downstream.
    score_vars = self.cls_var(x) if self.compute_cls_var else None
    proposal_covs = self.bbox_cov(x) if self.compute_bbox_cov else None

    return scores, proposal_deltas, score_vars, proposal_covs
def losses(self, predictions, proposals, current_step=0):
    """
    Compute classification and box-regression losses, including the optional
    probabilistic variants selected by `cls_var_loss` / `bbox_cov_loss`.

    Args:
        predictions: return values of :meth:`forward()`.
        proposals (list[Instances]): proposals that match the features
            that were used to compute predictions.
        current_step: current optimizer step. Used for losses with an annealing component.

    Returns:
        dict with keys "loss_cls" and "loss_box_reg".
    """
    # NOTE(review): this rebinds the MODULE-LEVEL `device` further down
    # (`device = pred_proposal_deltas.device`) — a side effect on the whole
    # module that the evidential branch above it relies on. Kept as-is.
    global device
    pred_class_logits, pred_proposal_deltas, pred_class_logits_var, pred_proposal_covs = predictions
    if len(proposals):
        box_type = type(proposals[0].proposal_boxes)
        # cat(..., dim=0) concatenates over all images in the batch
        proposals_boxes = box_type.cat(
            [p.proposal_boxes for p in proposals])
        assert (
            not proposals_boxes.tensor.requires_grad), "Proposals should not require gradients!"

        # The following fields should exist only when training.
        # NOTE(review): `gt_boxes`/`gt_classes` stay unbound when proposals
        # lack gt fields; the code below assumes training-mode proposals.
        if proposals[0].has("gt_boxes"):
            gt_boxes = box_type.cat([p.gt_boxes for p in proposals])
            assert proposals[0].has("gt_classes")
            gt_classes = cat([p.gt_classes for p in proposals], dim=0)
    else:
        proposals_boxes = Boxes(
            torch.zeros(
                0, 4, device=pred_proposal_deltas.device))

    no_instances = len(proposals) == 0  # no instances found

    # Compute Classification Loss
    if no_instances:
        # TODO 0.0 * pred.sum() is enough since PT1.6
        # Zero-valued loss that still touches the logits so autograd keeps
        # the graph intact when the batch has no proposals.
        loss_cls = 0.0 * F.cross_entropy(
            pred_class_logits,
            torch.zeros(
                0,
                dtype=torch.long,
                device=pred_class_logits.device),
            reduction="sum",)
    else:
        if self.compute_cls_var:
            # Compute classification variance according to:
            # "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
            # NOTE(review): if cls_var_loss is neither branch below,
            # loss_cls stays unbound and the return raises NameError.
            if self.cls_var_loss == 'loss_attenuation':
                num_samples = self.cls_var_num_samples

                # Compute standard deviation
                pred_class_logits_var = torch.sqrt(
                    torch.exp(pred_class_logits_var))

                # Produce normal samples using logits as the mean and the standard deviation computed above
                # Scales with GPU memory. 12 GB ---> 3 Samples per anchor for
                # COCO dataset.
                univariate_normal_dists = distributions.normal.Normal(
                    pred_class_logits, scale=pred_class_logits_var)

                pred_class_stochastic_logits = univariate_normal_dists.rsample(
                    (num_samples,))
                pred_class_stochastic_logits = pred_class_stochastic_logits.view(
                    (pred_class_stochastic_logits.shape[1] * num_samples, pred_class_stochastic_logits.shape[2], -1))
                pred_class_logits = pred_class_stochastic_logits.squeeze(
                    2)

                # Produce copies of the target classes to match the number of
                # stochastic samples.
                gt_classes_target = torch.unsqueeze(gt_classes, 0)
                gt_classes_target = torch.repeat_interleave(
                    gt_classes_target, num_samples, dim=0).view(
                    (gt_classes_target.shape[1] * num_samples, -1))
                gt_classes_target = gt_classes_target.squeeze(1)

                loss_cls = F.cross_entropy(
                    pred_class_logits, gt_classes_target, reduction="mean")
            elif self.cls_var_loss == 'evidential':
                # ToDo: Currently does not provide any reasonable mAP Results
                # (15% mAP)

                # Assume dirichlet parameters are output.
                alphas = get_dir_alphas(pred_class_logits)

                # Get sum of all alphas
                dirichlet_s = alphas.sum(1).unsqueeze(1)

                # Generate one hot vectors for ground truth
                one_hot_vectors = torch.nn.functional.one_hot(
                    gt_classes, alphas.shape[1])

                # Compute loss. This loss attempts to put all evidence on the
                # correct location.
                per_instance_loss = (
                    one_hot_vectors * (torch.digamma(dirichlet_s) - torch.digamma(alphas)))

                # Compute KL divergence regularizer loss
                estimated_dirichlet = torch.distributions.dirichlet.Dirichlet(
                    (alphas - 1.0) * (1.0 - one_hot_vectors) + 1.0)
                uniform_dirichlet = torch.distributions.dirichlet.Dirichlet(
                    torch.ones_like(one_hot_vectors).type(torch.FloatTensor).to(device))
                kl_regularization_loss = torch.distributions.kl.kl_divergence(
                    estimated_dirichlet, uniform_dirichlet)

                # Compute final loss
                annealing_multiplier = torch.min(
                    torch.as_tensor(
                        current_step /
                        self.annealing_step).to(device),
                    torch.as_tensor(1.0).to(device))

                per_proposal_loss = per_instance_loss.sum(
                    1) + annealing_multiplier * kl_regularization_loss

                # Compute evidence auxiliary loss
                evidence_maximization_loss = smooth_l1_loss(
                    dirichlet_s,
                    100.0 *
                    torch.ones_like(dirichlet_s).to(device),
                    beta=self.smooth_l1_beta,
                    reduction='mean')

                evidence_maximization_loss *= annealing_multiplier

                # Compute final loss
                # Balance foreground vs background by averaging each group
                # separately before combining.
                foreground_loss = per_proposal_loss[(gt_classes >= 0) & (
                    gt_classes < pred_class_logits.shape[1] - 1)]
                background_loss = per_proposal_loss[gt_classes ==
                                                    pred_class_logits.shape[1] - 1]

                loss_cls = (torch.mean(foreground_loss) + torch.mean(background_loss)
                            ) / 2 + 0.01 * evidence_maximization_loss
        else:
            # Standard deterministic cross-entropy classification loss.
            loss_cls = F.cross_entropy(
                pred_class_logits, gt_classes, reduction="mean")

    # Compute regression loss:
    if no_instances:
        # TODO 0.0 * pred.sum() is enough since PT1.6
        loss_box_reg = 0.0 * smooth_l1_loss(
            pred_proposal_deltas,
            torch.zeros_like(pred_proposal_deltas),
            0.0,
            reduction="sum",
        )
    else:
        # Regression targets: deltas from proposal boxes to their gt boxes.
        gt_proposal_deltas = self.box2box_transform.get_deltas(
            proposals_boxes.tensor, gt_boxes.tensor
        )
        box_dim = gt_proposal_deltas.size(1)  # 4 or 5
        cls_agnostic_bbox_reg = pred_proposal_deltas.size(1) == box_dim
        # NOTE(review): rebinds the module-level `device` (see `global` above).
        device = pred_proposal_deltas.device

        bg_class_ind = pred_class_logits.shape[1] - 1

        # Box delta loss is only computed between the prediction for the gt class k
        # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions
        # for non-gt classes and background.
        # Empty fg_inds produces a valid loss of zero as long as the size_average
        # arg to smooth_l1_loss is False (otherwise it uses torch.mean internally
        # and would produce a nan loss).
        fg_inds = torch.nonzero(
            (gt_classes >= 0) & (gt_classes < bg_class_ind), as_tuple=True
        )[0]
        if cls_agnostic_bbox_reg:
            # pred_proposal_deltas only corresponds to foreground class for
            # agnostic
            gt_class_cols = torch.arange(box_dim, device=device)
        else:
            fg_gt_classes = gt_classes[fg_inds]
            # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],
            # where b is the dimension of box representation (4 or 5)
            # Note that compared to Detectron1,
            # we do not perform bounding box regression for background
            # classes.
            gt_class_cols = box_dim * \
                fg_gt_classes[:, None] + torch.arange(box_dim, device=device)
            gt_covar_class_cols = self.bbox_cov_dims * \
                fg_gt_classes[:, None] + torch.arange(self.bbox_cov_dims, device=device)

        # Normalize by total proposal count, not just foreground count.
        loss_reg_normalizer = gt_classes.numel()

        pred_proposal_deltas = pred_proposal_deltas[fg_inds[:,
                                                            None], gt_class_cols]
        gt_proposals_delta = gt_proposal_deltas[fg_inds]

        if self.compute_bbox_cov:
            pred_proposal_covs = pred_proposal_covs[fg_inds[:,
                                                            None], gt_covar_class_cols]
            pred_proposal_covs = clamp_log_variance(pred_proposal_covs)

            if self.bbox_cov_loss == 'negative_log_likelihood':
                if self.bbox_cov_type == 'diagonal':
                    # Ger foreground proposals.
                    _proposals_boxes = proposals_boxes.tensor[fg_inds]

                    # Compute regression negative log likelihood loss according to:
                    # "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
                    loss_box_reg = 0.5 * torch.exp(-pred_proposal_covs) * smooth_l1_loss(
                        pred_proposal_deltas, gt_proposals_delta, beta=self.smooth_l1_beta)

                    # Regularizer: penalize large log-variance so NLL cannot
                    # be minimized by inflating uncertainty alone.
                    loss_covariance_regularize = 0.5 * pred_proposal_covs
                    loss_box_reg += loss_covariance_regularize

                    loss_box_reg = torch.sum(
                        loss_box_reg) / loss_reg_normalizer
                else:
                    # Multivariate Gaussian Negative Log Likelihood loss using pytorch
                    # distributions.multivariate_normal.log_prob()
                    forecaster_cholesky = covariance_output_to_cholesky(
                        pred_proposal_covs)
                    multivariate_normal_dists = distributions.multivariate_normal.MultivariateNormal(
                        pred_proposal_deltas, scale_tril=forecaster_cholesky)
                    loss_box_reg = - \
                        multivariate_normal_dists.log_prob(gt_proposals_delta)
                    loss_box_reg = torch.sum(
                        loss_box_reg) / loss_reg_normalizer
            elif self.bbox_cov_loss == 'second_moment_matching':
                # Compute regression covariance using second moment
                # matching.
                loss_box_reg = smooth_l1_loss(pred_proposal_deltas,
                                              gt_proposals_delta,
                                              self.smooth_l1_beta)
                errors = (pred_proposal_deltas - gt_proposals_delta)
                if self.bbox_cov_type == 'diagonal':
                    # Handel diagonal case
                    second_moment_matching_term = smooth_l1_loss(
                        torch.exp(pred_proposal_covs), errors ** 2, beta=self.smooth_l1_beta)
                    loss_box_reg += second_moment_matching_term
                    loss_box_reg = torch.sum(
                        loss_box_reg) / loss_reg_normalizer
                else:
                    # Handel full covariance case
                    errors = torch.unsqueeze(errors, 2)
                    gt_error_covar = torch.matmul(
                        errors, torch.transpose(errors, 2, 1))

                    # This is the cholesky decomposition of the covariance matrix.
                    # We reconstruct it from 10 estimated parameters as a
                    # lower triangular matrix.
                    forecaster_cholesky = covariance_output_to_cholesky(
                        pred_proposal_covs)

                    predicted_covar = torch.matmul(
                        forecaster_cholesky, torch.transpose(
                            forecaster_cholesky, 2, 1))

                    second_moment_matching_term = smooth_l1_loss(
                        predicted_covar, gt_error_covar, beta=self.smooth_l1_beta, reduction='sum')
                    loss_box_reg = (
                        torch.sum(loss_box_reg) + second_moment_matching_term) / loss_reg_normalizer
            elif self.bbox_cov_loss == 'energy_loss':
                forecaster_cholesky = covariance_output_to_cholesky(
                    pred_proposal_covs)

                # Define per-anchor Distributions
                multivariate_normal_dists = distributions.multivariate_normal.MultivariateNormal(
                    pred_proposal_deltas, scale_tril=forecaster_cholesky)
                # Define Monte-Carlo Samples
                # N+1 samples so two overlapping N-sample slices give the
                # pairwise term below.
                distributions_samples = multivariate_normal_dists.rsample(
                    (self.bbox_cov_num_samples + 1,))

                distributions_samples_1 = distributions_samples[0:self.bbox_cov_num_samples, :, :]
                distributions_samples_2 = distributions_samples[1:
                                                                self.bbox_cov_num_samples + 1, :, :]

                # Compute energy score
                loss_covariance_regularize = - smooth_l1_loss(
                    distributions_samples_1,
                    distributions_samples_2,
                    beta=self.smooth_l1_beta,
                    reduction="sum") / self.bbox_cov_num_samples  # Second term

                gt_proposals_delta_samples = torch.repeat_interleave(
                    gt_proposals_delta.unsqueeze(0), self.bbox_cov_num_samples, dim=0)

                loss_first_moment_match = 2.0 * smooth_l1_loss(
                    distributions_samples_1,
                    gt_proposals_delta_samples,
                    beta=self.smooth_l1_beta,
                    reduction="sum") / self.bbox_cov_num_samples  # First term

                # Final Loss
                loss_box_reg = (
                    loss_first_moment_match + loss_covariance_regularize) / loss_reg_normalizer
            else:
                raise ValueError(
                    'Invalid regression loss name {}.'.format(
                        self.bbox_cov_loss))

            # Perform loss annealing. Not really essential in Generalized-RCNN case, but good practice for more
            # elaborate regression variance losses.
            standard_regression_loss = smooth_l1_loss(pred_proposal_deltas,
                                                      gt_proposals_delta,
                                                      self.smooth_l1_beta,
                                                      reduction="sum",)
            standard_regression_loss = standard_regression_loss / loss_reg_normalizer

            probabilistic_loss_weight = get_probabilistic_loss_weight(
                current_step, self.annealing_step)

            loss_box_reg = (1.0 - probabilistic_loss_weight) * \
                standard_regression_loss + probabilistic_loss_weight * loss_box_reg
        else:
            # Plain smooth-L1 regression when no covariance is estimated.
            loss_box_reg = smooth_l1_loss(pred_proposal_deltas,
                                          gt_proposals_delta,
                                          self.smooth_l1_beta,
                                          reduction="sum",)
            loss_box_reg = loss_box_reg / loss_reg_normalizer

    return {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg}
def inference(self, predictions, proposals):
    """
    Run standard post-processing (score thresholding, NMS, top-k filtering)
    on the raw box-head outputs.

    Returns:
        list[Instances]: same as `fast_rcnn_inference`.
        list[Tensor]: same as `fast_rcnn_inference`.
    """
    image_shapes = [proposal.image_size for proposal in proposals]
    predicted_boxes = self.predict_boxes(predictions, proposals)
    predicted_scores = self.predict_probs(predictions, proposals)
    return fast_rcnn_inference(
        predicted_boxes,
        predicted_scores,
        image_shapes,
        self.test_score_thresh,
        self.test_nms_thresh,
        self.test_topk_per_image,
    )
def predict_boxes_for_gt_classes(self, predictions, proposals):
    """
    Decode predicted box deltas and, for a class-specific box head, keep only
    the box predicted for each proposal's ground-truth class.

    Returns:
        list[Tensor]: A list of Tensors of predicted boxes for GT classes in case of
            class-specific box head. Element i of the list has shape (Ri, B), where Ri is
            the number of predicted objects for image i and B is the box dimension (4 or 5)
    """
    if not len(proposals):
        return []
    scores, proposal_deltas = predictions
    boxes_per_image = [p.proposal_boxes for p in proposals]
    proposal_boxes = boxes_per_image[0].cat(boxes_per_image).tensor
    N, B = proposal_boxes.shape
    predict_boxes = self.box2box_transform.apply_deltas(
        proposal_deltas, proposal_boxes
    )  # Nx(KxB)

    K = predict_boxes.shape[1] // B
    if K > 1:
        gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)
        # Ignored / background proposals carry gt_classes values that are not
        # valid class indices; clamp so they can be used for gathering.
        gt_classes = gt_classes.clamp_(0, K - 1)
        row_idx = torch.arange(N, dtype=torch.long, device=predict_boxes.device)
        predict_boxes = predict_boxes.view(N, K, B)[row_idx, gt_classes]
    return predict_boxes.split([len(p) for p in proposals])
def predict_boxes(self, predictions, proposals):
    """
    Decode the predicted box deltas into absolute boxes, one tensor per image.

    Returns:
        list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes
            for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
            the number of predicted objects for image i and B is the box dimension (4 or 5)
    """
    if not len(proposals):
        return []
    _, proposal_deltas, _, _ = predictions
    boxes_per_image = [p.proposal_boxes for p in proposals]
    concat_boxes = boxes_per_image[0].cat(boxes_per_image).tensor
    decoded_boxes = self.box2box_transform.apply_deltas(
        proposal_deltas, concat_boxes
    )  # Nx(KxB)
    return decoded_boxes.split([len(p) for p in proposals])
def predict_probs(self, predictions, proposals):
    """
    Compute per-proposal class probabilities, one tensor per image.

    Returns:
        list[Tensor]: A list of Tensors of predicted class probabilities for each image.
            Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
            for image i.
    """
    scores, _, _, _ = predictions
    counts = [len(p) for p in proposals]
    if self.cls_var_loss == "evidential":
        # Evidential head: logits parameterize Dirichlet concentrations;
        # the expected probabilities are the normalized alphas.
        alphas = get_dir_alphas(scores)
        probs = alphas / alphas.sum(1).unsqueeze(1)
    else:
        probs = F.softmax(scores, dim=-1)
    return probs.split(counts, dim=0)
# Todo: new detectron interface required copying code. Check for better
# way to inherit from FastRCNNConvFCHead.
@ROI_BOX_HEAD_REGISTRY.register()
class DropoutFastRCNNConvFCHead(nn.Module):
    """
    A head with several 3x3 conv layers (each followed by norm & relu) and then
    several fc layers (each followed by relu) and dropout.

    Mirrors detectron2's FastRCNNConvFCHead, adding an nn.Dropout after every
    fully connected layer (presumably to enable MC-Dropout style sampling at
    inference — confirm against the callers that read `use_dropout`).
    """

    @configurable
    def __init__(
            self,
            input_shape: ShapeSpec,
            *,
            conv_dims: List[int],
            fc_dims: List[int],
            conv_norm="",
            dropout_rate
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape (ShapeSpec): shape of the input feature.
            conv_dims (list[int]): the output dimensions of the conv layers
            fc_dims (list[int]): the output dimensions of the fc layers
            conv_norm (str or callable): normalization for the conv layers.
                See :func:`detectron2.layers.get_norm` for supported types.
            dropout_rate (float): p for dropout layer
        """
        super().__init__()
        assert len(conv_dims) + len(fc_dims) > 0
        self.dropout_rate = dropout_rate
        # Dropout modules are created unconditionally below; this flag records
        # whether they have any effect (rate != 0).
        self.use_dropout = self.dropout_rate != 0.0
        self._output_size = (
            input_shape.channels,
            input_shape.height,
            input_shape.width)
        self.conv_norm_relus = []
        for k, conv_dim in enumerate(conv_dims):
            conv = Conv2d(
                self._output_size[0],
                conv_dim,
                kernel_size=3,
                padding=1,
                # Bias is redundant when a normalization layer follows.
                bias=not conv_norm,
                norm=get_norm(conv_norm, conv_dim),
                activation=F.relu,
            )
            # Register with a stable name so checkpoint keys stay compatible
            # with detectron2's FastRCNNConvFCHead.
            self.add_module("conv{}".format(k + 1), conv)
            self.conv_norm_relus.append(conv)
            self._output_size = (
                conv_dim,
                self._output_size[1],
                self._output_size[2])
        self.fcs = []
        self.fcs_dropout = []
        for k, fc_dim in enumerate(fc_dims):
            fc = Linear(np.prod(self._output_size), fc_dim)
            fc_dropout = nn.Dropout(p=self.dropout_rate)
            self.add_module("fc{}".format(k + 1), fc)
            self.add_module("fc_dropout{}".format(k + 1), fc_dropout)
            self.fcs.append(fc)
            self.fcs_dropout.append(fc_dropout)
            self._output_size = fc_dim
        # Caffe2-style initializations, matching detectron2 defaults.
        for layer in self.conv_norm_relus:
            weight_init.c2_msra_fill(layer)
        for layer in self.fcs:
            weight_init.c2_xavier_fill(layer)

    @classmethod
    def from_config(cls, cfg, input_shape):
        """Build constructor kwargs from a detectron2 config node."""
        num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV
        conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM
        num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC
        fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM
        return {
            "input_shape": input_shape,
            "conv_dims": [conv_dim] * num_conv,
            "fc_dims": [fc_dim] * num_fc,
            "conv_norm": cfg.MODEL.ROI_BOX_HEAD.NORM,
            "dropout_rate": cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
        }

    def forward(self, x):
        for layer in self.conv_norm_relus:
            x = layer(x)
        if len(self.fcs):
            if x.dim() > 2:
                x = torch.flatten(x, start_dim=1)
            # NOTE: dropout is applied to the linear output *before* the ReLU.
            for layer, dropout in zip(self.fcs, self.fcs_dropout):
                x = F.relu(dropout(layer(x)))
        return x

    @property
    def output_shape(self):
        """
        Returns:
            ShapeSpec: the output feature shape
        """
        o = self._output_size
        # After any fc layer _output_size is a plain int; otherwise it is the
        # (channels, height, width) tuple left by the conv stack.
        if isinstance(o, int):
            return ShapeSpec(channels=o)
        else:
            return ShapeSpec(channels=o[0], height=o[1], width=o[2])
| 43,022 | 42.326284 | 130 | py |
probdet | probdet-master/src/probabilistic_modeling/probabilistic_detr.py | import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, distributions
# Detectron imports
from detectron2.modeling import META_ARCH_REGISTRY, detector_postprocess
# Detr imports
from models.detr import SetCriterion, MLP, DETR
from util import box_ops
from util.misc import (NestedTensor, nested_tensor_from_tensor_list, accuracy)
# Project imports
from probabilistic_modeling.modeling_utils import covariance_output_to_cholesky, clamp_log_variance
@META_ARCH_REGISTRY.register()
class ProbabilisticDetr(META_ARCH_REGISTRY.get('Detr')):
    """
    Implement Probabilistic Detr: extends the registered Detr meta-architecture
    with optional classification-variance and box-covariance estimation heads
    and the matching probabilistic loss criterion.
    """

    def __init__(self, cfg):
        super().__init__(cfg)

        # Parse configs
        self.cls_var_loss = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME
        # 'none' disables the classification-variance head entirely.
        self.compute_cls_var = self.cls_var_loss != 'none'
        self.cls_var_num_samples = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES

        self.bbox_cov_loss = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME
        self.compute_bbox_cov = self.bbox_cov_loss != 'none'
        self.bbox_cov_num_samples = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES

        self.bbox_cov_type = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE
        if self.bbox_cov_type == 'diagonal':
            # Diagonal covariance matrix has N elements
            self.bbox_cov_dims = 4
        else:
            # Number of elements required to describe an NxN covariance matrix is
            # computed as: (N * (N + 1)) / 2
            self.bbox_cov_dims = 10

        self.dropout_rate = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
        self.use_dropout = self.dropout_rate != 0.0

        # Step counter used for loss annealing elsewhere in the project.
        self.current_step = 0
        self.annealing_step = cfg.SOLVER.STEPS[0]

        # Create probabilistic output layers: replace the parent's DETR model
        # with one that exposes variance/covariance embeddings.
        self.detr = CustomDetr(self.detr.backbone,
                               self.detr.transformer,
                               num_classes=self.num_classes,
                               num_queries=self.detr.num_queries,
                               aux_loss=self.detr.aux_loss,
                               compute_cls_var=self.compute_cls_var,
                               compute_bbox_cov=self.compute_bbox_cov,
                               bbox_cov_dims=self.bbox_cov_dims)
        self.detr.to(self.device)

        # Select the loss names understood by ProbabilisticSetCriterion.
        losses = ['cardinality']
        if self.compute_cls_var:
            losses.append("labels_" + self.cls_var_loss)
        else:
            losses.append("labels")

        if self.compute_bbox_cov:
            losses.append("boxes_" + self.bbox_cov_loss)
        else:
            losses.append("boxes")

        # Replace setcriterion with our own implementation
        self.criterion = ProbabilisticSetCriterion(
            self.num_classes,
            matcher=self.criterion.matcher,
            weight_dict=self.criterion.weight_dict,
            eos_coef=self.criterion.eos_coef,
            losses=losses)
        self.criterion.set_bbox_cov_num_samples(self.bbox_cov_num_samples)
        self.criterion.set_cls_var_num_samples(self.cls_var_num_samples)
        self.criterion.to(self.device)

        self.input_format = "RGB"

    def forward(
            self,
            batched_inputs,
            return_raw_results=False,
            is_mc_dropout=False):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:
                * image: Tensor, image in (C, H, W) format.
                * instances: Instances
                Other information that's included in the original dicts, such as:
                * "height", "width" (int): the output resolution of the model, used in inference.
                See :meth:`postprocess` for details.
            return_raw_results (bool): if True return unprocessed results for probabilistic inference.
            is_mc_dropout (bool): if True, return unprocessed results even if self.is_training flag is on.
        Returns:
            dict[str: Tensor]:
                mapping from a named loss to a tensor storing the loss. Used during training only.
        """
        images = self.preprocess_image(batched_inputs)
        output = self.detr(images)

        if self.training and not is_mc_dropout:
            # Training path: match predictions to GT and compute weighted losses.
            gt_instances = [
                x["instances"].to(
                    self.device) for x in batched_inputs]

            targets = self.prepare_targets(gt_instances)
            loss_dict = self.criterion(output, targets)
            weight_dict = self.criterion.weight_dict
            for k in loss_dict.keys():
                if k in weight_dict:
                    loss_dict[k] *= weight_dict[k]
            self.current_step += 1
            return loss_dict
        elif return_raw_results:
            # Caller wants the raw head outputs (e.g. for probabilistic inference).
            return output
        else:
            # Standard inference: post-process and rescale to original image size.
            box_cls = output["pred_logits"]
            box_pred = output["pred_boxes"]
            mask_pred = output["pred_masks"] if self.mask_on else None
            results = self.inference(box_cls, box_pred, mask_pred, images.image_sizes)
            processed_results = []
            for results_per_image, input_per_image, image_size in zip(
                    results, batched_inputs, images.image_sizes):
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])
                r = detector_postprocess(results_per_image, height, width)
                processed_results.append({"instances": r})
            return processed_results
class CustomDetr(DETR):
    """ This is the DETR module that performs PROBABILISTIC object detection.

    Adds two optional heads on top of the transformer features:
    a classification log-variance embedding and a box-covariance MLP.
    """

    def __init__(self, backbone, transformer, num_classes,
                 num_queries,
                 aux_loss=False,
                 compute_cls_var=False,
                 compute_bbox_cov=False,
                 bbox_cov_dims=4):
        super().__init__(backbone, transformer, num_classes, num_queries, aux_loss)
        hidden_dim = self.transformer.d_model

        self.compute_cls_var = compute_cls_var
        if self.compute_cls_var:
            # Predicts per-class log-variance; initialized near-zero weights
            # with a bias of 2*log(0.01) so initial std is ~0.01.
            self.class_var_embed = nn.Linear(hidden_dim, num_classes + 1)
            nn.init.normal_(self.class_var_embed.weight, std=0.0001)
            nn.init.constant_(self.class_var_embed.bias, 2 * np.log(0.01))

        self.compute_bbox_cov = compute_bbox_cov
        if self.compute_bbox_cov:
            # 3-layer MLP producing bbox_cov_dims covariance parameters per query.
            self.bbox_covar_embed = MLP(
                hidden_dim, hidden_dim, bbox_cov_dims, 3)

    def forward(self, samples: NestedTensor):
        if isinstance(samples, (list, torch.Tensor)):
            samples = nested_tensor_from_tensor_list(samples)
        features, pos = self.backbone(samples)

        src, mask = features[-1].decompose()
        assert mask is not None
        hs = self.transformer(self.input_proj(src), mask,
                              self.query_embed.weight, pos[-1])[0]

        outputs_class = self.class_embed(hs)
        outputs_coord = self.bbox_embed(hs).sigmoid()

        # Only change to detr code happens here. We need to expose the features from
        # the transformer to compute variance parameters.
        out = {'pred_logits': outputs_class[-1],
               'pred_boxes': outputs_coord[-1]}

        if self.compute_cls_var:
            cls_var_out = self.class_var_embed(hs[-1])
            out.update({'pred_logits_var': cls_var_out})

        if self.compute_bbox_cov:
            bbox_cov_out = self.bbox_covar_embed(hs[-1])
            out.update({'pred_boxes_cov': bbox_cov_out})

        if self.aux_loss:
            out['aux_outputs'] = self._set_aux_loss(
                outputs_class, outputs_coord)
        return out
class ProbabilisticSetCriterion(SetCriterion):
    """
    This is custom set criterion to allow probabilistic estimates.

    Extends DETR's SetCriterion with loss-attenuation classification and three
    probabilistic box regression losses (NLL, energy score, second-moment
    matching), selected by name via `losses`.
    """

    def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
        super().__init__(num_classes, matcher, weight_dict, eos_coef, losses)
        self.probabilistic_loss_weight = 0.0
        # Defaults; the model overrides these through the setters below.
        self.bbox_cov_num_samples = 1000
        self.cls_var_num_samples = 1000

    def set_bbox_cov_num_samples(self, bbox_cov_num_samples):
        # Number of Monte-Carlo samples for the energy-score box loss.
        self.bbox_cov_num_samples = bbox_cov_num_samples

    def set_cls_var_num_samples(self, cls_var_num_samples):
        # Number of Monte-Carlo samples for the loss-attenuation cls loss.
        self.cls_var_num_samples = cls_var_num_samples

    def loss_labels_att(self, outputs, targets, indices, num_boxes, log=True):
        """Classification loss (NLL + Loss attenuation)
        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
        outputs must contain the mean pred_logits and the variance pred_logits_var
        """
        if 'pred_logits_var' not in outputs:
            # No predicted variance available; fall back to standard CE loss.
            return self.loss_labels(outputs, targets, indices, num_boxes, log)

        assert 'pred_logits' in outputs
        src_logits = outputs['pred_logits']
        src_logits_var = outputs['pred_logits_var']
        # The head predicts log-variance; convert to a standard deviation.
        src_logits_var = torch.sqrt(torch.exp(src_logits_var))

        univariate_normal_dists = distributions.normal.Normal(
            src_logits, scale=src_logits_var)

        # rsample keeps sampling differentiable (reparameterization trick).
        pred_class_stochastic_logits = univariate_normal_dists.rsample(
            (self.cls_var_num_samples,))
        # Fold the sample dimension into the query dimension so one
        # cross_entropy call scores all Monte-Carlo samples at once.
        pred_class_stochastic_logits = pred_class_stochastic_logits.view(
            pred_class_stochastic_logits.shape[1],
            pred_class_stochastic_logits.shape[2] * pred_class_stochastic_logits.shape[0],
            -1)

        idx = self._get_src_permutation_idx(indices)
        target_classes_o = torch.cat([t["labels"][J]
                                      for t, (_, J) in zip(targets, indices)])
        # Unmatched queries are assigned the background class (num_classes).
        target_classes = torch.full(src_logits.shape[:2], self.num_classes,
                                    dtype=torch.int64, device=src_logits.device)
        target_classes[idx] = target_classes_o

        # Replicate targets to align with the folded stochastic logits.
        target_classes = torch.unsqueeze(target_classes, dim=0)
        target_classes = torch.repeat_interleave(
            target_classes, self.cls_var_num_samples, dim=0)
        target_classes = target_classes.view(
            target_classes.shape[1],
            target_classes.shape[2] *
            target_classes.shape[0])

        loss_ce = F.cross_entropy(
            pred_class_stochastic_logits.transpose(
                1, 2), target_classes, self.empty_weight)

        losses = {'loss_ce': loss_ce}

        if log:
            # TODO this should probably be a separate loss, not hacked in this
            # one here
            losses['class_error'] = 100 - \
                accuracy(src_logits[idx], target_classes_o)[0]
        return losses

    def loss_boxes_var_nll(
            self,
            outputs,
            targets,
            indices,
            num_boxes):
        """Compute the losses related to the bounding boxes, the nll probabilistic regression loss and the GIoU loss
           targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
           The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        if 'pred_boxes_cov' not in outputs:
            # No predicted covariance; fall back to the standard L1+GIoU loss.
            return self.loss_boxes(outputs, targets, indices, num_boxes)

        assert 'pred_boxes' in outputs
        idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs['pred_boxes'][idx]
        # Clamp predicted log-variances for numerical stability.
        src_vars = clamp_log_variance(outputs['pred_boxes_cov'][idx])
        target_boxes = torch.cat([t['boxes'][i]
                                  for t, (_, i) in zip(targets, indices)], dim=0)

        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')

        if src_vars.shape[1] == 4:
            # Diagonal covariance: per-coordinate attenuated NLL, with an L1
            # residual in place of the usual squared error.
            loss_nll = 0.5 * torch.exp(-src_vars) * loss_bbox + 0.5 * src_vars
        else:
            # Full covariance: NLL under a multivariate Gaussian built from
            # the predicted Cholesky factor.
            forecaster_cholesky = covariance_output_to_cholesky(
                src_vars)
            if forecaster_cholesky.shape[0] != 0:
                multivariate_normal_dists = distributions.multivariate_normal.MultivariateNormal(
                    src_boxes, scale_tril=forecaster_cholesky)
                loss_nll = - \
                    multivariate_normal_dists.log_prob(target_boxes)
            else:
                # No matched boxes in the batch; keep an (empty) tensor of the
                # right shape so the sum below is well defined.
                loss_nll = loss_bbox

        loss_nll_final = loss_nll.sum() / num_boxes
        # Collect all losses
        losses = dict()
        losses['loss_bbox'] = loss_nll_final

        # Add iou loss
        losses = update_with_iou_loss(
            losses, src_boxes, target_boxes, num_boxes)

        return losses

    def loss_boxes_energy(
            self,
            outputs,
            targets,
            indices,
            num_boxes):
        """Compute the losses related to the bounding boxes, the energy distance loss and the GIoU loss
           targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
           The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        if 'pred_boxes_cov' not in outputs:
            return self.loss_boxes(outputs, targets, indices, num_boxes)

        assert 'pred_boxes' in outputs
        idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs['pred_boxes'][idx]
        target_boxes = torch.cat([t['boxes'][i]
                                  for t, (_, i) in zip(targets, indices)], dim=0)

        # Begin probabilistic loss computation
        src_vars = clamp_log_variance(outputs['pred_boxes_cov'][idx])
        forecaster_cholesky = covariance_output_to_cholesky(
            src_vars)

        multivariate_normal_dists = distributions.multivariate_normal.MultivariateNormal(
            src_boxes, scale_tril=forecaster_cholesky)

        # Define Monte-Carlo Samples: draw N+1 and use two overlapping windows
        # of N samples each for the pairwise term.
        distributions_samples = multivariate_normal_dists.rsample(
            (self.bbox_cov_num_samples + 1,))
        distributions_samples_1 = distributions_samples[0:self.bbox_cov_num_samples, :, :]
        distributions_samples_2 = distributions_samples[1:
                                                        self.bbox_cov_num_samples + 1, :, :]

        # Compute energy score. Smooth L1 loss is preferred in this case to
        # maintain the proper scoring properties.
        loss_covariance_regularize = - F.l1_loss(
            distributions_samples_1,
            distributions_samples_2,
            reduction="sum") / self.bbox_cov_num_samples  # Second term

        gt_proposals_delta_samples = torch.repeat_interleave(
            target_boxes.unsqueeze(0), self.bbox_cov_num_samples, dim=0)

        loss_first_moment_match = 2 * F.l1_loss(
            distributions_samples_1,
            gt_proposals_delta_samples,
            reduction="sum") / self.bbox_cov_num_samples  # First term

        loss_energy = loss_first_moment_match + loss_covariance_regularize
        # Normalize and add losses
        loss_energy_final = loss_energy.sum() / num_boxes
        # Collect all losses
        losses = dict()
        losses['loss_bbox'] = loss_energy_final

        # Add iou loss
        losses = update_with_iou_loss(
            losses, src_boxes, target_boxes, num_boxes)

        return losses

    def loss_boxes_smm(
            self,
            outputs,
            targets,
            indices,
            num_boxes):
        """Compute the losses related to the bounding boxes, the L1 regression loss, SMM variance and Covariance loss and the GIoU loss
           targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
           The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        if 'pred_boxes_cov' not in outputs:
            return self.loss_boxes(outputs, targets, indices, num_boxes)

        assert 'pred_boxes' in outputs
        idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs['pred_boxes'][idx]
        target_boxes = torch.cat([t['boxes'][i]
                                  for t, (_, i) in zip(targets, indices)], dim=0)
        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')

        # Begin probabilistic loss computation
        src_vars = clamp_log_variance(outputs['pred_boxes_cov'][idx])
        errors = (src_boxes - target_boxes)

        if src_vars.shape[1] == 4:
            # Diagonal case: match predicted variance to squared errors.
            second_moment_matching_term = F.l1_loss(
                torch.exp(src_vars), errors ** 2, reduction='none')
        else:
            # Full-covariance case: match the predicted covariance matrix to
            # the empirical outer product of the error vectors.
            errors = torch.unsqueeze(errors, 2)
            gt_error_covar = torch.matmul(
                errors, torch.transpose(errors, 2, 1))

            # This is the cholesky decomposition of the covariance matrix.
            # We reconstruct it from 10 estimated parameters as a
            # lower triangular matrix.
            forecaster_cholesky = covariance_output_to_cholesky(
                src_vars)

            predicted_covar = torch.matmul(
                forecaster_cholesky, torch.transpose(
                    forecaster_cholesky, 2, 1))

            second_moment_matching_term = F.l1_loss(
                predicted_covar, gt_error_covar, reduction='none')

        loss_smm = second_moment_matching_term.sum() / num_boxes
        # Normalize and add losses
        loss_bbox_final = loss_bbox.sum() / num_boxes
        loss_smm_final = loss_smm + loss_bbox_final

        # Collect all losses
        losses = dict()
        losses['loss_bbox'] = loss_smm_final

        # Add iou loss
        losses = update_with_iou_loss(
            losses, src_boxes, target_boxes, num_boxes)

        return losses

    def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
        """Dispatch a loss computation by name; extends the parent's map with
        the probabilistic variants defined above."""
        loss_map = {
            'labels': self.loss_labels,
            'labels_loss_attenuation': self.loss_labels_att,
            'cardinality': self.loss_cardinality,
            'boxes': self.loss_boxes,
            'boxes_negative_log_likelihood': self.loss_boxes_var_nll,
            'boxes_energy_loss': self.loss_boxes_energy,
            'boxes_second_moment_matching': self.loss_boxes_smm,
            'masks': self.loss_masks,
        }
        assert loss in loss_map, f'do you really want to compute {loss} loss?'
        return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def update_with_iou_loss(losses, src_boxes, target_boxes, num_boxes):
    """Add the normalized GIoU loss under key 'loss_giou' and return `losses`.

    Boxes are expected in normalized (cx, cy, w, h) format.
    """
    src_xyxy = box_ops.box_cxcywh_to_xyxy(src_boxes)
    target_xyxy = box_ops.box_cxcywh_to_xyxy(target_boxes)
    # Only the diagonal pairs (prediction i vs. its matched target i) matter.
    giou = torch.diag(box_ops.generalized_box_iou(src_xyxy, target_xyxy))
    losses['loss_giou'] = (1 - giou).sum() / num_boxes
    return losses
| 18,746 | 39.403017 | 135 | py |
probdet | probdet-master/src/offline_evaluation/compute_probabilistic_metrics.py | import numpy as np
import os
import torch
import pickle
from prettytable import PrettyTable
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.engine import launch
# Project imports
from core.evaluation_tools import evaluation_utils
from core.evaluation_tools import scoring_rules
from core.evaluation_tools.evaluation_utils import get_test_thing_dataset_id_to_train_contiguous_id_dict
from core.setup import setup_config, setup_arg_parser
from probabilistic_inference.inference_utils import get_inference_output_dir
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(
        args,
        cfg=None,
        iou_min=None,
        iou_correct=None,
        min_allowed_score=None,
        print_results=True):
    """Compute probabilistic scoring rules for a set of saved detections.

    Matches detections to ground truth, partitions them into true positives,
    duplicates, localization errors, false positives and false negatives, and
    computes per-class classification (NLL, Brier) and regression (NLL/entropy,
    energy score) metrics for each partition. Results are printed as a table
    and written to text/pickle files in the inference output directory.

    Args:
        args: command-line arguments from `setup_arg_parser`.
        cfg: optional pre-built config node; built from `args` when None.
        iou_min (float): IOU below which a detection is a false positive;
            defaults to `args.iou_min`.
        iou_correct (float): IOU at/above which a detection is a true positive;
            defaults to `args.iou_correct`.
        min_allowed_score (float): score threshold below which detections are
            dropped; read from the training set's F-1 results file when None.
        print_results (bool): if True, print and save the summary table.
    """
    # Setup config
    if cfg is None:
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)

    cfg.defrost()
    cfg.ACTUAL_TEST_DATASET = args.test_dataset

    # Setup torch device and num_threads
    torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)

    # Build path to gt instances and inference output
    inference_output_dir = get_inference_output_dir(
        cfg['OUTPUT_DIR'],
        args.test_dataset,
        args.inference_config,
        args.image_corruption_level)

    # Get thresholds to perform evaluation on
    if iou_min is None:
        iou_min = args.iou_min
    if iou_correct is None:
        iou_correct = args.iou_correct
    if min_allowed_score is None:
        # Check if F-1 Score has been previously computed ON THE ORIGINAL
        # DATASET such as COCO even when evaluating on OpenImages.
        try:
            train_set_inference_output_dir = get_inference_output_dir(
                cfg['OUTPUT_DIR'],
                cfg.DATASETS.TEST[0],
                args.inference_config,
                0)
            with open(os.path.join(train_set_inference_output_dir, "mAP_res.txt"), "r") as f:
                min_allowed_score = f.read().strip('][\n').split(', ')[-1]
                min_allowed_score = round(float(min_allowed_score), 4)
        except FileNotFoundError:
            # If not, process all detections. Not recommended as the results might be influenced by very low scoring
            # detections that would normally be removed in robotics/vision
            # applications.
            min_allowed_score = 0.0

    # Get category mapping dictionary:
    train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0]).thing_dataset_id_to_contiguous_id
    test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        args.test_dataset).thing_dataset_id_to_contiguous_id

    cat_mapping_dict = get_test_thing_dataset_id_to_train_contiguous_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id)

    # Get matched results by either generating them or loading from file.
    with torch.no_grad():
        matched_results = evaluation_utils.get_matched_results(
            cfg, inference_output_dir,
            iou_min=iou_min,
            iou_correct=iou_correct,
            min_allowed_score=min_allowed_score)

        # Build preliminary dicts required for computing classification scores.
        for matched_results_key in matched_results.keys():
            if 'gt_cat_idxs' in matched_results[matched_results_key].keys():
                # First we convert the written things indices to contiguous
                # indices.
                gt_converted_cat_idxs = matched_results[matched_results_key]['gt_cat_idxs'].squeeze(
                    1)
                gt_converted_cat_idxs = torch.as_tensor([cat_mapping_dict[class_idx.cpu(
                ).tolist()] for class_idx in gt_converted_cat_idxs]).to(device)
                matched_results[matched_results_key]['gt_converted_cat_idxs'] = gt_converted_cat_idxs.to(
                    device)
                if 'predicted_cls_probs' in matched_results[matched_results_key].keys(
                ):
                    predicted_cls_probs = matched_results[matched_results_key]['predicted_cls_probs']
                    # This is required for evaluation of retinanet based
                    # detections.
                    matched_results[matched_results_key]['predicted_score_of_gt_category'] = torch.gather(
                        predicted_cls_probs, 1, gt_converted_cat_idxs.unsqueeze(1)).squeeze(1)
                matched_results[matched_results_key]['gt_cat_idxs'] = gt_converted_cat_idxs
            else:
                if cfg.MODEL.META_ARCHITECTURE == 'ProbabilisticRetinaNet':
                    # For false positives, the correct category is background. For retinanet, since no explicit
                    # background category is available, this value is computed as 1.0 - score of the predicted
                    # category.
                    predicted_class_probs, predicted_class_idx = matched_results[matched_results_key]['predicted_cls_probs'].max(
                        1)
                    matched_results[matched_results_key]['predicted_score_of_gt_category'] = 1.0 - \
                        predicted_class_probs
                    matched_results[matched_results_key]['predicted_cat_idxs'] = predicted_class_idx
                else:
                    # For RCNN/DETR based networks, a background category is
                    # explicitly available.
                    matched_results[matched_results_key]['predicted_score_of_gt_category'] = matched_results[
                        matched_results_key]['predicted_cls_probs'][:, -1]
                    _, predicted_class_idx = matched_results[matched_results_key]['predicted_cls_probs'][:, :-1].max(
                        1)
                    matched_results[matched_results_key]['predicted_cat_idxs'] = predicted_class_idx

        # Load the different detection partitions
        true_positives = matched_results['true_positives']
        duplicates = matched_results['duplicates']
        localization_errors = matched_results['localization_errors']
        false_negatives = matched_results['false_negatives']
        false_positives = matched_results['false_positives']

        # Get the number of elements in each partition
        num_true_positives = true_positives['predicted_box_means'].shape[0]
        num_duplicates = duplicates['predicted_box_means'].shape[0]
        num_localization_errors = localization_errors['predicted_box_means'].shape[0]
        num_false_negatives = false_negatives['gt_box_means'].shape[0]
        num_false_positives = false_positives['predicted_box_means'].shape[0]

        # Compute per-class scoring rules for every partition.
        per_class_output_list = []
        for class_idx in cat_mapping_dict.values():
            true_positives_valid_idxs = true_positives['gt_converted_cat_idxs'] == class_idx
            localization_errors_valid_idxs = localization_errors['gt_converted_cat_idxs'] == class_idx
            duplicates_valid_idxs = duplicates['gt_converted_cat_idxs'] == class_idx
            false_positives_valid_idxs = false_positives['predicted_cat_idxs'] == class_idx

            if cfg.MODEL.META_ARCHITECTURE == 'ProbabilisticRetinaNet':
                # Compute classification metrics for every partition
                true_positives_cls_analysis = scoring_rules.sigmoid_compute_cls_scores(
                    true_positives, true_positives_valid_idxs)
                localization_errors_cls_analysis = scoring_rules.sigmoid_compute_cls_scores(
                    localization_errors, localization_errors_valid_idxs)
                duplicates_cls_analysis = scoring_rules.sigmoid_compute_cls_scores(
                    duplicates, duplicates_valid_idxs)
                false_positives_cls_analysis = scoring_rules.sigmoid_compute_cls_scores(
                    false_positives, false_positives_valid_idxs)
            else:
                # Compute classification metrics for every partition
                true_positives_cls_analysis = scoring_rules.softmax_compute_cls_scores(
                    true_positives, true_positives_valid_idxs)
                localization_errors_cls_analysis = scoring_rules.softmax_compute_cls_scores(
                    localization_errors, localization_errors_valid_idxs)
                duplicates_cls_analysis = scoring_rules.softmax_compute_cls_scores(
                    duplicates, duplicates_valid_idxs)
                false_positives_cls_analysis = scoring_rules.softmax_compute_cls_scores(
                    false_positives, false_positives_valid_idxs)

            # Compute regression metrics for every partition
            true_positives_reg_analysis = scoring_rules.compute_reg_scores(
                true_positives, true_positives_valid_idxs)
            localization_errors_reg_analysis = scoring_rules.compute_reg_scores(
                localization_errors, localization_errors_valid_idxs)
            duplicates_reg_analysis = scoring_rules.compute_reg_scores(
                duplicates, duplicates_valid_idxs)
            false_positives_reg_analysis = scoring_rules.compute_reg_scores_fn(
                false_positives, false_positives_valid_idxs)

            per_class_output_list.append(
                {'true_positives_cls_analysis': true_positives_cls_analysis,
                 'true_positives_reg_analysis': true_positives_reg_analysis,
                 'localization_errors_cls_analysis': localization_errors_cls_analysis,
                 'localization_errors_reg_analysis': localization_errors_reg_analysis,
                 'duplicates_cls_analysis': duplicates_cls_analysis,
                 'duplicates_reg_analysis': duplicates_reg_analysis,
                 'false_positives_cls_analysis': false_positives_cls_analysis,
                 'false_positives_reg_analysis': false_positives_reg_analysis})

        # Aggregate per-class results into mean/std across classes.
        final_accumulated_output_dict = dict()
        final_average_output_dict = dict()
        for key in per_class_output_list[0].keys():
            average_output_dict = dict()
            for inner_key in per_class_output_list[0][key].keys():
                # FIX: np.NaN was removed in NumPy 2.0; np.nan is the
                # compatible spelling on all NumPy versions.
                collected_values = [per_class_output[key][inner_key] if per_class_output[key][
                    inner_key] is not None else np.nan for per_class_output in per_class_output_list]
                collected_values = np.array(collected_values)
                if key in average_output_dict.keys():
                    # Use nan mean since some classes do not have duplicates for
                    # instance or has one duplicate for instance. torch.std returns nan in that case
                    # so we handle those here. This should not have any effect on the final results, as
                    # it only affects inter-class variance which we do not
                    # report anyways.
                    average_output_dict[key].update(
                        {inner_key: np.nanmean(collected_values),
                         inner_key + '_std': np.nanstd(collected_values, ddof=1)})
                    final_accumulated_output_dict[key].update(
                        {inner_key: collected_values})
                else:
                    average_output_dict.update(
                        {key: {inner_key: np.nanmean(collected_values),
                               inner_key + '_std': np.nanstd(collected_values, ddof=1)}})
                    final_accumulated_output_dict.update(
                        {key: {inner_key: collected_values}})

            final_average_output_dict.update(average_output_dict)
        final_accumulated_output_dict.update(
            {
                "num_instances": {
                    "num_true_positives": num_true_positives,
                    "num_duplicates": num_duplicates,
                    "num_localization_errors": num_localization_errors,
                    "num_false_positives": num_false_positives,
                    "num_false_negatives": num_false_negatives}})

        if print_results:
            # Summarize and print all
            table = PrettyTable()
            table.field_names = (['Output Type',
                                  'Number of Instances',
                                  'Cls Negative Log Likelihood',
                                  'Cls Brier Score',
                                  'Reg TP Negative Log Likelihood / FP Entropy',
                                  'Reg Energy Score'])
            table.add_row(
                [
                    "True Positives:",
                    num_true_positives,
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['true_positives_cls_analysis']['ignorance_score_mean'],
                        final_average_output_dict['true_positives_cls_analysis']['ignorance_score_mean_std']),
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['true_positives_cls_analysis']['brier_score_mean'],
                        final_average_output_dict['true_positives_cls_analysis']['brier_score_mean_std']),
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['true_positives_reg_analysis']['ignorance_score_mean'],
                        final_average_output_dict['true_positives_reg_analysis']['ignorance_score_mean_std']),
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['true_positives_reg_analysis']['energy_score_mean'],
                        final_average_output_dict['true_positives_reg_analysis']['energy_score_mean_std'])])

            table.add_row(
                [
                    "Duplicates:",
                    num_duplicates,
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['duplicates_cls_analysis']['ignorance_score_mean'],
                        final_average_output_dict['duplicates_cls_analysis']['ignorance_score_mean_std']),
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['duplicates_cls_analysis']['brier_score_mean'],
                        final_average_output_dict['duplicates_cls_analysis']['brier_score_mean_std']),
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['duplicates_reg_analysis']['ignorance_score_mean'],
                        final_average_output_dict['duplicates_reg_analysis']['ignorance_score_mean_std']),
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['duplicates_reg_analysis']['energy_score_mean'],
                        final_average_output_dict['duplicates_reg_analysis']['energy_score_mean_std'])])

            table.add_row(
                [
                    "Localization Errors:",
                    num_localization_errors,
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['localization_errors_cls_analysis']['ignorance_score_mean'],
                        final_average_output_dict['localization_errors_cls_analysis']['ignorance_score_mean_std']),
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['localization_errors_cls_analysis']['brier_score_mean'],
                        final_average_output_dict['localization_errors_cls_analysis']['brier_score_mean_std']),
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['localization_errors_reg_analysis']['ignorance_score_mean'],
                        final_average_output_dict['localization_errors_reg_analysis']['ignorance_score_mean_std']),
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['localization_errors_reg_analysis']['energy_score_mean'],
                        final_average_output_dict['localization_errors_reg_analysis']['energy_score_mean_std'])])

            table.add_row(
                [
                    "False Positives:",
                    num_false_positives,
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['false_positives_cls_analysis']['ignorance_score_mean'],
                        final_average_output_dict['false_positives_cls_analysis']['ignorance_score_mean_std']),
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['false_positives_cls_analysis']['brier_score_mean'],
                        final_average_output_dict['false_positives_cls_analysis']['brier_score_mean_std']),
                    '{:.4f} ± {:.4f}'.format(
                        final_average_output_dict['false_positives_reg_analysis']['total_entropy_mean'],
                        final_average_output_dict['false_positives_reg_analysis']['total_entropy_mean_std']),
                    '-'])
            table.add_row(["False Negatives:",
                           num_false_negatives,
                           '-',
                           '-',
                           '-',
                           '-'])
            print(table)

            # Persist both the human-readable table and the raw per-class dict.
            text_file_name = os.path.join(
                inference_output_dir,
                'probabilistic_scoring_res_{}_{}_{}.txt'.format(
                    iou_min,
                    iou_correct,
                    min_allowed_score))

            with open(text_file_name, "w") as text_file:
                print(table, file=text_file)

            dictionary_file_name = os.path.join(
                inference_output_dir, 'probabilistic_scoring_res_{}_{}_{}.pkl'.format(
                    iou_min, iou_correct, min_allowed_score))

            with open(dictionary_file_name, "wb") as pickle_file:
                pickle.dump(final_accumulated_output_dict, pickle_file)
if __name__ == "__main__":
# Create arg parser
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| 18,140 | 52.04386 | 129 | py |
probdet | probdet-master/src/offline_evaluation/compute_ood_probabilistic_metrics.py | import itertools
import os
import torch
import ujson as json
import pickle
from prettytable import PrettyTable
# Detectron imports
from detectron2.engine import launch
# Project imports
from core.evaluation_tools import scoring_rules
from core.evaluation_tools.evaluation_utils import eval_predictions_preprocess
from core.setup import setup_config, setup_arg_parser
from probabilistic_inference.inference_utils import get_inference_output_dir
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(
        args,
        cfg=None,
        min_allowed_score=None):
    """Compute probabilistic scoring rules on out-of-distribution (OOD) detections.

    Every detection produced on the OOD test set is treated as a false
    positive (there is no in-distribution ground truth), and classification /
    regression scoring rules are computed over all of them, then printed and
    saved to disk.

    Args:
        args: parsed command-line arguments (see ``setup_arg_parser``).
        cfg: optional pre-built config; built from ``args`` when ``None``.
        min_allowed_score (float or None): detections scoring below this are
            dropped. When ``None``, the threshold is read from the training
            set's ``mAP_res.txt`` if available, otherwise 0.0.
    """
    # Setup config
    if cfg is None:
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)

    cfg.defrost()
    cfg.ACTUAL_TEST_DATASET = args.test_dataset

    # Setup torch device and num_threads
    torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)

    # Build path to gt instances and inference output
    inference_output_dir = get_inference_output_dir(
        cfg['OUTPUT_DIR'],
        args.test_dataset,
        args.inference_config,
        args.image_corruption_level)

    if min_allowed_score is None:
        # Check if F-1 Score has been previously computed ON THE ORIGINAL
        # DATASET, and not on VOC.
        try:
            train_set_inference_output_dir = get_inference_output_dir(
                cfg['OUTPUT_DIR'],
                cfg.DATASETS.TEST[0],
                args.inference_config,
                0)
            with open(os.path.join(train_set_inference_output_dir, "mAP_res.txt"), "r") as f:
                min_allowed_score = f.read().strip('][\n').split(', ')[-1]
                min_allowed_score = round(float(min_allowed_score), 4)
        except FileNotFoundError:
            # If not, process all detections. Not recommended as the results might be influenced by very low scoring
            # detections that would normally be removed in robotics/vision
            # applications.
            min_allowed_score = 0.0

    # Get matched results by either generating them or loading from file.
    with torch.no_grad():
        try:
            preprocessed_predicted_instances = torch.load(
                os.path.join(
                    inference_output_dir,
                    "preprocessed_predicted_instances_odd_{}.pth".format(min_allowed_score)),
                map_location=device)
        # Process predictions
        except FileNotFoundError:
            prediction_file_name = os.path.join(
                inference_output_dir,
                'coco_instances_results.json')
            # Fix: use a context manager so the prediction file handle is
            # closed deterministically (the original json.load(open(...))
            # leaked it).
            with open(prediction_file_name, 'r') as prediction_file:
                predicted_instances = json.load(prediction_file)
            preprocessed_predicted_instances = eval_predictions_preprocess(
                predicted_instances, min_allowed_score=min_allowed_score, is_odd=True)
            torch.save(
                preprocessed_predicted_instances,
                os.path.join(
                    inference_output_dir,
                    "preprocessed_predicted_instances_odd_{}.pth".format(min_allowed_score)))

        predicted_boxes = preprocessed_predicted_instances['predicted_boxes']
        predicted_cov_mats = preprocessed_predicted_instances['predicted_covar_mats']
        predicted_cls_probs = preprocessed_predicted_instances['predicted_cls_probs']

        # Flatten the per-image dicts into flat lists of per-detection tensors.
        predicted_boxes = list(itertools.chain.from_iterable(
            predicted_boxes.values()))
        predicted_cov_mats = list(itertools.chain.from_iterable(
            predicted_cov_mats.values()))
        predicted_cls_probs = list(itertools.chain.from_iterable(
            predicted_cls_probs.values()))

        # On OOD data, every detection counts as a false positive.
        num_false_positives = len(predicted_boxes)
        # Idiom fix: torch.arange replaces the list-comprehension tensor build.
        valid_idxs = torch.arange(num_false_positives, device=device)

        # Stack per-detection tensors into (N, ...) batches.
        predicted_boxes = torch.stack(predicted_boxes, 1).transpose(0, 1)
        predicted_cov_mats = torch.stack(predicted_cov_mats, 1).transpose(0, 1)
        predicted_cls_probs = torch.stack(predicted_cls_probs, 1).transpose(0, 1)

        false_positives_dict = {
            'predicted_box_means': predicted_boxes,
            'predicted_box_covariances': predicted_cov_mats,
            'predicted_cls_probs': predicted_cls_probs}

        false_positives_reg_analysis = scoring_rules.compute_reg_scores_fn(
            false_positives_dict, valid_idxs)

        if cfg.MODEL.META_ARCHITECTURE == 'ProbabilisticRetinaNet':
            # RetinaNet has no explicit background class: the "correct" score
            # for a false positive is 1 - max class probability.
            predicted_class_probs, predicted_class_idx = predicted_cls_probs.max(
                1)
            false_positives_dict['predicted_score_of_gt_category'] = 1.0 - \
                predicted_class_probs
            false_positives_cls_analysis = scoring_rules.sigmoid_compute_cls_scores(
                false_positives_dict, valid_idxs)
        else:
            # RCNN-style heads expose a background category in the last column.
            false_positives_dict['predicted_score_of_gt_category'] = predicted_cls_probs[:, -1]
            _, predicted_class_idx = predicted_cls_probs[:, :-1].max(1)
            false_positives_cls_analysis = scoring_rules.softmax_compute_cls_scores(
                false_positives_dict, valid_idxs)

        # Summarize and print all
        table = PrettyTable()
        table.field_names = (['Output Type',
                              'Number of Instances',
                              'Cls Ignorance Score',
                              'Cls Brier/Probability Score',
                              'Reg Ignorance Score',
                              'Reg Energy Score'])
        table.add_row(
            [
                "False Positives:",
                num_false_positives,
                '{:.4f}'.format(
                    false_positives_cls_analysis['ignorance_score_mean']),
                '{:.4f}'.format(
                    false_positives_cls_analysis['brier_score_mean']),
                '{:.4f}'.format(
                    false_positives_reg_analysis['total_entropy_mean']),
                '{:.4f}'.format(
                    false_positives_reg_analysis['fp_energy_score_mean'])])
        print(table)

        # Persist the table and the merged metric dictionary next to the
        # inference output.
        text_file_name = os.path.join(
            inference_output_dir,
            'probabilistic_scoring_res_odd_{}.txt'.format(min_allowed_score))
        with open(text_file_name, "w") as text_file:
            print(table, file=text_file)

        dictionary_file_name = os.path.join(
            inference_output_dir,
            'probabilistic_scoring_res_odd_{}.pkl'.format(min_allowed_score))
        false_positives_reg_analysis.update(false_positives_cls_analysis)
        with open(dictionary_file_name, "wb") as pickle_file:
            pickle.dump(false_positives_reg_analysis, pickle_file)
if __name__ == "__main__":
    # Parse command-line arguments and hand execution to the
    # detectron2 distributed launcher.
    parsed_args = setup_arg_parser().parse_args()
    print("Command Line Args:", parsed_args)
    launch(
        main,
        parsed_args.num_gpus,
        num_machines=parsed_args.num_machines,
        machine_rank=parsed_args.machine_rank,
        dist_url=parsed_args.dist_url,
        args=(parsed_args,),
    )
| 7,146 | 38.486188 | 116 | py |
probdet | probdet-master/src/offline_evaluation/compute_calibration_errors.py | import calibration as cal
import os
import pickle
import torch
from prettytable import PrettyTable
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.engine import launch
# Project imports
from core.evaluation_tools import evaluation_utils
from core.evaluation_tools.evaluation_utils import get_test_thing_dataset_id_to_train_contiguous_id_dict
from core.setup import setup_config, setup_arg_parser
from probabilistic_inference.inference_utils import get_inference_output_dir
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(
        args,
        cfg=None,
        iou_min=None,
        iou_correct=None,
        min_allowed_score=None,
        print_results=True):
    """Compute classification and regression calibration errors for a detector.

    Loads matched detection/ground-truth partitions (true positives,
    duplicates, localization errors, false positives), computes the marginal
    classification calibration error with the `calibration` library, and a
    per-class, per-box-dimension regression calibration error, then writes
    the results to text and pickle files in the inference output directory.

    Args:
        args: parsed command-line arguments (see ``setup_arg_parser``).
        cfg: optional pre-built config; built from ``args`` when ``None``.
        iou_min: minimum IOU threshold for matching; falls back to ``args.iou_min``.
        iou_correct: IOU threshold for a correct match; falls back to ``args.iou_correct``.
        min_allowed_score: score threshold below which detections are dropped;
            read from the training set's ``mAP_res.txt`` when ``None``.
        print_results: when True, print a summary table to stdout.
    """
    # Setup config
    if cfg is None:
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)

    cfg.defrost()
    cfg.ACTUAL_TEST_DATASET = args.test_dataset

    # Setup torch device and num_threads
    torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)

    # Build path to gt instances and inference output
    inference_output_dir = get_inference_output_dir(
        cfg['OUTPUT_DIR'],
        args.test_dataset,
        args.inference_config,
        args.image_corruption_level)

    # Get thresholds to perform evaluation on
    if iou_min is None:
        iou_min = args.iou_min
    if iou_correct is None:
        iou_correct = args.iou_correct
    if min_allowed_score is None:
        # Check if F-1 Score has been previously computed ON THE ORIGINAL
        # DATASET such as COCO even when evaluating on OpenImages.
        try:
            train_set_inference_output_dir = get_inference_output_dir(
                cfg['OUTPUT_DIR'],
                cfg.DATASETS.TEST[0],
                args.inference_config,
                0)
            with open(os.path.join(train_set_inference_output_dir, "mAP_res.txt"), "r") as f:
                min_allowed_score = f.read().strip('][\n').split(', ')[-1]
                min_allowed_score = round(float(min_allowed_score), 4)
        except FileNotFoundError:
            # If not, process all detections. Not recommended as the results might be influenced by very low scoring
            # detections that would normally be removed in robotics/vision
            # applications.
            min_allowed_score = 0.0

    # Get category mapping dictionary: maps test-set category ids to the
    # training set's contiguous ids.
    train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0]).thing_dataset_id_to_contiguous_id
    test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        args.test_dataset).thing_dataset_id_to_contiguous_id
    cat_mapping_dict = get_test_thing_dataset_id_to_train_contiguous_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id)

    # Get matched results by either generating them or loading from file.
    with torch.no_grad():
        matched_results = evaluation_utils.get_matched_results(
            cfg, inference_output_dir,
            iou_min=iou_min,
            iou_correct=iou_correct,
            min_allowed_score=min_allowed_score)

        # Build preliminary dicts required for computing classification scores.
        for matched_results_key in matched_results.keys():
            if 'gt_cat_idxs' in matched_results[matched_results_key].keys():
                # First we convert the written things indices to contiguous
                # indices.
                gt_converted_cat_idxs = matched_results[matched_results_key]['gt_cat_idxs'].squeeze(
                    1)
                gt_converted_cat_idxs = torch.as_tensor([cat_mapping_dict[class_idx.cpu(
                ).tolist()] for class_idx in gt_converted_cat_idxs]).to(device)
                matched_results[matched_results_key]['gt_converted_cat_idxs'] = gt_converted_cat_idxs.to(
                    device)
                matched_results[matched_results_key]['gt_cat_idxs'] = gt_converted_cat_idxs
            if 'predicted_cls_probs' in matched_results[matched_results_key].keys(
            ):
                if cfg.MODEL.META_ARCHITECTURE == 'ProbabilisticRetinaNet':
                    # For false positives, the correct category is background. For retinanet, since no explicit
                    # background category is available, this value is computed as 1.0 - score of the predicted
                    # category.
                    predicted_class_probs, predicted_cat_idxs = matched_results[matched_results_key][
                        'predicted_cls_probs'].max(
                        1)
                    matched_results[matched_results_key]['output_logits'] = predicted_class_probs
                else:
                    predicted_class_probs, predicted_cat_idxs = matched_results[
                        matched_results_key]['predicted_cls_probs'][:, :-1].max(1)
                matched_results[matched_results_key]['predicted_cat_idxs'] = predicted_cat_idxs

        # Load the different detection partitions
        true_positives = matched_results['true_positives']
        duplicates = matched_results['duplicates']
        localization_errors = matched_results['localization_errors']
        false_positives = matched_results['false_positives']

        # Per-class accumulators for the regression calibration errors.
        reg_maximum_calibration_error_list = []
        reg_expected_calibration_error_list = []

        if cfg.MODEL.META_ARCHITECTURE == 'ProbabilisticRetinaNet':
            # Multilabel formulation: flatten all class scores; ground truth
            # is one-hot for matched detections and all-zero for
            # localization errors / false positives.
            all_predicted_scores = torch.cat(
                (true_positives['predicted_cls_probs'].flatten(),
                 duplicates['predicted_cls_probs'].flatten(),
                 localization_errors['predicted_cls_probs'].flatten(),
                 false_positives['predicted_cls_probs'].flatten()),
                0)
            all_gt_scores = torch.cat(
                (torch.nn.functional.one_hot(
                    true_positives['gt_cat_idxs'],
                    true_positives['predicted_cls_probs'].shape[1]).flatten().to(device),
                 torch.nn.functional.one_hot(
                     duplicates['gt_cat_idxs'],
                     duplicates['predicted_cls_probs'].shape[1]).flatten().to(device),
                 torch.zeros_like(
                     localization_errors['predicted_cls_probs'].type(
                         torch.LongTensor).flatten()).to(device),
                 torch.zeros_like(
                     false_positives['predicted_cls_probs'].type(
                         torch.LongTensor).flatten()).to(device)),
                0)
        else:
            # For RCNN based networks, a background category is
            # explicitly available.
            # NOTE(review): 80.0 appears to be the background class index
            # (COCO has 80 thing classes) — confirm against the model config.
            all_predicted_scores = torch.cat(
                (true_positives['predicted_cls_probs'],
                 duplicates['predicted_cls_probs'],
                 localization_errors['predicted_cls_probs'],
                 false_positives['predicted_cls_probs']),
                0)
            all_gt_scores = torch.cat(
                (true_positives['gt_cat_idxs'],
                 duplicates['gt_cat_idxs'],
                 torch.ones_like(
                     localization_errors['predicted_cls_probs'][:, 0]).fill_(80.0).type(
                     torch.LongTensor).to(device),
                 torch.ones_like(
                     false_positives['predicted_cls_probs'][:, 0]).fill_(80.0).type(
                     torch.LongTensor).to(device)), 0)

        # Compute classification calibration error using calibration
        # library
        cls_marginal_calibration_error = cal.get_calibration_error(
            all_predicted_scores.cpu().numpy(), all_gt_scores.cpu().numpy())

        for class_idx in cat_mapping_dict.values():
            true_positives_valid_idxs = true_positives['gt_converted_cat_idxs'] == class_idx
            localization_errors_valid_idxs = localization_errors['gt_converted_cat_idxs'] == class_idx
            duplicates_valid_idxs = duplicates['gt_converted_cat_idxs'] == class_idx

            # Compute regression calibration errors. False negatives cant be evaluated since
            # those do not have ground truth.
            all_predicted_means = torch.cat(
                (true_positives['predicted_box_means'][true_positives_valid_idxs],
                 duplicates['predicted_box_means'][duplicates_valid_idxs],
                 localization_errors['predicted_box_means'][localization_errors_valid_idxs]),
                0)
            all_predicted_covariances = torch.cat(
                (true_positives['predicted_box_covariances'][true_positives_valid_idxs],
                 duplicates['predicted_box_covariances'][duplicates_valid_idxs],
                 localization_errors['predicted_box_covariances'][localization_errors_valid_idxs]),
                0)
            all_predicted_gt = torch.cat(
                (true_positives['gt_box_means'][true_positives_valid_idxs],
                 duplicates['gt_box_means'][duplicates_valid_idxs],
                 localization_errors['gt_box_means'][localization_errors_valid_idxs]),
                0)

            all_predicted_covariances = torch.diagonal(
                all_predicted_covariances, dim1=1, dim2=2)

            # The assumption of uncorrelated components is not accurate, especially when estimating full
            # covariance matrices. However, using scipy to compute multivariate cdfs is very very
            # time consuming for such large amounts of data.
            reg_maximum_calibration_error = []
            reg_expected_calibration_error = []

            # Regression calibration is computed for every box dimension
            # separately, and averaged after.
            for box_dim in range(all_predicted_gt.shape[1]):
                all_predicted_means_current_dim = all_predicted_means[:, box_dim]
                all_predicted_gt_current_dim = all_predicted_gt[:, box_dim]
                all_predicted_covariances_current_dim = all_predicted_covariances[:, box_dim]
                normal_dists = torch.distributions.Normal(
                    all_predicted_means_current_dim,
                    scale=torch.sqrt(all_predicted_covariances_current_dim))
                # CDF of the ground truth under the predicted Gaussian; a
                # calibrated model yields uniformly distributed values.
                all_predicted_scores = normal_dists.cdf(
                    all_predicted_gt_current_dim)

                reg_calibration_error = []
                histogram_bin_step_size = 1 / 15.0
                for i in torch.arange(
                        0.0,
                        1.0 - histogram_bin_step_size,
                        histogram_bin_step_size):
                    # Get number of elements in bin
                    elements_in_bin = (
                        all_predicted_scores < (i + histogram_bin_step_size))
                    num_elems_in_bin_i = elements_in_bin.type(
                        torch.FloatTensor).to(device).sum()
                    # Compute calibration error from "Accurate uncertainties for deep
                    # learning using calibrated regression" paper.
                    reg_calibration_error.append(
                        (num_elems_in_bin_i / all_predicted_scores.shape[0] - (i + histogram_bin_step_size)) ** 2)
                calibration_error = torch.stack(
                    reg_calibration_error).to(device)
                reg_maximum_calibration_error.append(calibration_error.max())
                reg_expected_calibration_error.append(calibration_error.mean())

            reg_maximum_calibration_error_list.append(
                reg_maximum_calibration_error)
            reg_expected_calibration_error_list.append(
                reg_expected_calibration_error)

        # Summarize and print all
        # NaN entries arise for classes with no matched detections and are
        # excluded from the mean.
        reg_expected_calibration_error = torch.stack([torch.stack(
            reg, 0) for reg in reg_expected_calibration_error_list], 0)
        reg_expected_calibration_error = reg_expected_calibration_error[
            ~torch.isnan(reg_expected_calibration_error)].mean()

        reg_maximum_calibration_error = torch.stack([torch.stack(
            reg, 0) for reg in reg_maximum_calibration_error_list], 0)
        reg_maximum_calibration_error = reg_maximum_calibration_error[
            ~torch.isnan(reg_maximum_calibration_error)].mean()

        if print_results:
            table = PrettyTable()
            table.field_names = (['Cls Marginal Calibration Error',
                                  'Reg Expected Calibration Error',
                                  'Reg Maximum Calibration Error'])
            table.add_row([cls_marginal_calibration_error,
                           reg_expected_calibration_error.cpu().numpy().tolist(),
                           reg_maximum_calibration_error.cpu().numpy().tolist()])
            print(table)

        # Persist results as a plain-text list and as a pickle dict.
        text_file_name = os.path.join(
            inference_output_dir,
            'calibration_errors_{}_{}_{}.txt'.format(
                iou_min, iou_correct, min_allowed_score))
        with open(text_file_name, "w") as text_file:
            print([
                cls_marginal_calibration_error,
                reg_expected_calibration_error.cpu().numpy().tolist(),
                reg_maximum_calibration_error.cpu().numpy().tolist()], file=text_file)

        dictionary_file_name = os.path.join(
            inference_output_dir, 'calibration_errors_res_{}_{}_{}.pkl'.format(
                iou_min, iou_correct, min_allowed_score))
        final_accumulated_output_dict = {
            'cls_marginal_calibration_error': cls_marginal_calibration_error,
            'reg_expected_calibration_error': reg_expected_calibration_error.cpu().numpy(),
            'reg_maximum_calibration_error': reg_maximum_calibration_error.cpu().numpy()}
        with open(dictionary_file_name, "wb") as pickle_file:
            pickle.dump(final_accumulated_output_dict, pickle_file)
if __name__ == "__main__":
    # Parse command-line arguments and hand execution to the
    # detectron2 distributed launcher.
    parsed_args = setup_arg_parser().parse_args()
    print("Command Line Args:", parsed_args)
    launch(
        main,
        parsed_args.num_gpus,
        num_machines=parsed_args.num_machines,
        machine_rank=parsed_args.machine_rank,
        dist_url=parsed_args.dist_url,
        args=(parsed_args,),
    )
| 14,207 | 45.736842 | 116 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/main.py | import torch
import torch.nn as nn
import argparse
import os
import random
import numpy as np
from tqdm import tqdm
# from torchvision import models
# from transformers import AdamW
from BertModels import BertForMultipleClass, BertForBooleanQuestionYN ,BertForBooleanQuestionFR, BertForQuestionAnswering, BertForBooleanQuestionFR1, BertForBooleanQuestionFB,BertForBooleanQuestionFB1, BertForBooleanQuestionYNboolq ,BertForBooleanQuestionYN1 , BertForBooleanQuestionCO, BertForBooleanQuestionCO1, BertForMaskedLM, BertForTokenClassification, BertForBooleanQuestion3ClassYN, BertForMultipleClassLoad, BertForSequenceClassification, BertForSequenceClassification1, BertForSequenceClassification2, BertForSequenceClassification3, BertForBooleanQuestionYNsprlqa, BertForBooleanQuestionYNsprlqaLoad
# from BertModels import BertForBooleanQuestionYN ,BertForBooleanQuestionYN1
from PLModels import BertMultiTaskBooleanQuestion, BertMultiTaskMultipleClass, BertMultiTaskBooleanQuestionLoad, BertMultiTaskMultipleClassLoad, BertForSpatialRelationExtraction #, MultipleClass, BooleanQuestionYN ,BooleanQuestionFR#, MultipleClassLoad, BooleanQuestionLoad,
from XLNETModels import XLNETForQuestionAnswering, XLNETForBooleanQuestionFR, XLNETForBooleanQuestionFB, XLNETForBooleanQuestionYN , XLNETForBooleanQuestionCO
from ALBertModels import ALBertForQuestionAnswering, ALBertForBooleanQuestionFR, ALBertForBooleanQuestionFB, ALBertForBooleanQuestionYN , ALBertForBooleanQuestionCO
from BertSpatialQA import SpatialQA, SpatialQAaddSprl, SpatialQAaddSprlLoad,SpatialQAaddSprlTriplet, SpatialQASupervised, weights_init_normal
from Create_LM_input_output import initialize_tokenizer
from consistency_check import consistency, contrast
import matplotlib.pyplot as plt
# adding arguments
# Command-line interface for the Spatial-QA training/evaluation script.
parser = argparse.ArgumentParser()

# --- Paths, saving/loading, and hardware ---
parser.add_argument("--research_hlr",help="change the location of files",action='store_true', default = True)
parser.add_argument("--result",help="Name of the result's saving file", type= str, default='test')
parser.add_argument("--result_folder",help="Name of the folder of the results file", type= str, default='transfer/Results')
parser.add_argument("--model",help="Name of the model's saving file", type= str, default='')
parser.add_argument("--model_folder",help="Name of the folder of the models file", type=str, default = "transfer/Models")
parser.add_argument("--old_experiments",help="from the spartun project some setting of models changes, so if you want to run the previous things, set this True", default = False, action='store_true')
parser.add_argument("--dataset",help="name of the dataset like mSpRL or spaceeval", type = str, default = 'spartqa')
parser.add_argument("--no_save",help="If save the model or not", action='store_true', default = False)
parser.add_argument("--load",help="For loading model", type=str)
parser.add_argument("--cuda",help="The index of cuda", type=int, default=None)

# --- Task / data-split selection ---
parser.add_argument("--qtype",help="Name of Question type. (FB, FR, CO, YN)", type=str, default = 'all')
parser.add_argument("--train10k",help="Train on 10k data for babi dataset", action='store_true', default = False)
parser.add_argument("--train1k",help="Train on 1k data for babi dataset", action='store_true', default = False)
parser.add_argument("--train24k",help="Train on 24k data", action='store_true', default = False)
parser.add_argument("--train100k",help="Train on 100k data", action='store_true', default = False)
parser.add_argument("--train500",help="Train on 500 data", action='store_true', default = False)
parser.add_argument("--unseentest",help="Test on unseen data", action='store_true', default = False)
parser.add_argument("--human",help="Train and Test on human data", action='store_true', default = False)
parser.add_argument("--humantest",help="Test on human data", action='store_true', default = False)
parser.add_argument("--dev_exists", help="If development set is used", action='store_true', default = False)
parser.add_argument("--test_track", help="track the test result during training", action='store_true', default = False)
parser.add_argument("--no_train",help="Number of train samples", action='store_true', default = False)
parser.add_argument("--save_data",help="save extracted data", action='store_true', default = False)

# --- Model / training hyper-parameters ---
parser.add_argument("--baseline",help="Name of the baselines. Options are 'bert', 'xlnet', 'albert'", type=str, default = 'bert')
parser.add_argument("--pretrain",help="Name of the pretrained model. Options are 'bertqa', 'bertbc' (for bert boolean clasification), 'mlm', 'mlmr', 'tokencls'", type=str, default = 'bertbc')
parser.add_argument("--con",help="Testing consistency or contrast", type=str, default = 'not')
parser.add_argument("--optim",help="Type of optimizer. options 'sgd', 'adamw'.", type=str, default = 'adamw')
parser.add_argument("--loss",help="Type of loss function. options 'cross'.", type=str, default = 'focal')
parser.add_argument("--batch_size",help="size of batch. If none choose the whole example in one sample. If QA number of all questions if SIE number of sentences or triplets'.", type=int, default = 1)
parser.add_argument("--best_model",help="How to save the best model. based on aacuracy or f1 measure", type=str, default = 'accuracy')
parser.add_argument("--train",help="Number of train samples", type = int)
parser.add_argument("--train_log", help="save the log of train if true", default = False, action='store_true')
parser.add_argument("--start",help="The start number of train samples", type = int, default = 0)
parser.add_argument("--dev",help="Number of dev samples", type = int)
parser.add_argument("--test",help="Number of test samples", type = int)
parser.add_argument("--unseen",help="Number of unseen test samples", type = int)
parser.add_argument("--has_zero_eval", help="If True before starting the training have a test on the test set", default = False, action='store_true')
parser.add_argument("--stepgame_train_set",help="Number of sentence in stepgame dataset", type = str, default=None)
# parser.add_argument("--stepgame_dev_sets",help="Number of sentence in stepgame dataset", type = list, default=[12345])
parser.add_argument("--stepgame_test_set",help="Number of sentence in stepgame dataset", type = str, default="1 2 3 4 5 6 7 8 9 10")
parser.add_argument("--epochs",help="Number of epochs for training", type = int, default=0)
parser.add_argument("--lr",help="learning rate", type = float, default=2e-6)
parser.add_argument("--dropout", help="If you want to set dropout=0", action='store_true', default = False)
parser.add_argument("--unfreeze", help="freeze the first layeres of the model except this numbers", type=int, default = 0)
parser.add_argument("--seed", help="set seed for reproducible result", type=int, default = 1)
parser.add_argument("--other_var", dest='other_var', action='store', help="Other variable: classification (DK, noDK), random, fine-tune on unseen. for changing model load MLM from pre-trained model and replace other parts with new on", type=str)
parser.add_argument("--other_var2", dest='other_var2', action='store', help="Other variable: classification (DK, noDK), random, fine-tune on unseen. for changing model load MLM from pre-trained model and replace other parts with new on", type=str)
parser.add_argument("--detail",help="a description about the model", type = str)

#arguments for end2end models
parser.add_argument("--options", help="describe the model features: 'q+s' + 'first_attention_stoq' + 'just_pass_entity'+ '2nd_attention_stoq'+ '2nd_attention_qtos' + ", type=str, default=None)
parser.add_argument("--top_k_sent", help="set top k for sentence", type=int, default=None)
parser.add_argument("--top_k_s", help="set top k for indicator, entity, and triplets: 3#4#3", type=str, default=None)
parser.add_argument("--top_k_q", help="set top k for indicator, entity, and triplets: 3#4#3", type=str, default=None)
parser.add_argument("--cls_input_dim", help="an integer based on the final input of boolean classification", type=int, default=768)

args = parser.parse_args()
# os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(os.getcwd())

# Root of the shared storage where results and models are written.
start_path = '/egr/research-hlr/' #'/tank/space/' #'/egr/research-hlr/' #if args.research_hlr else '/tank/space/'

# Spatial-information-extraction pretraining modes get a '_SIE' suffix on the
# results directory; plain QA modes do not.
if args.pretrain in ["tokencls", "sptypecls", "spcls", "sre"]:
    result_adress = os.path.join(start_path+'rshnk/'+args.result_folder+ '/'+args.dataset+'/'+args.baseline+'_SIE/',args.result)
else:
    result_adress = os.path.join(start_path+'rshnk/'+args.result_folder+ '/'+args.dataset+'/'+args.baseline+'/',args.result)
model_address = os.path.join(start_path+'rshnk/'+args.model_folder, args.dataset)

# Parse the space-separated stepgame test-set spec into a list of ints.
args.stepgame_test_set = [int(i) for i in args.stepgame_test_set.split(' ')]

if not os.path.exists(result_adress):
    os.makedirs(result_adress)
if not os.path.exists(model_address):
    os.makedirs(model_address)

#saved_file = open('results/train'+args.result+'.txt','w')

#choosing device
# NOTE(review): 'cuda:0' cannot be selected via --cuda because `if args.cuda`
# is falsy for 0 — it falls through to the bare 'cuda' device.
if torch.cuda.is_available():
    print('Using ', torch.cuda.device_count() ,' GPU(s)')
    mode = 'cuda:'+str(args.cuda) if args.cuda else 'cuda'
    if args.seed:
        torch.cuda.manual_seed(args.seed)
else:
    print("WARNING: No GPU found. Using CPUs...")
    mode = 'cpu'
device = torch.device(mode)

# Seed every RNG used downstream for reproducibility.
if args.seed:
    print("set seeds.")
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
def config():
    """Write the parsed command-line configuration to config.txt in the results directory."""
    # Fix: use a context manager so the file is closed even if printing fails
    # (the original used explicit open()/close()).
    with open(result_adress + '/config.txt', 'w') as f:
        print('Configurations:\n', args , file=f)
# Dump the run configuration before training starts.
config()
epochs = args.epochs

# Human evaluation implies testing on human data.
if args.human: args.humantest = True

# Map the mutually-exclusive --trainNNk flags onto a single tag (or None).
if args.train24k: train_num = 'train24k'
elif args.train100k: train_num = 'train100k'
elif args.train500: train_num = 'train500'
elif args.train1k: train_num = 'train1k'
elif args.train10k: train_num = 'train10k'
else: train_num = None

# Default the model save-name to the results save-name.
if args.model == '': args.model = args.result

# Select the HuggingFace checkpoint matching the chosen baseline LM.
if args.baseline == "roberta": pretrained_data = 'roberta-base'
elif args.baseline == "xlnet": pretrained_data = 'xlnet-base-cased'
elif args.baseline == "albert": pretrained_data = 'albert-base-v2'
else: pretrained_data = 'bert-base-uncased'

initialize_tokenizer(args.baseline, pretrained_data)
# Label-space sizes for yes/no (YN) and find-relation (FR) questions; filled
# in per dataset below, left None for tasks that do not use them.
num_labels_YN = None
num_labels_FR = None

#calling test and train based on the task
# Select the train/test entry points for the chosen pretraining task and
# dataset. Note: some dataset branches import only `test` (no training
# supported for them).
if args.pretrain == 'tokencls':
    if args.dataset == 'msprl':
        from spInfo.msprl.train_tokencls_msprl import train
        from spInfo.msprl.test_tokencls_msprl import test
    elif args.dataset == 'spaceEval':
        from spaceeval.train_tokencls_spaceEval import train
        from spaceeval.test_tokencls_spaceEval import test
    elif args.dataset == "stepgame":
        from spInfo.stepgame.test_tokencls import test
    else:
        from spInfo.train_tokencls import train
        from spInfo.test_tokencls import test
elif args.pretrain == "sre":
    if args.dataset == "spartqa":
        from SRE.train import train
        from SRE.test import test
        # Number of spatial-relation labels for the SRE task.
        # NOTE(review): nesting inferred from line order — confirm whether
        # this should also be set for non-spartqa datasets.
        sre_num_labels = 12
elif args.pretrain == 'spcls' or args.pretrain == 'sptypecls':
    if args.dataset == 'msprl':
        from spInfo.msprl.train_spcls_msprl import train
        from spInfo.msprl.test_spcls_msprl import test
    elif args.dataset == "stepgame":
        from spInfo.stepgame.test_spcls import test
    else:
        # if args.humantest:
        #     from spInfo.test_spcls_no_annot import test
        # else:
        from spInfo.train_spcls import train
        from spInfo.test_spcls import test
elif args.pretrain == 'end2end':
    if args.dataset == 'stepgame':
        if args.other_var == 'addsprl':
            from end2end.StepGame.train import train
            from end2end.StepGame.test import test
    elif args.dataset == 'sprlqa':
        if args.other_var == 'addsprl':
            from end2end.sprlqa.train import train
            from end2end.sprlqa.test import test
    else:
        if args.other_var == 'supervised':
            from end2end.train_sup import train
            from end2end.test_sup import test
        else:
            from end2end.train import train
            from end2end.test import test
elif args.pretrain == 'sptype+bertbc':
    from QA_splinfo.train import train
    from QA_splinfo.test import test
elif args.pretrain == 'sptypeQA':
    from QA_splinfo.train_triplet import train
    from QA_splinfo.test_triplet import test
else:  # QA task
    if args.dataset == 'boolq':
        from boolq.train_boolQ import train
        from boolq.test_boolQ import test
        num_labels_YN = 2
    elif args.dataset == 'babi':
        from QA.babi.train import train
        from QA.babi.test import test
        if args.qtype in ["all", "YN"]:
            num_labels_YN = 2
        if args.qtype in ["all", "FR"]:
            num_labels_FR = 4
    elif args.dataset == 'sprlqa':
        # from msprl.QA.train import train
        # from msprl.QA.test import test
        from QA.sprlqa.train import train
        from QA.sprlqa.test import test
        num_labels_YN = 2
    elif args.dataset == 'stepgame':
        from QA.StepGame.train import train
        from QA.StepGame.test import test
        num_labels_FR = 9
    else:
        # spartqa / spartun share entry points; 'old_experiments' keeps the
        # pre-spartun code path.
        if args.old_experiments:
            from QA.trainold import train
            from QA.testold import test
        else:
            from QA.train import train
            from QA.test import test
        if args.dataset == "spartqa":
            if args.qtype in ["all", "YN"]:
                num_labels_YN = 3
            if args.qtype in ["all", "FR"]:
                num_labels_FR = 7
        else:  # spartun
            if args.qtype in ["all", "YN"]:
                num_labels_YN = 2
            if args.qtype in ["all", "FR"]:
                num_labels_FR = 15
#model
# model = None
if args.load:
# print('/tank/space/rshnk/'+args.model_folder+'/'+args.load+'.th')
model = torch.load(start_path+'rshnk/'+args.model_folder+'/'+args.load+'.th', map_location={'cuda:0': 'cuda:'+str(args.cuda),'cuda:1': 'cuda:'+str(args.cuda),'cuda:2': 'cuda:'+str(args.cuda),'cuda:3': 'cuda:'+str(args.cuda), 'cuda:5': 'cuda:'+str(args.cuda), 'cuda:4': 'cuda:'+str(args.cuda), 'cuda:6': 'cuda:'+str(args.cuda),'cuda:7': 'cuda:'+str(args.cuda)})
# model.to(device)
if args.unfreeze:
if args.baseline == 'bert':
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)]:
# print('I will be frozen: {}'.format(name))
param.requires_grad = False
if args.other_var == 'change_model' or args.other_var2 == 'change_model':
pretrained_dict = model.state_dict()
if args.pretrain == 'bertbc':
if args.old_experiments:
if args.qtype == 'YN':
if args.baseline == 'bert':
if args.dataset == 'spartqa':
model2 = BertForBooleanQuestionYN1.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
elif args.dataset == 'sprlqa':
model2 = BertForBooleanQuestionYNsprlqaLoad.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
elif args.qtype == 'FB':
if args.baseline == 'bert':
model2 = BertForBooleanQuestionFB1.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
elif args.qtype == 'FR':
if args.baseline == 'bert':
model2 = BertForBooleanQuestionFR1.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
elif args.qtype == 'CO':
if args.baseline == 'bert':
model2 = BertForBooleanQuestionCO1.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
else:
if args.baseline == 'bert':
model2 = BertMultiTaskBooleanQuestionLoad.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout, num_labels_YN = num_labels_YN, num_labels_FR= num_labels_FR, dataset = "human" if args.human else args.dataset, LM = args.baseline, has_batch = True if args.batch_size and args.batch_size>1 else False, criterion = args.loss)
elif args.pretrain == 'bertmc':
if args.old_experiments:
if args.qtype == 'YN':
if args.baseline == 'bert':
model2 = BertForMultipleClassLoad.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
if args.qtype == 'FR':
if args.baseline == 'bert':
model2 = BertForMultipleClassLoad.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout, qtype = 'FR', num_classes = 9)
else:
if args.baseline == 'bert':
model2 = BertMultiTaskMultipleClassLoad.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout, num_classes_YN = num_labels_YN , num_classes_FR= num_labels_FR, dataset = "human" if args.human else args.dataset, LM = args.baseline , has_batch = True if args.batch_size and args.batch_size>1 else False, criterion = args.loss)
elif args.pretrain == 'sptypecls':
if args.human:
model2 = BertForSequenceClassification2.from_pretrained(pretrained_data, num_labels = 1, type_class = 11 , device = device, no_dropout= args.dropout)
if args.baseline == 'bert' and args.dataset == 'msprl':
model2 = BertForSequenceClassification2.from_pretrained(pretrained_data, num_labels = 1, type_class = 23 , device = device, no_dropout= args.dropout)
elif args.pretrain == 'end2end':
if args.qtype == 'YN': qa_num_labels = 2
elif args.qtype == 'FR': qa_num_labels = 7
elif args.qtype == 'CO': qa_num_labels = 2
elif args.qtype == 'FB': qa_num_labels = 3
else: qa_num_labels = None
if args.baseline == 'bert':
drop = 0 if args.dropout else 0.1
if args.other_var == 'addsprl':
# model2 = SpatialQAaddSprl(no_dropout=drop, qa_num_labels = qa_num_labels, rel_type_num = 11, qtype = args.qtype, device = device, unfreeze = args.unfreeze, top_k_s= args.top_k_s.split('#') if args.top_k_s else None, top_k_q= args.top_k_q.split('#') if args.top_k_q else None, options= args.options, cls_input_dim = args.cls_input_dim)
model2 = SpatialQAaddSprlLoad(no_dropout=drop, qa_num_labels = qa_num_labels, rel_type_num = 11, qtype = args.qtype, device = device, unfreeze = args.unfreeze, top_k_sent= args.top_k_sent, top_k_s= args.top_k_s.split('#') if args.top_k_s else None, top_k_q= args.top_k_q.split('#') if args.top_k_q else None, options= args.options, cls_input_dim = args.cls_input_dim)
if args.baseline == 'bert':
if args.unfreeze:
for name, param in list(model2.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
model_dict = model2.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# print(pretrained_dict.keys())
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# # 3. load the new state dict
model2.load_state_dict(model_dict)
model = model2
model.to(device)
else:
if args.pretrain == 'bertqa': # for FA
if args.baseline == 'bert':
model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
elif args.baseline == 'albert':
model = ALBertForQuestionAnswering.from_pretrained(pretrained_data, device = device)
elif args.baseline == 'xlnet':
model = XLNETForQuestionAnswering.from_pretrained(pretrained_data, device = device)
model.to(device)
elif args.pretrain == 'mlm' or args.pretrain =='mlmr':
if args.baseline == 'bert':
drop = 0 if args.dropout else 0.1
#bert-large-uncased-whole-word-masking-finetuned-squad
# bert-base-uncased
model = BertForMaskedLM.from_pretrained(pretrained_data, hidden_dropout_prob = drop, attention_probs_dropout_prob = drop, return_dict=True)
if args.unfreeze:
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
model.to(device)
elif args.pretrain == 'end2end':
if args.qtype == 'YN':
if args.dataset == 'sprlqa': qa_num_labels = 2
else: qa_num_labels = 3
elif args.qtype == 'FR':
if args.dataset != "stepgame": qa_num_labels = 7
else: qa_num_labels = 9
elif args.qtype == 'CO': qa_num_labels = 2
elif args.qtype == 'FB': qa_num_labels = 3
else: qa_num_labels = None
if args.baseline == 'bert':
drop = 0 if args.dropout else 0.1
if args.other_var == 'addsprl':
# model = SpatialQAaddSprl(no_dropout=drop, qa_num_labels = qa_num_labels, rel_type_num = 11, qtype = args.qtype, device = device, unfreeze = args.unfreeze, top_k_s= args.top_k_s.split('#') if args.top_k_s else None, top_k_q= args.top_k_q.split('#') if args.top_k_q else None, options= args.options, cls_input_dim = args.cls_input_dim)
model = SpatialQAaddSprl(no_dropout=drop, qa_num_labels = qa_num_labels, rel_type_num = 11, qtype = args.qtype, device = device, unfreeze = args.unfreeze, top_k_sent= args.top_k_sent, top_k_s= args.top_k_s.split('#') if args.top_k_s else None, top_k_q= args.top_k_q.split('#') if args.top_k_q else None, options= args.options, cls_input_dim = args.cls_input_dim)
elif args.other_var == 'supervised':
model = SpatialQASupervised(no_dropout=drop, qa_num_labels = qa_num_labels, rel_type_num = 11, qtype = args.qtype, device = device, unfreeze = args.unfreeze)
else:
model = SpatialQA(no_dropout=drop, qa_num_labels = qa_num_labels, rel_type_num = 11, qtype = args.qtype, device = device, unfreeze = args.unfreeze)
model.to(device)
elif args.pretrain == "sre":
model = BertForSpatialRelationExtraction( no_dropout = drop, num_labels = sre_num_labels, device =device)
model.to(device)
elif args.pretrain == 'sptypeQA':
if args.qtype == 'YN': qa_num_labels = 2
elif args.qtype == 'FR': qa_num_labels = 7
elif args.qtype == 'CO': qa_num_labels = 2
elif args.qtype == 'FB': qa_num_labels = 3
else: qa_num_labels = None
if args.baseline == 'bert':
drop = 0 if args.dropout else 0.1
model = SpatialQAaddSprlTriplet(no_dropout=drop, qa_num_labels = qa_num_labels, rel_type_num = 11, qtype = args.qtype, device = device, unfreeze = args.unfreeze)
model.to(device)
elif args.pretrain == 'tokencls':
if args.baseline == 'bert':
drop = 0 if args.dropout else 0.1
model = BertForTokenClassification.from_pretrained(pretrained_data, hidden_dropout_prob = drop, attention_probs_dropout_prob = drop, return_dict=True, num_labels = 5)
if args.unfreeze:
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
model.to(device)
elif args.pretrain == 'spcls':
if args.baseline == 'bert':
drop = 0 if args.dropout else 0.1
model = BertForSequenceClassification.from_pretrained(pretrained_data, num_labels = 1, device = device, no_dropout= args.dropout)
# model = BertForSequenceClassification.from_pretrained(pretrained_data, hidden_dropout_prob = drop, attention_probs_dropout_prob = drop, return_dict=True, num_labels = 1)
if args.unfreeze:
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
model.to(device)
elif args.pretrain == 'sptypecls':
if args.baseline == 'bert':
# drop = 0 if args.dropout else 0.1
if args.dataset == 'msprl':
model = BertForSequenceClassification3.from_pretrained(pretrained_data, num_labels = 1, type_class = 23 , device = device, no_dropout= args.dropout)
elif args.dataset == 'spaceEval':
model = BertForSequenceClassification1.from_pretrained(pretrained_data, num_labels = 1, type_class = 22 , device = device, no_dropout= args.dropout)
elif args.dataset == 'spartun':
model = BertForSequenceClassification3.from_pretrained(pretrained_data, num_labels = 1, type_class = 16 , device = device, no_dropout= args.dropout)
else:
model = BertForSequenceClassification3.from_pretrained(pretrained_data, num_labels = 1, type_class = 11 , device = device, no_dropout= args.dropout)
# model = BertForSequenceClassification.from_pretrained(pretrained_data, hidden_dropout_prob = drop, attention_probs_dropout_prob = drop, return_dict=True, num_labels = 1)
#unfreeze the layers
if args.unfreeze:
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
model.to(device)
elif args.pretrain == 'bertmc':
if args.old_experiments:
if args.qtype == 'YN':
# drop = 0 if args.dropout else 0.1
if args.baseline == 'bert':
model = BertForMultipleClass.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
# model = BertForMultipleChoice.from_pretrained(pretrained_data, hidden_dropout_prob = drop, attention_probs_dropout_prob = drop, return_dict=True)
if args.unfreeze:
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
elif args.qtype == 'FR':
# drop = 0 if args.dropout else 0.1
if args.baseline == 'bert':
if args.dataset == 'stepgame':
model = BertForMultipleClass.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout, qtype = args.qtype, num_classes = 9)
# model = BertForMultipleChoice.from_pretrained(pretrained_data, hidden_dropout_prob = drop, attention_probs_dropout_prob = drop, return_dict=True)
if args.unfreeze:
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
else:
#using PLModels file
model = BertMultiTaskMultipleClass.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout, num_classes_YN = num_labels_YN , num_classes_FR= num_labels_FR, dataset = "human" if args.human else args.dataset, LM = args.baseline , has_batch = True if args.batch_size and args.batch_size>1 else False, criterion = args.loss)
# model = BertForMultipleChoice.from_pretrained(pretrained_data, hidden_dropout_prob = drop, attention_probs_dropout_prob = drop, return_dict=True)
if args.unfreeze:
if args.baseline == "bert":
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
model.to(device)
elif args.pretrain == 'bertbc' or args.pretrain == 'sptype+bertbc':
if args.old_experiments:
if args.qtype == 'FR':
if args.baseline == 'bert':
if args.dataset == "babi":
model = BertForBooleanQuestionFR.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout, num_labels = 4)
else:
model = BertForBooleanQuestionFR.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
if args.unfreeze:
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
elif args.baseline == 'albert':
model = ALBertForBooleanQuestionFR.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
elif args.baseline == 'xlnet':
model = XLNETForBooleanQuestionFR.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
elif args.qtype == 'FB':
if args.baseline == 'bert':
model = BertForBooleanQuestionFB.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
if args.unfreeze:
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
elif args.baseline == 'albert':
model = ALBertForBooleanQuestionFB.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
elif args.baseline == 'xlnet':
model = XLNETForBooleanQuestionFB.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
elif args.qtype == 'YN' and args.other_var == 'DK':
if args.baseline == 'bert':
model = BertForBooleanQuestion3ClassYN.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
if args.unfreeze:
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
# elif args.type =='YN' and args.other_var == 'YN1':
# if args.baseline == 'bert':
# model = BertForBooleanQuestionYN1.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
# model.to(device)
elif args.qtype == 'YN' and args.dataset == 'boolq':
if args.baseline == 'bert':
model = BertForBooleanQuestionYNboolq.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
if args.unfreeze:
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
elif args.qtype == 'YN' and (args.dataset == 'sprlqa' or args.dataset == 'spartun'):
if args.baseline == 'bert':
model = BertForBooleanQuestionYNsprlqa.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
if args.unfreeze:
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
elif args.qtype == 'YN':
if args.baseline == 'bert':
model = BertForBooleanQuestionYN.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
if args.unfreeze:
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
elif args.baseline == 'albert':
model = ALBertForBooleanQuestionYN.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
elif args.baseline == 'xlnet':
model = XLNETForBooleanQuestionYN.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
elif args.qtype == 'CO':
if args.baseline == 'bert':
model = BertForBooleanQuestionCO.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
if args.unfreeze:
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
elif args.baseline == 'albert':
model = ALBertForBooleanQuestionCO.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
elif args.baseline == 'xlnet':
model = XLNETForBooleanQuestionCO.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
else:
# if args.qtype == "FR":
# model = BooleanQuestionFR.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout, num_labels_FR= num_labels_FR, dataset = "human" if args.human else args.dataset)
# elif args.qtype == "YN":
# model = BooleanQuestionYN.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout, num_labels_YN= num_labels_YN, dataset = "human" if args.human else args.dataset)
model = BertMultiTaskBooleanQuestion.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout, num_labels_YN = num_labels_YN, num_labels_FR= num_labels_FR, dataset = "human" if args.human else args.dataset, LM = args.baseline, has_batch = True if args.batch_size and args.batch_size>1 else False, criterion = args.loss)
# # model = BertForMultipleChoice.from_pretrained(pretrained_data, hidden_dropout_prob = drop, attention_probs_dropout_prob = drop, return_dict=True)
if args.unfreeze:
if args.baseline == "bert":
for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
#print('I will be frozen: {}'.format(name))
param.requires_grad = False
model.to(device)
# elif args.pretrain == 'sptype+bertbc':
# if args.qtype == 'YN':
# if args.baseline == 'bert':
# model = BertForBooleanQuestionYN.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
# if args.unfreeze:
# for name, param in list(model.bert.named_parameters())[:(-12 * args.unfreeze)-2]:
# #print('I will be frozen: {}'.format(name))
# param.requires_grad = False
# elif args.baseline == 'albert':
# model = ALBertForBooleanQuestionYN.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
# elif args.baseline == 'xlnet':
# model = XLNETForBooleanQuestionYN.from_pretrained(pretrained_data, device = device, no_dropout= args.dropout)
# model.to(device)
# model
# optimizer = None
if args.optim == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr = args.lr)
elif args.optim == 'adamw':
optimizer = torch.optim.AdamW(model.parameters(), lr = args.lr)
elif args.optim == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr = args.lr)
criterion = None
# if args.loss == 'cross':
# criterion = nn.CrossEntropyLoss()
#zero_evaluation of model before any training
if args.has_zero_eval:
zero_test_file = open(result_adress+'/zero_step_test.txt','w')
test_accuracy = test(model
, pretrain = args.pretrain
, baseline = args.baseline
, test_or_dev = 'test'
, num_sample = args.test
, train_num = train_num
, unseen = False
, qtype = args.qtype
, other = args.other_var
, data_name = ("human" if args.human else args.dataset)
, save_data = args.save_data
, device = device
, file = zero_test_file
)
# test_all_accuracy.append(test_accuracy)
zero_test_file.close()
#training starts
all_loss, inter_test_all_accuracy, dev_all_accuracy, inter_test_unseen_all_accuracy, human_all_accuracy = [], [], [],[], []
all_f1, inter_test_all_f1, dev_all_f1, inter_test_unseen_all_f1, human_all_f1 = [], [], [],[], []
all_accuracy = []
best_val, best_val_unseen = -0.1, -0.1
if not args.no_train:
print('~~~~~~~~~~~~ Train ~~~~~~~~~~~~ ')
train_file = open(result_adress+'/train.txt','w')
inter_test_file = open(result_adress+'/intermediate_test.txt','w')
if args.dev_exists:
dev_file = open(result_adress+'/dev.txt','w')
no_changes = 0
for ep in tqdm(range(epochs)):
#train
if args.no_train != True:
print('******** Epoch '+str(ep)+' ******** ', file = train_file)
losses, result = train(model
, criterion = criterion
, optimizer = optimizer
, pretrain = args.pretrain
, baseline = args.baseline
, start = args.start
, num_sample = args.train
, train_num = (int(args.stepgame_train_set) if args.stepgame_train_set else None) if args.dataset == "stepgame" else train_num
, qtype = args.qtype
, data_name = "human" if args.human else args.dataset
, other = args.other_var
, device = device
, train_log = args.train_log
, file = train_file
, batch_size = args.batch_size
)
#result[0] == accuracy, result[1] if exists = f1
all_loss.append(losses)
if args.qtype == 'YN' or args.pretrain in ["tokencls", "sptypecls", "spcls"]:
all_f1.append(result[1])
all_accuracy.append(result[0])
#save model
if not args.no_save:
# print('/tank/space/rshnk/'+args.model_folder+'/model_'+args.baseline+('' if args.dataset == 'spartqa' else '_'+args.dataset)+'_final_'+args.model+'.th')
torch.save(model, model_address+'/model_'+args.baseline+'_'+args.dataset+'_final_'+args.model+'.th')
if not args.dev_exists or args.test_track:
# if args.human:
inter_test_result = test(model
, pretrain = args.pretrain
, baseline = args.baseline
, test_or_dev = 'test'
, num_sample = args.test
, train_num = args.stepgame_test_set[0] if args.dataset == "stepgame" else train_num
, unseen = False
, qtype = args.qtype
, other = args.other_var
, data_name = "human" if args.human else args.dataset
, save_data = args.save_data
, device = device
, file = inter_test_file
)
# else:
# inter_test_accuracy = test(model, args.pretrain, args.baseline, 'test', args.test, False, args.qtype, args.other_var, args.humantest, device, inter_test_file)
if args.qtype == 'YN' or args.pretrain in ["tokencls", "sptypecls", "spcls"]:
inter_test_all_f1.append(inter_test_result[1])
f1 = inter_test_result[1]
inter_test_all_accuracy.append(inter_test_result[0])
accu = inter_test_result[0]
# show image of accuracy
plt.figure()
plt.plot(inter_test_all_accuracy, label="accuracy")
plt.legend()
plt.savefig(result_adress+'/inter_test_plot_acc.png')
# plt.show()
plt.close()
if args.qtype == 'YN' or args.pretrain in ["tokencls", "sptypecls", "spcls"]:
plt.figure()
plt.plot(inter_test_all_f1, label="f1")
plt.legend()
plt.savefig(result_adress+'/inter_test_plot_f1.png')
# plt.show()
plt.close()
#valid (actucally test)
if args.dev_exists:
print('******** Epoch '+str(ep)+' ******** ', file = dev_file)
dev_result = test(model
, pretrain = args.pretrain
, baseline = args.baseline
, test_or_dev = 'dev'
, num_sample = args.dev
, train_num = args.stepgame_test_set[0] if args.dataset == "stepgame" else train_num
, unseen = False
, qtype = args.qtype
, other = args.other_var
, data_name = "human" if args.human else args.dataset
, save_data = args.save_data
, device = device
, file = dev_file
)
dev_all_accuracy.append(dev_result[0])
if args.qtype == 'YN' or args.pretrain in ["tokencls", "sptypecls", "spcls"]:
f1 = dev_result[1]
dev_all_f1.append(f1)
accu = dev_result[0]
# show image of accuracy
plt.figure()
plt.plot(dev_all_accuracy, label="accuracy")
plt.legend()
plt.savefig(result_adress+'/dev_plot_acc.png')
# plt.show()
plt.close()
if args.qtype == 'YN' or args.pretrain in ["tokencls", "sptypecls", "spcls"]:
plt.figure()
plt.plot(dev_all_f1, label="f1")
plt.legend()
plt.savefig(result_adress+'/dev_plot_f1.png')
# plt.show()
plt.close()
if not args.no_save:
if args.best_model == 'accuracy' and best_val < accu:
torch.save(model, model_address+'/model_'+args.baseline+'_'+args.dataset+'_best_'+args.model+'.th')
best_val = accu
no_changes = 0
elif args.best_model == 'f1' and best_val < f1:
torch.save(model, model_address+'/model_'+args.baseline+'_'+args.dataset+'_best_'+args.model+'.th')
best_val = f1
no_changes = 0
else:
no_changes += 1
# show image of accuracy
if args.no_train != True:
plt.figure()
plt.plot(all_accuracy, label="accuracy")
plt.legend()
plt.savefig(result_adress+'/train_plot_acc.png')
# plt.show()
plt.close()
if args.qtype == 'YN' or args.pretrain in ["tokencls", "sptypecls", "spcls"]:
plt.figure()
plt.plot(all_f1, label="f1")
plt.legend()
plt.savefig(result_adress+'/train_plot_f1.png')
# plt.show()
plt.close()
#show image of losses
plt.figure()
plt.plot(all_loss, label="loss")
plt.legend()
plt.savefig(result_adress+'/train_plot_loss.png')
# plt.show()
plt.close()
"""
check if there is three epochs consequently that the result is not better break
to do this we intialize a variable no_changes which +=1 if there is no changes
"""
# if no_changes == 10: break
if not args.no_train:
train_file.close()
inter_test_file.close()
if args.dev_exists:
dev_file.close()
if args.load and args.no_train:
best_model = model
best_model.to(device)
elif args.no_train:
best_model = model
best_model.to(device)
else:
best_model = torch.load(model_address+'/model_'+args.baseline+'_'+args.dataset+'_best_'+args.model+'.th', map_location={'cuda:0': 'cuda:'+str(args.cuda),'cuda:1': 'cuda:'+str(args.cuda),'cuda:2': 'cuda:'+str(args.cuda),'cuda:3': 'cuda:'+str(args.cuda), 'cuda:5': 'cuda:'+str(args.cuda), 'cuda:4': 'cuda:'+str(args.cuda), 'cuda:6': 'cuda:'+str(args.cuda),'cuda:7': 'cuda:'+str(args.cuda)})
best_model.to(device)
print('~~~~~~~~~~~~ Test ~~~~~~~~~~~~ ')
if not args.human and args.dataset == "stepgame":
for i in args.stepgame_test_set:
test_file = open(result_adress+'/test_qa_'+str(i)+'.txt','w')
test_accuracy = test(best_model
, pretrain = args.pretrain
, baseline = args.baseline
, test_or_dev = 'test'
, num_sample = args.test
, train_num = i
, unseen = False
, qtype = args.qtype
, other = args.other_var
# , sent_num = i
, save_data = args.save_data
, device = device
, file = test_file
)
test_file.close()
elif not args.human:
test_file = open(result_adress+'/test.txt','w')
test_accuracy = test(best_model
, pretrain = args.pretrain
, baseline = args.baseline
, test_or_dev = 'test'
, num_sample = args.test
, train_num = args.stepgame_test_set if args.dataset == "stepgame" else train_num
, unseen = False
, qtype = args.qtype
, other = args.other_var
, data_name = ("human" if args.human else args.dataset)
, save_data = args.save_data
, device = device
, file = test_file
)
# test_all_accuracy.append(test_accuracy)
test_file.close()
if args.unseentest:
inter_test_unseen_file = open(result_adress+'/unseen_test.txt','w')
inter_test_unseen_accuracy = test(best_model
, pretrain = args.pretrain
, baseline = args.baseline
, test_or_dev = 'test'
, num_sample = args.unseen
, train_num = train_num
, unseen = True
, qtype = args.qtype
, other = args.other_var
, data_name = ("human" if args.human else args.dataset)
, save_data = args.save_data
, device = device
, file = inter_test_unseen_file
)
# inter_test_unseen_all_accuracy.append(inter_test_unseen_accuracy)
inter_test_unseen_file.close()
if args.humantest:
human_file = open(result_adress+'/human_test.txt','w')
human_accuracy = test(best_model
, pretrain = args.pretrain
, baseline = args.baseline
, test_or_dev = 'test'
, num_sample = args.test
, train_num = train_num
, unseen = False
, qtype = args.qtype
, other = args.other_var
, data_name = "human"
, save_data = args.save_data
, device = device
, file = human_file
)
# human_all_accuracy.append(human_accuracy)
human_file.close()
#test starts
if args.con != 'not' :
print('~~~~~~~~~~~~ Consistency and Contrast ~~~~~~~~~~~~ ')
if args.con == 'consistency':
con_file = open(result_adress+'/consistency.txt','w')
test_accuracy = consistency(model, args.pretrain, args.baseline, args.test, args.qtype, args.other_var, args.human, device, con_file)
con_file.close()
elif args.con == 'contrast':
con_file = open(result_adress+'/contrast.txt','w')
test_accuracy = contrast(model, args.pretrain, args.baseline, args.test, args.qtype, args.other_var, args.human, device, con_file)
con_file.close()
elif args.con == 'both':
cons_file = open(result_adress+'/consistency.txt','w')
test_accuracy = consistency(model, args.pretrain, args.baseline, args.test, args.qtype, args.other_var, args.human, device, cons_file)
cons_file.close()
cont_file = open(result_adress+'/contrast.txt','w')
test_accuracy = contrast(model, args.pretrain, args.baseline, args.test, args.qtype, args.other_var, args.human, device, cont_file)
cont_file.close()
| 51,033 | 52.327064 | 613 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/PLModels.py | # from transformers import BertPreTrainedModel, BertModel, BertOnlyMLMHead
from transformers import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss, BCELoss
from typing import Union, List
import numpy as np
from torch.autograd import Variable
def weights_init_normal(m):
    """Initialize linear-style layers with fan-in-scaled normal weights.

    Intended for use with ``model.apply(weights_init_normal)``. Only modules
    whose class name contains ``"Linear"`` are touched: their weights are
    drawn from N(0, 1/sqrt(fan_in)) and their biases are zeroed. Other
    modules pass through unchanged.

    Args:
        m: a ``torch.nn.Module`` visited by ``Module.apply``.
    """
    classname = m.__class__.__name__
    if classname.find("Linear") != -1:
        try:
            fan_in = m.in_features
            # Scale the std by fan-in so activations keep roughly unit variance.
            m.weight.data.normal_(0.0, 1 / np.sqrt(fan_in))
            m.bias.data.fill_(0)
        # Was a bare ``except:`` — narrowed so only the expected "module has
        # ``in_channels`` instead of ``in_features``" case is handled and
        # real errors (e.g. KeyboardInterrupt) propagate.
        except AttributeError:
            fan_in = m.in_channels
            m.weight.data.normal_(0.0, 1 / np.sqrt(fan_in))
            print(m)
class FocalLoss(nn.Module):
    """Focal Loss (Lin et al., "Focal Loss for Dense Object Detection").

    Loss(x, class) = -alpha[class] * (1 - softmax(x)[class])**gamma
                     * log(softmax(x)[class])

    The modulating factor ``(1 - p)**gamma`` down-weights well-classified
    examples (p > .5), putting more focus on hard, misclassified ones.

    Args:
        class_num (int): number of classes C.
        alpha: optional per-class weighting factors of shape (C,) or (C, 1);
            defaults to uniform weights of 1.
        gamma (float): focusing parameter, gamma > 0.
        size_average (bool): if True (default) the losses are averaged over
            the minibatch, otherwise summed.
    """

    def __init__(self, class_num, alpha=None, gamma=2, size_average=True):
        super(FocalLoss, self).__init__()
        if alpha is None:
            self.alpha = torch.ones(class_num, 1)
        else:
            # ``torch.as_tensor`` accepts tensors and array-likes alike and
            # replaces the deprecated ``Variable`` wrapper (a no-op since
            # PyTorch 0.4).
            self.alpha = torch.as_tensor(alpha)
        self.gamma = gamma
        self.class_num = class_num
        self.size_average = size_average

    def forward(self, inputs, targets):
        """Compute the focal loss.

        Args:
            inputs: raw logits of shape (N, C).
            targets: integer class indices of shape (N,).

        Returns:
            A scalar tensor (mean or sum over the batch).
        """
        N = inputs.size(0)
        C = inputs.size(1)
        # Explicit class dimension (the old implicit-dim softmax is deprecated).
        P = F.softmax(inputs, dim=1)

        # One-hot mask selecting each row's target-class probability.
        class_mask = inputs.new_zeros(N, C)
        ids = targets.view(-1, 1)
        class_mask.scatter_(1, ids, 1.)

        # Move alpha to the inputs' device. The old code called ``.cuda()``
        # with no device argument, which broke when inputs lived on a
        # non-default GPU.
        if self.alpha.device != inputs.device:
            self.alpha = self.alpha.to(inputs.device)
        # Reshape to (C, 1) before indexing so ``alpha`` is (N, 1). With a
        # 1-D alpha the old code produced an (N,) tensor that broadcast
        # against (N, 1) probs into an (N, N) loss matrix for batch size > 1.
        alpha = self.alpha.reshape(-1, 1)[ids.view(-1)]

        probs = (P * class_mask).sum(1).view(-1, 1)
        log_p = probs.log()

        # Per-sample focal term: -alpha * (1 - p)^gamma * log(p).
        batch_loss = -alpha * torch.pow(1 - probs, self.gamma) * log_p

        if self.size_average:
            return batch_loss.mean()
        return batch_loss.sum()
class BertBooleanQuestionYN(BertPreTrainedModel):
    """BERT encoder with one binary head per yes/no (YN) answer label.

    Each of the ``num_labels_YN`` heads is a 2-way linear classifier trained
    with its own :class:`FocalLoss`, whose alpha pair is chosen per dataset
    to counter class imbalance.
    """

    def __init__(self, config,
                 device='cuda:0',
                 no_dropout=False,
                 num_labels_YN=2,
                 dataset="spartun"):
        super().__init__(config)
        if no_dropout:
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device2 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        self.num_labels = num_labels_YN
        # Dataset-specific focal-loss alphas: one [neg, pos] pair per head.
        if dataset == "spartqa":
            self.alphas = torch.tensor([[0.5, 0.5], [0.25, 0.75], [0.27, 0.73]]).to(self.device2)
        elif dataset == "human":
            self.alphas = torch.tensor([[0.5, 0.5], [0.6, 0.4], [0.1, 0.9]]).to(self.device2)
        elif dataset == "spartun":
            self.alphas = torch.tensor([[0.55, 0.45], [0.45, 0.55]]).to(self.device2)
        else:
            # BUG FIX: was ``self.num_labels_YN`` — an attribute that is never
            # defined (the constructor stores it as ``self.num_labels``), so
            # any other dataset raised AttributeError here.
            self.alphas = torch.tensor([[0.5, 0.5]] * self.num_labels).to(self.device2)
        classifiers = []
        # Plain list (not a ModuleList): FocalLoss holds no learnable
        # parameters, so it does not need to be registered.
        self.criterion = []
        for item in range(self.num_labels):
            classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
            self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma=2))
        self.classifiers = nn.ModuleList(classifiers)
        self.sigmoid = nn.Sigmoid()
        # Explicit dim avoids the implicit-dim softmax deprecation; logits
        # are (batch, 2), so dim=-1 matches the old implicit behavior.
        self.softmax = nn.Softmax(dim=-1)
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        task=None,
        multi_task=None
    ):
        """Encode the input and score every YN label head.

        Args:
            labels: optional (batch, num_labels) integer targets; column
                ``ind`` is the binary target for head ``ind``.
            task, multi_task: accepted for interface compatibility; unused.

        Returns:
            ``(loss_or_None, logits, *encoder_extras)`` where ``logits`` has
            shape (num_labels, batch, 2) of softmaxed head outputs and
            ``loss`` is the focal loss summed over heads (None when
            ``labels`` is None).
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        pooled_output = self.dropout(outputs[1])
        logits = [self.classifiers[ind](pooled_output) for ind in range(self.num_labels)]

        if labels is not None:
            loss = 0
            out_logits = []
            for ind, logit in enumerate(logits):
                loss += self.criterion[ind](logit, labels[:, ind])
                out_logits.append(self.softmax(logit))
            return (loss, torch.stack(out_logits),) + outputs[2:]

        out_logits = [self.softmax(logit) for logit in logits]
        return (None, torch.stack(out_logits),) + outputs[2:]
class BertBooleanQuestionFR(BertPreTrainedModel):
    """BERT encoder with one binary head per find-relation (FR) answer label.

    Every label gets its own 2-way linear classifier plus a FocalLoss whose
    dataset-specific [neg, pos] alpha pair compensates for label imbalance.
    """

    def __init__(self, config,
                 device='cuda:0',
                 no_dropout=False,
                 num_labels_FR=7,
                 dataset="spartun"):
        super().__init__(config)
        if no_dropout:
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        self.num_labels = num_labels_FR
        # Per-dataset focal-loss alpha rows (one [neg, pos] pair per head);
        # unknown datasets fall back to uniform [0.5, 0.5] pairs.
        alpha_rows = {
            "spartqa": [[0.20, 0.8], [0.20, 0.8], [0.25, 0.75], [0.4, 0.6], [0.1, 0.9], [0.2, 0.98], [0.2, 0.98]],
            "human": [[0.35, 0.65], [0.25, 0.75], [0.25, 0.75], [0.2, 0.8], [0.25, 0.75], [0.45, 0.55], [0.05, 0.95]],
            "spartun": [[0.1, 0.9], [0.1, 0.9], [0.25, 0.75], [0.25, 0.75], [0.1, 0.9], [0.1, 0.9], [0.04, 0.96], [0.15, 0.85], [0.24, 0.76], [0.07, 0.93], [0.02, 0.98], [0.05, 0.95], [0.12, 0.88], [0.05, 0.95], [0.1, 0.9]],
            "babi": [[0.60, 0.4], [0.57, 0.43], [0.6, 0.4], [0.41, 0.59]],
        }.get(dataset, [[0.5, 0.5]] * self.num_labels)
        self.alphas = torch.tensor(alpha_rows).to(self.device1)
        # FocalLoss carries no learnable parameters, so a plain list suffices.
        self.criterion = [
            FocalLoss(alpha=self.alphas[head], class_num=self.num_classes, gamma=2)
            for head in range(self.num_labels)
        ]
        self.classifiers = nn.ModuleList(
            nn.Linear(config.hidden_size, self.num_classes)
            for _ in range(self.num_labels)
        )
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        task=None,
        multi_task=None
    ):
        """Encode the input and score every FR label head.

        Returns ``(loss_or_None, stacked_softmaxed_logits, *encoder_extras)``
        where the stacked logits have shape (num_labels, batch, 2) and the
        loss is the focal loss summed over heads.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        pooled = self.dropout(outputs[1])
        raw_logits = [head(pooled) for head in self.classifiers]

        if labels is None:
            scored = [self.softmax(logit) for logit in raw_logits]
            return (None, torch.stack(scored),) + outputs[2:]

        total_loss = 0
        scored = []
        for head_idx, logit in enumerate(raw_logits):
            # labels[:, head_idx] is the binary target for this head.
            total_loss += self.criterion[head_idx](logit, labels[:, head_idx])
            scored.append(self.softmax(logit))
        return (total_loss, torch.stack(scored),) + outputs[2:]
class BertMultipleClass(BertPreTrainedModel):
    """Single-head multiclass classifier over the pooled BERT output.

    One ``num_classes``-way linear head trained with FocalLoss; for
    spartqa YN questions the alphas are hand-tuned, otherwise uniform.
    """

    def __init__(self, config,
                 device='cuda:0',
                 no_dropout=False,
                 num_classes=3,
                 dataset="spartun",
                 qtype="FR"):
        super().__init__(config)
        if no_dropout:
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = num_classes
        self.qtype = qtype
        # Class-weighting for the focal loss: hand-tuned only for spartqa YN,
        # uniform for every other YN/FR configuration. (Other qtypes leave
        # ``self.alphas`` unset, matching the original behavior.)
        if self.qtype == 'YN' and dataset == "spartqa":
            self.alphas = torch.tensor([0.67, 1.3, 1.32]).to(self.device1)
        elif self.qtype in ('YN', 'FR'):
            self.alphas = torch.tensor([1 / self.num_classes] * self.num_classes).to(self.device1)
        self.classifiers = nn.Linear(config.hidden_size, self.num_classes)
        self.criterion = FocalLoss(alpha=self.alphas, class_num=self.num_classes, gamma=2)
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Encode the input and classify the pooled representation.

        Returns ``(loss_or_None, softmax_probs, *encoder_extras)`` where the
        probabilities have shape (batch, num_classes).
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        pooled = self.dropout(outputs[1])
        logit = self.classifiers(pooled)
        probs = self.softmax(logit)
        loss = self.criterion(logit, labels) if labels is not None else None
        return (loss, probs,) + outputs[2:]
class BertMultiTaskMultipleClass(BertPreTrainedModel):
    """Multi-task BERT with one multi-class head per question type.

    Two independent linear heads share one BERT encoder: a Yes/No ("YN")
    head with ``num_classes_YN`` outputs and a find-relation ("FR") head
    with ``num_classes_FR`` outputs.  Each head has its own loss
    (FocalLoss by default, CrossEntropyLoss when ``criterion == 'cross'``)
    weighted by dataset-specific class priors (``alphasYN``/``alphasFR``).
    """
    #TODO add criterion to load
    def __init__(self, config,
                 device = 'cuda:0',
                 no_dropout = False,
                 num_classes_YN = None,
                 num_classes_FR = None,
                 dataset = "stepgame",
                 LM = "bert",
                 has_batch = False,
                 criterion = 'focal'
                 ):
        super().__init__(config)
        if no_dropout:
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes_YN = num_classes_YN
        self.num_classes_FR = num_classes_FR
        if self.num_classes_YN:
            # Dataset-specific class priors used as loss weights.
            if dataset == "spartqa":
                self.alphasYN = torch.tensor([0.67, 1.3, 1.32]).to(self.device1)
            elif dataset == "spartun":
                self.alphasYN = torch.tensor([0.4, 0.6]).to(self.device1)
            else:
                self.alphasYN = torch.tensor([1/self.num_classes_YN]*self.num_classes_YN).to(self.device1)
            #TODO test MLP, add batch
            self.classifiers_YN = nn.Linear(config.hidden_size, self.num_classes_YN)
            if criterion == "cross":
                # BUG FIX: was `weight=self.alphas` — that attribute is never
                # defined on this class, so criterion == "cross" raised
                # AttributeError.  Use the YN weights computed above.
                self.criterion_YN = nn.CrossEntropyLoss(weight=self.alphasYN)
            else:
                self.criterion_YN = FocalLoss(alpha=self.alphasYN, class_num=self.num_classes_YN, gamma = 2)
        if self.num_classes_FR:
            #TODO compute for stepgame
            self.alphasFR = torch.tensor([1/self.num_classes_FR]*self.num_classes_FR).to(self.device1)
            self.classifiers_FR = nn.Linear(config.hidden_size, self.num_classes_FR)
            if criterion == "cross":
                # BUG FIX: same undefined `self.alphas` — use the FR weights.
                self.criterion_FR = nn.CrossEntropyLoss(weight=self.alphasFR)
            else:
                self.criterion_FR = FocalLoss(alpha=self.alphasFR, class_num=self.num_classes_FR, gamma = 2, size_average=False)
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        task = "YN",
        multi_task = False
    ):
        """Run the head selected by ``task`` ("YN" or "FR").

        Returns ``(loss, softmax_probs, *extras)``; ``loss`` is ``None``
        when ``labels`` is not given.  ``multi_task`` is accepted for
        interface compatibility and not used here.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        if task == "YN":
            logit = self.classifiers_YN(pooled_output)
        elif task == "FR":
            logit = self.classifiers_FR(pooled_output)
        out_logits = self.softmax(logit)
        if labels is not None:
            if task == "YN":
                loss = self.criterion_YN(logit, labels)
            elif task == "FR":
                loss = self.criterion_FR(logit, labels)
            outputs = (loss, out_logits,) + outputs[2:]
        else:
            outputs = (None, out_logits,) + outputs[2:]
        return outputs
class BertMultiTaskMultipleClassLoad(BertPreTrainedModel):
    """Checkpoint-loading variant of :class:`BertMultiTaskMultipleClass`.

    Identical behaviour, but the classifier attributes are suffixed with
    ``_load`` (``classifiers_YN_load`` / ``classifiers_FR_load``) —
    presumably to match the parameter names in a saved state dict; keep
    these names so checkpoints keep loading.
    """
    #TODO add criterion to load
    def __init__(self, config,
                 device = 'cuda:0',
                 no_dropout = False,
                 num_classes_YN = None,
                 num_classes_FR = None,
                 dataset = "stepgame",
                 LM = "bert",
                 has_batch = False,
                 criterion = 'focal'
                 ):
        super().__init__(config)
        if no_dropout:
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes_YN = num_classes_YN
        self.num_classes_FR = num_classes_FR
        if self.num_classes_YN:
            # Dataset-specific class priors used as loss weights.
            if dataset == "spartqa":
                self.alphasYN = torch.tensor([0.67, 1.3, 1.32]).to(self.device1)
            elif dataset == "spartun":
                self.alphasYN = torch.tensor([0.4, 0.6]).to(self.device1)
            else:
                self.alphasYN = torch.tensor([1/self.num_classes_YN]*self.num_classes_YN).to(self.device1)
            #TODO test MLP, add batch
            self.classifiers_YN_load = nn.Linear(config.hidden_size, self.num_classes_YN)
            if criterion == "cross":
                # BUG FIX: was `weight=self.alphas` — that attribute is never
                # defined on this class, so criterion == "cross" raised
                # AttributeError.  Use the YN weights computed above.
                self.criterion_YN = nn.CrossEntropyLoss(weight=self.alphasYN)
            else:
                self.criterion_YN = FocalLoss(alpha=self.alphasYN, class_num=self.num_classes_YN, gamma = 2)
        if self.num_classes_FR:
            #TODO compute for stepgame
            self.alphasFR = torch.tensor([1/self.num_classes_FR]*self.num_classes_FR).to(self.device1)
            self.classifiers_FR_load = nn.Linear(config.hidden_size, self.num_classes_FR)
            if criterion == "cross":
                # BUG FIX: same undefined `self.alphas` — use the FR weights.
                self.criterion_FR = nn.CrossEntropyLoss(weight=self.alphasFR)
            else:
                self.criterion_FR = FocalLoss(alpha=self.alphasFR, class_num=self.num_classes_FR, gamma = 2, size_average=False)
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        task = "YN",
        multi_task = False
    ):
        """Run the head selected by ``task`` ("YN" or "FR").

        Returns ``(loss, softmax_probs, *extras)``; ``loss`` is ``None``
        when ``labels`` is not given.  ``multi_task`` is accepted for
        interface compatibility and not used here.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        if task == "YN":
            logit = self.classifiers_YN_load(pooled_output)
        elif task == "FR":
            logit = self.classifiers_FR_load(pooled_output)
        out_logits = self.softmax(logit)
        if labels is not None:
            if task == "YN":
                loss = self.criterion_YN(logit, labels)
            elif task == "FR":
                loss = self.criterion_FR(logit, labels)
            outputs = (loss, out_logits,) + outputs[2:]
        else:
            outputs = (None, out_logits,) + outputs[2:]
        return outputs
#TODO bertpretraining is added
class BertMultiTaskBooleanQuestion(BertPreTrainedModel):
    """Multi-task BERT with one *binary* head per candidate label.

    For each question type ("YN" and "FR") there is a bank of
    ``num_labels_*`` independent 2-way classifiers over the pooled [CLS]
    vector, each with its own loss whose class weights (``alphasYN`` /
    ``alphasFR``) are dataset-specific priors.
    """

    def __init__(self, config,
                 device = 'cuda:0',
                 no_dropout = False,
                 num_labels_YN = None,
                 num_labels_FR = None,
                 dataset = "spartqa",
                 LM = "bert",
                 has_batch = False,
                 criterion = "focal"
                 ):
        super().__init__(config)
        if no_dropout:
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2  # every head is a binary (holds / doesn't hold) decision
        self.num_labels_YN = num_labels_YN
        self.num_labels_FR = num_labels_FR
        if self.num_labels_YN:
            # Per-label binary class weights (dataset-specific priors).
            if dataset == "spartqa":
                self.alphasYN = torch.tensor([[0.5, 0.5], [0.25, 0.75], [0.27, 0.73] ]).to(self.device1)
            elif dataset == "human":
                self.alphasYN = torch.tensor([[0.5, 0.5], [0.6, 0.4], [0.1, 0.9] ]).to(self.device1)
            elif dataset == "spartun":
                self.alphasYN = torch.tensor([[0.55, 0.45], [0.45, 0.55]]).to(self.device1)
            else:
                self.alphasYN = torch.tensor([[0.5, 0.5]]*self.num_labels_YN).to(self.device1)
            # One binary classifier + one loss per YN label.
            classifiers_YN = []
            self.criterion_YN = []
            for item in range(self.num_labels_YN):
                classifiers_YN.append(nn.Linear(config.hidden_size, self.num_classes))
                if criterion == "cross":
                    self.criterion_YN.append(nn.CrossEntropyLoss(weight=self.alphasYN[item]))
                else:
                    self.criterion_YN.append(FocalLoss(alpha=self.alphasYN[item], class_num=self.num_classes, gamma = 2))
            self.classifiers_YN = nn.ModuleList(classifiers_YN)
        if self.num_labels_FR:
            if dataset == "spartqa":
                self.alphasFR = torch.tensor([[0.20, 0.8], [0.20, 0.8], [0.25, 0.75], [0.4, 0.6], [0.1, 0.9], [0.02, 0.98], [0.02, 0.98]]).to(self.device1)
            elif dataset == "human":
                self.alphasFR = torch.tensor([[0.35, 0.65], [0.25, 0.75], [0.25, 0.75], [0.2, 0.8], [0.25, 0.75], [0.45, 0.55], [0.05, 0.95]]).to(self.device1)
            elif dataset == "spartun":
                self.alphasFR = torch.tensor([[0.1, 0.9], [0.1, 0.9], [0.27, 0.73], [0.26, 0.74], [0.1, 0.9], [0.1, 0.9], [0.02, 0.98], [0.15, 0.85], [0.26, 0.74], [0.07, 0.93], [0.002, 0.998], [0.02, 0.98], [0.1, 0.9], [0.03, 0.97], [0.08, 0.92]]).to(self.device1)
            elif dataset == "babi":
                self.alphasFR = torch.tensor([[0.60, 0.4], [0.57, 0.43], [0.6, 0.4], [0.41, 0.59]]).to(self.device1)
            else:
                # CONSISTENCY FIX: this branch was the only one that did not
                # move the weights to the target device.
                self.alphasFR = torch.tensor([[0.5, 0.5]]*self.num_labels_FR).to(self.device1)
            # One binary classifier + one loss per FR label.
            classifiers_FR = []
            self.criterion_FR = []
            #TODO add batch
            for item in range(self.num_labels_FR):
                classifiers_FR.append(nn.Linear(config.hidden_size, self.num_classes))
                # BUG FIX: was `criterion == "corss"` (typo), so the
                # CrossEntropyLoss option could never be selected for FR.
                if criterion == "cross":
                    self.criterion_FR.append(nn.CrossEntropyLoss(weight=self.alphasFR[item]))
                else:
                    self.criterion_FR.append(FocalLoss(alpha=self.alphasFR[item], class_num=self.num_classes, gamma = 2, size_average=True))
            self.classifiers_FR = nn.ModuleList(classifiers_FR)
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        task = "YN",
        multi_task = False
    ):
        """Run every binary head of the bank selected by ``task``.

        ``labels`` is assumed to be (batch, num_labels_*) of 0/1 class
        indices, one column per head — TODO confirm against callers.
        Returns ``(loss, stacked_probs, *extras)``; ``loss`` is ``None``
        when ``labels`` is not given.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Pooled [CLS] token representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        out_logits = []
        loss = 0
        if task == "YN":
            for ind in range(self.num_labels_YN):
                logit = self.classifiers_YN[ind](pooled_output)
                if labels is not None:
                    loss += self.criterion_YN[ind](logit, labels[:, ind])
                out_logits.append(self.softmax(logit))
        elif task == "FR":
            for ind in range(self.num_labels_FR):
                logit = self.classifiers_FR[ind](pooled_output)
                if labels is not None:
                    loss += self.criterion_FR[ind](logit, labels[:, ind])
                out_logits.append(self.softmax(logit))
        if labels is not None:
            outputs = (loss, torch.stack(out_logits),) + outputs[2:]
        else:
            outputs = (None, torch.stack(out_logits),) + outputs[2:]
        return outputs
class BertMultiTaskBooleanQuestionLoad(BertPreTrainedModel):
    """Checkpoint-loading variant of :class:`BertMultiTaskBooleanQuestion`.

    Identical behaviour, but the classifier banks are named
    ``classifiers_YN_load`` / ``classifiers_FR_load`` — presumably to match
    parameter names in a saved state dict; keep these names so checkpoints
    keep loading.
    """

    def __init__(self, config,
                 device = 'cuda:0',
                 no_dropout = False,
                 num_labels_YN = None,
                 num_labels_FR = None,
                 dataset = "spartqa",
                 LM = "bert",
                 has_batch = False,
                 criterion = "focal"
                 ):
        super().__init__(config)
        if no_dropout:
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2  # every head is a binary decision
        self.num_labels_YN = num_labels_YN
        self.num_labels_FR = num_labels_FR
        if self.num_labels_YN:
            # Per-label binary class weights (dataset-specific priors).
            if dataset == "spartqa":
                self.alphasYN = torch.tensor([[0.5, 0.5], [0.25, 0.75], [0.27, 0.73] ]).to(self.device1)
            elif dataset == "human":
                self.alphasYN = torch.tensor([[0.5, 0.5], [0.6, 0.4], [0.1, 0.9] ]).to(self.device1)
            elif dataset == "spartun":
                self.alphasYN = torch.tensor([[0.55, 0.45], [0.45, 0.55]]).to(self.device1)
            else:
                self.alphasYN = torch.tensor([[0.5, 0.5]]*self.num_labels_YN).to(self.device1)
            # One binary classifier + one loss per YN label.
            classifiers_YN = []
            self.criterion_YN = []
            for item in range(self.num_labels_YN):
                classifiers_YN.append(nn.Linear(config.hidden_size, self.num_classes))
                if criterion == "cross":
                    self.criterion_YN.append(nn.CrossEntropyLoss(weight=self.alphasYN[item]))
                else:
                    self.criterion_YN.append(FocalLoss(alpha=self.alphasYN[item], class_num=self.num_classes, gamma = 2))
            self.classifiers_YN_load = nn.ModuleList(classifiers_YN)
        if self.num_labels_FR:
            if dataset == "spartqa":
                self.alphasFR = torch.tensor([[0.20, 0.8], [0.20, 0.8], [0.25, 0.75], [0.4, 0.6], [0.1, 0.9], [0.02, 0.98], [0.02, 0.98]]).to(self.device1)
            elif dataset == "human":
                self.alphasFR = torch.tensor([[0.35, 0.65], [0.25, 0.75], [0.25, 0.75], [0.2, 0.8], [0.25, 0.75], [0.45, 0.55], [0.05, 0.95]]).to(self.device1)
            elif dataset == "spartun":
                self.alphasFR = torch.tensor([[0.1, 0.9], [0.1, 0.9], [0.27, 0.73], [0.26, 0.74], [0.1, 0.9], [0.1, 0.9], [0.02, 0.98], [0.15, 0.85], [0.26, 0.74], [0.07, 0.93], [0.002, 0.998], [0.02, 0.98], [0.1, 0.9], [0.03, 0.97], [0.08, 0.92]]).to(self.device1)
            elif dataset == "babi":
                self.alphasFR = torch.tensor([[0.60, 0.4], [0.57, 0.43], [0.6, 0.4], [0.41, 0.59]]).to(self.device1)
            else:
                # CONSISTENCY FIX: this branch was the only one that did not
                # move the weights to the target device.
                self.alphasFR = torch.tensor([[0.5, 0.5]]*self.num_labels_FR).to(self.device1)
            # One binary classifier + one loss per FR label.
            classifiers_FR = []
            self.criterion_FR = []
            #TODO add batch
            for item in range(self.num_labels_FR):
                classifiers_FR.append(nn.Linear(config.hidden_size, self.num_classes))
                # BUG FIX: was `criterion == "corss"` (typo), so the
                # CrossEntropyLoss option could never be selected for FR.
                if criterion == "cross":
                    self.criterion_FR.append(nn.CrossEntropyLoss(weight=self.alphasFR[item]))
                else:
                    self.criterion_FR.append(FocalLoss(alpha=self.alphasFR[item], class_num=self.num_classes, gamma = 2, size_average=True))
            self.classifiers_FR_load = nn.ModuleList(classifiers_FR)
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        task = "YN",
        multi_task = False
    ):
        """Run every binary head of the bank selected by ``task``.

        ``labels`` is assumed to be (batch, num_labels_*) of 0/1 class
        indices, one column per head — TODO confirm against callers.
        Returns ``(loss, stacked_probs, *extras)``; ``loss`` is ``None``
        when ``labels`` is not given.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Pooled [CLS] token representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        out_logits = []
        loss = 0
        if task == "YN":
            for ind in range(self.num_labels_YN):
                logit = self.classifiers_YN_load[ind](pooled_output)
                if labels is not None:
                    loss += self.criterion_YN[ind](logit, labels[:, ind])
                out_logits.append(self.softmax(logit))
        elif task == "FR":
            for ind in range(self.num_labels_FR):
                logit = self.classifiers_FR_load[ind](pooled_output)
                if labels is not None:
                    loss += self.criterion_FR[ind](logit, labels[:, ind])
                out_logits.append(self.softmax(logit))
        if labels is not None:
            outputs = (loss, torch.stack(out_logits),) + outputs[2:]
        else:
            outputs = (None, torch.stack(out_logits),) + outputs[2:]
        return outputs
class BertForSpatialRelationExtraction(BertPreTrainedModel):
    """BERT with ``num_labels`` independent binary heads for spatial-relation
    extraction: each head decides whether one relation type holds.
    """

    def __init__(self, config, num_labels = 12, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.num_labels = num_labels
        self.num_classes = 2  # each relation head is a binary decision
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # BUG FIX: was `self.num_labels_FR`, an attribute that does not exist
        # on this class (copied from the multi-task variants) and raised
        # AttributeError at construction.  Uniform weights, moved to the
        # target device for consistency with the sibling classes.
        self.alphas = torch.tensor([[0.5, 0.5]]*self.num_labels).to(device)
        classifiers = []
        self.criterion = []
        for item in range(self.num_labels):
            classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
            self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2, size_average=True))
        self.classifiers = nn.ModuleList(classifiers)
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Run all relation heads over the pooled [CLS] vector.

        ``labels`` is assumed to be (batch, num_labels) of 0/1 class indices
        — TODO confirm against callers.  Returns ``(loss, stacked_probs,
        *extras)``; ``loss`` is ``None`` when ``labels`` is not given.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        loss = 0
        out_logits = []
        # BUG FIX: was `range(self.num_labels_FR)` (nonexistent attribute).
        for ind in range(self.num_labels):
            logit = self.classifiers[ind](pooled_output)
            if labels is not None:
                loss += self.criterion[ind](logit, labels[:, ind])
            out_logits.append(self.softmax(logit))
        if labels is not None:
            outputs = (loss, torch.stack(out_logits),) + outputs[2:]
        else:
            outputs = (None, torch.stack(out_logits),) + outputs[2:]
        return outputs
# if labels is not None:
# # label1 = labels[0].float()
# if self.num_labels == 1:
# # We are doing regression
# # loss_fct = MSELoss()
# loss_fct = BCELoss()
# out_logits1 = self.sigmoid(logits1)
# loss += loss_fct(out_logits1.view(-1), labels[0].view(-1))
# else:
# loss_fct = CrossEntropyLoss()
# loss += loss_fct(logits1.view(-1, self.num_labels), labels[0].view(-1))
# out_logits1 = logits1#self.softmax(logits1)
# # outputs1 = (loss,) + outputs1
# # label2 = labels[1].long()
# if self.num_type_class == 1:
# # We are doing regression
# # loss_fct = MSELoss()
# out_logits2 = self.sigmoid(logits2)
# loss_fct = BCELoss()
# loss += loss_fct(out_logits2.view(-1), labels[1].view(-1))
# else:
# loss_fct = CrossEntropyLoss()
# loss += loss_fct(logits2.view(-1, self.num_type_class), labels[1].view(-1))
# out_logits2 = logits2#self.softmax(logits2)
# # outputs2 = (loss,) + outputs2
# else:
# out_logits1 = self.sigmoid(logits1)
# out_logits2 = logits2
# outputs1 = (out_logits1,) + outputs[2:] # add hidden states and attention if they are here
# outputs2 = (out_logits2,) + outputs[2:]
# return loss, outputs1, outputs2 # (loss), logits, (hidden_states), (attentions)
class BertForSequenceClassification1(BertPreTrainedModel):
    """BERT with two parallel classification heads on the pooled output.

    Head 1 predicts over ``config.num_labels`` classes, head 2 over
    ``type_class`` classes.  A head with exactly one output unit is treated
    as a sigmoid/BCE regression-style target; otherwise plain
    cross-entropy over the logits is used.
    """

    def __init__(self, config, type_class = 0, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Deterministic mode: switch off all dropout.
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.num_labels = config.num_labels
        self.num_type_class = type_class
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier1 = nn.Linear(config.hidden_size, self.num_labels)
        self.classifier2 = nn.Linear(config.hidden_size, self.num_type_class)
        self.init_weights()
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Compute both heads; return ``(loss, head1_outputs, head2_outputs)``.

        ``labels`` is assumed to be a pair-like container: ``labels[0]`` for
        head 1 and ``labels[1]`` for head 2 — TODO confirm against callers.
        Without labels, ``loss`` is 0 and head 1 returns sigmoid
        probabilities while head 2 returns raw logits.
        """
        encoder_out = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        cls_repr = self.dropout(encoder_out[1])
        head1_logits = self.classifier1(cls_repr)
        head2_logits = self.classifier2(cls_repr)
        loss = 0
        if labels is None:
            head1_out = self.sigmoid(head1_logits)
            head2_out = head2_logits
        else:
            # Head 1: BCE on sigmoid when single-unit, else cross-entropy.
            if self.num_labels == 1:
                head1_out = self.sigmoid(head1_logits)
                loss += BCELoss()(head1_out.view(-1), labels[0].view(-1))
            else:
                loss += CrossEntropyLoss()(head1_logits.view(-1, self.num_labels), labels[0].view(-1))
                head1_out = head1_logits
            # Head 2: same scheme with its own label column.
            if self.num_type_class == 1:
                head2_out = self.sigmoid(head2_logits)
                loss += BCELoss()(head2_out.view(-1), labels[1].view(-1))
            else:
                loss += CrossEntropyLoss()(head2_logits.view(-1, self.num_type_class), labels[1].view(-1))
                head2_out = head2_logits
        extras = encoder_out[2:]
        return loss, (head1_out,) + extras, (head2_out,) + extras
# class BertForBooleanQuestionFB(BertPreTrainedModel):
# def __init__(self, config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device2 = device
# self.bert = BertModel(config)
# self.bert_answer = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# self.classifier = nn.Linear(config.hidden_size, self.num_classes)
# self.alpha = torch.tensor([0.5, 0.5]).to(self.device2)
# self.criterion = FocalLoss(alpha=self.alpha, class_num=self.num_classes, gamma = 2)
# self.rnn = nn.LSTM(config.hidden_size, int(config.hidden_size/2), 1, bidirectional=True)
# self.l1 = nn.Linear(config.hidden_size, config.hidden_size)
# self.l2 = nn.Linear(config.hidden_size, config.hidden_size)
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# self.init_weights()
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# options=None
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[0]
# pooled_output = self.dropout(pooled_output)
# pooled_output, _ = self.rnn(pooled_output)
# pooled_output = torch.stack([pooled[-1] for pooled in pooled_output])
# logits = self.classifier(pooled_output)
# if labels is not None:
# loss = self.criterion(logits, labels)
# # print(loss)
# out_logits = self.softmax(logits)
# outputs = (loss, out_logits,) + outputs[2:]
# # outputs = (,) + outputs
# return outputs
# class BertForBooleanQuestionCO(BertPreTrainedModel):
# def __init__(self, config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device2 = device
# self.bert = BertModel(config)
# self.bert_answer = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# self.classifier = nn.Linear(config.hidden_size, self.num_classes)
# self.alpha = torch.tensor([0.35, 0.65]).to(self.device2)
# self.criterion = FocalLoss(alpha=self.alpha, class_num=self.num_classes, gamma = 2)
# self.rnn = nn.LSTM(config.hidden_size, int(config.hidden_size/2), 1, bidirectional=True)
# self.l1 = nn.Linear(config.hidden_size, config.hidden_size)
# self.l2 = nn.Linear(config.hidden_size, config.hidden_size)
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# self.init_weights()
# #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# options=None
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[0]
# pooled_output = self.dropout(pooled_output)
# pooled_output, _ = self.rnn(pooled_output)
# pooled_output = torch.stack([pooled[-1] for pooled in pooled_output])
# logits = self.classifier(pooled_output)
# if labels is not None:
# loss = self.criterion(logits, labels)
# out_logits = self.softmax(logits)
# outputs = (loss, out_logits,) + outputs[2:]
# return outputs
# class BertForBooleanQuestionBabi(BertPreTrainedModel):
# def __init__(self, config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device = device
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# self.classifier = nn.Linear(config.hidden_size, self.num_classes)
# self.alpha = torch.tensor([0.5, 0.5]).to(self.device)
# self.criterion = FocalLoss(alpha=self.alpha, class_num=self.num_classes, gamma = 2)
# self.criterion = CrossEntropyLoss()
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# self.init_weights()
# #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# # print('pool out: ', pooled_output)
# pooled_output = self.dropout(pooled_output)
# # print(pooled_output.shape)
# logits = self.classifier(pooled_output)
# # print('logit tu', logits)
# if labels is not None:
# loss = self.criterion(logits, labels)
# logits = self.softmax(logits)
# outputs = (loss, logits,) + outputs[2:]
# return outputs # (loss), reshaped_logits, (hidden_st
# class BertForBooleanQuestionYN(BertPreTrainedModel):
# def __init__(self, config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device2 = device
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# self.classifier = nn.Linear(config.hidden_size, self.num_classes)
# # self.alphas = torch.tensor([[0.5, 0.5], [0.25, 0.75], [0.27, 0.73] ]).to(self.device)
# # self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5] ]).to('cuda:3')
# # self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5], [1, 0] ]).to(self.device)
# self.alphas = torch.tensor([[0.5, 0.5], [0.25, 0.75], [0.27, 0.73] ]).to(self.device2)
# classifiers = []
# self.criterion = []
# for item in range(3):
# classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
# self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
# self.classifiers = nn.ModuleList(classifiers)
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# self.init_weights()
# #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# pooled_output = self.dropout(pooled_output)
# logits = []
# for ind in range(3):
# logit = self.classifiers[ind](pooled_output)
# logits.append(logit.squeeze(0))
# if labels is not None:
# loss = 0
# out_logits = []
# for ind, logit in enumerate(logits):
# loss += self.criterion[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
# out_logits.append(self.softmax(logit))
# outputs = (loss, torch.stack(out_logits),) + outputs[2:]
# return outputs
# class BertForBooleanQuestionYN1(BertPreTrainedModel):
# def __init__(self,config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device1 = device
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# self.num_labels = 3
# self.alphas = torch.tensor([[0.5, 0.5], [0.25, 0.75], [0.27, 0.73] ]).to(self.device1)
# # self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5]]).to(self.device)
# classifiers = []
# self.criterion1 = []
# for item in range(self.num_labels):
# classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
# self.criterion1.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
# self.classifiers1 = nn.ModuleList(classifiers)
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# self.init_weights()
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# pooled_output = self.dropout(pooled_output)
# logits = []
# for ind in range(self.num_labels):
# logit = self.classifiers1[ind](pooled_output)
# logits.append(logit.squeeze(0))
# if labels is not None:
# loss = 0
# out_logits = []
# for ind, logit in enumerate(logits):
# loss += self.criterion1[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
# out_logits.append(self.softmax(logit))
# outputs = (loss, torch.stack(out_logits),) + outputs[2:]
# return outputs
# class BertForBooleanQuestionCO1(BertPreTrainedModel):
# def __init__(self, config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device1 = device
# self.bert = BertModel(config)
# self.bert_answer = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# self.classifier1 = nn.Linear(config.hidden_size, self.num_classes)
# self.alpha = torch.tensor([0.35, 0.65]).to(self.device1)
# self.criterion = FocalLoss(alpha=self.alpha, class_num=self.num_classes, gamma = 2)
# self.rnn = nn.LSTM(config.hidden_size, int(config.hidden_size/2), 1, bidirectional=True)
# self.l1 = nn.Linear(config.hidden_size, config.hidden_size)
# self.l2 = nn.Linear(config.hidden_size, config.hidden_size)
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# self.init_weights()
# #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# options=None
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[0]
# pooled_output = self.dropout(pooled_output)
# pooled_output, _ = self.rnn(pooled_output)
# pooled_output = torch.stack([pooled[-1] for pooled in pooled_output])
# logits = self.classifier1(pooled_output)
# if labels is not None:
# loss = self.criterion(logits, labels)
# out_logits = self.softmax(logits)
# outputs = (loss, out_logits,) + outputs[2:]
# return outputs
# class BertForBooleanQuestionFB1(BertPreTrainedModel):
# def __init__(self, config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device1 = device
# self.bert = BertModel(config)
# self.bert_answer = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# self.classifier1 = nn.Linear(config.hidden_size, self.num_classes)
# self.alpha = torch.tensor([0.5, 0.5]).to(self.device1)
# self.criterion = FocalLoss(alpha=self.alpha, class_num=self.num_classes, gamma = 2)
# self.rnn = nn.LSTM(config.hidden_size, int(config.hidden_size/2), 1, bidirectional=True)
# self.l1 = nn.Linear(config.hidden_size, config.hidden_size)
# self.l2 = nn.Linear(config.hidden_size, config.hidden_size)
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# self.init_weights()
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# options=None
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[0]
# pooled_output = self.dropout(pooled_output)
# pooled_output, _ = self.rnn(pooled_output)
# pooled_output = torch.stack([pooled[-1] for pooled in pooled_output])
# logits = self.classifier1(pooled_output)
# if labels is not None:
# loss = self.criterion(logits, labels)
# # print(loss)
# out_logits = self.softmax(logits)
# outputs = (loss, out_logits,) + outputs[2:]
# # outputs = (,) + outputs
# return outputs
# class BertForBooleanQuestionFR1(BertPreTrainedModel):
# def __init__(self, config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device1 = device
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# #self.classifier = nn.Linear(config.hidden_size, self.num_classes)
# self.alphas = torch.tensor([[0.20, 0.8], [0.20, 0.8], [0.25, 0.75], [0.4, 0.6], [0.1, 0.9], [0.2, 0.98], [0.2, 0.98]]).to(self.device1)
# classifiers = []
# self.criterion = []
# for item in range(7):
# classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
# self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
# self.classifiers1 = nn.ModuleList(classifiers)
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# self.init_weights()
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# pooled_output = self.dropout(pooled_output)
# # print(pooled_output.shape)
# logits = []
# for ind, logit in enumerate(pooled_output):
# logit = self.classifiers1[ind](pooled_output[ind])
# logits.append(logit)
# # for check on YN
# # for ind in range(7):
# # logit = self.classifiers[ind](pooled_output)
# # logits.append(logit)
# # print("FR",logits)
# if labels is not None:
# loss = 0
# out_logits = []
# for ind, logit in enumerate(logits):
# loss += self.criterion[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
# out_logits.append(self.softmax(logit))
# outputs = (loss, torch.stack(out_logits),) + outputs[2:]
# return outputs # (loss), reshaped_logits, (hidden_st
# class BertForBooleanQuestionYNboolq(BertPreTrainedModel):
# def __init__(self, config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device1 = device
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# self.classifier = nn.Linear(config.hidden_size, self.num_classes)
# # self.alphas = torch.tensor([[0.5, 0.5], [0.25, 0.75], [0.27, 0.73] ]).to(self.device)
# # self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5] ]).to('cuda:3')
# self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5]]).to(self.device1)
# classifiers = []
# self.criterion = []
# for item in range(2):
# classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
# self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
# self.classifiers = nn.ModuleList(classifiers)
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# self.init_weights()
# #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# pooled_output = self.dropout(pooled_output)
# logits = []
# for ind in range(2):
# logit = self.classifiers[ind](pooled_output)
# logits.append(logit.squeeze(0))
# if labels is not None:
# loss = 0
# out_logits = []
# for ind, logit in enumerate(logits):
# loss += self.criterion[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
# out_logits.append(self.softmax(logit))
# outputs = (loss, torch.stack(out_logits),) + outputs[2:]
# return outputs
# class BertForBooleanQuestionYNsprlqa(BertPreTrainedModel):
# def __init__(self, config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device1 = device
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# self.num_labels = 2
# self.classifier = nn.Linear(config.hidden_size, self.num_classes)
# self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5]]).to(self.device1)
# classifiers = []
# self.criterion = []
# for item in range(self.num_labels):
# classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
# self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
# self.classifiers = nn.ModuleList(classifiers)
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# self.init_weights()
# #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# pooled_output = self.dropout(pooled_output)
# logits = []
# for ind in range(self.num_labels):
# logit = self.classifiers[ind](pooled_output)
# logits.append(logit.squeeze(0))
# if labels is not None:
# loss = 0
# out_logits = []
# for ind, logit in enumerate(logits):
# loss += self.criterion[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
# out_logits.append(self.softmax(logit))
# outputs = (loss, torch.stack(out_logits),) + outputs[2:]
# return outputs
# class BertForBooleanQuestionYNsprlqaLoad(BertPreTrainedModel):
# def __init__(self, config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device1 = device
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# self.num_labels = 2
# self.classifier1 = nn.Linear(config.hidden_size, self.num_classes)
# self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5]]).to(self.device1)
# classifiers = []
# self.criterion1 = []
# for item in range(self.num_labels):
# classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
# self.criterion1.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
# self.classifiers1 = nn.ModuleList(classifiers)
# self.sigmoid = nn.Sigmoid()
# self.softmax1 = nn.Softmax()
# self.init_weights()
# #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# pooled_output = self.dropout(pooled_output)
# logits = []
# for ind in range(self.num_labels):
# logit = self.classifiers1[ind](pooled_output)
# logits.append(logit.squeeze(0))
# if labels is not None:
# loss = 0
# out_logits = []
# for ind, logit in enumerate(logits):
# loss += self.criterion1[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
# out_logits.append(self.softmax1(logit))
# outputs = (loss, torch.stack(out_logits),) + outputs[2:]
# return outputs
# class BertForBooleanQuestionFRsprlqa(BertPreTrainedModel):
# def __init__(self, config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device1 = device
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# self.classifier = nn.Linear(config.hidden_size, self.num_classes)
# # self.alphas = torch.tensor([[0.20, 0.8], [0.20, 0.8], [0.25, 0.75], [0.4, 0.6], [0.1, 0.9], [0.2, 0.98], [0.2, 0.98]]).to(self.device2)
# self.alphas = torch.tensor([[0.09, 0.91], [0.44, 0.56], [0.007, 0.993], [0.007, 0.993], [0.086, 0.914], [0.057, 0.943], [0.013, 0.987], [0.13, 0.87], [0.03, 0.97], [0.006, 0.994], [0.13, 0.87], [0.007, 0.993], [0.0015, 0.9985], [0.013, 0.987], [0.003, 0.997], [0.003, 0.997], [0.006, 0.994], [0.003, 0.997]]).to(self.device1)
# classifiers = []
# self.criterion = []
# for item in range(18):
# classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
# self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
# self.classifiers = nn.ModuleList(classifiers)
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# self.init_weights()
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# pooled_output = self.dropout(pooled_output)
# # print(pooled_output.shape)
# logits = []
# for ind, logit in enumerate(pooled_output):
# logit = self.classifiers[ind](pooled_output[ind])
# logits.append(logit)
# # for check on YN
# # for ind in range(7):
# # logit = self.classifiers[ind](pooled_output)
# # logits.append(logit)
# # print("FR",logits)
# if labels is not None:
# loss = 0
# out_logits = []
# for ind, logit in enumerate(logits):
# loss += self.criterion[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
# out_logits.append(self.softmax(logit))
# outputs = (loss, torch.stack(out_logits),) + outputs[2:]
# return outputs
# class BertForBooleanQuestion3ClassYN(BertPreTrainedModel):
# def __init__(self, config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device = device
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# self.classifier = nn.Linear(config.hidden_size, self.num_classes)
# self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5], [1, 0] ]).to(self.device)
# classifiers = []
# self.criterion = []
# for item in range(3):
# classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
# self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
# self.classifiers = nn.ModuleList(classifiers)
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# self.init_weights()
# #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# pooled_output = self.dropout(pooled_output)
# # print('$',pooled_output.shape)
# logits = []
# # for ind, logit in enumerate(pooled_output):
# # logit = self.classifiers[ind](pooled_output[ind])
# # logits.append(logit)
# for ind in range(3):
# logit = self.classifiers[ind](pooled_output)
# # print("#", logit.squeeze(0).shape)
# logits.append(logit.squeeze(0))
# if labels is not None:
# loss = 0
# out_logits = []
# for ind, logit in enumerate(logits):
# # weights = torch.ones(2).float()
# # alpha = self.alphas[ind]
# # print("**",labels.shape ,labels[ind], labels[ind].unsqueeze(0))
# # print("**",logit.shape)
# loss += self.criterion[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
# out_logits.append(self.softmax(logit))
# outputs = (loss, torch.stack(out_logits),) + outputs[2:]
# # outputs = (,) + outputs
# return outputs # (loss), reshaped_logits, (hidden_st
# class BertForSequenceClassification1(BertPreTrainedModel):
# def __init__(self, config, type_class = 0, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# # self.device = device
# self.num_labels = config.num_labels
# self.num_type_class = type_class
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.classifier1 = nn.Linear(config.hidden_size, self.num_labels)
# self.classifier2 = nn.Linear(config.hidden_size, self.num_type_class)
# # self.classifiers = nn.ModuleList([self.classifier1, self.classifier2])
# self.init_weights()
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# pooled_output = self.dropout(pooled_output)
# logits1 = self.classifier1(pooled_output)
# logits2 = self.classifier2(pooled_output)
# loss = 0
# if labels is not None:
# # label1 = labels[0].float()
# if self.num_labels == 1:
# # We are doing regression
# # loss_fct = MSELoss()
# loss_fct = BCELoss()
# out_logits1 = self.sigmoid(logits1)
# loss += loss_fct(out_logits1.view(-1), labels[0].view(-1))
# else:
# loss_fct = CrossEntropyLoss()
# loss += loss_fct(logits1.view(-1, self.num_labels), labels[0].view(-1))
# out_logits1 = logits1#self.softmax(logits1)
# # outputs1 = (loss,) + outputs1
# # label2 = labels[1].long()
# if self.num_type_class == 1:
# # We are doing regression
# # loss_fct = MSELoss()
# out_logits2 = self.sigmoid(logits2)
# loss_fct = BCELoss()
# loss += loss_fct(out_logits2.view(-1), labels[1].view(-1))
# else:
# loss_fct = CrossEntropyLoss()
# loss += loss_fct(logits2.view(-1, self.num_type_class), labels[1].view(-1))
# out_logits2 = logits2#self.softmax(logits2)
# # outputs2 = (loss,) + outputs2
# else:
# out_logits1 = self.sigmoid(logits1)
# out_logits2 = logits2
# outputs1 = (out_logits1,) + outputs[2:] # add hidden states and attention if they are here
# outputs2 = (out_logits2,) + outputs[2:]
# return loss, outputs1, outputs2 # (loss), logits, (hidden_states), (attentions)
# class BertForSequenceClassification2(BertPreTrainedModel):
# def __init__(self, config, type_class = 0, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# # self.device = device
# self.num_labels = config.num_labels
# self.num_type_class1 = type_class
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.classifier1 = nn.Linear(config.hidden_size, self.num_labels)
# self.classifier21 = nn.Linear(config.hidden_size, self.num_type_class1)
# # self.classifiers = nn.ModuleList([self.classifier1, self.classifier2])
# self.init_weights()
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# pooled_output = self.dropout(pooled_output)
# logits1 = self.classifier1(pooled_output)
# logits2 = self.classifier21(pooled_output)
# loss = 0
# if labels is not None:
# # label1 = labels[0].float()
# if self.num_labels == 1:
# # We are doing regression
# # loss_fct = MSELoss()
# loss_fct = BCELoss()
# out_logits1 = self.sigmoid(logits1)
# loss += loss_fct(out_logits1.view(-1), labels[0].view(-1))
# else:
# loss_fct = CrossEntropyLoss()
# loss += loss_fct(logits1.view(-1, self.num_labels), labels[0].view(-1))
# out_logits1 = logits1#self.softmax(logits1)
# # outputs1 = (loss,) + outputs1
# # label2 = labels[1].long()
# if self.num_type_class1 == 1:
# # We are doing regression
# # loss_fct = MSELoss()
# out_logits2 = self.sigmoid(logits2)
# loss_fct = BCELoss()
# loss += loss_fct(out_logits2.view(-1), labels[1].view(-1))
# else:
# loss_fct = CrossEntropyLoss()
# loss += loss_fct(logits2.view(-1, self.num_type_class1), labels[1].view(-1))
# out_logits2 = logits2#self.softmax(logits2)
# # outputs2 = (loss,) + outputs2
# else:
# out_logits1 = self.sigmoid(logits1)
# out_logits2 = logits2
# outputs1 = (out_logits1,) + outputs[2:] # add hidden states and attention if they are here
# outputs2 = (out_logits2,) + outputs[2:]
# return loss, outputs1, outputs2 # (loss), logits, (hidden_states), (attentions)
# class BertForSequenceClassification3(BertPreTrainedModel):
# def __init__(self, config, type_class = 0, num_asked_class = 1, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# # self.device = device
# self.num_labels = config.num_labels
# self.num_type_class1 = type_class
# self.num_asked_class = num_asked_class
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.classifier1 = nn.Linear(config.hidden_size, self.num_labels)
# self.classifier21 = nn.Linear(config.hidden_size, self.num_type_class1)
# # self.classifier3 = nn.Linear(config.hidden_size, self.num_asked_class)
# # self.classifiers = nn.ModuleList([self.classifier1, self.classifier2])
# self.init_weights()
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# asked_compute = None,
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# pooled_output = self.dropout(pooled_output)
# logits1 = self.classifier1(pooled_output)
# logits2 = self.classifier21(pooled_output)
# # if asked_compute != None:
# # logits3 = self.classifier3(pooled_output)
# loss = 0
# if labels is not None:
# # label1 = labels[0].float()
# if self.num_labels == 1:
# # We are doing regression
# # loss_fct = MSELoss()
# loss_fct = BCELoss()
# out_logits1 = self.sigmoid(logits1)
# loss += loss_fct(out_logits1.view(-1), labels[0].view(-1))
# else:
# loss_fct = CrossEntropyLoss()
# loss += loss_fct(logits1.view(-1, self.num_labels), labels[0].view(-1))
# out_logits1 = logits1#self.softmax(logits1)
# # outputs1 = (loss,) + outputs1
# # label2 = labels[1].long()
# if self.num_type_class1 == 1:
# # We are doing regression
# # loss_fct = MSELoss()
# out_logits2 = self.sigmoid(logits2)
# loss_fct = BCELoss()
# loss += loss_fct(out_logits2.view(-1), labels[1].view(-1))
# else:
# loss_fct = CrossEntropyLoss()
# loss += loss_fct(logits2.view(-1, self.num_type_class1), labels[1].view(-1))
# out_logits2 = logits2#self.softmax(logits2)
# # outputs2 = (loss,) + outputs2
# # if asked_compute !=None and self.num_asked_class == 1:
# # loss_fct = BCELoss()
# # out_logits3 = self.sigmoid(logits3)
# # loss += loss_fct(out_logits3.view(-1), labels[0].view(-1))
# # else:
# # out_logits3 = None
# else:
# out_logits1 = self.sigmoid(logits1)
# out_logits2 = logits2
# # out_logits3 = logits3 if asked_compute != None else None
# outputs1 = (out_logits1,) + outputs[2:] # add hidden states and attention if they are here
# outputs2 = (out_logits2,) + outputs[2:]
# # outputs3 = (out_logits3,) + outputs[2:]
# return loss, outputs1, outputs2#, outputs3 # (loss), logits, (hidden_states), (attentions)
# class BertForSequenceClassification(BertPreTrainedModel):
# def __init__(self, config, type_class = 0, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# self.num_labels = config.num_labels
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# self.sigmoid = nn.Sigmoid()
# self.init_weights()
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# pooled_output = self.dropout(pooled_output)
# logits = self.classifier(pooled_output)
# outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
# if labels is not None:
# if self.num_labels == 1:
# # We are doing regression
# loss_fct = BCELoss()
# loss = loss_fct(self.sigmoid(logits).view(-1), labels.view(-1))
# else:
# loss_fct = CrossEntropyLoss()
# loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
# outputs = (loss,) + outputs
# return outputs # (loss), logits, (hidden_states), (attentions)
# --- dataset-concatenation artifact (was: "| 85,919 | 35.176842 | 335 | py |") ---
# Source file: Spatial-QA-tasks / Spatial-QA-tasks-main/Create_LM_input_output.py
# checking with BERT
from unittest.util import _MAX_LENGTH
from torchnlp.nn import attention
from transformers import BertTokenizer, BertTokenizerFast, RobertaTokenizer, RobertaTokenizerFast
import torch
import random
import torch.nn as nn
# Module-level tokenizer state; populated by initialize_tokenizer() below and
# read by every classification/LM helper in this file.
tokenizer, tokenizerFast = None, None
# Name of the active baseline LM ("bert" or "roberta"); None until initialized.
baseline = None
def initialize_tokenizer(lm_model, pretrained_data):
    """Initialize the module-level tokenizers for the chosen language model.

    Sets the module globals ``tokenizer``, ``tokenizerFast`` and ``baseline``,
    which the other helpers in this file read.

    Args:
        lm_model: Baseline name, either ``"bert"`` or ``"roberta"``.
        pretrained_data: Name or path of the pretrained weights to load.

    Raises:
        ValueError: If ``lm_model`` is not a supported baseline name.
    """
    global tokenizer, tokenizerFast, baseline
    if lm_model == "bert":
        tokenizer = BertTokenizer.from_pretrained(pretrained_data)
        tokenizerFast = BertTokenizerFast.from_pretrained(pretrained_data)
    elif lm_model == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(pretrained_data)
        tokenizerFast = RobertaTokenizerFast.from_pretrained(pretrained_data)
    else:
        # Previously an unsupported name silently left the tokenizers as None,
        # which surfaced later as confusing attribute errors; fail fast instead.
        raise ValueError(
            f"Unsupported lm_model: {lm_model!r} (expected 'bert' or 'roberta')"
        )
    baseline = lm_model
def boolean_classification(model, input_text, q_type = None, candidate = [] ,correct_labels = None, other = None, device = "cuda:0", dataset = None, multi_task = False):
    """Batched multi-label ("boolean") classification for spatial QA.

    All questions of the same task type are passed in one call; each entry of
    ``input_text`` is a pre-concatenated question+context string.  The batch is
    tokenized with the module-level ``tokenizer`` (set by
    ``initialize_tokenizer``), gold answers are encoded as per-candidate 0/1
    label vectors, the model runs once over the batch, and the per-label
    logits are decoded back into answer strings.

    :param model: callable as ``model(input_ids, labels=..., task=...,
        multi_task=..., attention_mask=...)`` returning ``(loss, logits, ...)``.
    :param input_text: list of question+context strings, one per example.
    :param q_type: question type, ``'FR'`` (find relation) or ``'YN'`` (yes/no).
    :param candidate: candidate relation labels for 'FR' questions.  The
        mutable default is harmless here: the list is only read, never mutated.
    :param correct_labels: gold answers, one list per example.
        NOTE(review): the ``None`` default would crash the ``for`` loop below;
        callers presumably always pass a (possibly empty) list — confirm.
    :param other: extra mode flag; ``"noDK"`` drops the 'DK' (don't know) class.
    :param device: torch device string for input/label tensors.
    :param dataset: dataset name; selects how gold answers are encoded.
    :param multi_task: forwarded unchanged to the model.
    :returns: ``(loss, outs)`` — model loss and one decoded answer list per
        example.
    """
    # TODO changed
    encoding = tokenizer(input_text, return_attention_mask = True, return_tensors="pt", padding=True, max_length = 512)
    # encoding = tokenizer(input_text, max_length=512, pad_to_max_length = True, return_attention_mask = True, return_tensors="pt")
    # if candidate: max_len = max([len(tokenizing(opt)) for opt in candidate])
    input_ids, token_type_ids = [], []
    input_ids = encoding["input_ids"].to(device)
    # if baseline == "roberta":
    #     token_type_ids = None
    # else:
    #     token_type_ids = encoding["token_type_ids"].to(device)
    #     for k in range(len(token_type_ids)):
    #         key = list(input_ids[k]).index(102)
    #         for i in range(key):
    #             token_type_ids[k][i] = 1
    # Encode each gold answer as a binary vector: one slot per candidate (FR)
    # or per Yes/No[/DK] class (YN).
    labels = []
    for correct_label in correct_labels:
        if q_type == 'FR':
            label = torch.tensor([0]*(len(candidate)), device = device).long()
            for opt in correct_label:
                if dataset in ["spartqa", "babi"]: # answer is the id of correct label
                    label[opt] = 1
                else:
                    label[candidate.index(opt.lower())] = 1
        elif q_type == 'YN' and (other == "noDK" or dataset in[ "sprlqa", "babi", "boolq","spartun"]):
            # Two-class YN: [Yes, No].
            if correct_label == ['Yes']: label = torch.tensor([1,0], device = device).long()
            elif correct_label == ['No']: label = torch.tensor([0,1], device = device).long()
        elif q_type == 'YN': #spartqa,
            # Three-class YN: [Yes, No, DK].
            if correct_label == ['Yes']: label = torch.tensor([1,0,0], device = device).long()
            elif correct_label == ['No']: label = torch.tensor([0,1,0], device = device).long()
            else: label = torch.tensor([0,0,1], device = device).long()
        labels += [label]
    # No gold answers -> inference-only call (model receives labels=None).
    if labels == [] : labels = None
    else: labels = torch.stack(labels)
    outputs = model(input_ids, labels=labels, task = q_type, multi_task = multi_task, attention_mask = encoding["attention_mask"].to(device))
    # outputs = model(input_ids, labels=labels, task = q_type, multi_task = multi_task, attention_mask = encoding["attention_mask"].to(device))
    loss, logits = outputs[:2]
    # Model logits come back grouped per label head; transpose so the first
    # axis iterates examples (presumably (num_labels, batch, 2) ->
    # (batch, num_labels, 2) — TODO confirm against the model).
    logits = torch.transpose(logits, 0, 1)
    outs = []
    for logit in logits:
        out = []
        if q_type == 'FR':
            #Do some inference
            # remove relations that cannot be true at the same time.
            out_logit = [torch.argmax(log) for log in logit]
            out = Prune_FR_answer_based_on_constraints(out_logit, logit, candidate)
            # Map candidate indices back to label strings, except for datasets
            # whose answers are already ids.
            if dataset not in ["spartqa", "babi"]: out = [candidate[i] for i in out]
        elif q_type == 'YN' and (len(candidate) == 2 or other == "noDK"):
            # Column 1 appears to be each class head's positive score; argmax
            # over heads selects the answer — TODO confirm.
            max_arg = torch.argmax(logit[:, 1])
            # print("2", max_arg)
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
        elif q_type == 'YN': #has three candidate labels
            max_arg = torch.argmax(logit[:, 1])
            # print(correct_label, logits, max_arg)
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        outs += [out]
    return loss, outs #, out_logit
def multiple_classification(model, input_text, q_type, candidate ,correct_labels , other = None, device = "cpu", dataset = None, multi_task = False):
    """Batched single-answer (multi-class) classification for spatial QA.

    Unlike ``boolean_classification``, each example has exactly one correct
    class: gold answers are encoded as class indices and decoding takes a
    single argmax over the logits.

    :param model: callable as ``model(input_ids, labels=..., token_type_ids=...,
        attention_mask=..., task=..., multi_task=...)`` returning
        ``(loss, logits, ...)``.
    :param input_text: list of question+context strings, one per example.
    :param q_type: question type, ``'FR'`` or ``'YN'``.
    :param candidate: candidate answer labels for 'FR' questions.
    :param correct_labels: gold answers, one list per example.
    :param other: extra mode flag; ``"noDK"`` drops the 'DK' class.
    :param device: torch device string for input/label tensors.
    :param dataset: dataset name; selects how gold answers are encoded.
    :param multi_task: forwarded unchanged to the model.
    :returns: ``(loss, outs)`` — model loss and one decoded answer list per
        example.
    """
    # encoding = tokenizer.encode_plus(question, text)
    # print(text, question, candidate, correct_label)
    # if candidate: max_len = max([len(tokenizing(opt)) for opt in candidate])
    encoding = tokenizer(input_text, max_length=512, return_tensors="pt", padding=True, return_attention_mask = True)
    # encoding = tokenizer(input_text, max_length=512, pad_to_max_length = True, return_attention_mask = True, return_tensors="pt")
    input_ids, token_type_ids = [], []
    input_ids = encoding["input_ids"].to(device)
    if baseline == "roberta":
        # RoBERTa does not use token type ids.
        token_type_ids = None
    else:
        token_type_ids = encoding["token_type_ids"].to(device)
        # 102 is BERT's [SEP] id: mark every token before the first [SEP]
        # (i.e. the question segment) with segment id 1.
        for k in range(len(token_type_ids)):
            key = list(input_ids[k]).index(102)
            for i in range(key):
                token_type_ids[k][i] = 1
    labels = []
    for correct_label in correct_labels:
        if q_type == 'FR':
            #TODO check this for stepgame
            if dataset not in [ "spartqa", "stepgame"]:
                _correct_label = []
                for label in correct_label:
                    _correct_label += [candidate.index(label.lower())]
            if dataset == "stepgame":
                _correct_label = correct_label[0]
                #TODO check this
            # NOTE(review): this else runs for every non-stepgame dataset and
            # overwrites the index list built above with correct_label[0] —
            # looks unintended; confirm against callers before relying on it.
            else: _correct_label = correct_label[0]
            label = torch.tensor(_correct_label, device = device).long()
        elif q_type == 'YN' and (other == "noDK" or dataset in[ "sprlqa", "babi", "boolq"]):
            # Two-class YN: 0 = Yes, 1 = No.
            if correct_label == ['Yes']: label = torch.tensor(0, device = device).long()
            elif correct_label == ['No']: label = torch.tensor(1, device = device).long()
        elif q_type == 'YN': #spartqa,
            # Three-class YN: 0 = Yes, 1 = No, 2 = DK.
            if correct_label == ['Yes']: label = torch.tensor(0, device = device).long()
            elif correct_label == ['No']: label = torch.tensor(1, device = device).long()
            else: label = torch.tensor(2, device = device).long()
        labels += [label]
    # No gold answers -> inference-only call (model receives labels=None).
    if labels == [] : labels = None
    else: labels = torch.stack(labels)
    outputs = model(input_ids, labels=labels, token_type_ids = token_type_ids, attention_mask = encoding["attention_mask"].to(device), task = q_type, multi_task = multi_task) #,
    loss, logits = outputs[:2]
    outs = []
    for logit in logits:
        out = []
        if q_type == 'FR':
            max_arg = torch.argmax(logit)
            # max_arg = torch.argmax(logits[0])
            # stepgame answers are raw indices; other datasets map back to the
            # candidate label string.
            if dataset not in ["stepgame"]:
                out = [candidate[max_arg.item()]]
            else:
                out = [max_arg.item()]
        elif q_type == 'YN' :
            max_arg = torch.argmax(logit)
            # max_arg = torch.argmax(logits[0])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK'] # if only two class then this never happen
        outs += [out]
    return loss, outs #, out_logit
def spatial_relation_extraction(model, input_text, labels, device = "cuda:0"):
    """Run spatial-relation extraction and decode per-relation predictions.

    Tokenizes ``input_text`` with the module-level ``tokenizer``, calls the
    model (with stacked gold ``labels`` when provided), and argmax-decodes
    each relation head.  Position 0 of each decoded vector encodes
    "no relation": it is forced to 0 when any other relation fired, else 1.

    :param model: callable as ``model(input_ids, token_type_ids=...,
        attention_mask=..., labels=...)`` returning ``(loss, logits, ...)``.
    :param input_text: batch of input strings for ``batch_encode_plus``.
    :param labels: list of gold label tensors, or None/empty for inference.
    :param device: kept for interface compatibility (currently unused).
    :returns: ``(loss, outs)`` — loss (None when no labels) and one decoded
        per-relation list per example.
    """
    # Bug fix: the original referenced an undefined name ``input_`` here.
    encoding = tokenizer.batch_encode_plus(input_text, max_length=512)
    input_ids, token_type_ids, attention_mask = encoding["input_ids"], encoding["token_type_ids"], encoding['attention_mask']
    # Decide label availability BEFORE stacking: truth-testing a stacked
    # multi-element tensor raises, and torch.stack([]) raises on empty input.
    has_labels = labels is not None and len(labels) > 0
    label_tensor = torch.stack(labels) if has_labels else None
    outputs = model(input_ids, token_type_ids = token_type_ids, attention_mask = attention_mask, labels = label_tensor)
    if has_labels:
        loss, logits = outputs[:2]
    else:
        loss = None
        # Bug fix: outputs[:1] was a 1-tuple, not the logits tensor.
        logits = outputs[0]
    outs = []
    for logit in logits:
        out_logit = [torch.argmax(log) for log in logit]
        # Bug fix: torch.count_nonzero needs a tensor, not a Python list of
        # 0-dim tensors — stack the per-relation argmaxes first.
        if torch.count_nonzero(torch.stack(out_logit[1:])) > 0:
            out_logit[0] = 0
        else:
            out_logit[0] = 1
        outs += [out_logit]
    return loss, outs
def Masked_LM(model, text, question, answer, other, device, file):
    """Score a masked-LM model on a question+text pair with a known answer.

    The input already contains [MASK] tokens; this fills the label tensor with
    the tokenized ``answer`` at the masked positions, runs the model, and
    prints gold vs. predicted tokens to ``file``.

    :param model: masked-LM model called as ``model(input_ids, labels=...)``
        returning ``(loss, prediction_scores, ...)``.
    :param text: context passage (second segment of the encoded pair).
    :param question: question string (first segment).
    :param answer: gold answer; tokenized via the external ``tokenizing``
        helper (defined elsewhere in this project — not visible here).
    :param other: unused here; kept for a uniform helper signature.
    :param device: torch device string for the input/label tensors.
    :param file: open file handle for the debug printouts.
    :returns: ``(loss, predicted_index, ground_truth)`` — loss plus predicted
        and gold token ids at the masked positions.
    """
    # print(question, answer)
    # input_ids = tokenizer(text, return_tensors="pt")["input_ids"].to(device)
    input_ids = tokenizer.encode_plus(question, text, return_tensors="pt")["input_ids"].to(device)
    # Collect positions of [MASK] tokens (103 is BERT's [MASK] id).
    masked = []
    for ind, i in enumerate(input_ids[0]):
        if i == 103: masked.append(ind)
    token_answer = [tokenizer.convert_tokens_to_ids(i) for i in tokenizing(answer)]
    # -100 is the ignore index of the LM loss: only masked positions carry a
    # real target, taken in order from the tokenized answer.
    x = [-100] * len(input_ids[0])
    for ind,i in enumerate(masked):
        x[i] = token_answer[ind]
    label_ids = torch.tensor([x], device = device)
    # print("input_ids2",input_ids)
    # print("label_ids",label_ids)
    # segments_ids = torch.tensor([[0]* (len(input_ids))], device = device)
    outputs = model(input_ids, labels = label_ids) #, token_type_ids=segments_ids)
    # print(outputs)
    loss, predictions = outputs[:2]
    ground_truth = [label_ids[0][i].item() for i in masked]
    truth_token = tokenizer.convert_ids_to_tokens(ground_truth)
    print("truth token: ", truth_token, file = file)
    # Greedy decode: highest-scoring vocabulary id at each masked position.
    predicted_index = [torch.argmax(predictions[0, predict]).item() for predict in masked ]
    predicted_token = tokenizer.convert_ids_to_tokens(predicted_index)
    # print(predicted_index,predicted_token, file = file)
    print("pred_token: ", predicted_token, file = file)
    return loss, predicted_index, ground_truth
def Masked_LM_random(model, text, seed_num, other, device, file):
    """BERT-style random masked-LM corruption and prediction.

    Roughly 12% of non-special tokens are replaced by [MASK], ~1.5% are
    kept unchanged but still predicted, and ~1.5% are replaced by a random
    vocabulary id; the model then predicts all corrupted positions.

    Returns (loss, predicted, ground_truth): parallel lists of vocabulary
    ids covering masked + unchanged + changed positions, in that order.
    """
    input_ids = tokenizer(text, return_tensors="pt")["input_ids"].to(device)
    random.seed(seed_num)
    # ~12% of the interior tokens (skip [CLS]/[SEP]) get [MASK]ed.
    masked = random.sample(range(1, len(input_ids[0])-1), int(len(input_ids[0])* 0.12))
    # ~1.5% are left unchanged but still carry a label.
    # NOTE(review): these rejection-sampling loops never terminate if no
    # eligible position remains outside the already-chosen sets —
    # presumably inputs are long enough in practice; confirm upstream.
    not_changed = []
    temp , not_changed_num= 0, int(len(input_ids[0])* 0.015)
    while True:
        x = random.choice(range(1, len(input_ids[0])-1))
        if x not in masked: not_changed.append(x); temp+=1
        if temp >= not_changed_num: break
    # ~1.5% are replaced by a random vocabulary id.
    changed = []
    temp , changed_num= 0, int(len(input_ids[0])* 0.015)
    while True:
        x = random.choice(range(1, len(input_ids[0])-1))
        if x not in masked and x not in not_changed: changed.append(x); temp+=1
        if temp >= changed_num: break
    # Build labels (-100 = ignored by the loss) and corrupt the input.
    x = [-100] * len(input_ids[0])
    for i in masked:
        x[i] = input_ids[0][i].item()
        input_ids[0][i] = tokenizer.convert_tokens_to_ids('[MASK]')
    for i in not_changed:
        x[i] = input_ids[0][i].item()
    for i in changed:
        changed_word =random.choice(range(0,30522))  # 30522: BERT-base vocab size
        x[i] = input_ids[0][i].item()
        input_ids[0][i]= changed_word
    label_ids = torch.tensor([x], device = device)
    # BUG FIX: token_type_ids must match the sequence length; the original
    # used len(input_ids) (the batch size, 1) instead of len(input_ids[0]),
    # producing a [1, 1] segment tensor for a [1, L] input.
    segments_ids = torch.tensor([[0]* (len(input_ids[0]))], device = device)
    outputs = model(input_ids, labels = label_ids, token_type_ids=segments_ids)
    loss, predictions = outputs[:2]
    # Ground truth and predictions are reported in the same group order:
    # masked, then unchanged, then randomly-changed positions.
    ground_truth = [label_ids[0][i].item() for i in masked] + [label_ids[0][i].item() for i in not_changed] + [label_ids[0][i].item() for i in changed]
    truth_token = tokenizer.convert_ids_to_tokens(ground_truth)
    print("truth token: ", truth_token, file = file)
    predicted = []
    predicted += [torch.argmax(predictions[0, predict]).item() for predict in masked ]
    predicted += [torch.argmax(predictions[0, predict]).item() for predict in not_changed ]
    predicted += [torch.argmax(predictions[0, predict]).item() for predict in changed ]
    pred_token = tokenizer.convert_ids_to_tokens(predicted)
    print("pred_token: ", pred_token, file = file)
    return loss, predicted, ground_truth
# with just triplet classifier
def spatial_classification(model, sentence, triplet, label, device):
    """Binary-classify whether `triplet` (trajector / landmark /
    spatial-indicator token spans into the encoded sentence) expresses a
    spatial relation.

    Returns (loss, predicted_index) where predicted_index is 1 when the
    model's first output exceeds 0.5.
    """
    encoding = tokenizer.encode(sentence, add_special_tokens=True)
    token_type_ids = [0] * len(encoding)
    prefix = []

    # Trajector span, or a lone [SEP] (id 102) placeholder when absent.
    span = triplet['trajector']
    if span != [-1, -1]:
        prefix += encoding[span[0]:span[1] + 1] + [102]
        for pos in range(span[0], span[1] + 1):
            token_type_ids[pos] = 1
    else:
        prefix += [102]

    # Landmark span, or a lone [SEP] placeholder when absent.
    span = triplet['landmark']
    if span != [-1, -1]:
        prefix += encoding[span[0]:span[1] + 1] + [102]
        for pos in range(span[0], span[1] + 1):
            token_type_ids[pos] = 1
    else:
        prefix += [102]

    # Spatial indicator span (no placeholder when absent).
    span = triplet['spatial_indicator']
    if span != [-1, -1]:
        prefix += encoding[span[0]:span[1] + 1] + [102]
        for pos in range(span[0], span[1] + 1):
            token_type_ids[pos] = 1

    # Input layout: [CLS] + triplet tokens + original sentence (sans [CLS]).
    token_type_ids = [0] * len(prefix) + token_type_ids
    full_encoding = [encoding[0]] + prefix + encoding[1:]

    token_type_ids = torch.tensor([token_type_ids], device = device)
    inputs = torch.tensor(full_encoding, device = device).unsqueeze(0)
    labels = torch.tensor([label], device = device).float()
    loss, logits = model(inputs, token_type_ids = token_type_ids, labels=labels)
    predicted_index = 1 if logits[0] > 0.5 else 0
    return loss, predicted_index
# triplet classifier and relation type extraction (batched version)
def spatial_type_classification(model, sentence, triplets, triplet_labels = None, type_labels = None, device= 'cuda:0', asked_class_label = None, other = None, asked = False):
    """Jointly classify a batch of candidate triplets (spatial relation:
    yes/no) and their relation types.

    `other` selects the input format:
      * 'sptypeQA' - `sentence` is already-tokenized ids (tensor in
        sentence[0]); span slices are tensors and need .tolist().
      * 'triplet'  - plain-text sentence; triplet tokens are also collected
        so a textual triplet can be handed back at inference time.
      * otherwise  - plain-text sentence; span positions are additionally
        marked with 1s in token_type_ids.

    With labels: returns (loss, predicted_index1, predicted_index2).
    Without labels: returns (_, predicted_index1, predicted_index2),
    except for the 'sptypeQA'/'triplet' pass-through path below.
    """
    if other == 'sptypeQA':
        encoding = sentence[0]
    else:
        tokenized_text = tokenizing(sentence)
        encoding = tokenizer.encode(sentence, add_special_tokens=True)
    all_token_type_ids, triplets_input = [], []
    # Build one model input per candidate triplet: the triplet's span
    # tokens (each followed by [SEP], id 102) prefixed to the sentence.
    for triplet in triplets:
        token_type_ids = [0]*len(encoding)
        triplets_tokens, triplets_tokens_pass, _triplets_tokens_pass = [], [], []
        # Trajector span, if annotated.
        if triplet['trajector'] not in [['',''], [-1,-1]] :
            if other == 'sptypeQA':
                triplets_tokens += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1].tolist()+[102]
                triplets_tokens_pass += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1].tolist()#+[1010]
            elif other == 'triplet':
                triplets_tokens += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1]+[102]
                triplets_tokens_pass += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1]+[102]
            else:
                triplets_tokens += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1]+[102]
                for x in range(triplet['trajector'][0],triplet['trajector'][1]+1):
                    token_type_ids[x] = 1
        # Spatial indicator span, if annotated.
        if triplet['spatial_indicator'] not in [['',''], [-1,-1]]:
            if other == 'sptypeQA' :
                triplets_tokens += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1].tolist()+[102]
                triplets_tokens_pass += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1].tolist()+[102]#+[1010]
            elif other == 'triplet':
                triplets_tokens += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1]+[102]
                triplets_tokens_pass += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1]+[102]
            else:
                triplets_tokens += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1]+[102]
                for x in range(triplet['spatial_indicator'][0],triplet['spatial_indicator'][1]+1):
                    token_type_ids[x] = 1
            # print('&&', tokenized_text[triplet['spatial_indicator'][0]-1:triplet['spatial_indicator'][1]]) # shouldn't consider [cls] so subtract 1
            # Recover the indicator's surface text (word-level tokens;
            # offsets shift by 1 because tokenized_text has no [CLS]).
            spatial_indicator = ''
            if other != 'sptypeQA' and other != 'triplet':
                for z in tokenized_text[triplet['spatial_indicator'][0]-1:triplet['spatial_indicator'][1]]:
                    spatial_indicator += z +' '
        # Landmark span, if annotated.
        if triplet['landmark'] not in [['',''], [-1,-1]]:
            if other == 'sptypeQA' :
                triplets_tokens += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1].tolist()+[102]
                triplets_tokens_pass += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1].tolist()#+[1010]
            elif other == 'triplet':
                triplets_tokens += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1]+[102]
                _triplets_tokens_pass += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1]+[102]
            else:
                triplets_tokens += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1]+[102]
                for x in range(triplet['landmark'][0],triplet['landmark'][1]+1):
                    token_type_ids[x] = 1
        token_type_ids = [0]*len(triplets_tokens) + token_type_ids + [0]*(26-len(triplets_tokens)) #add PAD to the batch for 6 for larger land and traj and 4 for spatial_ndicator
        all_token_type_ids += [token_type_ids]
        # NOTE(review): here the landmark tokens REPLACE the collected
        # pass-through tokens; the non-batched sibling appends (+=) them
        # instead — confirm which is intended.
        if _triplets_tokens_pass: triplets_tokens_pass = _triplets_tokens_pass
        # Pad every row to a fixed width so the batch can be stacked.
        if other == 'sptypeQA':
            _encoding =[encoding[0].tolist()]+ triplets_tokens + encoding[1:].tolist()+ [0]*(26-len(triplets_tokens))
        else: _encoding = [encoding[0]]+ triplets_tokens + encoding[1:]+ [0]*(26-len(triplets_tokens))
        triplets_input += [_encoding]
    token_type_ids = torch.tensor(all_token_type_ids, device = device)
    inputs = torch.tensor(triplets_input, device = device)
    if triplet_labels != None:
        # Two heads: binary triplet validity + relation-type class.
        labels = [torch.tensor(triplet_labels, device = device).float(), torch.tensor(type_labels, device = device).long()]
        # labels = [torch.tensor([label], device = device).float(), torch.tensor([type_label], device = device).long(), torch.tensor([asked_class_label if asked_class_label!= None else 0], device = device).float() ]
        # print(inputs.shape, labels.shape)
        loss, logits1, logits2 = model(inputs, token_type_ids = token_type_ids, labels=labels)
        # loss, logits1, logits2, logits3 = model(inputs, token_type_ids = token_type_ids, labels=labels, asked_compute = asked_class_label)
        # print(logits1)
    else:
        type_labels = None
        loss = None
        # _,logits1, logits2, logits3 = model(inputs, token_type_ids = token_type_ids, asked_compute = True)
        _,logits1, logits2 = model(inputs, token_type_ids = token_type_ids)
    # Per-triplet decisions: 0.65 threshold on the validity head, argmax
    # on the relation-type head.
    predicted_index1 = [1 if x > 0.65 else 0 for x in logits1[0]]
    # print('logits2', logits2)
    predicted_index2 = [torch.argmax(x).item() for x in logits2[0]]
    # if asked_class_label != None:
    #     predicted_index3 = 1 if logits3[0] > 0.65 else 0
    # else: predicted_index3 = None
    # predicted_index = torch.argmax(logits).item()
    # NOTE(review): two likely defects in this branch, left untouched
    # pending confirmation: (1) ' triplet' has a leading space and never
    # matches the 'triplet' value used everywhere else; (2)
    # predicted_index1 is a LIST here, so `predicted_index1 == 1` is
    # always False and the path always returns None — it appears to
    # predate batching (see the non-batched sibling below).
    if triplet_labels == None and (other == 'sptypeQA' or other == ' triplet'):
        if predicted_index1 == 1:
            if other == 'sptypeQA':
                return triplets_tokens_pass
            #if we want to pass the triplet in the form of text
            elif other == 'triplet':
                #all_rel_types = ['NaN', 'LEFT','RIGHT','BELOW','ABOVE','NEAR', 'FAR', 'TPP', 'NTPP', 'NTPPI', 'EC']
                triplet_sent = tokenizer.decode(triplets_tokens_pass)
                triplet_sent = triplet_sent[:-5].replace('[SEP]',',')+'.'
                ###For replacing spatial indicator
                #end = triplet_sent.rfind('[SEP]',0,len(triplet_sent)-5)
                #triplet_sent = triplet_sent[:end].replace('[SEP]',',') + '. ' #+all_rel_types[predicted_index2]+'.'
                # print('%%', triplet_sent)
                return triplet_sent
                #else
                #     triplet_sent = triplet_sent.replace('[SEP]',',')
                #     triplet_sent += ' '+all_rel_types[predicted_index2]+'.'
                # triplet_sent = triplet_sent[:-1]+'.'
            # print('In BERT, triplet tokens: ',triplet_sent)
        else: return None
    elif triplet_labels == None:
        # if logits3:
        return _, predicted_index1, predicted_index2#, predicted_index3
        # else:
        #     return _, predicted_index1, predicted_index2
    else:
        # if logits3:
        return loss, predicted_index1, predicted_index2#, predicted_index3 #, predicted_index
        # else:
        #     return loss, predicted_index1, predicted_index2 #, predicted_index
def spatial_type_classification_before_batch(model, sentence, triplets, triplet_label = None, type_labels = None, device= 'cuda:0', asked_class_label = None, other = None, asked = False):
    """Single-triplet (non-batched) variant of spatial_type_classification:
    classify one candidate triplet (spatial relation: yes/no) and its
    relation type.

    `other` selects the input format exactly as in the batched version.
    With a label: returns (loss, predicted_index1, predicted_index2).
    Without a label and other in ('sptypeQA', 'triplet'): returns the
    triplet tokens / textual triplet when classified positive, else None.
    Without a label otherwise: returns (_, predicted_index1, predicted_index2).
    """
    # BUG FIX: the body was written against the pre-batch parameter names
    # (`triplet`, `label`, `type_label`), which no longer exist in the
    # signature and raised NameError on every call; alias the current
    # parameters to the names the body uses.
    triplet = triplets
    label = triplet_label
    type_label = type_labels
    # print(sentence,'\n',triplet)
    #tokenized sentence
    if other == 'sptypeQA':
        encoding = sentence[0]
    else:
        tokenized_text = tokenizing(sentence)
        encoding = tokenizer.encode(sentence, add_special_tokens=True)
    token_type_ids = [0]*len(encoding)
    triplets_tokens, triplets_tokens_pass, _triplets_tokens_pass = [], [], []
    # Trajector span, if annotated.
    if triplet['trajector'] != ['','']:
        if other == 'sptypeQA':
            triplets_tokens += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1].tolist()+[102]
            triplets_tokens_pass += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1].tolist()#+[1010]
        elif other == 'triplet':
            triplets_tokens += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1]+[102]
            triplets_tokens_pass += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1]+[102]
        else:
            triplets_tokens += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1]+[102]
            for x in range(triplet['trajector'][0],triplet['trajector'][1]+1):
                token_type_ids[x] = 1
    # Spatial indicator span, if annotated.
    if triplet['spatial_indicator'] != ['','']:
        if other == 'sptypeQA' :
            triplets_tokens += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1].tolist()+[102]
            triplets_tokens_pass += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1].tolist()+[102]#+[1010]
        elif other == 'triplet':
            triplets_tokens += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1]+[102]
            triplets_tokens_pass += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1]+[102]
        else:
            triplets_tokens += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1]+[102]
            for x in range(triplet['spatial_indicator'][0],triplet['spatial_indicator'][1]+1):
                token_type_ids[x] = 1
        # Recover the indicator's surface text (word-level tokens;
        # offsets shift by 1 because tokenized_text has no [CLS]).
        spatial_indicator = ''
        if other != 'sptypeQA' and other != 'triplet':
            for z in tokenized_text[triplet['spatial_indicator'][0]-1:triplet['spatial_indicator'][1]]:
                spatial_indicator += z +' '
    # Landmark span, if annotated.
    if triplet['landmark'] != ['','']:
        if other == 'sptypeQA' :
            triplets_tokens += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1].tolist()+[102]
            triplets_tokens_pass += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1].tolist()#+[1010]
        elif other == 'triplet':
            triplets_tokens += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1]+[102]
            _triplets_tokens_pass += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1]+[102]
        else:
            triplets_tokens += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1]+[102]
            for x in range(triplet['landmark'][0],triplet['landmark'][1]+1):
                token_type_ids[x] = 1
    if _triplets_tokens_pass: triplets_tokens_pass += _triplets_tokens_pass
    # Input layout: [CLS] + triplet tokens + sentence (sans [CLS]).
    token_type_ids = [0]*len(triplets_tokens) + token_type_ids
    if other == 'sptypeQA':
        encoding =[ encoding[0].tolist()]+ triplets_tokens + encoding[1:].tolist()
    else: encoding = [encoding[0]]+ triplets_tokens + encoding[1:]
    token_type_ids = torch.tensor([token_type_ids], device = device)
    inputs = torch.tensor(encoding, device = device).unsqueeze(0)
    logits3 = None
    if label != None:
        # Two heads: binary triplet validity + relation-type class.
        labels = [torch.tensor([label], device = device).float(), torch.tensor([type_label], device = device).long()]
        loss, logits1, logits2 = model(inputs, token_type_ids = token_type_ids, labels=labels)
    else:
        type_label = None
        loss = None
        _,logits1, logits2 = model(inputs, token_type_ids = token_type_ids)
    # 0.65 threshold on the validity head, argmax on the type head.
    predicted_index1 = 1 if logits1[0] > 0.65 else 0
    predicted_index2 = torch.argmax(logits2[0]).item()
    # BUG FIX: original compared against ' triplet' (leading space), which
    # never matched the 'triplet' value used everywhere else, so the
    # textual-triplet pass-through below was unreachable for that mode.
    if label == None and (other == 'sptypeQA' or other == 'triplet'):
        if predicted_index1 == 1:
            if other == 'sptypeQA':
                return triplets_tokens_pass
            #if we want to pass the triplet in the form of text
            elif other == 'triplet':
                # Decode the collected tokens into "traj, indicator, land."
                triplet_sent = tokenizer.decode(triplets_tokens_pass)
                triplet_sent = triplet_sent[:-5].replace('[SEP]',',')+'.'
                return triplet_sent
        else: return None
    elif label == None:
        return _, predicted_index1, predicted_index2
    else:
        return loss, predicted_index1, predicted_index2
def token_classification(model, text, traj=None, land=None, indicator=None, other=None, device = 'cuda:0', file = ''):
    """Token-level tagging of entities and spatial indicators.

    When any of traj/land/indicator annotations are supplied, labels are
    built with make_token_label and a loss is computed; otherwise the call
    is inference-only. Returns (loss, predicted_index, truth), where loss
    and truth stay '' in the inference-only case.
    """
    loss, truth = '', ''
    # For 'sptypeQA' the caller hands in already-tokenized input ids.
    if other == 'sptypeQA':
        inputs = text
    else:
        inputs = tokenizer(text, return_tensors="pt")["input_ids"].to(device)
    has_annotations = bool(traj or land or indicator)
    if has_annotations:
        labels = make_token_label(text, traj, land, indicator, other).to(device)
        outputs = model(inputs, labels=labels)
        loss = outputs.loss
        truth = [element.item() for element in labels[0].flatten()]
    else:
        outputs = model(inputs)
    predicted_index = [torch.argmax(row).item() for row in outputs.logits[0]]
    return loss, predicted_index, truth
def make_token_label(text, traj, land, indicator,other):
    """Build a [1, seq_len] tensor of BIO tag ids for the tokenized text.

    Tag set: O / B_entity / I_entity (trajectors and landmarks share the
    entity tags) and B_indicator / I_indicator for spatial indicators.
    Annotations whose character offsets do not line up with a token
    boundary are skipped as wrong annotations.
    """
    encoding = tokenizerFast(text, return_offsets_mapping= True)
    token_starts = [item[0] for item in encoding['offset_mapping']]
    token_ends = [item[1] for item in encoding['offset_mapping']]
    labels_id = ['O', 'B_entity', 'I_entity', 'B_indicator', 'I_indicator']
    labels = torch.tensor([0] * len(encoding['input_ids']))

    def _tag_span(span, b_tag, i_tag):
        """Mark one annotated character span with B/I tags in `labels`."""
        # BUG FIX: the original skipped only when BOTH boundaries were
        # missing ('and'), so a span with exactly one bad offset passed
        # the guard and crashed in .index() below; skip when EITHER
        # boundary is missing.
        if span['start'] not in token_starts or span['end'] not in token_ends:
            return
        # [1:-1] excludes the special tokens; +1 restores the [CLS] offset.
        B_token = token_starts[1:-1].index(span['start'])+1
        E_token = token_ends[1:-1].index(span['end'])+1
        labels[B_token] = labels_id.index(b_tag)
        for i in range(B_token+1, E_token+1):
            labels[i] = labels_id.index(i_tag)

    # Trajectors and landmarks both map to the shared entity tags.
    for t in traj:
        _tag_span(t, 'B_entity', 'I_entity')
    for l in land:
        _tag_span(l, 'B_entity', 'I_entity')
    # Spatial indicators get their own tag pair.
    for ind in indicator:
        _tag_span(ind, 'B_indicator', 'I_indicator')
    return labels.unsqueeze(0)
def extract_entity_token(text, traj, land, indicator, _tuple=False):
    """Map character-offset annotations onto token index spans.

    Returns a dict with 'trajector', 'landmark', 'spatial_indicator'
    entries of [begin_token, end_token] ([-1, -1] when that annotation is
    absent); when _tuple is True an extra empty 'rel_type' entry is added.
    """
    encoding = tokenizerFast(text, return_offsets_mapping= True)
    token_starts = [pair[0] for pair in encoding['offset_mapping']]
    token_ends = [pair[1] for pair in encoding['offset_mapping']]

    token_index = {'trajector': [-1, -1], 'landmark': [-1, -1], 'spatial_indicator': [-1, -1]}
    if _tuple:
        token_index['rel_type'] = ''

    for role, span in (('trajector', traj), ('landmark', land), ('spatial_indicator', indicator)):
        # Both '' and -1 mean "no annotation" for this role.
        if span['start'] in ('', -1) or span['end'] in ('', -1):
            continue
        # [1:-1] excludes the special tokens; +1 restores the [CLS] offset.
        token_index[role][0] = token_starts[1:-1].index(span['start']) + 1
        token_index[role][1] = token_ends[1:-1].index(span['end']) + 1

    return token_index
# def boolean_classification_end2end(model, question, text, q_type, candidate ,correct_label, other, device):
def boolean_classification_end2end(model, questions, text, q_type, candidates ,correct_labels, other, device, story_annot =None, qs_annot=None, seperate = False):
    """End-to-end QA classification over a story and a batch of questions.

    Builds one multi-label target per question according to q_type
    ('CO' choose-object, 'FB' find-block, 'FR' find-relation, 'YN'
    yes/no), runs the model on the tokenized story + questions, then
    decodes each output head back into an answer list.

    Returns (losses, outs): a stacked loss tensor and one decoded answer
    list per question.
    """
    # encoding = tokenizer.encode_plus(question, text)
    # print(text, question, candidate, correct_label)
    if seperate:
        #seperate each sentence
        sentences = [h+'.' for h in text.split('. ')]
        # The last split already ends in '.', so drop the extra one added above.
        sentences[-1] = sentences[-1][:-1]
        text_tokenized = tokenizer(sentences, return_tensors="pt", padding=True)["input_ids"].to(device)
    else:
        text_tokenized = tokenizer(text, return_tensors="pt", padding=True)["input_ids"].to(device)
    # print(text,'\n', text_tokenized)
    # print('&&', questions)
    qs_tokenized = tokenizer(questions, return_tensors="pt", padding=True)["input_ids"].to(device)
    # print(qs_tokenized)
    # print('## text+qs_tokenized: ',text_tokenized)
    # qs_tokenized = tokenizer(question, return_tensors="pt")["input_ids"].to(device)
    # print('### qs_tokenized: ',qs_tokenized)
    # inputs = tokenizer(text, return_tensors="pt")["input_ids"].to(device)
    # if candidate: max_len = max([len(tokenizing(opt)) for opt in candidate])
    input_ids, token_type_ids = [], []
    labels =[]
    # Build one multi-hot label tensor per question.
    for _ind,correct_label in enumerate(correct_labels):
        if q_type == 'CO':
            # Choose-object: two binary slots; label [2] means "both".
            label = torch.tensor([[0]]*2, device = device).long()
            # candid_tokenized = tokenizer(candidate[:2], return_tensors="pt", padding=True)["input_ids"].to(device)
            for opt in candidates[_ind][:2]:
                # tokenized_opt = tokenizing(opt)
                # num_tok = len(tokenized_opt)
                # encoded_options = tokenizer.encode(tokenized_opt + ['[PAD]']*(max_len - num_tok))#[1:]
                # NOTE(review): `encoded_options` and `encoding` are
                # undefined here (their definitions are commented out
                # above) — this branch raises NameError if reached;
                # presumably dead code from an older input scheme. Confirm.
                input_ids += [encoded_options + encoding["input_ids"][1:]]
            if correct_label == [0] or correct_label == [2]: label[0][0] = 1
            if correct_label == [1] or correct_label == [2]: label[1][0] = 1
        elif q_type == 'FB':
            # Find-block: one binary slot per candidate block (A/B/C).
            label = torch.tensor([[0]]*len(candidates[_ind]), device = device).long()
            candid_tokenized = tokenizer(candidates[_ind], return_tensors="pt", padding=True)["input_ids"].to(device)
            for opt in candidates[_ind]:
                # tokenized_opt = tokenizing(opt)
                # # num_tok = len(tokenized_opt)
                # encoded_options = tokenizer.encode(tokenized_opt)#[1:]
                # NOTE(review): same undefined `encoded_options`/`encoding`
                # issue as the CO branch above — confirm this path is dead.
                input_ids += [encoded_options + encoding["input_ids"][1:]]
            if 'A' in correct_label: label[0][0] = 1
            if 'B' in correct_label: label[1][0] = 1
            if 'C' in correct_label: label[2][0] = 1
        elif q_type == 'FR':
            # Find-relation: 7 binary relation slots.
            # _input = []
            label = torch.tensor([0]*7, device = device).long()
            for ind, opt in enumerate(candidates[_ind][:7]):
                # _input += [text_tokenized]
                if ind in correct_label:label[ind] = 1
            # text_tokenized = torch.stack(_input)
            # print('label', label)
        # elif q_type == 'YN' and other == "DK": #and candidate != ['babi']:
        #     if correct_label == ['Yes']: label = torch.tensor([1,0,0], device = device).long()
        #     elif correct_label == ['No']: label = torch.tensor([0,1,0], device = device).long()
        #     else: label = torch.tensor([0,0,1], device = device).long()
        #     input_ids = [encoding["input_ids"]]
        # elif q_type == 'YN' and other == "noDK":
        #     if correct_label == ['Yes']: label = torch.tensor([1,0], device = device).long()
        #     elif correct_label == ['No']: label = torch.tensor([0,1], device = device).long()
        #     input_ids = [encoding["input_ids"]]
        # elif q_type == 'YN' and candidate == ['boolq']:
        #     if correct_label == ['Yes']: label = torch.tensor([1,0], device = device).long()
        #     elif correct_label == ['No']: label = torch.tensor([0,1], device = device).long()
        #     # else: label = torch.tensor([0,0,1], device = device).long()
        #     input_ids = [encoding["input_ids"]]
        elif q_type == 'YN': #and candidate != ['babi']:
            # Yes/No: one-hot over Yes / No / Don't-Know.
            if correct_label == ['Yes']: label = torch.tensor([1,0,0], device = device).long()
            elif correct_label == ['No']: label = torch.tensor([0,1,0], device = device).long()
            else: label = torch.tensor([0,0,1], device = device).long()
            # input_ids = text_tokenized
        # else : label = torch.tensor([0,0], device = device).long()
        # elif q_type == 'YN' and candidate == ['babi']:
        #     label = torch.tensor([1,0], device = device).long() if correct_label == ['Yes'] else torch.tensor([0,1], device = device).long()
        #     input_ids = [encoding["input_ids"]]
        labels += [label]
    labels = torch.stack(labels).to(device)
    # print('$', correct_labels)
    # print('$$', labels, type(labels))
    # input_ids = torch.tensor(input_ids, device = device)
    # print('input shape, label shape',text_tokenized.shape, qs_tokenized.shape, labels.shape)
    if other == 'supervised':
        # Supervised variant additionally feeds gold story/question annotations.
        _outputs = model(text_tokenized, qs_tokenized, story_annotations = story_annot, questions_annotations = qs_annot, labels=labels)
    else:
        _outputs = model(text_tokenized, qs_tokenized, labels=labels)
    # print('&&&&&&&&&& outputs', outputs)
    # Decode each per-question output head into an answer list.
    losses, outs = [], []
    for outputs in _outputs:
        loss, logits = outputs[:2]
        # print('$$', loss)
        losses += [loss]
        # print("loss, logits ", loss, logits)
        out_logit = [torch.argmax(log) for log in logits]
        out = [0]
        if q_type == 'FR':
            # Keep every predicted relation, then drop mutually-exclusive
            # pairs (left/right, above/below, near/far), keeping the one
            # with the higher positive logit; 7 encodes "no relation".
            out = [ind for ind,o in enumerate(out_logit) if o.item() == 1]
            if 2 in out and 3 in out:
                if logits[2][1] >= logits[3][1]:
                    out.remove(3)
                else:
                    out.remove(2)
            if 0 in out and 1 in out:
                if logits[0][1] >= logits[1][1]:
                    out.remove(1)
                else:
                    out.remove(0)
            if 4 in out and 5 in out:
                if logits[4][1] >= logits[5][1]:
                    out.remove(5)
                else:
                    out.remove(4)
            if out == []: out = [7]
        elif q_type == 'FB':
            blocks = ['A', 'B', 'C']
            out = [blocks[ind] for ind,o in enumerate(out_logit) if o.item() == 1]
            # out = [blocks[ind] for ind,o in enumerate(out_logit) if o.item() == 1]
            # if 'C' in out and 'C' not in candidate: out.remove('C')
        elif q_type == 'CO':
            # Both objects predicted -> answer "both" (2); none -> 3.
            out = [ind for ind,o in enumerate(out_logit) if o.item() == 1]
            if 0 in out and 1 in out:
                out = [2]
            elif out == []: out = [3]
        elif q_type == 'YN' and other == 'multiple_class':
            # Single softmax over Yes / No / DK.
            max_arg = torch.argmax(logits)
            # print(correct_label, logits, max_arg)
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and other == 'DK' and (candidates == ['babi'] or candidates == ['boolq']):
            # babi/boolq have no DK answer: compare only Yes vs No logits.
            # print('logits: ', logits)
            max_arg = torch.argmax(logits[:2, 1])
            # print("2", max_arg )
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
        elif q_type == 'YN' and other == 'DK':
            max_arg = torch.argmax(logits[:, 1])
            # print("2", max_arg , logits)
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and other == 'noDK':
            max_arg = torch.argmax(logits[:, 1])
            # print("2", max_arg)
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            # else: out = ['DK']
        elif q_type == 'YN' and other == 'change_model':
            # max_arg = torch.argmax(logits[:, 1])
            # if max_arg.item() == 0: out = ['Yes']
            # elif max_arg.item() == 1: out = ['No']
            max_arg = torch.argmax(logits[:, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and candidates != ['babi']:
            max_arg = torch.argmax(logits[:, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
            # if out_logit[0] == out_logit[1]:
            #     if out_logit[0].item() == 0:  out = ['DK']
            #     else:
            #         max_arg = torch.argmax(logits[: , 1])
            #         out = ['Yes'] if max_arg.item() == 0 else ['No']
            # else: out = ['Yes'] if out_logit[0].item() == 1 else ['No']
        elif q_type == 'YN' and (candidates == ['babi'] or candidates == ['boolq']):
            max_arg = torch.argmax(logits[:, 1])
            out = ['Yes'] if max_arg.item() == 0 else ['No']
        outs += [out]
    losses = torch.stack(losses)
    # print('out logit: ', outs, losses)
    return losses, outs #, out_logit
def boolean_classification_addSpRL(model, questions, text, q_type, candidates ,correct_labels, other, device, seperate = False, gt_triplets = None, dataset = None):
    """Batched boolean / multi-label QA through a model with an SpRL (spatial-role) head.

    Tokenizes the story (sentence-by-sentence when ``seperate`` is set or the
    model is not configured for joint "q+s" input) and the questions, builds a
    multi-hot gold label per question according to ``q_type`` (CO/FB/FR/YN),
    runs ``model`` once over the whole batch, and decodes each question's
    logits back into an answer list.

    Args:
        model: joint QA+SpRL model; returns (per-question outputs, extracted
            triplet indices).
        questions: list of question strings.
        text: story string, or list of sentences for stepgame/sprlqa.
        q_type: 'CO', 'FB', 'FR' or 'YN'.
        candidates: per-question candidate answers (or dataset sentinels for YN).
        correct_labels: per-question gold answers in the dataset's format.
        other: YN decoding variant ('DK', 'noDK', 'multiple_class', ...).
        seperate: force sentence-by-sentence story encoding.
        gt_triplets: optional gold triplets forwarded to the model.
        dataset: dataset name; changes FR label layout and sentence splitting.

    Returns:
        (losses, outs, extracted_triplets_index): stacked per-question losses,
        decoded answers (one list per question), and the SpRL head's extracted
        triplet indices.
    """
    # BUG FIX: this used to read ``attention_mask_s = None,`` — the trailing
    # comma made it the truthy tuple ``(None,)``, which was then passed to the
    # model as an attention mask.
    attention_mask_s = None
    attention_mask_q = None
    if seperate or "q+s" not in model.options :
        #seperate each sentence
        if dataset in ["stepgame", "sprlqa"]:
            sentences = text  # already a list of sentences
        else:
            sentences = [h+'.' for h in text.split('. ')]
            sentences[-1] = sentences[-1][:-1]  # last chunk already ended with '.'
        _text_tokenized = tokenizer(sentences, return_tensors="pt", padding=True, return_attention_mask=True)
        text_tokenized = _text_tokenized["input_ids"].to(device)
        attention_mask_s = _text_tokenized["attention_mask"].to(device)
    else:
        text_tokenized = tokenizer(text, return_tensors="pt", padding=True)["input_ids"].to(device)
    _qs_tokenized = tokenizer(questions, return_tensors="pt", padding=True, return_attention_mask=True)
    qs_tokenized = _qs_tokenized["input_ids"].to(device)
    attention_mask_q = _qs_tokenized["attention_mask"].to(device)
    input_ids, token_type_ids = [], []
    # ---- build one gold label tensor per question ----
    labels = []
    for _ind, correct_label in enumerate(correct_labels):
        if q_type == 'CO':
            # two binary heads, one per candidate object; gold [2] means "both"
            label = torch.tensor([[0]]*2, device = device).long()
            for opt in candidates[_ind][:2]:
                # NOTE(review): `encoded_options` and `encoding` are not defined
                # anywhere in this function (their construction was commented
                # out), so taking the CO path raises NameError — confirm intent.
                input_ids += [encoded_options + encoding["input_ids"][1:]]
            if correct_label == [0] or correct_label == [2]: label[0][0] = 1
            if correct_label == [1] or correct_label == [2]: label[1][0] = 1
        elif q_type == 'FB':
            # one binary head per candidate block (A/B/C)
            label = torch.tensor([[0]]*len(candidates[_ind]), device = device).long()
            if 'A' in correct_label: label[0][0] = 1
            if 'B' in correct_label: label[1][0] = 1
            if 'C' in correct_label: label[2][0] = 1
        elif q_type == 'FR':
            if dataset != 'stepgame':
                # seven directional/distance relations, multi-hot
                label = torch.tensor([0]*7, device = device).long()
                for ind, opt in enumerate(candidates[_ind][:7]):
                    if ind in correct_label: label[ind] = 1
            else:
                # stepgame: single-class index
                label = torch.tensor([correct_label], device=device).long()
        elif q_type == 'YN' and other == "noDK":
            # two-way Yes/No one-hot
            if correct_label == ['Yes']: label = torch.tensor([1,0], device = device).long()
            elif correct_label == ['No']: label = torch.tensor([0,1], device = device).long()
        elif q_type == 'YN':
            # three-way Yes/No/DK one-hot
            if correct_label == ['Yes']: label = torch.tensor([1,0,0], device = device).long()
            elif correct_label == ['No']: label = torch.tensor([0,1,0], device = device).long()
            else: label = torch.tensor([0,0,1], device = device).long()
        labels += [label]
    labels = torch.stack(labels).to(device)
    _outputs, extracted_triplets_index = model(text_tokenized, qs_tokenized, labels=labels, attention_mask_s = attention_mask_s, attention_mask_q = attention_mask_q, gt_triplets = gt_triplets)
    # ---- decode each question's logits back into an answer list ----
    losses, outs = [], []
    for outputs in _outputs:
        loss, logits = outputs[:2]
        losses += [loss]
        out_logit = [torch.argmax(log) for log in logits]
        out = [0]
        if q_type == 'FR':
            if dataset != 'stepgame':
                out = [ind for ind, o in enumerate(out_logit) if o.item() == 1]
                # opposite relations are mutually exclusive: for each pair
                # (above/below, left/right, near/far) keep the higher-scoring one
                if 2 in out and 3 in out:
                    if logits[2][1] >= logits[3][1]:
                        out.remove(3)
                    else:
                        out.remove(2)
                if 0 in out and 1 in out:
                    if logits[0][1] >= logits[1][1]:
                        out.remove(1)
                    else:
                        out.remove(0)
                if 4 in out and 5 in out:
                    if logits[4][1] >= logits[5][1]:
                        out.remove(5)
                    else:
                        out.remove(4)
                if out == []: out = [7]  # empty prediction falls back to "DK"
            else:
                out = [out_logit[0].item()]
        elif q_type == 'FB':
            blocks = ['A', 'B', 'C']
            out = [blocks[ind] for ind, o in enumerate(out_logit) if o.item() == 1]
        elif q_type == 'CO':
            out = [ind for ind, o in enumerate(out_logit) if o.item() == 1]
            if 0 in out and 1 in out:
                out = [2]   # both candidates
            elif out == []: out = [3]  # neither candidate
        elif q_type == 'YN' and other == 'multiple_class':
            max_arg = torch.argmax(logits)
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and other == 'DK' and (candidates == ['babi'] or candidates == ['boolq']):
            # two-way datasets: ignore the DK logit row
            max_arg = torch.argmax(logits[:2, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
        elif q_type == 'YN' and other == 'DK':
            max_arg = torch.argmax(logits[:, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and other == 'noDK':
            max_arg = torch.argmax(logits[:, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
        elif q_type == 'YN' and other == 'change_model':
            max_arg = torch.argmax(logits[:, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and candidates != ['babi']:
            max_arg = torch.argmax(logits[:, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and (candidates == ['babi'] or candidates == ['boolq']):
            max_arg = torch.argmax(logits[:, 1])
            out = ['Yes'] if max_arg.item() == 0 else ['No']
        outs += [out]
    losses = torch.stack(losses)
    return losses, outs, extracted_triplets_index #, out_logit
def tokenizing(text):
    """Split *text* into BERT word-piece tokens (no special tokens added)."""
    return tokenizer.tokenize(text)
def Prune_FR_answer_based_on_constraints(out_logit, logits, candidate_answer):
    """Filter an FR multi-label prediction so it respects spatial-consistency rules.

    From each group of mutually incompatible relations that were all predicted
    true, only the relation with the highest positive-class logit survives.
    If nothing was predicted at all, fall back to "DK" when it is among the
    candidates, otherwise to the single best-scoring relation.
    """
    # spartqa answer wording -> canonical relation names; "touching" maps to EC
    # because only relations between blocks are asked about in spartqa.
    if candidate_answer == ['left', 'right', 'above', 'below', 'near to', 'far from', 'touching', 'DK']:
        candidate_answer = ['left', 'right', 'above', 'below', 'near', 'far', 'ec', 'DK']
    predicted = [idx for idx, flag in enumerate(out_logit) if flag.item() == 1]
    kept, visited = [], []
    for rel_idx in predicted:
        if rel_idx in visited:
            continue
        visited.append(rel_idx)
        # indices of predicted relations that conflict with this one
        conflicts = [candidate_answer.index(name.lower())
                     for name in incompatible_relations[candidate_answer[rel_idx].upper()]
                     if name.lower() in candidate_answer
                     and candidate_answer.index(name.lower()) in predicted]
        if not conflicts:
            kept.append(rel_idx)
            continue
        # keep only the highest-scoring member of the conflicting group
        best = rel_idx
        for other_idx in conflicts:
            visited.append(other_idx)
            if logits[other_idx][1] >= logits[best][1]:
                best = other_idx
        kept.append(best)
    out = kept if kept else predicted
    if out == []:
        if "DK" in candidate_answer:
            out = [candidate_answer.index("DK")]
        else:
            out = [torch.argmax(logits[:, 1]).item()]
    return out
# Pairwise exclusion table used by Prune_FR_answer_based_on_constraints:
# maps each relation label to the labels that cannot hold at the same time
# between the same pair of objects.  The first eight keys are the RCC8
# topological relations (mutually exclusive with one another); the
# directional (LEFT/RIGHT, ABOVE/BELOW, FRONT/BEHIND) and distance
# (NEAR/FAR) relations exclude only their opposite.
incompatible_relations = {"DC": ["EC", "PO", "EQ", "NTPP", "NTPPI", "TPP", "TPPI"],
                          "EC": ["DC", "PO", "EQ", "NTPP", "NTPPI", "TPP", "TPPI"],
                          "PO": ["DC", "EC", "EQ", "NTPP", "NTPPI", "TPP", "TPPI"],
                          "NTPP": ["DC", "EC", "PO", "EQ", "NTPPI", "TPP", "TPPI"],
                          "NTPPI": ["DC", "EC", "PO", "EQ", "NTPP", "TPP", "TPPI"],
                          "TPP": ["DC", "EC", "PO", "EQ", "NTPP", "NTPPI", "TPPI"],
                          "TPPI": ["DC", "EC", "PO", "EQ", "NTPP", "NTPPI", "TPP"],
                          "EQ": ["DC", "EC", "PO", "NTPP", "NTPPI", "TPP", "TPPI"],
                          'RIGHT': ["LEFT"],
                          'LEFT':["RIGHT"],
                          'BELOW':["ABOVE"],
                          'ABOVE':["BELOW"],
                          "BEHIND": ["FRONT"],
                          "FRONT": ["BEHIND"],
                          'FAR':["NEAR"],
                          'NEAR':["FAR"]
                          }
# reverse = {"DC": ["EC", "PO", "EQ", "NTPP", "NTPPI", "TPP", "TPPI"],
# "EC": ["DC", "PO", "EQ", "NTPP", "NTPPI", "TPP", "TPPI"],
# "PO": ["DC", "EC", "EQ", "NTPP", "NTPPI", "TPP", "TPPI"],
# "NTPP": ["DC", "EC", "PO", "EQ", "NTPPI", "TPP", "TPPI"],
# "NTPPI": ["DC", "EC", "PO", "EQ", "NTPP", "TPP", "TPPI"],
# "TPP": ["DC", "EC", "PO", "EQ", "NTPP", "NTPPI", "TPPI"],
# "TPPI": ["DC", "EC", "PO", "EQ", "NTPP", "NTPPI", "TPP"],
# "EQ": ["DC", "EC", "PO", "NTPP", "NTPPI", "TPP", "TPPI"],
# 'RIGHT': ["LEFT"],
# 'LEFT':["RIGHT"],
# 'BELOW':["ABOVE"],
# 'ABOVE':["BELOW"],
# "BEHIND": ["FRONT"],
# "FRONT": ["BEHIND"],
# 'FAR':["FAR"],
# 'NEAR':["NEAR"]
# }
# return reverse[rel]
| 58,129 | 40.25621 | 239 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/BERT.py |
# checking with BERT
from torchnlp.nn import attention
from transformers import BertTokenizer, BertTokenizerFast
import torch
import random
import torch.nn as nn
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizerFast = BertTokenizerFast.from_pretrained('bert-base-uncased')
def question_answering(model, question, text, correct_label, device):
    """Extractive-QA forward pass for one (question, text) pair.

    ``correct_label`` is the gold [start, end] token span.  Returns
    (loss, answer string, predicted start index, predicted end index).
    """
    enc = tokenizer.encode_plus(question, text)
    ids = enc["input_ids"]
    segments = enc["token_type_ids"]
    start_gold = torch.tensor([correct_label[0]], device=device)
    end_gold = torch.tensor([correct_label[1]], device=device)
    loss, start_scores, end_scores = model(
        torch.tensor([ids]).to(device),
        token_type_ids=torch.tensor([segments]).to(device),
        start_positions=start_gold,
        end_positions=end_gold)
    best_start = torch.argmax(start_scores)
    best_end = torch.argmax(end_scores)
    # join the word-piece tokens of the predicted span into the answer string
    tokens = tokenizer.convert_ids_to_tokens(ids)
    answer = ' '.join(tokens[best_start:best_end + 1])
    return loss, answer, best_start, best_end
# def multiple_choice(model, question, text, candidate ,correct_label, device):
# encoding = tokenizer.encode_plus(question, text)
# max_len = max([len(tokenizing(opt)) for opt in candidate])
# input_ids, token_type_ids = [], []
# for opt in candidate:
# tokenized_opt = tokenizing(opt)
# num_tok = len(tokenized_opt)
# encoded_options = tokenizer.encode(tokenized_opt + ['[PAD]']*(max_len - num_tok))#[1:]
# input_ids += [encoded_options + encoding["input_ids"][1:]]
# token_type_ids += [[0]*(max_len+1) + encoding["token_type_ids"]]
# input_ids = torch.tensor(input_ids, device = device).unsqueeze(0)
# token_type_ids = torch.tensor(token_type_ids, device = device).unsqueeze(0)
# labels = torch.tensor(correct_label[0], device = device).unsqueeze(0) # Batch size 1
# outputs = model(input_ids, labels=labels)
# loss, classification_scores = outputs[:2]
# return loss, torch.argmax(classification_scores)
def boolean_classification(model, question, text, q_type, candidate ,correct_label, other, device, dataset = ""):
    """Answer one question by decoding per-option binary classification heads.

    The (question, story) pair is encoded once; depending on ``q_type`` a
    multi-hot gold label is built (CO: two candidate objects, FB: blocks
    A/B/C, FR: candidate relations, YN: Yes/No(/DK)) and the model's
    per-option logits are decoded back into an answer list.

    Args:
        model: classifier whose first two outputs are (loss, logits).
        question, text: question and story strings.
        q_type: 'CO', 'FB', 'FR' or 'YN'.
        candidate: candidate answer strings (or sentinels for YN variants).
        correct_label: gold answer in the dataset's native format.
        other: YN decoding variant ('DK', 'noDK', 'multiple_class', ...).
        device: torch device for all tensors.
        dataset: dataset name; changes FR label layout and YN head size.

    Returns:
        (loss, out): model loss and the decoded answer list.
    """
    encoding = tokenizer.encode_plus(question, text, max_length=512)
    # print('%%',text, question, candidate, correct_label)
    # print('&&', encoding)
    # NOTE(review): max_len stays unbound when `candidate` is falsy; the CO
    # branch below would then raise NameError — presumably CO always gets
    # candidates; confirm against callers.
    if candidate: max_len = max([len(tokenizing(opt)) for opt in candidate])
    input_ids, token_type_ids = [], []
    # ---- build gold labels (and, for CO/FR/FB, one input row per option) ----
    if q_type == 'CO':
        # two binary heads, one per candidate object; gold [2] means "both"
        labels = torch.tensor([[0]]*2, device = device).long()
        for opt in candidate[:2]:
            # prepend the PAD-aligned candidate tokens to the encoded pair
            tokenized_opt = tokenizing(opt)
            num_tok = len(tokenized_opt)
            encoded_options = tokenizer.encode(tokenized_opt + ['[PAD]']*(max_len - num_tok))#[1:]
            input_ids += [encoded_options + encoding["input_ids"][1:]]
        if correct_label == [0] or correct_label == [2]: labels[0][0] = 1
        if correct_label == [1] or correct_label == [2]: labels[1][0] = 1
    elif q_type == 'FR':
        # multi-hot over candidate relations; spartqa drops the last candidate ("DK")
        if dataset == "spartqa":
            labels = torch.tensor([0]*(len(candidate)-1), device = device).long()
            for ind, opt in enumerate(candidate[:-1]):
                input_ids += [encoding["input_ids"]]
                if ind in correct_label:labels[ind] = 1
        else:
            labels = torch.tensor([0]*(len(candidate)), device = device).long()
            for ind, opt in enumerate(candidate):
                input_ids += [encoding["input_ids"]]
                if ind in correct_label:labels[ind] = 1
    # NOTE(review): `if` (not `elif`) starts a second chain here.  For CO/FR
    # the conditions below are re-evaluated but never match, so the observed
    # behavior is unchanged — still fragile; confirm this is intentional.
    if q_type == 'FB':
        # one binary head per candidate block (A/B/C)
        labels = torch.tensor([[0]]*len(candidate), device = device).long()
        for opt in candidate:
            tokenized_opt = tokenizing(opt)
            # num_tok = len(tokenized_opt)
            encoded_options = tokenizer.encode(tokenized_opt)#[1:]
            input_ids += [encoded_options + encoding["input_ids"][1:]]
        if 'A' in correct_label: labels[0][0] = 1
        if 'B' in correct_label: labels[1][0] = 1
        if 'C' in correct_label: labels[2][0] = 1
    elif q_type == 'YN' and (other == "noDK" or dataset in[ "sprlqa", "babi", "boolq", "spartun"]):
        # two-way Yes/No one-hot.  NOTE(review): `labels` stays unbound when
        # the gold answer is neither ['Yes'] nor ['No'] — confirm inputs.
        if correct_label == ['Yes']: labels = torch.tensor([1,0], device = device).long()
        elif correct_label == ['No']: labels = torch.tensor([0,1], device = device).long()
        input_ids = [encoding["input_ids"]]
    elif q_type == 'YN': #and candidate != ['babi']:
        # three-way Yes/No/DK one-hot
        if correct_label == ['Yes']: labels = torch.tensor([1,0,0], device = device).long()
        elif correct_label == ['No']: labels = torch.tensor([0,1,0], device = device).long()
        else: labels = torch.tensor([0,0,1], device = device).long()
        input_ids = [encoding["input_ids"]]
    # else : labels = torch.tensor([0,0], device = device).long()
    # elif q_type == 'YN' and candidate == ['babi']:
    #     labels = torch.tensor([1,0], device = device).long() if correct_label == ['Yes'] else torch.tensor([0,1], device = device).long()
    #     input_ids = [encoding["input_ids"]]
    input_ids = torch.tensor(input_ids, device = device)
    # print('input_id', labels)
    # print(input_ids.shape, labels.shape)
    outputs = model(input_ids, labels=labels)
    loss, logits = outputs[:2]
    # print("loss, logits ", loss, logits)
    # ---- decode logits into an answer list ----
    out_logit = [torch.argmax(log) for log in logits]
    out = [0]
    if q_type == 'FR':
        out = [ind for ind,o in enumerate(out_logit) if o.item() == 1]
        # opposite relations are mutually exclusive: for each predicted pair
        # (above/below, left/right, near/far) keep the higher-scoring one
        if 2 in out and 3 in out:
            if logits[2][1] >= logits[3][1]:
                out.remove(3)
            else:
                out.remove(2)
        if 0 in out and 1 in out:
            if logits[0][1] >= logits[1][1]:
                out.remove(1)
            else:
                out.remove(0)
        if 4 in out and 5 in out:
            if logits[4][1] >= logits[5][1]:
                out.remove(5)
            else:
                out.remove(4)
        if out == []: out = [7]  # empty prediction falls back to "DK"
    elif q_type == 'FB':
        blocks = ['A', 'B', 'C']
        out = [blocks[ind] for ind,o in enumerate(out_logit) if o.item() == 1]
        # out = [blocks[ind] for ind,o in enumerate(out_logit) if o.item() == 1]
        # if 'C' in out and 'C' not in candidate: out.remove('C')
    elif q_type == 'CO':
        out = [ind for ind,o in enumerate(out_logit) if o.item() == 1]
        if 0 in out and 1 in out:
            out = [2]              # both candidates
        elif out == []: out = [3]  # neither candidate
    elif q_type == 'YN' and other == 'multiple_class':
        max_arg = torch.argmax(logits)
        # print(correct_label, logits, max_arg)
        if max_arg.item() == 0: out = ['Yes']
        elif max_arg.item() == 1: out = ['No']
        else: out = ['DK']
    # elif q_type == 'YN' and other == 'DK' and (candidate == ['babi'] or candidate == ['boolq']):
    #     # print('logits: ', logits)
    #     max_arg = torch.argmax(logits[:2, 1])
    #     # print("2", max_arg )
    #     if max_arg.item() == 0: out = ['Yes']
    #     elif max_arg.item() == 1: out = ['No']
    elif q_type == 'YN' and other == 'DK':
        max_arg = torch.argmax(logits[:, 1])
        # print("2", max_arg , logits)
        if max_arg.item() == 0: out = ['Yes']
        elif max_arg.item() == 1: out = ['No']
        else: out = ['DK']
    elif q_type == 'YN' and (other == 'noDK' or other in ['sprlqa', 'babi', 'boolq', 'spartun']):
        max_arg = torch.argmax(logits[:, 1])
        # print("2", max_arg)
        if max_arg.item() == 0: out = ['Yes']
        elif max_arg.item() == 1: out = ['No']
        else: # never come here
            #if they don't have higher 1 so we check the lowest 0
            out = ['No'] if torch.argmax(logits[:, 0]) == 0 else ['Yes']
        # else: out = ['DK']
    elif q_type == 'YN' and other == 'change_model':
        # max_arg = torch.argmax(logits[:, 1])
        # if max_arg.item() == 0: out = ['Yes']
        # elif max_arg.item() == 1: out = ['No']
        max_arg = torch.argmax(logits[:, 1])
        if max_arg.item() == 0: out = ['Yes']
        elif max_arg.item() == 1: out = ['No']
        else: out = ['DK']
    elif q_type == 'YN' and candidate != ['babi']:
        max_arg = torch.argmax(logits[:, 1])
        if max_arg.item() == 0: out = ['Yes']
        elif max_arg.item() == 1: out = ['No']
        else: out = ['DK']
    # print('%%',logits, out)
    # if out_logit[0] == out_logit[1]:
    #     if out_logit[0].item() == 0: out = ['DK']
    # else:
    #     max_arg = torch.argmax(logits[: , 1])
    #     out = ['Yes'] if max_arg.item() == 0 else ['No']
    # else: out = ['Yes'] if out_logit[0].item() == 1 else ['No']
    # elif q_type == 'YN' and (candidate == ['babi'] or candidate == ['boolq']):
    #     max_arg = torch.argmax(logits[:, 1])
    #     out = ['Yes'] if max_arg.item() == 0 else ['No']
    # print('out logit: ', out)
    return loss, out #, out_logit
def multiple_classification(model, question, text, q_type, candidate, correct_label, other, device):
    """Single-head multi-class classification over one encoded (question, text) pair.

    Builds a single class-index gold label according to ``q_type`` (FR: the
    gold index directly; YN: 0=Yes, 1=No, 2=DK), runs the model once, and
    maps the argmax of the logits back to an answer list.

    Returns:
        (loss, out): model loss and a one-element answer list.
    """
    enc = tokenizer.encode_plus(question, text)
    # ---- gold class index ----
    if q_type == 'FR':
        gold = torch.tensor([correct_label], device=device).long()
    elif q_type == 'YN' and other == "DK":
        if correct_label == ['Yes']:
            gold = torch.tensor([0], device=device).long()
        elif correct_label == ['No']:
            gold = torch.tensor([1], device=device).long()
        else:
            gold = torch.tensor([2], device=device).long()
    elif q_type == 'YN' and other == "noDK":
        if correct_label == ['Yes']:
            gold = torch.tensor([0], device=device).long()
        elif correct_label == ['No']:
            gold = torch.tensor([1], device=device).long()
    elif q_type == 'YN':
        if correct_label == ['Yes']:
            gold = torch.tensor([0], device=device).long()
        elif correct_label == ['No']:
            gold = torch.tensor([1], device=device).long()
        else:
            gold = torch.tensor([2], device=device).long()
    batch = torch.tensor([enc["input_ids"]], device=device)
    loss, logits = model(batch, labels=gold)[:2]
    # ---- decode the argmax class back into an answer ----
    out = [0]
    if q_type == 'YN':
        pick = torch.argmax(logits[0]).item()
        if pick == 0:
            out = ['Yes']
        elif pick == 1:
            out = ['No']
        else:
            out = ['DK']
    elif q_type == 'FR':
        out = [candidate[torch.argmax(logits[0]).item()]]
    return loss, out
def Masked_LM(model, text, question, answer, other, device, file):
    """Score a masked-LM reconstruction of *answer* at the [MASK] slots of (question, text).

    Every input position whose id is 103 (the BERT [MASK] id) is labelled with
    the corresponding word-piece of *answer*; all other positions get -100 so
    the loss ignores them.  Gold and predicted tokens are printed to *file*.

    Returns:
        (loss, predicted token ids, gold token ids) — both lists in mask order.
    """
    input_ids = tokenizer.encode_plus(question, text, return_tensors="pt")["input_ids"].to(device)
    mask_positions = [pos for pos, tok in enumerate(input_ids[0]) if tok == 103]
    answer_ids = [tokenizer.convert_tokens_to_ids(t) for t in tokenizing(answer)]
    # labels: -100 everywhere except the masked positions
    gold = [-100] * len(input_ids[0])
    for k, pos in enumerate(mask_positions):
        gold[pos] = answer_ids[k]
    label_ids = torch.tensor([gold], device=device)
    loss, predictions = model(input_ids, labels=label_ids)[:2]
    ground_truth = [label_ids[0][pos].item() for pos in mask_positions]
    truth_token = tokenizer.convert_ids_to_tokens(ground_truth)
    print("truth token: ", truth_token, file = file)
    predicted_index = [torch.argmax(predictions[0, pos]).item() for pos in mask_positions]
    predicted_token = tokenizer.convert_ids_to_tokens(predicted_index)
    print("pred_token: ", predicted_token, file = file)
    return loss, predicted_index, ground_truth
def Masked_LM_random(model, text, seed_num, other, device, file):
    """Randomly mask/perturb *text* BERT-style and score the model's recovery.

    With the RNG seeded by ``seed_num``: ~12% of non-special positions are
    replaced by [MASK], ~1.5% are kept unchanged but still predicted, and
    ~1.5% are replaced by a random vocabulary id (vocab size 30522,
    bert-base-uncased).  Gold and predicted tokens are printed to *file*.

    Returns:
        (loss, predicted token ids, gold token ids) — both lists ordered as
        masked + unchanged + changed positions.
    """
    input_ids = tokenizer(text, return_tensors="pt")["input_ids"].to(device)
    seq_len = len(input_ids[0])
    random.seed(seed_num)
    # positions to hide behind [MASK] (indices 0 / -1 are [CLS] / [SEP])
    masked = random.sample(range(1, seq_len - 1), int(seq_len * 0.12))
    # ~1.5% kept as-is but still predicted (may sample duplicates, as before)
    not_changed = []
    temp, not_changed_num = 0, int(seq_len * 0.015)
    while True:
        x = random.choice(range(1, seq_len - 1))
        if x not in masked: not_changed.append(x); temp += 1
        if temp >= not_changed_num: break
    # ~1.5% replaced by a random vocabulary token
    changed = []
    temp, changed_num = 0, int(seq_len * 0.015)
    while True:
        x = random.choice(range(1, seq_len - 1))
        if x not in masked and x not in not_changed: changed.append(x); temp += 1
        if temp >= changed_num: break
    # labels: -100 everywhere except the predicted positions (ignored by loss)
    gold = [-100] * seq_len
    for i in masked:
        gold[i] = input_ids[0][i].item()
        input_ids[0][i] = tokenizer.convert_tokens_to_ids('[MASK]')
    for i in not_changed:
        gold[i] = input_ids[0][i].item()
    for i in changed:
        changed_word = random.choice(range(0, 30522))
        gold[i] = input_ids[0][i].item()
        input_ids[0][i] = changed_word
    label_ids = torch.tensor([gold], device = device)
    # BUG FIX: the original built the segment ids with ``len(input_ids)`` (the
    # batch size, i.e. 1) instead of the sequence length, producing a (1, 1)
    # token_type_ids tensor that does not match the (1, seq_len) input.
    segments_ids = torch.tensor([[0] * seq_len], device = device)
    outputs = model(input_ids, labels = label_ids, token_type_ids=segments_ids)
    loss, predictions = outputs[:2]
    ground_truth = ([label_ids[0][i].item() for i in masked]
                    + [label_ids[0][i].item() for i in not_changed]
                    + [label_ids[0][i].item() for i in changed])
    truth_token = tokenizer.convert_ids_to_tokens(ground_truth)
    print("truth token: ", truth_token, file = file)
    predicted = [torch.argmax(predictions[0, p]).item() for p in masked]
    predicted += [torch.argmax(predictions[0, p]).item() for p in not_changed]
    predicted += [torch.argmax(predictions[0, p]).item() for p in changed]
    pred_token = tokenizer.convert_ids_to_tokens(predicted)
    print("pred_token: ", pred_token, file = file)
    return loss, predicted, ground_truth
# with just triplet classifier
def spatial_classification(model, sentence, triplet, label, device):
    """Binary triplet classifier: does (trajector, spatial_indicator, landmark)
    form a valid spatial triplet in *sentence*?

    Each role's token span is copied in front of the sentence tokens,
    separated by [SEP] (id 102), and the in-sentence span is flagged in the
    segment ids.  Returns (loss, 0/1 prediction with threshold 0.5).
    """
    sent_ids = tokenizer.encode(sentence, add_special_tokens=True)
    segment_flags = [0] * len(sent_ids)
    prefix = []
    # Copy each role span (in this fixed order) ahead of the sentence; a
    # missing trajector/landmark still contributes its [SEP] so the layout
    # stays stable, while a missing spatial indicator adds nothing.
    for role, sep_when_absent in (('trajector', True), ('landmark', True), ('spatial_indicator', False)):
        span = triplet[role]
        if span != [-1, -1]:
            prefix += sent_ids[span[0]:span[1] + 1] + [102]
            for pos in range(span[0], span[1] + 1):
                segment_flags[pos] = 1
        elif sep_when_absent:
            prefix += [102]
    segment_flags = [0] * len(prefix) + segment_flags
    full_ids = [sent_ids[0]] + prefix + sent_ids[1:]
    token_type_ids = torch.tensor([segment_flags], device=device)
    inputs = torch.tensor(full_ids, device=device).unsqueeze(0)
    labels = torch.tensor([label], device=device).float()
    loss, logits = model(inputs, token_type_ids=token_type_ids, labels=labels)
    prediction = 1 if logits[0] > 0.5 else 0
    return loss, prediction
#triplet classifier and relation type extraction
def spatial_type_classification(model, sentence, triplets, triplet_labels = None, type_labels = None, device= 'cuda:0', asked_class_label = None, other = None, asked = False):
    """Joint triplet-validity classification and spatial relation-type extraction.

    For every candidate triplet (trajector / spatial_indicator / landmark
    token spans over *sentence*), the role spans are copied in front of the
    sentence tokens (separated by [SEP], id 102), the in-sentence spans are
    flagged in the segment ids, and the batch is zero-padded to a fixed
    prefix width of 26.  The model then scores (a) triplet validity
    (threshold 0.65) and (b) the relation type (argmax).

    Args:
        model: returns (loss, validity logits, type logits) when labels are
            given, otherwise (_, validity logits, type logits).
        sentence: raw text, or (when other == 'sptypeQA') a pre-tokenized
            batch whose first row is the id tensor.
        triplets: list of dicts with 'trajector'/'spatial_indicator'/'landmark'
            [start, end] spans ([-1,-1] or ['',''] when the role is absent).
        triplet_labels, type_labels: gold labels for training; None at inference.
        other: mode switch ('sptypeQA', 'triplet', or None).
        asked_class_label, asked: currently unused (the "asked" head is
            commented out below).

    Returns:
        Training: (loss, validity predictions, type predictions).
        Inference: the same with loss omitted, or — in 'sptypeQA'/'triplet'
        modes — the triplet's token ids / decoded text (see NOTE below).
    """
    if other == 'sptypeQA':
        encoding = sentence[0]
    else:
        tokenized_text = tokenizing(sentence)
        encoding = tokenizer.encode(sentence, add_special_tokens=True)
    all_token_type_ids, triplets_input = [], []
    for triplet in triplets:
        token_type_ids = [0]*len(encoding)
        triplets_tokens, triplets_tokens_pass, _triplets_tokens_pass = [], [], []
        # --- trajector span: copied into the prefix and flagged in segments ---
        if triplet['trajector'] not in [['',''], [-1,-1]] :
            if other == 'sptypeQA':
                triplets_tokens += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1].tolist()+[102]
                triplets_tokens_pass += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1].tolist()#+[1010]
            elif other == 'triplet':
                triplets_tokens += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1]+[102]
                triplets_tokens_pass += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1]+[102]
            else:
                triplets_tokens += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1]+[102]
            for x in range(triplet['trajector'][0],triplet['trajector'][1]+1):
                token_type_ids[x] = 1
        # --- spatial indicator span ---
        if triplet['spatial_indicator'] not in [['',''], [-1,-1]]:
            if other == 'sptypeQA' :
                triplets_tokens += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1].tolist()+[102]
                triplets_tokens_pass += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1].tolist()+[102]#+[1010]
            elif other == 'triplet':
                triplets_tokens += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1]+[102]
                triplets_tokens_pass += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1]+[102]
            else:
                triplets_tokens += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1]+[102]
            for x in range(triplet['spatial_indicator'][0],triplet['spatial_indicator'][1]+1):
                token_type_ids[x] = 1
        # print('&&', tokenized_text[triplet['spatial_indicator'][0]-1:triplet['spatial_indicator'][1]]) # shouldn't consider [cls] so subtract 1
        spatial_indicator = ''
        if other != 'sptypeQA' and other != 'triplet':
            # -1 offset: tokenized_text has no [CLS] while `encoding` does
            for z in tokenized_text[triplet['spatial_indicator'][0]-1:triplet['spatial_indicator'][1]]:
                spatial_indicator += z +' '
        # --- landmark span ---
        if triplet['landmark'] not in [['',''], [-1,-1]]:
            if other == 'sptypeQA' :
                triplets_tokens += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1].tolist()+[102]
                triplets_tokens_pass += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1].tolist()#+[1010]
            elif other == 'triplet':
                triplets_tokens += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1]+[102]
                _triplets_tokens_pass += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1]+[102]
            else:
                triplets_tokens += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1]+[102]
            for x in range(triplet['landmark'][0],triplet['landmark'][1]+1):
                token_type_ids[x] = 1
        token_type_ids = [0]*len(triplets_tokens) + token_type_ids + [0]*(26-len(triplets_tokens)) #add PAD to the batch for 6 for larger land and traj and 4 for spatial_ndicator
        all_token_type_ids += [token_type_ids]
        if _triplets_tokens_pass: triplets_tokens_pass = _triplets_tokens_pass
        if other == 'sptypeQA':
            _encoding =[encoding[0].tolist()]+ triplets_tokens + encoding[1:].tolist()+ [0]*(26-len(triplets_tokens))
        else: _encoding = [encoding[0]]+ triplets_tokens + encoding[1:]+ [0]*(26-len(triplets_tokens))
        triplets_input += [_encoding]
    token_type_ids = torch.tensor(all_token_type_ids, device = device)
    inputs = torch.tensor(triplets_input, device = device)
    # print(inputs, token_type_ids, triplets)
    if triplet_labels != None:
        labels = [torch.tensor(triplet_labels, device = device).float(), torch.tensor(type_labels, device = device).long()]
        # labels = [torch.tensor([label], device = device).float(), torch.tensor([type_label], device = device).long(), torch.tensor([asked_class_label if asked_class_label!= None else 0], device = device).float() ]
        # print(inputs.shape, labels.shape)
        loss, logits1, logits2 = model(inputs, token_type_ids = token_type_ids, labels=labels)
        # loss, logits1, logits2, logits3 = model(inputs, token_type_ids = token_type_ids, labels=labels, asked_compute = asked_class_label)
        # print(logits1)
    else:
        type_labels = None
        loss = None
        # _,logits1, logits2, logits3 = model(inputs, token_type_ids = token_type_ids, asked_compute = True)
        _,logits1, logits2 = model(inputs, token_type_ids = token_type_ids)
    # validity head: per-triplet threshold of 0.65
    predicted_index1 = [1 if x > 0.65 else 0 for x in logits1[0]]
    # print('logits2', logits2)
    predicted_index2 = [torch.argmax(x).item() for x in logits2[0]]
    # if asked_class_label != None:
    #     predicted_index3 = 1 if logits3[0] > 0.65 else 0
    # else: predicted_index3 = None
    # predicted_index = torch.argmax(logits).item()
    # NOTE(review): `' triplet'` below has a leading space (likely a typo for
    # 'triplet'), and `predicted_index1` is a list so `== 1` is always False —
    # in 'sptypeQA' mode this branch therefore always returns None; confirm.
    if triplet_labels == None and (other == 'sptypeQA' or other == ' triplet'):
        if predicted_index1 == 1:
            if other == 'sptypeQA':
                return triplets_tokens_pass
            #if we want to pass the triplet in the form of text
            elif other == 'triplet':
                #all_rel_types = ['NaN', 'LEFT','RIGHT','BELOW','ABOVE','NEAR', 'FAR', 'TPP', 'NTPP', 'NTPPI', 'EC']
                triplet_sent = tokenizer.decode(triplets_tokens_pass)
                triplet_sent = triplet_sent[:-5].replace('[SEP]',',')+'.'
                ###For replacing spatial indicator
                #end = triplet_sent.rfind('[SEP]',0,len(triplet_sent)-5)
                #triplet_sent = triplet_sent[:end].replace('[SEP]',',') + '. ' #+all_rel_types[predicted_index2]+'.'
                # print('%%', triplet_sent)
                return triplet_sent
            #else
            # triplet_sent = triplet_sent.replace('[SEP]',',')
            # triplet_sent += ' '+all_rel_types[predicted_index2]+'.'
            # triplet_sent = triplet_sent[:-1]+'.'
            # print('In BERT, triplet tokens: ',triplet_sent)
        else: return None
    elif triplet_labels == None:
        # if logits3:
        return _, predicted_index1, predicted_index2#, predicted_index3
        # else:
        #     return _, predicted_index1, predicted_index2
    else:
        # if logits3:
        return loss, predicted_index1, predicted_index2#, predicted_index3 #, predicted_index
        # else:
        #     return loss, predicted_index1, predicted_index2 #, predicted_index
def spatial_type_classification_before_batch(model, sentence, triplets, triplet_label = None, type_labels = None, device= 'cuda:0', asked_class_label = None, other = None, asked = False):
    """Classify one candidate spatial triplet in a single (non-batched) sentence.

    Encodes the sentence, prepends the triplet's trajector / spatial-indicator /
    landmark token spans (separated by [SEP], id 102) to the encoding, and asks
    the model (a) whether the triplet is a valid spatial relation (logits1,
    threshold 0.65) and (b) which relation type it has (logits2, argmax).

    Args:
        model: classifier returning (loss, logits1, logits2) when labels are
            given, otherwise (_, logits1, logits2).
        sentence: raw sentence string, or for other == 'sptypeQA' a sequence
            whose first element is a pre-encoded id tensor.
        triplets: dict with 'trajector', 'spatial_indicator' and 'landmark'
            [start, end] token spans (['',''] marks an absent role).
        triplet_label: gold binary label; None at inference time.
        type_labels: gold relation-type label (used only with triplet_label).
        device: torch device string.
        asked_class_label, asked: unused; kept for interface compatibility
            with the commented-out "asked" variant of this function.
        other: mode switch — 'sptypeQA' returns the raw triplet token ids,
            'triplet' returns the triplet decoded back to text, anything else
            returns the raw predictions.

    Returns:
        Mode-dependent: triplet token ids, decoded triplet text, None, or a
        (loss-or-placeholder, predicted_index1, predicted_index2) tuple.
    """
    # The body predates the current parameter names (`triplets`,
    # `triplet_label`, `type_labels`); alias them so the function no longer
    # raises NameError on first use.
    triplet = triplets
    label = triplet_label
    type_label = type_labels
    #tokenized sentence
    if other == 'sptypeQA':
        # Caller hands us an already-encoded id tensor.
        encoding = sentence[0]
    else:
        tokenized_text = tokenizing(sentence)
        encoding = tokenizer.encode(sentence, add_special_tokens=True)
    token_type_ids = [0]*len(encoding)
    triplets_tokens, triplets_tokens_pass, _triplets_tokens_pass = [], [], []
    if triplet['trajector'] != ['','']:
        if other == 'sptypeQA':
            # Pre-encoded ids are tensors, hence the .tolist() conversions.
            triplets_tokens += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1].tolist()+[102]
            triplets_tokens_pass += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1].tolist()
        elif other == 'triplet':
            triplets_tokens += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1]+[102]
            triplets_tokens_pass += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1]+[102]
        else:
            triplets_tokens += encoding[triplet['trajector'][0]:triplet['trajector'][1]+1]+[102]
        # Mark the span inside the original sentence with segment id 1.
        for x in range(triplet['trajector'][0],triplet['trajector'][1]+1):
            token_type_ids[x] = 1
    if triplet['spatial_indicator'] != ['','']:
        if other == 'sptypeQA' :
            triplets_tokens += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1].tolist()+[102]
            triplets_tokens_pass += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1].tolist()+[102]
        elif other == 'triplet':
            triplets_tokens += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1]+[102]
            triplets_tokens_pass += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1]+[102]
        else:
            triplets_tokens += encoding[triplet['spatial_indicator'][0]:triplet['spatial_indicator'][1]+1]+[102]
        for x in range(triplet['spatial_indicator'][0],triplet['spatial_indicator'][1]+1):
            token_type_ids[x] = 1
    spatial_indicator = ''
    if other != 'sptypeQA' and other != 'triplet':
        # [CLS] is not part of tokenized_text, hence the -1 offset.
        for z in tokenized_text[triplet['spatial_indicator'][0]-1:triplet['spatial_indicator'][1]]:
            spatial_indicator += z +' '
    if triplet['landmark'] != ['','']:
        if other == 'sptypeQA' :
            triplets_tokens += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1].tolist()+[102]
            triplets_tokens_pass += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1].tolist()
        elif other == 'triplet':
            triplets_tokens += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1]+[102]
            _triplets_tokens_pass += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1]+[102]
        else:
            triplets_tokens += encoding[triplet['landmark'][0]:triplet['landmark'][1]+1]+[102]
        for x in range(triplet['landmark'][0],triplet['landmark'][1]+1):
            token_type_ids[x] = 1
    if _triplets_tokens_pass: triplets_tokens_pass += _triplets_tokens_pass
    # Segment ids for the prepended triplet tokens are all 0.
    token_type_ids = [0]*len(triplets_tokens) + token_type_ids
    if other == 'sptypeQA':
        encoding =[ encoding[0].tolist()]+ triplets_tokens + encoding[1:].tolist()
    else: encoding = [encoding[0]]+ triplets_tokens + encoding[1:]
    token_type_ids = torch.tensor([token_type_ids], device = device)
    inputs = torch.tensor(encoding, device = device).unsqueeze(0)
    logits3 = None
    if label != None:
        labels = [torch.tensor([label], device = device).float(), torch.tensor([type_label], device = device).long()]
        loss, logits1, logits2 = model(inputs, token_type_ids = token_type_ids, labels=labels)
    else:
        type_label = None
        loss = None
        _,logits1, logits2 = model(inputs, token_type_ids = token_type_ids)
    # Binary "is this a spatial triplet" decision at a 0.65 threshold.
    predicted_index1 = 1 if logits1[0] > 0.65 else 0
    predicted_index2 = torch.argmax(logits2[0]).item()
    # BUGFIX: was `other == ' triplet'` (leading space), which made the
    # 'triplet' decode branch below unreachable.
    if label == None and (other == 'sptypeQA' or other == 'triplet'):
        if predicted_index1 == 1:
            if other == 'sptypeQA':
                return triplets_tokens_pass
            #if we want to pass the triplet in the form of text
            elif other == 'triplet':
                triplet_sent = tokenizer.decode(triplets_tokens_pass)
                # Drop the trailing [SEP] and turn the inner separators into commas.
                triplet_sent = triplet_sent[:-5].replace('[SEP]',',')+'.'
                return triplet_sent
        else: return None
    elif label == None:
        return _, predicted_index1, predicted_index2
    else:
        return loss, predicted_index1, predicted_index2
def token_classification(model, text, traj=None, land=None, indicator=None, other=None, device = 'cuda:0', file = ''):
    """Run token-level (BIO) classification over *text*.

    When any gold spans (traj / land / indicator) are supplied, the model runs
    in supervised mode and the loss plus the flattened gold label ids are
    returned as well; otherwise loss and truth stay as empty strings.

    Returns:
        (loss, predicted_index, truth) where predicted_index is the per-token
        argmax over the model's logits.
    """
    loss = ''
    truth = ''
    # For 'sptypeQA' the caller already hands us encoded input ids.
    if other == 'sptypeQA':
        model_input = text
    else:
        model_input = tokenizer(text, return_tensors="pt")["input_ids"].to(device)
    supervised = bool(traj or land or indicator)
    if supervised:
        gold = make_token_label(text, traj, land, indicator, other).to(device)
        outputs = model(model_input, labels=gold)
        loss = outputs.loss
        truth = [t.item() for t in gold[0].flatten()]
    else:
        outputs = model(model_input)
    predicted_index = [torch.argmax(row).item() for row in outputs.logits[0]]
    return loss, predicted_index, truth
def _mark_bio_span(labels, token_starts, token_ends, span, b_id, i_id):
    """Write B/I label ids for one annotated character span into *labels*.

    Spans whose character offsets do not line up with any token boundary are
    silently skipped (known wrong annotations in the data).
    """
    if span['start'] not in token_starts or span['end'] not in token_ends:
        return
    # +1 compensates for the [CLS] token stripped by the [1:-1] slice.
    b_token = token_starts[1:-1].index(span['start']) + 1
    e_token = token_ends[1:-1].index(span['end']) + 1
    labels[b_token] = b_id
    for i in range(b_token + 1, e_token + 1):
        labels[i] = i_id
def make_token_label(text, traj, land, indicator, other):
    """Build a (1, seq_len) tensor of BIO token labels from character-offset
    annotations of trajectors, landmarks and spatial indicators.

    Trajector and landmark spans share the generic 'entity' tag pair; spatial
    indicators get their own tag pair. *other* is unused and kept only for
    interface compatibility with callers.
    """
    encoding = tokenizerFast(text, return_offsets_mapping=True)
    token_starts = [item[0] for item in encoding['offset_mapping']]
    token_ends = [item[1] for item in encoding['offset_mapping']]
    labels_id = ['O', 'B_entity', 'I_entity', 'B_indicator', 'I_indicator']
    labels = torch.tensor([0] * len(encoding['input_ids']))
    b_ent, i_ent = labels_id.index('B_entity'), labels_id.index('I_entity')
    b_ind, i_ind = labels_id.index('B_indicator'), labels_id.index('I_indicator')
    # Trajectors and landmarks both map onto the generic entity tags.
    for span in list(traj) + list(land):
        _mark_bio_span(labels, token_starts, token_ends, span, b_ent, i_ent)
    for span in indicator:
        _mark_bio_span(labels, token_starts, token_ends, span, b_ind, i_ind)
    return labels.unsqueeze(0)
def extract_entity_token(text, traj, land, indicator, _tuple=False):
    """Map character-offset annotations for one triplet onto token indices.

    Returns a dict with [start, end] token positions for 'trajector',
    'landmark' and 'spatial_indicator' ([-1, -1] when a role is absent or
    unannotated); when _tuple is True an empty 'rel_type' slot is included.
    """
    encoding = tokenizerFast(text, return_offsets_mapping=True)
    starts = [pair[0] for pair in encoding['offset_mapping']]
    ends = [pair[1] for pair in encoding['offset_mapping']]
    token_index = {'trajector': [-1, -1], 'landmark': [-1, -1], 'spatial_indicator': [-1, -1]}
    if _tuple:
        token_index['rel_type'] = ''
    for role, annot in (('trajector', traj), ('landmark', land), ('spatial_indicator', indicator)):
        has_span = (annot['start'] != '' and annot['start'] != -1
                    and annot['end'] != '' and annot['end'] != -1)
        if has_span:
            # +1 compensates for the [CLS] token stripped by the [1:-1] slice.
            token_index[role][0] = starts[1:-1].index(annot['start']) + 1
            token_index[role][1] = ends[1:-1].index(annot['end']) + 1
    return token_index
# def boolean_classification_end2end(model, question, text, q_type, candidate ,correct_label, other, device):
def boolean_classification_end2end(model, questions, text, q_type, candidates ,correct_labels, other, device, story_annot =None, qs_annot=None, seperate = False):
    """End-to-end QA classification over a story and a batch of questions.

    Tokenizes the story (optionally sentence-by-sentence) and the questions,
    builds one gold label tensor per question according to q_type
    ('CO' choose-object, 'FB' find-block, 'FR' find-relation, 'YN' yes/no),
    runs the model, and decodes each question's logits back into an answer.

    Args:
        model: QA model; returns one (loss, logits, ...) tuple per question.
        questions: list of question strings.
        text: the story text.
        q_type: one of 'CO', 'FB', 'FR', 'YN'.
        candidates: per-question candidate answers (usage depends on q_type).
        correct_labels: per-question gold answers.
        other: mode string — 'supervised' forwards the annotation arguments;
            for YN it selects the decoding scheme ('DK', 'noDK',
            'multiple_class', 'change_model').
        device: torch device string.
        story_annot, qs_annot: optional gold annotations, forwarded only when
            other == 'supervised'.
        seperate: split the story into individual sentences before encoding.

    Returns:
        (losses, outs): stacked per-question losses and decoded answers.
    """
    # encoding = tokenizer.encode_plus(question, text)
    # print(text, question, candidate, correct_label)
    if seperate:
        #seperate each sentence
        sentences = [h+'.' for h in text.split('. ')]
        sentences[-1] = sentences[-1][:-1]
        text_tokenized = tokenizer(sentences, return_tensors="pt", padding=True)["input_ids"].to(device)
    else:
        text_tokenized = tokenizer(text, return_tensors="pt", padding=True)["input_ids"].to(device)
    # print(text,'\n', text_tokenized)
    # print('&&', questions)
    qs_tokenized = tokenizer(questions, return_tensors="pt", padding=True)["input_ids"].to(device)
    # print(qs_tokenized)
    # print('## text+qs_tokenized: ',text_tokenized)
    # qs_tokenized = tokenizer(question, return_tensors="pt")["input_ids"].to(device)
    # print('### qs_tokenized: ',qs_tokenized)
    # inputs = tokenizer(text, return_tensors="pt")["input_ids"].to(device)
    # if candidate: max_len = max([len(tokenizing(opt)) for opt in candidate])
    # token_type_ids is never populated below — presumably a leftover.
    input_ids, token_type_ids = [], []
    labels =[]
    # Build one gold label tensor per question.
    for _ind,correct_label in enumerate(correct_labels):
        if q_type == 'CO':
            label = torch.tensor([[0]]*2, device = device).long()
            # candid_tokenized = tokenizer(candidate[:2], return_tensors="pt", padding=True)["input_ids"].to(device)
            for opt in candidates[_ind][:2]:
                # tokenized_opt = tokenizing(opt)
                # num_tok = len(tokenized_opt)
                # encoded_options = tokenizer.encode(tokenized_opt + ['[PAD]']*(max_len - num_tok))#[1:]
                # NOTE(review): `encoded_options` and `encoding` are undefined in
                # this scope (leftovers of the commented-out code above) — this
                # line raises NameError if the 'CO' path is taken. Verify whether
                # this branch is ever exercised.
                input_ids += [encoded_options + encoding["input_ids"][1:]]
                if correct_label == [0] or correct_label == [2]: label[0][0] = 1
                if correct_label == [1] or correct_label == [2]: label[1][0] = 1
        elif q_type == 'FB':
            label = torch.tensor([[0]]*len(candidates[_ind]), device = device).long()
            candid_tokenized = tokenizer(candidates[_ind], return_tensors="pt", padding=True)["input_ids"].to(device)
            for opt in candidates[_ind]:
                # tokenized_opt = tokenizing(opt)
                # # num_tok = len(tokenized_opt)
                # encoded_options = tokenizer.encode(tokenized_opt)#[1:]
                # NOTE(review): same undefined-name problem as the 'CO' branch.
                input_ids += [encoded_options + encoding["input_ids"][1:]]
                if 'A' in correct_label: label[0][0] = 1
                if 'B' in correct_label: label[1][0] = 1
                if 'C' in correct_label: label[2][0] = 1
        elif q_type == 'FR':
            # _input = []
            # 7 relation slots; multi-label (several relations may hold at once).
            label = torch.tensor([0]*7, device = device).long()
            for ind, opt in enumerate(candidates[_ind][:7]):
                # _input += [text_tokenized]
                if ind in correct_label:label[ind] = 1
            # text_tokenized = torch.stack(_input)
            # print('label', label)
        # elif q_type == 'YN' and other == "DK": #and candidate != ['babi']:
        #     if correct_label == ['Yes']: label = torch.tensor([1,0,0], device = device).long()
        #     elif correct_label == ['No']: label = torch.tensor([0,1,0], device = device).long()
        #     else: label = torch.tensor([0,0,1], device = device).long()
        #     input_ids = [encoding["input_ids"]]
        # elif q_type == 'YN' and other == "noDK":
        #     if correct_label == ['Yes']: label = torch.tensor([1,0], device = device).long()
        #     elif correct_label == ['No']: label = torch.tensor([0,1], device = device).long()
        #     input_ids = [encoding["input_ids"]]
        # elif q_type == 'YN' and candidate == ['boolq']:
        #     if correct_label == ['Yes']: label = torch.tensor([1,0], device = device).long()
        #     elif correct_label == ['No']: label = torch.tensor([0,1], device = device).long()
        #     # else: label = torch.tensor([0,0,1], device = device).long()
        #     input_ids = [encoding["input_ids"]]
        elif q_type == 'YN': #and candidate != ['babi']:
            # Three-way one-hot: Yes / No / Don't-Know.
            if correct_label == ['Yes']: label = torch.tensor([1,0,0], device = device).long()
            elif correct_label == ['No']: label = torch.tensor([0,1,0], device = device).long()
            else: label = torch.tensor([0,0,1], device = device).long()
            # input_ids = text_tokenized
            # else : label = torch.tensor([0,0], device = device).long()
        # elif q_type == 'YN' and candidate == ['babi']:
        #     label = torch.tensor([1,0], device = device).long() if correct_label == ['Yes'] else torch.tensor([0,1], device = device).long()
        #     input_ids = [encoding["input_ids"]]
        labels += [label]
    labels = torch.stack(labels).to(device)
    # print('$', correct_labels)
    # print('$$', labels, type(labels))
    # input_ids = torch.tensor(input_ids, device = device)
    # print('input shape, label shape',text_tokenized.shape, qs_tokenized.shape, labels.shape)
    if other == 'supervised':
        _outputs = model(text_tokenized, qs_tokenized, story_annotations = story_annot, questions_annotations = qs_annot, labels=labels)
    else:
        _outputs = model(text_tokenized, qs_tokenized, labels=labels)
    # print('&&&&&&&&&& outputs', outputs)
    losses, outs = [], []
    # Decode each question's logits into an answer.
    for outputs in _outputs:
        loss, logits = outputs[:2]
        # print('$$', loss)
        losses += [loss]
        # print("loss, logits ", loss, logits)
        out_logit = [torch.argmax(log) for log in logits]
        out = [0]
        if q_type == 'FR':
            out = [ind for ind,o in enumerate(out_logit) if o.item() == 1]
            # Mutually exclusive relation pairs (slots 2/3, 0/1, 4/5):
            # keep only the one with the higher positive-class logit.
            if 2 in out and 3 in out:
                if logits[2][1] >= logits[3][1]:
                    out.remove(3)
                else:
                    out.remove(2)
            if 0 in out and 1 in out:
                if logits[0][1] >= logits[1][1]:
                    out.remove(1)
                else:
                    out.remove(0)
            if 4 in out and 5 in out:
                if logits[4][1] >= logits[5][1]:
                    out.remove(5)
                else:
                    out.remove(4)
            # Slot 7 = "no relation predicted".
            if out == []: out = [7]
        elif q_type == 'FB':
            blocks = ['A', 'B', 'C']
            out = [blocks[ind] for ind,o in enumerate(out_logit) if o.item() == 1]
            # out = [blocks[ind] for ind,o in enumerate(out_logit) if o.item() == 1]
            # if 'C' in out and 'C' not in candidate: out.remove('C')
        elif q_type == 'CO':
            out = [ind for ind,o in enumerate(out_logit) if o.item() == 1]
            # Both candidates predicted -> "both" (2); none -> "neither" (3).
            if 0 in out and 1 in out:
                out = [2]
            elif out == []: out = [3]
        elif q_type == 'YN' and other == 'multiple_class':
            max_arg = torch.argmax(logits)
            # print(correct_label, logits, max_arg)
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and other == 'DK' and (candidates == ['babi'] or candidates == ['boolq']):
            # print('logits: ', logits)
            # Only the first two rows (Yes/No) compete.
            max_arg = torch.argmax(logits[:2, 1])
            # print("2", max_arg )
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
        elif q_type == 'YN' and other == 'DK':
            max_arg = torch.argmax(logits[:, 1])
            # print("2", max_arg , logits)
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and other == 'noDK':
            max_arg = torch.argmax(logits[:, 1])
            # print("2", max_arg)
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            # else: out = ['DK']
        elif q_type == 'YN' and other == 'change_model':
            # max_arg = torch.argmax(logits[:, 1])
            # if max_arg.item() == 0: out = ['Yes']
            # elif max_arg.item() == 1: out = ['No']
            max_arg = torch.argmax(logits[:, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and candidates != ['babi']:
            max_arg = torch.argmax(logits[:, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
            # if out_logit[0] == out_logit[1]:
            #     if out_logit[0].item() == 0: out = ['DK']
            #     else:
            #         max_arg = torch.argmax(logits[: , 1])
            #         out = ['Yes'] if max_arg.item() == 0 else ['No']
            # else: out = ['Yes'] if out_logit[0].item() == 1 else ['No']
        elif q_type == 'YN' and (candidates == ['babi'] or candidates == ['boolq']):
            max_arg = torch.argmax(logits[:, 1])
            out = ['Yes'] if max_arg.item() == 0 else ['No']
        outs += [out]
    losses = torch.stack(losses)
    # print('out logit: ', outs, losses)
    return losses, outs #, out_logit
def boolean_classification_addSpRL(model, questions, text, q_type, candidates ,correct_labels, other, device, seperate = False, gt_triplets = None, dataset = None):
    """QA classification with an auxiliary spatial-role (SpRL) extraction head.

    Tokenizes the story (optionally sentence-by-sentence) and the questions,
    builds one gold label tensor per question according to q_type
    ('CO' choose-object, 'FB' find-block, 'FR' find-relation, 'YN' yes/no),
    runs the joint model, and decodes the per-question logits back into
    answers.

    Args:
        model: joint QA + triplet-extraction model; must expose an `options`
            attribute (a collection of mode strings such as "q+s").
        questions: list of question strings.
        text: the story text (str), or a list of sentences for the stepgame /
            sprlqa datasets.
        q_type: one of 'CO', 'FB', 'FR', 'YN'.
        candidates: per-question candidate answers (usage depends on q_type).
        correct_labels: per-question gold answers.
        other: YN decoding mode ('DK', 'noDK', 'multiple_class',
            'change_model').
        device: torch device string.
        seperate: split the story into individual sentences before encoding.
        gt_triplets: optional gold triplets forwarded to the model.
        dataset: dataset name; 'stepgame' switches FR to single-label encoding.

    Returns:
        (losses, outs, extracted_triplets_index): stacked per-question losses,
        decoded answers, and the triplet spans extracted by the model.
    """
    # BUGFIX: this was `attention_mask_s = None,` — the trailing comma made it
    # the 1-tuple (None,), which then reached the model whenever the
    # sentence-splitting branch below was skipped.
    attention_mask_s = None
    attention_mask_q = None
    if seperate or "q+s" not in model.options :
        #seperate each sentence
        if dataset in ["stepgame", "sprlqa"]:
            # These datasets already provide the story as a sentence list.
            sentences = text
        else:
            sentences = [h+'.' for h in text.split('. ')]
            # Undo the extra '.' appended to the last sentence.
            sentences[-1] = sentences[-1][:-1]
        _text_tokenized = tokenizer(sentences, return_tensors="pt", padding=True, return_attention_mask=True)
        text_tokenized = _text_tokenized["input_ids"].to(device)
        attention_mask_s = _text_tokenized["attention_mask"].to(device)
    else:
        text_tokenized = tokenizer(text, return_tensors="pt", padding=True)["input_ids"].to(device)
    _qs_tokenized = tokenizer(questions, return_tensors="pt", padding=True, return_attention_mask=True)
    qs_tokenized = _qs_tokenized["input_ids"].to(device)
    attention_mask_q = _qs_tokenized["attention_mask"].to(device)
    input_ids, token_type_ids = [], []
    labels =[]
    # Build one gold label tensor per question.
    for _ind,correct_label in enumerate(correct_labels):
        if q_type == 'CO':
            label = torch.tensor([[0]]*2, device = device).long()
            for opt in candidates[_ind][:2]:
                # NOTE(review): `encoded_options` and `encoding` are undefined
                # here (leftovers from an older non-batched version) — this
                # line raises NameError if the 'CO' path is taken. Verify
                # whether this branch is ever exercised.
                input_ids += [encoded_options + encoding["input_ids"][1:]]
                if correct_label == [0] or correct_label == [2]: label[0][0] = 1
                if correct_label == [1] or correct_label == [2]: label[1][0] = 1
        elif q_type == 'FB':
            label = torch.tensor([[0]]*len(candidates[_ind]), device = device).long()
            candid_tokenized = tokenizer(candidates[_ind], return_tensors="pt", padding=True)["input_ids"].to(device)
            # NOTE(review): indexing label[2] assumes at least three candidates
            # whenever 'C' appears in the gold answer — confirm against callers.
            if 'A' in correct_label: label[0][0] = 1
            if 'B' in correct_label: label[1][0] = 1
            if 'C' in correct_label: label[2][0] = 1
        elif q_type == 'FR':
            if dataset != 'stepgame':
                # Multi-label over 7 relation slots.
                label = torch.tensor([0]*7, device = device).long()
                for ind, opt in enumerate(candidates[_ind][:7]):
                    if ind in correct_label:label[ind] = 1
            else:
                # stepgame is single-label: store the class index directly.
                label = torch.tensor([correct_label], device=device).long()
        elif q_type == 'YN' and other == "noDK":
            # Two-way one-hot: Yes / No.
            if correct_label == ['Yes']: label = torch.tensor([1,0], device = device).long()
            elif correct_label == ['No']: label = torch.tensor([0,1], device = device).long()
        elif q_type == 'YN':
            # Three-way one-hot: Yes / No / Don't-Know.
            if correct_label == ['Yes']: label = torch.tensor([1,0,0], device = device).long()
            elif correct_label == ['No']: label = torch.tensor([0,1,0], device = device).long()
            else: label = torch.tensor([0,0,1], device = device).long()
        labels += [label]
    labels = torch.stack(labels).to(device)
    _outputs, extracted_triplets_index = model(text_tokenized, qs_tokenized, labels=labels, attention_mask_s = attention_mask_s, attention_mask_q = attention_mask_q, gt_triplets = gt_triplets)
    losses, outs = [], []
    # Decode each question's logits into an answer.
    for outputs in _outputs:
        loss, logits = outputs[:2]
        losses += [loss]
        out_logit = [torch.argmax(log) for log in logits]
        out = [0]
        if q_type == 'FR':
            if dataset != 'stepgame':
                out = [ind for ind,o in enumerate(out_logit) if o.item() == 1]
                # Mutually exclusive relation pairs (slots 2/3, 0/1, 4/5):
                # keep only the one with the higher positive-class logit.
                if 2 in out and 3 in out:
                    if logits[2][1] >= logits[3][1]:
                        out.remove(3)
                    else:
                        out.remove(2)
                if 0 in out and 1 in out:
                    if logits[0][1] >= logits[1][1]:
                        out.remove(1)
                    else:
                        out.remove(0)
                if 4 in out and 5 in out:
                    if logits[4][1] >= logits[5][1]:
                        out.remove(5)
                    else:
                        out.remove(4)
                # Slot 7 = "no relation predicted".
                if out == []: out = [7]
            else:
                out = [out_logit[0].item()]
        elif q_type == 'FB':
            blocks = ['A', 'B', 'C']
            out = [blocks[ind] for ind,o in enumerate(out_logit) if o.item() == 1]
        elif q_type == 'CO':
            out = [ind for ind,o in enumerate(out_logit) if o.item() == 1]
            # Both candidates predicted -> "both" (2); none -> "neither" (3).
            if 0 in out and 1 in out:
                out = [2]
            elif out == []: out = [3]
        elif q_type == 'YN' and other == 'multiple_class':
            max_arg = torch.argmax(logits)
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and other == 'DK' and (candidates == ['babi'] or candidates == ['boolq']):
            # Only the first two rows (Yes/No) compete.
            max_arg = torch.argmax(logits[:2, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
        elif q_type == 'YN' and other == 'DK':
            max_arg = torch.argmax(logits[:, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and other == 'noDK':
            max_arg = torch.argmax(logits[:, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
        elif q_type == 'YN' and other == 'change_model':
            max_arg = torch.argmax(logits[:, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and candidates != ['babi']:
            max_arg = torch.argmax(logits[:, 1])
            if max_arg.item() == 0: out = ['Yes']
            elif max_arg.item() == 1: out = ['No']
            else: out = ['DK']
        elif q_type == 'YN' and (candidates == ['babi'] or candidates == ['boolq']):
            max_arg = torch.argmax(logits[:, 1])
            out = ['Yes'] if max_arg.item() == 0 else ['No']
        outs += [out]
    losses = torch.stack(losses)
    return losses, outs, extracted_triplets_index
def tokenizing(text):
    """Tokenize *text* with the module-level tokenizer and return the token list."""
    return tokenizer.tokenize(text)
| 57,750 | 40.398566 | 215 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/BertModels.py | # from transformers import BertPreTrainedModel, BertModel, BertOnlyMLMHead
from transformers import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss, BCELoss
from typing import Union, List
import numpy
from torch.autograd import Variable
class FocalLoss(nn.Module):
    r"""Focal loss for multi-class classification.

    Implements the criterion proposed in "Focal Loss for Dense Object
    Detection" (Lin et al.):

        Loss(x, class) = -alpha * (1 - softmax(x)[class])**gamma * log(softmax(x)[class])

    Losses are averaged (or summed, see ``size_average``) over the minibatch.

    Args:
        class_num: number of classes C.
        alpha: per-class weighting factor, shape (C, 1); ``None`` means a
            uniform weight of 1 for every class.
        gamma: focusing parameter (> 0); larger values down-weight
            well-classified examples (p > .5) and focus on hard ones.
        size_average: if True return the batch mean, otherwise the batch sum.
    """
    def __init__(self, class_num, alpha=None, gamma=2, size_average=True):
        super(FocalLoss, self).__init__()
        if alpha is None:
            # Uniform class weights.
            self.alpha = torch.ones(class_num, 1)
        else:
            # torch.autograd.Variable is a deprecated alias of Tensor, so
            # accepting any Tensor keeps old callers working.
            self.alpha = alpha if isinstance(alpha, torch.Tensor) else torch.as_tensor(alpha)
        self.gamma = gamma
        self.class_num = class_num
        self.size_average = size_average
    def forward(self, inputs, targets):
        """Compute the focal loss.

        Args:
            inputs: (N, C) unnormalized logits.
            targets: (N,) gold class indices.

        Returns:
            Scalar tensor: mean (or sum) focal loss over the batch.
        """
        N = inputs.size(0)
        C = inputs.size(1)
        # Explicit dim=1: the original relied on the deprecated implicit-dim
        # softmax, which warns and may change behavior across torch versions.
        P = F.softmax(inputs, dim=1)
        # One-hot mask selecting the target class in each row.
        class_mask = inputs.data.new(N, C).fill_(0)
        ids = targets.view(-1, 1)
        class_mask.scatter_(1, ids.data, 1.)
        if inputs.is_cuda and not self.alpha.is_cuda:
            self.alpha = self.alpha.cuda()
        # Per-sample class weight, shape (N, 1).
        alpha = self.alpha[ids.data.view(-1)]
        # p_t: predicted probability of the true class for each sample.
        probs = (P*class_mask).sum(1).view(-1,1)
        log_p = probs.log()
        batch_loss = -alpha*(torch.pow((1-probs), self.gamma))*log_p
        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss.sum()
        return loss
# ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new, "mish": mish}
# class BertPredictionHeadTransform(nn.Module):
# def __init__(self, config):
# super().__init__()
# self.dense = nn.Linear(config.hidden_size, config.hidden_size)
# if isinstance(config.hidden_act, str):
# self.transform_act_fn = ACT2FN[config.hidden_act]
# else:
# self.transform_act_fn = config.hidden_act
# self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
# def forward(self, hidden_states):
# hidden_states = self.dense(hidden_states)
# hidden_states = self.transform_act_fn(hidden_states)
# hidden_states = self.LayerNorm(hidden_states)
# return hidden_states
# class BertLMPredictionHead(nn.Module):
# def __init__(self, config):
# super().__init__()
# self.transform = BertPredictionHeadTransform(config)
# # The output weights are the same as the input embeddings, but there is
# # an output-only bias for each token.
# self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
# self.decoder.bias = self.bias
# def forward(self, hidden_states):
# hidden_states = self.transform(hidden_states)
# hidden_states = self.decoder(hidden_states)
# return hidden_states
# class BertOnlyMLMHead(nn.Module):
# def __init__(self, config):
# super().__init__()
# self.predictions = BertLMPredictionHead(config)
# def forward(self, sequence_output):
# prediction_scores = self.predictions(sequence_output)
# return prediction_scores
def sequence_cross_entropy_with_logits(logits: torch.Tensor,
                                       targets: torch.Tensor,
                                       weights: torch.Tensor,
                                       average: str = "batch",
                                       label_smoothing: float = None,
                                       gamma: float = None,
                                       eps: float = 1e-8,
                                       alpha: Union[float, List[float], torch.FloatTensor] = None
                                       ) -> torch.FloatTensor:
    """
    Computes the cross entropy loss of a sequence, weighted with respect to
    some user provided weights. Note that the weighting here is not the same as
    in the :func:`torch.nn.CrossEntropyLoss()` criterion, which is weighting
    classes; here we are weighting the loss contribution from particular elements
    in the sequence. This allows loss computations for models which use padding.
    Parameters
    ----------
    logits : ``torch.FloatTensor``, required.
        A ``torch.FloatTensor`` of size (batch_size, sequence_length, num_classes)
        which contains the unnormalized probability for each class.
    targets : ``torch.LongTensor``, required.
        A ``torch.LongTensor`` of size (batch, sequence_length) which contains the
        index of the true class for each corresponding step.
    weights : ``torch.FloatTensor``, required.
        A ``torch.FloatTensor`` of size (batch, sequence_length)
    average: str, optional (default = "batch")
        If "batch", average the loss across the batches. If "token", average
        the loss across each item in the input. If ``None``, return a vector
        of losses per batch element.
    label_smoothing : ``float``, optional (default = None)
        Whether or not to apply label smoothing to the cross-entropy loss.
        For example, with a label smoothing value of 0.2, a 4 class classification
        target would look like ``[0.05, 0.05, 0.85, 0.05]`` if the 3rd class was
        the correct label.
    gamma : ``float``, optional (default = None)
        Focal loss[*] focusing parameter ``gamma`` to reduces the relative loss for
        well-classified examples and put more focus on hard. The greater value
        ``gamma`` is, the more focus on hard examples.
    eps : ``float``, optional (default = 1e-8)
        Probability clamp used in the focal term to avoid log(0)/pow underflow.
    alpha : ``float`` or ``List[float]``, optional (default = None)
        Focal loss[*] weighting factor ``alpha`` to balance between classes. Can be
        used independently with ``gamma``. If a single ``float`` is provided, it
        is assumed binary case using ``alpha`` and ``1 - alpha`` for positive and
        negative respectively. If a list of ``float`` is provided, with the same
        length as the number of classes, the weights will match the classes.
    [*] T. Lin, P. Goyal, R. Girshick, K. He and P. Dollár, "Focal Loss for
    Dense Object Detection," 2017 IEEE International Conference on Computer
    Vision (ICCV), Venice, 2017, pp. 2999-3007.
    Returns
    -------
    A torch.FloatTensor representing the cross entropy loss.
    If ``average=="batch"`` or ``average=="token"``, the returned loss is a scalar.
    If ``average is None``, the returned loss is a vector of shape (batch_size,).
    Raises
    ------
    ValueError
        If ``average`` is not one of ``None``, ``"token"``, or ``"batch"``.
    """
    if average not in {None, "token", "batch"}:
        # Bug fix: the original had the 'f' prefix *inside* the string
        # ("Got average f{average}"), so the message never showed the value.
        raise ValueError(f"Got average {average}, expected one of "
                         "None, 'token', or 'batch'")
    # make sure weights are float
    weights = weights.float()
    # sum all dim except batch
    non_batch_dims = tuple(range(1, len(weights.shape)))
    # shape : (batch_size,)
    weights_batch_sum = weights.sum(dim=non_batch_dims)
    # shape : (batch * sequence_length, num_classes)
    logits_flat = logits.view(-1, logits.size(-1))
    # shape : (batch * sequence_length, num_classes)
    log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)
    # shape : (batch * max_len, 1)
    targets_flat = targets.view(-1, 1).long()
    # focal loss coefficient
    if gamma:
        # shape : (batch * sequence_length, num_classes)
        probs_flat = log_probs_flat.exp()
        # clamp probabilities into (eps, 1 - eps) so the focal power and the
        # log term stay numerically stable
        eps = torch.tensor(eps, device=probs_flat.device)
        probs_flat = probs_flat.min(1 - eps)
        probs_flat = probs_flat.max(eps)
        # shape : (batch * sequence_length,)
        probs_flat = torch.gather(probs_flat, dim=1, index=targets_flat)
        # shape : (batch * sequence_length,)
        focal_factor = (1. - probs_flat) ** gamma
        # shape : (batch, sequence_length)
        focal_factor = focal_factor.view(*targets.size())
        weights = weights * focal_factor
    if alpha is not None:
        # shape : () / (num_classes,)
        if isinstance(alpha, (float, int)):
            # binary case: weight classes (0, 1) as (1 - alpha, alpha)
            # pylint: disable=not-callable
            # shape : (2,)
            alpha_factor = torch.tensor([1. - float(alpha), float(alpha)],
                                        dtype=weights.dtype, device=weights.device)
            # pylint: enable=not-callable
        elif isinstance(alpha, (list, numpy.ndarray, torch.Tensor)):
            # pylint: disable=not-callable
            # shape : (c,)
            alpha_factor = torch.tensor(alpha, dtype=weights.dtype, device=weights.device)
            # pylint: enable=not-callable
            if not alpha_factor.size():
                # shape : (1,)
                alpha_factor = alpha_factor.view(1)
                # shape : (2,)
                alpha_factor = torch.cat([1 - alpha_factor, alpha_factor])
        else:
            raise TypeError(('alpha must be float, list of float, or torch.FloatTensor, '
                             '{} provided.').format(type(alpha)))
        # shape : (batch, max_len)
        alpha_factor = torch.gather(alpha_factor, dim=0, index=targets_flat.view(-1)).view(*targets.size())
        weights = weights * alpha_factor
    if label_smoothing is not None and label_smoothing > 0.0:
        num_classes = logits.size(-1)
        smoothing_value = label_smoothing / num_classes
        # Fill all the correct indices with 1 - smoothing value.
        one_hot_targets = torch.zeros_like(log_probs_flat).scatter_(-1, targets_flat, 1.0 - label_smoothing)
        smoothed_targets = one_hot_targets + smoothing_value
        negative_log_likelihood_flat = - log_probs_flat * smoothed_targets
        negative_log_likelihood_flat = negative_log_likelihood_flat.sum(-1, keepdim=True)
    else:
        # Contribution to the negative log likelihood only comes from the exact indices
        # of the targets, as the target distributions are one-hot. Here we use torch.gather
        # to extract the indices of the num_classes dimension which contribute to the loss.
        # shape : (batch * sequence_length, 1)
        negative_log_likelihood_flat = - torch.gather(log_probs_flat, dim=1, index=targets_flat)
    # shape : (batch, sequence_length)
    negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())
    # shape : (batch, sequence_length)
    negative_log_likelihood = negative_log_likelihood * weights
    if average == "batch":
        # shape : (batch_size,)
        per_batch_loss = negative_log_likelihood.sum(non_batch_dims) / (weights_batch_sum + 1e-13)
        num_non_empty_sequences = ((weights_batch_sum > 0).float().sum() + 1e-13)
        return per_batch_loss.sum() / num_non_empty_sequences
    elif average == "token":
        return negative_log_likelihood.sum() / (weights_batch_sum.sum() + 1e-13)
    else:
        # shape : (batch_size,)
        per_batch_loss = negative_log_likelihood.sum(non_batch_dims) / (weights_batch_sum + 1e-13)
        return per_batch_loss
class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT with a span-classification head for extractive question answering.

    Each token's hidden state is projected to two scores (span start / span
    end). When both ``start_positions`` and ``end_positions`` are supplied,
    the averaged cross-entropy loss over the two boundaries is prepended to
    the outputs.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
    ):
        encoder_out = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Per-token projection to (start, end) scores, then split the pair.
        span_scores = self.qa_outputs(encoder_out[0])
        start_logits, end_logits = (t.squeeze(-1) for t in span_scores.split(1, dim=-1))
        outputs = (start_logits, end_logits,) + encoder_out[2:]
        if start_positions is not None and end_positions is not None:
            # Multi-GPU gather can leave a trailing singleton dim; drop it.
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1)
            # Positions outside the sequence get clamped to an index that the
            # loss is told to ignore.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            total_loss = (loss_fct(start_logits, start_positions)
                          + loss_fct(end_logits, end_positions)) / 2
            outputs = (total_loss,) + outputs
        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)
# class BertForMultipleChoice(BertPreTrainedModel):
# def __init__(self, config, device = 'cuda:0', no_dropout = False):
# super().__init__(config)
# if no_dropout:
# config.hidden_dropout_prob = 0.0
# config.attention_probs_dropout_prob = 0.0
# self.device = device
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.classifier = nn.Linear(config.hidden_size, 1)
# # nn.Sequential( nn.Linear(config.hidden_size, 2 * config.hidden_size),
# # nn.LeakyReLU(),
# # nn.Linear(2 * config.hidden_size, config.hidden_size),
# # nn.Tanh(),
# # nn.Linear(config.hidden_size, 1))
# # self.alphas = torch.tensor([[0.5, 0.25, 0.25] ]).to(self.device)
# self.rnn = nn.LSTM(config.hidden_size, int(config.hidden_size/2), 1, bidirectional=True)
# self.init_weights()
# #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# num_choices = input_ids.shape[1]
# input_ids = input_ids.view(-1, input_ids.size(-1))
# attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
# token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
# position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# print(pooled_output.shape)
# # pooled_output, _ = self.rnn(pooled_output)
# # print(pooled_output.shape)
# # pooled_output = pooled_output[:, -1, :]
# pooled_output = self.dropout(pooled_output)
# print(pooled_output.shape)
# logits = self.classifier(pooled_output)
# reshaped_logits = logits.view(-1, num_choices)
# outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here
# if labels is not None:
# loss_fct = CrossEntropyLoss()
# loss = loss_fct(reshaped_logits, labels)
# outputs = (loss,) + outputs
# return outputs # (loss), reshaped_logits, (hidden_st
class BertForMultipleClass(BertPreTrainedModel):
    """BERT pooled-output classifier with a focal loss over ``num_classes``.

    The active head is ``self.classifiers`` (a single linear layer).
    ``self.classifier`` is also created but never used in forward(); it is
    kept so existing checkpoints still load.

    Args:
        config: BertConfig for the backbone.
        device: device string the focal-loss alpha tensor is moved to.
        no_dropout: if True, zero out all dropout probabilities.
        num_classes: number of output classes.
        qtype: 'YN' uses hand-tuned class weights, 'FR' uses uniform 0.125.
            Any other value leaves ``self.alphas`` unset and raises later.
    """

    def __init__(self, config, device = 'cuda:0', no_dropout = False, num_classes = 3, qtype = 'YN'):
        super().__init__(config)
        if no_dropout:
            # Disable dropout entirely, e.g. for deterministic evaluation.
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = num_classes
        self.qtype = qtype
        # Unused head, retained for checkpoint compatibility.
        self.classifier = nn.Linear(config.hidden_size, self.num_classes)
        if self.qtype == 'YN':
            # Hand-tuned per-class focal-loss weights for yes/no/unknown.
            self.alphas = torch.tensor([[0.67, 1.3, 1.32]]).to(self.device1)
        elif self.qtype == "FR":
            self.alphas = torch.tensor([0.125]*self.num_classes).to(self.device1)
        self.classifiers = nn.Linear(config.hidden_size, self.num_classes)
        self.criterion = FocalLoss(alpha=self.alphas, class_num=self.num_classes, gamma = 2)
        self.sigmoid = nn.Sigmoid()
        # NOTE(review): nn.Softmax() without dim uses the deprecated
        # implicit-dimension behavior (dim=1 on the 2-D logits here).
        self.softmax = nn.Softmax()
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Classify the pooled [CLS] representation.

        Returns ``(loss, probs) + extras`` when ``labels`` is given, else
        ``(probs,) + extras``. Bug fix: the original assembled
        ``(loss, out_logits, ...)`` unconditionally, raising NameError
        whenever ``labels`` was None.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        pooled_output = self.dropout(outputs[1])
        logit = self.classifiers(pooled_output)
        out_logits = self.softmax(logit)
        if labels is not None:
            loss = self.criterion(logit, labels)
            outputs = (loss, out_logits,) + outputs[2:]
        else:
            # No labels: return probabilities without a loss term.
            outputs = (out_logits,) + outputs[2:]
        return outputs
class BertForMultipleClassLoad(BertPreTrainedModel):
    """Checkpoint-loading variant of BertForMultipleClass.

    Behaviorally identical to BertForMultipleClass but with '1'-suffixed
    attribute names (``dropout1``, ``classifiers1``, ``criterion1``,
    ``softmax1``) so checkpoints saved under those names load correctly.
    ``classifier1`` is created but unused in forward(); kept for checkpoint
    compatibility.
    """

    def __init__(self, config, device = 'cuda:0', no_dropout = False, num_classes = 3, qtype = 'YN'):
        super().__init__(config)
        if no_dropout:
            # Disable dropout entirely, e.g. for deterministic evaluation.
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout1 = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes1 = num_classes
        self.qtype = qtype
        # Unused head, retained for checkpoint compatibility.
        self.classifier1 = nn.Linear(config.hidden_size, self.num_classes1)
        if self.qtype == 'YN':
            # Hand-tuned per-class focal-loss weights for yes/no/unknown.
            self.alphas = torch.tensor([[0.67, 1.3, 1.32]]).to(self.device1)
        elif self.qtype == "FR":
            self.alphas = torch.tensor([0.125]*self.num_classes1).to(self.device1)
        self.classifiers1 = nn.Linear(config.hidden_size, self.num_classes1)
        self.criterion1 = FocalLoss(alpha=self.alphas, class_num=self.num_classes1, gamma = 2)
        self.sigmoid = nn.Sigmoid()
        # NOTE(review): nn.Softmax() without dim uses the deprecated
        # implicit-dimension behavior (dim=1 on the 2-D logits here).
        self.softmax1 = nn.Softmax()
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Classify the pooled [CLS] representation.

        Returns ``(loss, probs) + extras`` when ``labels`` is given, else
        ``(probs,) + extras``. Bug fix: the original assembled
        ``(loss, out_logits, ...)`` unconditionally, raising NameError
        whenever ``labels`` was None.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        pooled_output = self.dropout1(outputs[1])
        logit = self.classifiers1(pooled_output)
        out_logits = self.softmax1(logit)
        if labels is not None:
            loss = self.criterion1(logit, labels)
            outputs = (loss, out_logits,) + outputs[2:]
        else:
            # No labels: return probabilities without a loss term.
            outputs = (out_logits,) + outputs[2:]
        return outputs
# class BertForBooleanQuestion(BertPreTrainedModel):
# def __init__(self, config):
# super().__init__(config)
# # config.hidden_dropout_prob = 0.0
# # config.attention_probs_dropout_prob = 0.0
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.num_classes = 2
# # self.classifier = nn.Sequential( nn.Linear(config.hidden_size, 2 * config.hidden_size),
# # nn.LeakyReLU(),
# # nn.Linear(2 * config.hidden_size, config.hidden_size),
# # nn.LeakyReLU(),
# # nn.Linear(config.hidden_size, config.hidden_size),
# # nn.LeakyReLU(),
# # nn.Linear(config.hidden_size, self.num_classes))
# self.classifier = nn.Linear(config.hidden_size, self.num_classes)
# self.alphas = torch.tensor([[0.22, 0.78], [0.21, 0.79], [0.22, 0.73], [0.26, 0.74], [0.07, 0.93], [0.2, 0.98], [0.2, 0.98]]).to('cuda')
# classifiers = []
# self.criterion = []
# for item in range(7):
# classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
# self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
# self.classifiers = nn.ModuleList(classifiers)
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax()
# self.init_weights()
# #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# token_type_ids=None,
# position_ids=None,
# head_mask=None,
# inputs_embeds=None,
# labels=None,
# ):
# r"""
# labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
# Labels for computing the multiple choice classification loss.
# Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
# of the input tensors. (see `input_ids` above)
# Returns:
# :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
# loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):
# Classification loss.
# classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
# `num_choices` is the second dimension of the input tensors. (see `input_ids` above).
# Classification scores (before SoftMax).
# hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
# Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
# of shape :obj:`(batch_size, sequence_length, hidden_size)`.
# Hidden-states of the model at the output of each layer plus the initial embedding outputs.
# attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
# Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
# :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
# Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
# heads.
# Examples::
# from transformers import BertTokenizer, BertForMultipleChoice
# import torch
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
# choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
# input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
# labels = torch.tensor(1).unsqueeze(0) # Batch size 1
# outputs = model(input_ids, labels=labels)
# loss, classification_scores = outputs[:2]
# """
# outputs = self.bert(
# input_ids,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
# position_ids=position_ids,
# head_mask=head_mask,
# inputs_embeds=inputs_embeds,
# )
# pooled_output = outputs[1]
# # print(pooled_output.shape)
# # pooled_output, _ = self.rnn(pooled_output)
# # print(pooled_output.shape)
# # pooled_output = pooled_output[:, -1, :]
# pooled_output = self.dropout(pooled_output)
# # print(pooled_output.shape)
# logits = []
# for ind, logit in enumerate(pooled_output):
# logit = self.classifiers[ind](pooled_output[ind])
# logits.append(logit)
# # if self.num_classes == 1:
# # logits = self.sigmoid(logits)
# # else:
# # logits = self.softmax(logits)
# # add hidden states and attention if they are here
# if labels is not None:
# # print('hello')
# # loss_fct = CrossEntropyLoss(weight=torch.FloatTensor([13/100,87/100]).to('cuda:5'))
# # loss = loss_fct(logits, labels)
# # weights = []
# # ones = len(labels[labels==1])
# # zeros = len(labels[labels==0])
# # for label in labels:
# # if label.item() == 0:
# # weights.append(ones/len(labels))
# # else:
# # weights.append(zeros/len(labels))
# # weights = torch.tensor(weights).float().to('cuda:5')
# loss = 0
# out_logits = []
# for ind, logit in enumerate(logits):
# weights = torch.ones(2).float().to('cuda')
# alpha = self.alphas[ind].to('cuda')
# # weights = torch.tensor(weights).float().to('cuda:5')
# # loss += sequence_cross_entropy_with_logits(logit.unsqueeze(0), labels[ind].unsqueeze(0), weights, alpha=alpha, gamma=2, label_smoothing=0.1)
# loss += self.criterion[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
# out_logits.append(self.softmax(logit))
# outputs = (loss, torch.stack(out_logits),) + outputs[2:]
# # outputs = (,) + outputs
# return outputs # (loss), reshaped_logits, (hidden_st
class BertForBooleanQuestionFR(BertPreTrainedModel):
    """Multi-head binary classifier for free-response (FR) boolean labels.

    One linear head plus one focal-loss criterion per label.
    NOTE(review): forward() applies head ``i`` to batch element ``i`` (not
    every head to every element), so it implicitly assumes the batch size
    equals ``num_labels`` — confirm against the caller.
    """
    def __init__(self, config, device = 'cuda:0', no_dropout = False, num_labels = 7):
        super().__init__(config)
        if no_dropout:
            # Zero all dropout, e.g. for deterministic evaluation runs.
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device2 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        self.num_labels = num_labels
        # self.classifier = nn.Linear(config.hidden_size, self.num_classes)
        if self.num_labels == 7:
            # Hand-tuned per-label (negative, positive) focal-loss weights
            # for the 7-label configuration; rarer labels weigh positives more.
            self.alphas = torch.tensor([[0.20, 0.8], [0.20, 0.8], [0.25, 0.75], [0.4, 0.6], [0.1, 0.9], [0.02, 0.98], [0.02, 0.98]]).to(self.device2)
        else:
            # Uniform class weights for any other label count.
            self.alphas = torch.tensor([[0.5, 0.5]]*self.num_labels).to(self.device2)
        classifiers = []
        self.criterion = []
        for item in range(self.num_labels):
            classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
            self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
        self.classifiers = nn.ModuleList(classifiers)
        self.sigmoid = nn.Sigmoid()
        # NOTE(review): nn.Softmax() without dim relies on the deprecated
        # implicit-dimension behavior; inputs here are 1-D per example.
        self.softmax = nn.Softmax()
        self.init_weights()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss, stacked_probs) + extras`` when ``labels`` is given.

        When ``labels`` is None the raw BertModel output tuple is returned
        unchanged — no logits are exposed in that case.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        # print(pooled_output.shape)
        logits = []
        # Head ``ind`` scores only batch element ``ind`` (see class NOTE).
        for ind, logit in enumerate(pooled_output):
            logit = self.classifiers[ind](pooled_output[ind])
            logits.append(logit)
        # for check on YN
        # for ind in range(7):
        #     logit = self.classifiers[ind](pooled_output)
        #     logits.append(logit)
        # print("FR",logits)
        if labels is not None:
            loss = 0
            out_logits = []
            for ind, logit in enumerate(logits):
                # unsqueeze(0) fakes a batch of one for the per-label loss
                loss += self.criterion[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
                out_logits.append(self.softmax(logit))
            outputs = (loss, torch.stack(out_logits),) + outputs[2:]
        return outputs # (loss), reshaped_logits, (hidden_st
class BertForBooleanQuestionFB(BertPreTrainedModel):
    """Binary classifier over BERT token states pooled by a BiLSTM (FB task).

    NOTE(review): ``bert_answer``, ``l1`` and ``l2`` are created but never
    used in forward(); they still occupy memory and checkpoint slots.
    """
    def __init__(self, config, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Zero all dropout, e.g. for deterministic evaluation runs.
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device2 = device
        self.bert = BertModel(config)
        self.bert_answer = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        self.classifier = nn.Linear(config.hidden_size, self.num_classes)
        # Balanced (negative, positive) weights for the focal loss.
        self.alpha = torch.tensor([0.5, 0.5]).to(self.device2)
        self.criterion = FocalLoss(alpha=self.alpha, class_num=self.num_classes, gamma = 2)
        # Bidirectional LSTM whose concatenated directions match hidden_size.
        self.rnn = nn.LSTM(config.hidden_size, int(config.hidden_size/2), 1, bidirectional=True)
        self.l1 = nn.Linear(config.hidden_size, config.hidden_size)
        self.l2 = nn.Linear(config.hidden_size, config.hidden_size)
        self.sigmoid = nn.Sigmoid()
        # NOTE(review): nn.Softmax() without dim relies on the deprecated
        # implicit-dimension behavior (dim=1 on the 2-D logits here).
        self.softmax = nn.Softmax()
        self.init_weights()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        options=None
    ):
        """Return ``(loss, probs) + extras`` when ``labels`` is given.

        When ``labels`` is None the raw BertModel output tuple is returned
        unchanged. ``options`` is accepted but unused.
        NOTE(review): the LSTM is constructed without batch_first=True while
        BERT's sequence output is batch-first — the time/batch axes may be
        swapped here; confirm intended behavior.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[0] is the full per-token sequence output, not the pooler.
        pooled_output = outputs[0]
        pooled_output = self.dropout(pooled_output)
        pooled_output = self.rnn(pooled_output)
        # Keep the last step of each row of the LSTM output.
        pooled_output = torch.stack([pooled[-1] for pooled in pooled_output])
        logits = self.classifier(pooled_output)
        if labels is not None:
            loss = self.criterion(logits, labels)
            # print(loss)
            out_logits = self.softmax(logits)
            outputs = (loss, out_logits,) + outputs[2:]
        # outputs = (,) + outputs
        return outputs
class BertForBooleanQuestionCO(BertPreTrainedModel):
    """Binary classifier over BERT token states pooled by a BiLSTM (CO task).

    Same architecture as BertForBooleanQuestionFB but with class weights
    (0.35, 0.65). NOTE(review): ``bert_answer``, ``l1`` and ``l2`` are
    created but never used in forward().
    """
    def __init__(self, config, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Zero all dropout, e.g. for deterministic evaluation runs.
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device2 = device
        self.bert = BertModel(config)
        self.bert_answer = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        self.classifier = nn.Linear(config.hidden_size, self.num_classes)
        # (negative, positive) focal-loss weights; positives weighted higher.
        self.alpha = torch.tensor([0.35, 0.65]).to(self.device2)
        self.criterion = FocalLoss(alpha=self.alpha, class_num=self.num_classes, gamma = 2)
        # Bidirectional LSTM whose concatenated directions match hidden_size.
        self.rnn = nn.LSTM(config.hidden_size, int(config.hidden_size/2), 1, bidirectional=True)
        self.l1 = nn.Linear(config.hidden_size, config.hidden_size)
        self.l2 = nn.Linear(config.hidden_size, config.hidden_size)
        self.sigmoid = nn.Sigmoid()
        # NOTE(review): nn.Softmax() without dim relies on the deprecated
        # implicit-dimension behavior (dim=1 on the 2-D logits here).
        self.softmax = nn.Softmax()
        self.init_weights()
    #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        options=None
    ):
        """Return ``(loss, probs) + extras`` when ``labels`` is given.

        When ``labels`` is None the raw BertModel output tuple is returned
        unchanged. ``options`` is accepted but unused.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[0] is the full per-token sequence output, not the pooler.
        pooled_output = outputs[0]
        pooled_output = self.dropout(pooled_output)
        pooled_output = self.rnn(pooled_output)
        # Keep the last step of each row of the LSTM output.
        pooled_output = torch.stack([pooled[-1] for pooled in pooled_output])
        logits = self.classifier(pooled_output)
        if labels is not None:
            loss = self.criterion(logits, labels)
            out_logits = self.softmax(logits)
            outputs = (loss, out_logits,) + outputs[2:]
        return outputs
class BertForBooleanQuestionBabi(BertPreTrainedModel):
    """Binary classifier on the pooled BERT output for the bAbI task."""
    def __init__(self, config, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Zero all dropout, e.g. for deterministic evaluation runs.
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        self.classifier = nn.Linear(config.hidden_size, self.num_classes)
        self.alpha = torch.tensor([0.5, 0.5]).to(self.device)
        # NOTE(review): this FocalLoss is dead — the next line immediately
        # replaces self.criterion with plain CrossEntropyLoss.
        self.criterion = FocalLoss(alpha=self.alpha, class_num=self.num_classes, gamma = 2)
        self.criterion = CrossEntropyLoss()
        self.sigmoid = nn.Sigmoid()
        # NOTE(review): nn.Softmax() without dim relies on the deprecated
        # implicit-dimension behavior (dim=1 on the 2-D logits here).
        self.softmax = nn.Softmax()
        self.init_weights()
    #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss, probs) + extras`` when ``labels`` is given.

        When ``labels`` is None the raw BertModel output tuple is returned
        unchanged — no logits are exposed in that case.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[1] is the pooled [CLS] representation.
        pooled_output = outputs[1]
        # print('pool out: ', pooled_output)
        pooled_output = self.dropout(pooled_output)
        # print(pooled_output.shape)
        logits = self.classifier(pooled_output)
        # print('logit tu', logits)
        if labels is not None:
            loss = self.criterion(logits, labels)
            logits = self.softmax(logits)
            outputs = (loss, logits,) + outputs[2:]
        return outputs # (loss), reshaped_logits, (hidden_st
class BertForBooleanQuestionYN(BertPreTrainedModel):
    """Three-head binary classifier on the pooled BERT output (YN task).

    Each of the 3 heads scores the whole batch; losses are summed per head.
    NOTE(review): ``self.classifier`` is created but never used in forward().
    NOTE(review): ``logit.squeeze(0)`` in forward() suggests an assumed batch
    size of 1 — confirm against the caller.
    """
    def __init__(self, config, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Zero all dropout, e.g. for deterministic evaluation runs.
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device2 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        self.classifier = nn.Linear(config.hidden_size, self.num_classes)
        # self.alphas = torch.tensor([[0.5, 0.5], [0.25, 0.75], [0.27, 0.73] ]).to(self.device)
        # self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5] ]).to('cuda:3')
        # self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5], [1, 0] ]).to(self.device)
        # Hand-tuned per-head (negative, positive) focal-loss weights.
        self.alphas = torch.tensor([[0.5, 0.5], [0.25, 0.75], [0.27, 0.73] ]).to(self.device2)
        classifiers = []
        self.criterion = []
        for item in range(3):
            classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
            self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
        self.classifiers = nn.ModuleList(classifiers)
        self.sigmoid = nn.Sigmoid()
        # NOTE(review): nn.Softmax() without dim relies on the deprecated
        # implicit-dimension behavior.
        self.softmax = nn.Softmax()
        self.init_weights()
    #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss, stacked_probs) + extras`` when ``labels`` is given.

        When ``labels`` is None the raw BertModel output tuple is returned
        unchanged — no logits are exposed in that case.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = []
        for ind in range(3):
            logit = self.classifiers[ind](pooled_output)
            logits.append(logit.squeeze(0))
        if labels is not None:
            loss = 0
            out_logits = []
            for ind, logit in enumerate(logits):
                # unsqueeze(0) fakes a batch of one for the per-head loss
                loss += self.criterion[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
                out_logits.append(self.softmax(logit))
            outputs = (loss, torch.stack(out_logits),) + outputs[2:]
        return outputs
class BertForBooleanQuestionYN1(BertPreTrainedModel):
    """Checkpoint-loading variant of BertForBooleanQuestionYN.

    Same three-head architecture, with '1'-suffixed attribute names
    (``classifiers1``, ``criterion1``) so checkpoints saved under those
    names load correctly.
    NOTE(review): ``logit.squeeze(0)`` in forward() suggests an assumed
    batch size of 1 — confirm against the caller.
    """
    def __init__(self,config, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Zero all dropout, e.g. for deterministic evaluation runs.
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        self.num_labels = 3
        # Hand-tuned per-head (negative, positive) focal-loss weights.
        self.alphas = torch.tensor([[0.5, 0.5], [0.25, 0.75], [0.27, 0.73] ]).to(self.device1)
        # self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5]]).to(self.device)
        classifiers = []
        self.criterion1 = []
        for item in range(self.num_labels):
            classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
            self.criterion1.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
        self.classifiers1 = nn.ModuleList(classifiers)
        self.sigmoid = nn.Sigmoid()
        # NOTE(review): nn.Softmax() without dim relies on the deprecated
        # implicit-dimension behavior.
        self.softmax = nn.Softmax()
        self.init_weights()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss, stacked_probs) + extras`` when ``labels`` is given.

        When ``labels`` is None the raw BertModel output tuple is returned
        unchanged — no logits are exposed in that case.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = []
        for ind in range(self.num_labels):
            logit = self.classifiers1[ind](pooled_output)
            logits.append(logit.squeeze(0))
        if labels is not None:
            loss = 0
            out_logits = []
            for ind, logit in enumerate(logits):
                # unsqueeze(0) fakes a batch of one for the per-head loss
                loss += self.criterion1[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
                out_logits.append(self.softmax(logit))
            outputs = (loss, torch.stack(out_logits),) + outputs[2:]
        return outputs
class BertForBooleanQuestionCO1(BertPreTrainedModel):
    """Checkpoint-loading variant of BertForBooleanQuestionCO.

    Same BiLSTM-pooled binary classifier, with '1'-suffixed classifier
    attribute name so checkpoints saved under that name load correctly.
    NOTE(review): ``bert_answer``, ``l1`` and ``l2`` are created but never
    used in forward().
    """
    def __init__(self, config, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Zero all dropout, e.g. for deterministic evaluation runs.
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device1 = device
        self.bert = BertModel(config)
        self.bert_answer = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        self.classifier1 = nn.Linear(config.hidden_size, self.num_classes)
        # (negative, positive) focal-loss weights; positives weighted higher.
        self.alpha = torch.tensor([0.35, 0.65]).to(self.device1)
        self.criterion = FocalLoss(alpha=self.alpha, class_num=self.num_classes, gamma = 2)
        # Bidirectional LSTM whose concatenated directions match hidden_size.
        self.rnn = nn.LSTM(config.hidden_size, int(config.hidden_size/2), 1, bidirectional=True)
        self.l1 = nn.Linear(config.hidden_size, config.hidden_size)
        self.l2 = nn.Linear(config.hidden_size, config.hidden_size)
        self.sigmoid = nn.Sigmoid()
        # NOTE(review): nn.Softmax() without dim relies on the deprecated
        # implicit-dimension behavior (dim=1 on the 2-D logits here).
        self.softmax = nn.Softmax()
        self.init_weights()
    #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        options=None
    ):
        """Return ``(loss, probs) + extras`` when ``labels`` is given.

        When ``labels`` is None the raw BertModel output tuple is returned
        unchanged. ``options`` is accepted but unused.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[0] is the full per-token sequence output, not the pooler.
        pooled_output = outputs[0]
        pooled_output = self.dropout(pooled_output)
        pooled_output = self.rnn(pooled_output)
        # Keep the last step of each row of the LSTM output.
        pooled_output = torch.stack([pooled[-1] for pooled in pooled_output])
        logits = self.classifier1(pooled_output)
        if labels is not None:
            loss = self.criterion(logits, labels)
            out_logits = self.softmax(logits)
            outputs = (loss, out_logits,) + outputs[2:]
        return outputs
class BertForBooleanQuestionFB1(BertPreTrainedModel):
    """Checkpoint-loading variant of BertForBooleanQuestionFB.

    Same BiLSTM-pooled binary classifier, with '1'-suffixed classifier
    attribute name so checkpoints saved under that name load correctly.
    NOTE(review): ``bert_answer``, ``l1`` and ``l2`` are created but never
    used in forward().
    """
    def __init__(self, config, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Zero all dropout, e.g. for deterministic evaluation runs.
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device1 = device
        self.bert = BertModel(config)
        self.bert_answer = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        self.classifier1 = nn.Linear(config.hidden_size, self.num_classes)
        # Balanced (negative, positive) weights for the focal loss.
        self.alpha = torch.tensor([0.5, 0.5]).to(self.device1)
        self.criterion = FocalLoss(alpha=self.alpha, class_num=self.num_classes, gamma = 2)
        # Bidirectional LSTM whose concatenated directions match hidden_size.
        self.rnn = nn.LSTM(config.hidden_size, int(config.hidden_size/2), 1, bidirectional=True)
        self.l1 = nn.Linear(config.hidden_size, config.hidden_size)
        self.l2 = nn.Linear(config.hidden_size, config.hidden_size)
        self.sigmoid = nn.Sigmoid()
        # NOTE(review): nn.Softmax() without dim relies on the deprecated
        # implicit-dimension behavior (dim=1 on the 2-D logits here).
        self.softmax = nn.Softmax()
        self.init_weights()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        options=None
    ):
        """Return ``(loss, probs) + extras`` when ``labels`` is given.

        When ``labels`` is None the raw BertModel output tuple is returned
        unchanged. ``options`` is accepted but unused.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[0] is the full per-token sequence output, not the pooler.
        pooled_output = outputs[0]
        pooled_output = self.dropout(pooled_output)
        pooled_output = self.rnn(pooled_output)
        # Keep the last step of each row of the LSTM output.
        pooled_output = torch.stack([pooled[-1] for pooled in pooled_output])
        logits = self.classifier1(pooled_output)
        if labels is not None:
            loss = self.criterion(logits, labels)
            # print(loss)
            out_logits = self.softmax(logits)
            outputs = (loss, out_logits,) + outputs[2:]
        # outputs = (,) + outputs
        return outputs
class BertForBooleanQuestionFR1(BertPreTrainedModel):
    """Multi-head binary classifier for the SpartQA "FR" (find-relation)
    task: 7 independent 2-way heads over the BERT pooled output, each with
    its own class-weighted focal loss.
    """
    def __init__(self, config, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Disable all dropout (used for deterministic evaluation runs).
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        # Stored as ``device1`` because ``PreTrainedModel`` exposes ``device``
        # as a read-only property.
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        #self.classifier = nn.Linear(config.hidden_size, self.num_classes)
        # One (class-0, class-1) focal-loss weight pair per relation head.
        # NOTE(review): the last two rows sum to 1.18, not 1.0 — confirm the
        # intended weights.
        self.alphas = torch.tensor([[0.20, 0.8], [0.20, 0.8], [0.25, 0.75], [0.4, 0.6], [0.1, 0.9], [0.2, 0.98], [0.2, 0.98]]).to(self.device1)
        classifiers = []
        # Plain Python list (not nn.ModuleList): the FocalLoss objects are
        # intentionally not registered as sub-modules.
        self.criterion = []
        for item in range(7):
            classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
            self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
        self.classifiers1 = nn.ModuleList(classifiers)
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()
        self.init_weights()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss, stacked_probs) + extras`` when ``labels`` is
        given, otherwise the raw BERT outputs tuple."""
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[1] is the pooled [CLS] representation, one row per batch item.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = []
        # NOTE(review): heads are indexed by *batch position*, i.e. this
        # assumes batch element i corresponds to relation head i — confirm
        # the dataloader guarantees that ordering (and batch size <= 7).
        for ind, logit in enumerate(pooled_output):
            logit = self.classifiers1[ind](pooled_output[ind])
            logits.append(logit)
        if labels is not None:
            loss = 0
            out_logits = []
            for ind, logit in enumerate(logits):
                # Each head contributes its own focal loss for its own label.
                loss += self.criterion[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
                out_logits.append(self.softmax(logit))
            outputs = (loss, torch.stack(out_logits),) + outputs[2:]
        return outputs  # (loss), stacked per-head probabilities, (extras)
class BertForBooleanQuestionYNboolq(BertPreTrainedModel):
    """Two-headed yes/no classifier over the BERT pooled output for
    BoolQ-style YN questions; each head has its own focal loss."""
    def __init__(self, config, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Disable all dropout (used for deterministic evaluation runs).
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        # Stored as ``device1`` because ``PreTrainedModel`` exposes ``device``
        # as a read-only property.
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        # NOTE(review): ``classifier`` is unused in ``forward`` but is kept
        # so existing checkpoints load without missing keys.
        self.classifier = nn.Linear(config.hidden_size, self.num_classes)
        # Balanced focal-loss weights for both heads.
        self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5]]).to(self.device1)
        classifiers = []
        self.criterion = []
        for item in range(2):
            classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
            self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
        self.classifiers = nn.ModuleList(classifiers)
        self.sigmoid = nn.Sigmoid()
        # FIX: dim=-1 is what the deprecated implicit dimension choice
        # resolved to for the logits used below; explicit dim silences the
        # softmax warning without changing the output.
        self.softmax = nn.Softmax(dim=-1)
        self.init_weights()
    #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss, stacked_probs) + extras`` when ``labels`` is
        given, otherwise the raw BERT outputs tuple."""
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[1]: pooled [CLS] representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = []
        for ind in range(2):
            logit = self.classifiers[ind](pooled_output)
            logits.append(logit.squeeze(0))
        if labels is not None:
            loss = 0
            out_logits = []
            for ind, logit in enumerate(logits):
                loss += self.criterion[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
                out_logits.append(self.softmax(logit))
            outputs = (loss, torch.stack(out_logits),) + outputs[2:]
        return outputs
class BertForBooleanQuestionYNsprlqa(BertPreTrainedModel):
    """Two-headed yes/no classifier over the BERT pooled [CLS] output for
    the SpRL-QA YN task; one balanced focal loss per head."""
    def __init__(self, config, device='cuda:0', no_dropout=False):
        super().__init__(config)
        if no_dropout:
            # Deterministic mode: zero out every dropout probability.
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        self.num_labels = 2
        self.classifier = nn.Linear(config.hidden_size, self.num_classes)
        self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5]]).to(self.device1)
        head_list = []
        self.criterion = []
        for head_idx in range(self.num_labels):
            head_list.append(nn.Linear(config.hidden_size, self.num_classes))
            self.criterion.append(FocalLoss(alpha=self.alphas[head_idx], class_num=self.num_classes, gamma=2))
        self.classifiers = nn.ModuleList(head_list)
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()
        self.init_weights()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss, stacked_probs) + extras`` when labels are given,
        otherwise the raw BERT outputs."""
        bert_out = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        cls_state = self.dropout(bert_out[1])
        # One 2-way logit vector per head.
        per_head_logits = [head(cls_state).squeeze(0) for head in self.classifiers]
        if labels is None:
            return bert_out
        total_loss = 0
        probabilities = []
        for head_idx, head_logits in enumerate(per_head_logits):
            total_loss = total_loss + self.criterion[head_idx](head_logits.unsqueeze(0), labels[head_idx].unsqueeze(0))
            probabilities.append(self.softmax(head_logits))
        return (total_loss, torch.stack(probabilities),) + bert_out[2:]
class BertForBooleanQuestionYNsprlqaLoad(BertPreTrainedModel):
    """Near-duplicate of ``BertForBooleanQuestionYNsprlqa`` whose attribute
    names carry a ``1`` suffix (``classifier1``/``classifiers1``/
    ``criterion1``/``softmax1``).
    NOTE(review): presumably the suffixed names exist so this variant's
    state-dict keys do not collide when loading a checkpoint — confirm
    against the loading code.
    """
    def __init__(self, config, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Disable all dropout (used for deterministic evaluation runs).
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        # Stored as ``device1`` because ``PreTrainedModel`` exposes ``device``
        # as a read-only property.
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        self.num_labels = 2
        # NOTE(review): ``classifier1`` is unused in ``forward`` but kept so
        # checkpoints load without missing keys.
        self.classifier1 = nn.Linear(config.hidden_size, self.num_classes)
        self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5]]).to(self.device1)
        classifiers = []
        self.criterion1 = []
        for item in range(self.num_labels):
            classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
            self.criterion1.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
        self.classifiers1 = nn.ModuleList(classifiers)
        self.sigmoid = nn.Sigmoid()
        self.softmax1 = nn.Softmax()
        self.init_weights()
    #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss, stacked_probs) + extras`` when ``labels`` is
        given, otherwise the raw BERT outputs tuple."""
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[1]: pooled [CLS] representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = []
        for ind in range(self.num_labels):
            logit = self.classifiers1[ind](pooled_output)
            logits.append(logit.squeeze(0))
        if labels is not None:
            loss = 0
            out_logits = []
            for ind, logit in enumerate(logits):
                loss += self.criterion1[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
                out_logits.append(self.softmax1(logit))
            outputs = (loss, torch.stack(out_logits),) + outputs[2:]
        return outputs
class BertForBooleanQuestionFRsprlqa(BertPreTrainedModel):
    """18-headed binary classifier for the SpRL-QA FR task: one 2-way head
    per relation, each with its own heavily imbalanced focal loss.
    """
    def __init__(self, config, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Disable all dropout (used for deterministic evaluation runs).
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        # Stored as ``device1`` because ``PreTrainedModel`` exposes ``device``
        # as a read-only property.
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        # NOTE(review): ``classifier`` is unused in ``forward`` but kept so
        # checkpoints load without missing keys.
        self.classifier = nn.Linear(config.hidden_size, self.num_classes)
        # Per-head focal-loss weights; strongly skewed towards class 1
        # (presumably the negative/absent-relation class dominates the data
        # — confirm against the dataset statistics).
        self.alphas = torch.tensor([[0.09, 0.91], [0.44, 0.56], [0.007, 0.993], [0.007, 0.993], [0.086, 0.914], [0.057, 0.943], [0.013, 0.987], [0.13, 0.87], [0.03, 0.97], [0.006, 0.994], [0.13, 0.87], [0.007, 0.993], [0.0015, 0.9985], [0.013, 0.987], [0.003, 0.997], [0.003, 0.997], [0.006, 0.994], [0.003, 0.997]]).to(self.device1)
        classifiers = []
        # Plain Python list (not nn.ModuleList): the FocalLoss objects are
        # intentionally not registered as sub-modules.
        self.criterion = []
        for item in range(18):
            classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
            self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
        self.classifiers = nn.ModuleList(classifiers)
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()
        self.init_weights()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss, stacked_probs) + extras`` when ``labels`` is
        given, otherwise the raw BERT outputs tuple."""
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[1]: pooled [CLS] representation, one row per batch item.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = []
        # NOTE(review): heads are indexed by *batch position*, i.e. this
        # assumes batch element i corresponds to relation head i — confirm
        # the dataloader guarantees that ordering (and batch size <= 18).
        for ind, logit in enumerate(pooled_output):
            logit = self.classifiers[ind](pooled_output[ind])
            logits.append(logit)
        if labels is not None:
            loss = 0
            out_logits = []
            for ind, logit in enumerate(logits):
                # Each head contributes its own focal loss for its own label.
                loss += self.criterion[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
                out_logits.append(self.softmax(logit))
            outputs = (loss, torch.stack(out_logits),) + outputs[2:]
        return outputs
class BertForBooleanQuestion3ClassYN(BertPreTrainedModel):
    """Three-headed yes/no classifier over the BERT pooled output.

    Each head gets its own focal loss; the third head's alpha row is
    ``[1, 0]``, i.e. its class-1 errors carry zero weight.
    """
    def __init__(self, config, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Disable all dropout (used for deterministic evaluation runs).
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        # FIX: renamed from ``self.device`` — ``PreTrainedModel`` exposes
        # ``device`` as a read-only property, so assigning it fails on newer
        # transformers releases; ``device1`` matches the sibling classes.
        self.device1 = device
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.num_classes = 2
        # NOTE(review): ``classifier`` is unused in ``forward`` but kept so
        # checkpoints load without missing keys.
        self.classifier = nn.Linear(config.hidden_size, self.num_classes)
        self.alphas = torch.tensor([[0.5, 0.5], [0.5, 0.5], [1, 0] ]).to(self.device1)
        classifiers = []
        self.criterion = []
        for item in range(3):
            classifiers.append(nn.Linear(config.hidden_size, self.num_classes))
            self.criterion.append(FocalLoss(alpha=self.alphas[item], class_num=self.num_classes, gamma = 2))
        self.classifiers = nn.ModuleList(classifiers)
        self.sigmoid = nn.Sigmoid()
        # FIX: dim=-1 is what the deprecated implicit dimension choice
        # resolved to for the logits used below; explicit dim silences the
        # softmax warning without changing the output.
        self.softmax = nn.Softmax(dim=-1)
        self.init_weights()
    #@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss, stacked_probs) + extras`` when ``labels`` is
        given, otherwise the raw BERT outputs tuple."""
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[1]: pooled [CLS] representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = []
        for ind in range(3):
            logit = self.classifiers[ind](pooled_output)
            logits.append(logit.squeeze(0))
        if labels is not None:
            loss = 0
            out_logits = []
            for ind, logit in enumerate(logits):
                # Each head contributes its own focal loss for its own label.
                loss += self.criterion[ind](logit.unsqueeze(0), labels[ind].unsqueeze(0))
                out_logits.append(self.softmax(logit))
            outputs = (loss, torch.stack(out_logits),) + outputs[2:]
        return outputs  # (loss), stacked per-head probabilities, (extras)
class BertForSequenceClassification1(BertPreTrainedModel):
    """Dual-head BERT classifier: one head over ``config.num_labels`` and a
    second "type" head over ``type_class`` classes; the two losses are
    summed.  ``labels`` is expected to be an indexable pair
    ``(main_labels, type_labels)``.
    """
    def __init__(self, config, type_class = 0, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Disable all dropout (used for deterministic evaluation runs).
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        # self.device = device
        self.num_labels = config.num_labels
        self.num_type_class = type_class
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier1 = nn.Linear(config.hidden_size, self.num_labels)
        self.classifier2 = nn.Linear(config.hidden_size, self.num_type_class)
        self.init_weights()
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss, outputs1, outputs2)`` where ``outputs*`` are
        ``(logits_or_probs,) + extras``; ``loss`` is 0 when no labels."""
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[1]: pooled [CLS] representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits1 = self.classifier1(pooled_output)
        logits2 = self.classifier2(pooled_output)
        loss = 0
        if labels is not None:
            # Head 1: sigmoid+BCE when binary, otherwise cross-entropy.
            if self.num_labels == 1:
                loss_fct = BCELoss()
                out_logits1 = self.sigmoid(logits1)
                loss += loss_fct(out_logits1.view(-1), labels[0].view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss += loss_fct(logits1.view(-1, self.num_labels), labels[0].view(-1))
                # Multi-class path returns raw logits, not probabilities.
                out_logits1 = logits1#self.softmax(logits1)
            # Head 2: same scheme, driven by ``num_type_class``.
            if self.num_type_class == 1:
                out_logits2 = self.sigmoid(logits2)
                loss_fct = BCELoss()
                loss += loss_fct(out_logits2.view(-1), labels[1].view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss += loss_fct(logits2.view(-1, self.num_type_class), labels[1].view(-1))
                out_logits2 = logits2#self.softmax(logits2)
        else:
            # Inference: sigmoid for head 1, raw logits for head 2.
            out_logits1 = self.sigmoid(logits1)
            out_logits2 = logits2
        outputs1 = (out_logits1,) + outputs[2:]  # add hidden states / attentions if present
        outputs2 = (out_logits2,) + outputs[2:]
        return loss, outputs1, outputs2  # (loss), logits, (hidden_states), (attentions)
class BertForSequenceClassification2(BertPreTrainedModel):
    """Dual-head BERT classifier, twin of ``BertForSequenceClassification1``
    but with the first head stored under the attribute name ``classifier``
    (the remnants of a third "asked" head are commented out).
    ``labels`` is expected to be a pair ``(main_labels, type_labels)``.
    """
    def __init__(self, config, type_class = 0, num_asked_class = 1, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Disable all dropout (used for deterministic evaluation runs).
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        # self.device = device
        self.num_labels = config.num_labels
        self.num_type_class = type_class
        # self.num_asked_class = num_asked_class
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.num_labels)
        self.classifier2 = nn.Linear(config.hidden_size, self.num_type_class)
        # self.classifier3 = nn.Linear(config.hidden_size, self.num_asked_class)
        self.init_weights()
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        asked_compute = None,
    ):
        """Return ``(loss, outputs1, outputs2)``; ``asked_compute`` is
        currently unused (the third head is disabled)."""
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[1]: pooled [CLS] representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits1 = self.classifier(pooled_output)
        logits2 = self.classifier2(pooled_output)
        loss = 0
        if labels is not None:
            # Head 1: sigmoid+BCE when binary, otherwise cross-entropy.
            if self.num_labels == 1:
                loss_fct = BCELoss()
                out_logits1 = self.sigmoid(logits1)
                loss += loss_fct(out_logits1.view(-1), labels[0].view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss += loss_fct(logits1.view(-1, self.num_labels), labels[0].view(-1))
                # Multi-class path returns raw logits, not probabilities.
                out_logits1 = logits1#self.softmax(logits1)
            # Head 2: same scheme, driven by ``num_type_class``.
            if self.num_type_class == 1:
                out_logits2 = self.sigmoid(logits2)
                loss_fct = BCELoss()
                loss += loss_fct(out_logits2.view(-1), labels[1].view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss += loss_fct(logits2.view(-1, self.num_type_class), labels[1].view(-1))
                out_logits2 = logits2#self.softmax(logits2)
        else:
            # Inference: sigmoid for head 1, raw logits for head 2.
            out_logits1 = self.sigmoid(logits1)
            out_logits2 = logits2
        outputs1 = (out_logits1,) + outputs[2:]  # add hidden states / attentions if present
        outputs2 = (out_logits2,) + outputs[2:]
        return loss, outputs1, outputs2#, outputs3 # (loss), logits, (hidden_states), (attentions)
class BertForSequenceClassification3(BertPreTrainedModel):
    """Dual-head BERT classifier, another twin of
    ``BertForSequenceClassification1`` with the heads stored under
    ``classifier1``/``classifier21`` and the type width under
    ``num_type_class1``.
    NOTE(review): presumably the distinct attribute names keep this
    variant's state-dict keys from colliding when loading checkpoints —
    confirm against the loading code.
    """
    def __init__(self, config, type_class = 0, num_asked_class = 1, device = 'cuda:0', no_dropout = False):
        super().__init__(config)
        if no_dropout:
            # Disable all dropout (used for deterministic evaluation runs).
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        # self.device = device
        self.num_labels = config.num_labels
        self.num_type_class1 = type_class
        # self.num_asked_class = num_asked_class
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier1 = nn.Linear(config.hidden_size, self.num_labels)
        self.classifier21 = nn.Linear(config.hidden_size, self.num_type_class1)
        # self.classifier3 = nn.Linear(config.hidden_size, self.num_asked_class)
        self.init_weights()
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        asked_compute = None,
    ):
        """Return ``(loss, outputs1, outputs2)``; ``asked_compute`` is
        currently unused (the third head is disabled)."""
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[1]: pooled [CLS] representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits1 = self.classifier1(pooled_output)
        logits2 = self.classifier21(pooled_output)
        loss = 0
        if labels is not None:
            # Head 1: sigmoid+BCE when binary, otherwise cross-entropy.
            if self.num_labels == 1:
                loss_fct = BCELoss()
                out_logits1 = self.sigmoid(logits1)
                loss += loss_fct(out_logits1.view(-1), labels[0].view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss += loss_fct(logits1.view(-1, self.num_labels), labels[0].view(-1))
                # Multi-class path returns raw logits, not probabilities.
                out_logits1 = logits1#self.softmax(logits1)
            # Head 2: same scheme, driven by ``num_type_class1``.
            if self.num_type_class1 == 1:
                out_logits2 = self.sigmoid(logits2)
                loss_fct = BCELoss()
                loss += loss_fct(out_logits2.view(-1), labels[1].view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss += loss_fct(logits2.view(-1, self.num_type_class1), labels[1].view(-1))
                out_logits2 = logits2#self.softmax(logits2)
        else:
            # Inference: sigmoid for head 1, raw logits for head 2.
            out_logits1 = self.sigmoid(logits1)
            out_logits2 = logits2
        outputs1 = (out_logits1,) + outputs[2:]  # add hidden states / attentions if present
        outputs2 = (out_logits2,) + outputs[2:]
        return loss, outputs1, outputs2#, outputs3 # (loss), logits, (hidden_states), (attentions)
class BertForSequenceClassification(BertPreTrainedModel):
    """Standard single-head BERT sequence classifier.

    With ``num_labels == 1`` the head is trained as sigmoid + BCE,
    otherwise with cross-entropy over ``num_labels`` classes.
    """
    def __init__(self, config, type_class=0, device='cuda:0', no_dropout=False):
        super().__init__(config)
        self.num_labels = config.num_labels
        if no_dropout:
            # Deterministic mode: zero out every dropout probability.
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.sigmoid = nn.Sigmoid()
        self.init_weights()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss?, logits) + extras`` — the loss is prepended only
        when ``labels`` is supplied."""
        bert_out = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        logits = self.classifier(self.dropout(bert_out[1]))
        result = (logits,) + bert_out[2:]  # hidden states / attentions if present
        if labels is None:
            return result
        if self.num_labels == 1:
            # Binary head: sigmoid + binary cross-entropy.
            loss = BCELoss()(self.sigmoid(logits).view(-1), labels.view(-1))
        else:
            loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
        return (loss,) + result  # (loss), logits, (hidden_states), (attentions)
| 75,165 | 36.009355 | 333 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/test.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from QA.train import question_to_sentence, F1_measure, precision, recall, confusion_matrix, concate_input_components, check_answer_equality
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
# from BERT import tokenizing
# from ALBERT import tokenizing
# from XLNet import tokenizing
def test(model,
         pretrain="bertbc",
         baseline="bert",
         test_or_dev="test",
         num_sample=None,
         train_num=None,
         unseen=False,
         qtype=None,
         other=None,
         data_name="spartqa",
         save_data=False,
         device="cpu",
         file=None,
         epochs=0,
         ):
    """Evaluate ``model`` on a SpartQA / SpaRTUN / human test or dev split.

    Depending on ``pretrain`` this either scores masked-LM probes
    ('mlm'/'mlmr') or runs boolean / multiple-choice QA classification.
    Per-question logs go to ``file``; returns ``(accuracy,)`` or, when YN
    metrics are computed, ``(YN_accuracy, Macro_F1)``.
    """
    all_q = 0
    all_q_YN = 0
    all_q_FR = 0
    correct = 0
    correct_YN = 0
    correct_FR = 0
    correct_no_distance = 0
    # ``task`` remembers the *original* qtype argument; ``qtype`` itself is
    # normalised to a list of question types just below.
    task = [qtype]
    correct_consistency, consistency_total = 0, 0
    qtypes = ["YN", "FR"]
    qtype = qtypes if qtype == "all" else [qtype]
    # Fragment reused in the final report lines.
    unseen_txt = 'unseen ' if unseen else ''
    model.eval()
    is_human = False
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        # ---- pick the evaluation file -----------------------------------
        if unseen:
            with open('./dataset/unseen_test.json') as json_file:
                data = json.load(json_file)
        elif data_name == "human":
            with open('./dataset/human_' + test_or_dev + '.json') as json_file:
                data = json.load(json_file)
            is_human = True
            data_name = "spartqa"
        elif data_name == "spartun":
            if other == "simple":
                with open('dataset/SpaRTUN/' + test_or_dev + '_simple.json') as json_file:
                    data = json.load(json_file)
            elif other == "clock":
                with open('dataset/SpaRTUN/' + test_or_dev + '_clock.json') as json_file:
                    data = json.load(json_file)
            else:
                with open('dataset/SpaRTUN/' + test_or_dev + '.json') as json_file:
                    data = json.load(json_file)
        else:
            with open('dataset/' + test_or_dev + '.json') as json_file:
                data = json.load(json_file)
        if 'YN' in task or "all" in task:
            # Per-class (Yes / No / DK) counters for precision / recall.
            TPFN, TP, TPFP = np.array([0] * 3), np.array([0] * 3), np.array([0] * 3)
        for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
            print('sample ', s_ind, file=file)
            story_txt = story['story'][0]
            # ---- masked-LM probing --------------------------------------
            if pretrain == 'mlm':
                print('Story:\n', story_txt, file=file)
                # FIX: ``qtype`` is already a list at this point, so the old
                # ``qtype == 'all'`` comparison could never be true; compare
                # against the remembered original argument instead.
                tasks_list = ['FB', 'FR', 'CO'] if task == ['all'] else task
                for question in story['questions']:
                    q_text, q_emb = '', []
                    q_type = question['q_type']
                    if q_type in tasks_list:
                        q_text = question_to_sentence(question['question'], question['q_type'], question['answer'], question['candidate_answers'])
                        # Skip multi-answer questions and unanswerable variants.
                        if len(question['answer']) > 1 or (q_type == 'FB' and question['answer'] == []) or (q_type == 'FR' and 7 in question['answer']) or (q_type == 'CO' and (question['answer'] == [2] or question['answer'] == [3])):
                            continue
                        answer = question['answer'][0] if q_type == 'FB' else question['candidate_answers'][question['answer'][0]]
                        if q_type == 'CO' and ('which' in answer or 'in' in answer or 'that' in answer):
                            continue
                        all_q += 1
                        print('Question: ', q_text, '\nAnswer: ', answer, file=file)
                        # NOTE(review): ``Masked_LM`` comes from the per-baseline
                        # imports that are commented out above — confirm it is in
                        # scope before running with pretrain='mlm'.
                        _, output, truth = Masked_LM(model, story_txt, q_text, answer, other, device, file)
                        print("truth: ", truth, "\npredict: ", output, file=file)
                        correct_temp = 0
                        for i in range(len(output)):
                            if output[i] == truth[i]:
                                correct_temp += 1
                        # Partial credit: fraction of masked tokens recovered.
                        correct += correct_temp / len(output)
                        print('total: ', all_q, ' correct: ', correct, file=file)
                        print('total: ', all_q, ' correct: ', correct)
            elif pretrain == 'mlmr':
                print('Story:\n', story_txt, file=file)
                all_q += 1
                # NOTE(review): ``Masked_LM_random`` also relies on the
                # commented-out per-baseline imports above.
                _, output, truth = Masked_LM_random(model, story_txt, s_ind, other, device, file)
                print("truth: ", truth, "\npredict: ", output, file=file)
                correct_temp = 0
                for i in range(len(output)):
                    if output[i] == truth[i]:
                        correct_temp += 1
                correct += correct_temp / len(output)
                print('total: ', all_q, ' correct: ', correct, file=file)
                print('total: ', all_q, ' correct: ', correct)
            # ---- ordinary QA evaluation ---------------------------------
            else:
                for question in story['questions']:
                    q_text, q_emb = '', []
                    if question['q_type'] in qtype:
                        if other == 'noDK' and question['answer'] == ['DK']:
                            continue
                        q_text = question['question']
                        all_q += 1
                        if question['q_type'] == "YN":
                            all_q_YN += 1
                        elif question["q_type"] == "FR":
                            all_q_FR += 1
                        print('Story:\n', story_txt, file=file)
                        print('question: ', q_text, '\nanswer: ', question['answer'], file=file)
                        input_text = concate_input_components([q_text, story_txt], baseline)
                        # NOTE(review): for any other ``pretrain`` value
                        # ``output`` stays undefined and the print below raises.
                        if pretrain == 'bertmc':
                            _, output = multiple_classification(model, [input_text], question['q_type'], question['candidate_answers'], [], other=other, device=device, dataset=data_name)
                        elif pretrain == 'bertbc':
                            _, output = boolean_classification(model, [input_text], question['q_type'], question['candidate_answers'], [], other=other, device=device, dataset=data_name)
                        print("predict: ", output[0], file=file)
                        correct_answer = question["answer"]
                        if check_answer_equality(correct_answer, output[0]):
                            correct += 1
                            if question["q_type"] == "YN":
                                correct_YN += 1
                            if question["q_type"] == "FR":
                                correct_FR += 1
                            print('total: ', all_q, ' correct: ', correct, file=file)
                        if question['q_type'] == 'YN':
                            # Update the Yes/No/DK confusion counters.
                            if correct_answer == ['Yes']:
                                TPFN[0] += 1
                            elif correct_answer == ['No']:
                                TPFN[1] += 1
                            elif correct_answer == ['DK']:
                                TPFN[2] += 1
                            if output[0] == ['Yes']:
                                TPFP[0] += 1
                            elif output[0] == ['No']:
                                TPFP[1] += 1
                            elif output[0] == ['DK']:
                                TPFP[2] += 1
                            if output[0] == correct_answer == ['Yes']:
                                TP[0] += 1
                            elif output[0] == correct_answer == ['No']:
                                TP[1] += 1
                            elif output[0] == correct_answer == ['DK']:
                                TP[2] += 1
                        if question['q_type'] == 'FR' and is_human:
                            # Human FR answers: drop the distance labels (4, 5)
                            # before the "no distance" comparison.
                            if 4 in correct_answer:
                                correct_answer.remove(4)
                            if 5 in correct_answer:
                                correct_answer.remove(5)
                            if 4 in output[0]:
                                output[0].remove(4)
                            if 5 in output[0]:
                                output[0].remove(5)
                            if correct_answer == output[0]:
                                correct_no_distance += 1
                                print('total: ', all_q, ' correct_no_dist: ', correct_no_distance, file=file)
    # FIX: the old report lines used ``'a'+'b' if cond else ''+'c'``; the
    # conditional expression binds looser than ``+``, which silently dropped
    # half of the message in both branches.
    print(test_or_dev, ' Final ' + unseen_txt + 'accuracy: ', correct / all_q)
    print(test_or_dev, ' Final ' + unseen_txt + 'accuracy: ', correct / all_q, file=file)
    # FIX: ``task`` is a list, so the old ``task == "all"`` was never true
    # and the per-type accuracies were silently skipped.
    if task == ["all"]:
        if all_q_YN:
            print(test_or_dev, ' Final ' + unseen_txt + 'accuracy on YN: ', correct_YN / all_q_YN)
            print(test_or_dev, ' Final ' + unseen_txt + 'accuracy on YN: ', correct_YN / all_q_YN, file=file)
        if all_q_FR:
            print(test_or_dev, ' Final ' + unseen_txt + 'accuracy on FR: ', correct_FR / all_q_FR)
            print(test_or_dev, ' Final ' + unseen_txt + 'accuracy on FR: ', correct_FR / all_q_FR, file=file)
    if is_human and ('FR' in task or "all" in task):
        print(test_or_dev, ' accuracy with no distance: ', correct_no_distance / all_q, file=file)
    if 'YN' in task or "all" in task:
        print('TP:', TP, ' TPFP: ', TPFP, ' TPFN: ', TPFN, file=file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        # Macro-F1 over Yes/No only; DK is excluded from the average.
        Macro_F1 = np.average(F1[:2])
        print(test_or_dev, ' Final Precision: ', Precision, file=file)
        print(test_or_dev, ' Final Recall: ', Recall, file=file)
        print(test_or_dev, ' Final F1: ', F1, file=file)
        print(test_or_dev, ' Final Macro_F1: ', Macro_F1)
        print(test_or_dev, ' Final Macro_F1: ', Macro_F1, file=file)
        return (correct_YN / all_q_YN, Macro_F1,)
    return (correct / all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Convert a character-level answer span into token indices.

    ``start_end`` is a list of ``[start_char, end_char]`` spans into
    ``story``; only the FIRST span is processed.  The returned
    ``[start_tok, end_tok]`` pair is offset by ``len(question tokens) + 2``
    so it indexes into a ``[CLS] question [SEP] story`` input.
    ``file`` is accepted for signature compatibility but never used.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    # Walk the story tokens while accumulating a running character offset
    # (``sum_char``) to find which tokens cover the character span.
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:
        # ``temp`` is the character offset still being searched for: first
        # the span start, then (once found) the span end.
        temp = s_e[0]
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            if temp > sum_char + len(word) : sum_char += len_word
            else:
                if is_start:
                    start, is_start = ind , False
                    # Span may start and end inside the same token.
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1
                else: start_end_token+=[[start, ind]]; break
            # Account for the inter-token space, except around punctuation
            # and apostrophes that the tokenizer splits without spaces.
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
    # Shift into the concatenated input: question tokens + [CLS] and [SEP].
    start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
    start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def test_babi(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate *model* on the bAbI split named by *test_or_dev*.

    Every question is scored through ``boolean_classification`` regardless of
    *pretrain*; *unseen* and *pretrain* are accepted but never read here.
    Per-sample progress is printed both to stdout and to *file*.
    Returns the overall accuracy (correct / total questions).
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('dataset/babi/'+test_or_dev+'.json') as json_file:
        data = json.load(json_file)
    all_q = 0
    correct = 0
    s_ind = 0
    #random sampling or not
    random.seed(1)
    stories = data['data'] #if other != 'random' else random.sample(data['data'], num_sample)
    # NOTE(review): divides by 8 — presumably ~8 questions per story, so this
    # caps the number of *questions* near num_sample; confirm against the data.
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in stories[:number_samples]:
            # if is_DK_babi(story['story'][0]): continue
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                if question['q_type'] in [qtype]:
                    q_text = question['question']
                    all_q += 1
                    print('Story:\n',story_txt, file = file)
                    print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    _, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                    #print("logit: ", logit, file = file)
                    print("predict: ", output, file = file)
                    # NOTE(review): aliases question['answer'] — .sort() mutates
                    # the loaded dataset dict in place.
                    correct_answer = question['answer']
                    correct_answer.sort()
                    if correct_answer == output :
                        correct+=1
                    print('total: ', all_q, ' correct: ', correct, file = file)
                    print('total: ', all_q, ' correct: ', correct)
    print(test_or_dev,' Final accuracy: ', correct/ all_q)
    print(test_or_dev,' Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def test_boolq(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate *model* on the BoolQ dev or held-out test split.

    For 'test', the last *num_sample* passages of test_1.json are used;
    for 'dev', the first *num_sample*. Answers are mapped to ['Yes']/['No']
    and scored through ``boolean_classification``. *pretrain*, *unseen* and
    *qtype* are accepted but never read here.
    Returns the overall accuracy (correct / total questions).
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    start_number = 0
    if test_or_dev == 'dev':
        with open('dataset/boolQ/dev.json') as json_file:
            data = json.load(json_file)
    else:
        with open('dataset/boolQ/test_1.json') as json_file:
            data = json.load(json_file)
        # take the *last* num_sample passages of the test file
        start_number = len(data['data']) - num_sample
        num_sample = None
    all_q = 0
    correct = 0
    s_ind = 0
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in data['data'][start_number:num_sample]:
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            # passage truncated to 1000 characters before encoding
            story_txt = story['passage'][:1000]
            # each question (span)
            # for question in story['questions']:
            #     q_text, q_emb= '', []
            #     if question['q_type'] in [qtype]:
            q_text = story['question']+'?'
            answer = ['Yes'] if story['answer'] == True else ['No']
            all_q += 1
            print('Story:\n',story_txt, file = file)
            print('question: ', q_text, '\nanswer: ', answer, file = file)
            _, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
            print("predict: ", output, file = file)
            correct_answer = answer
            correct_answer.sort()
            if correct_answer == output :
                correct+=1
            print('total: ', all_q, ' correct: ', correct, file = file)
            print('total: ', all_q, ' correct: ', correct)
    print('Test Final accuracy: ', correct/ all_q)
    print('Test Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def is_DK_babi(story):
    """Return True when *story* mixes horizontal and vertical relation words.

    A story containing at least one of 'left'/'right' AND at least one of
    'above'/'below' is flagged (the bAbI "don't know" filter). The original
    four overlapping clauses reduce exactly to this conjunction, and the
    redundant ``True if x else False`` ternaries are dropped.
    """
    has_horizontal = 'left' in story or 'right' in story
    has_vertical = 'above' in story or 'below' in story
    return has_horizontal and has_vertical
| 18,533 | 39.734066 | 241 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/trainold.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from BERT import tokenizing
# from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
# from ALBERT import tokenizing
# from XLNet import tokenizing
def train(model
          , criterion
          , optimizer
          , pretrain = "bertbc"
          , baseline = "bert"
          , start = 0
          , num_sample = None
          , train_num = None
          , qtype = None
          , data_name = False
          , other = None
          , device = "cpu"
          , train_log = False
          , file = None
          , epochs = 0
          , batch_size = None
          , dataset = "spartqa"
          ):
    """Run one training pass over the selected split and update *model* in place.

    Routing by *pretrain*: 'mlm'/'mlmr' -> masked-LM objectives,
    'bertqa' -> span extraction, 'bertmc' -> multiple-choice,
    'bertbc' -> boolean classification. Each question is an effective
    batch of one: loss.backward() and optimizer.step() run per question.

    NOTE(review): *criterion*, *batch_size*, *dataset* and *epochs* are
    accepted but never read in this body (the loss comes from the model
    call; the ``dataset=`` kwarg passed downstream is *data_name*).

    Returns (summed_loss, (accuracy,)) or, when qtype == 'YN',
    (summed_loss, (accuracy, Macro_F1)).
    """
    #import baseline
    if baseline == 'bert':
        # initialize_tokenizer(baseline)
        from BERT import question_answering, tokenizing, boolean_classification, Masked_LM, Masked_LM_random, token_classification, multiple_classification
    # elif baseline == 'xlnet':
    #     from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    # elif baseline == 'albert':
    #     from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    # k_fold = 7
    # choose the training file from data_name / train_num / other
    if data_name == "human":
        with open('./dataset/human_train.json') as json_file:
            data = json.load(json_file)
    elif data_name == "spartun":
        with open('./dataset/SpaRTUN/train.json') as json_file:
            data = json.load(json_file)
        # if task == ["all"]: qtype = ["YN"]
    elif train_num == 'train24k':
        with open('./dataset/train_24k.json') as json_file:
            data = json.load(json_file)
    elif train_num == 'train100k':
        with open('./dataset/train_100k.json') as json_file:
            data = json.load(json_file)
    elif train_num == 'train500':
        with open('./dataset/train_500.json') as json_file:
            data = json.load(json_file)
    elif other == 'unseen' :
        with open('./dataset/unseen_test.json') as json_file:
            data = json.load(json_file)
    else:
        with open('dataset/train.json') as json_file:
            data = json.load(json_file)
    # per-class counters for Yes/No/DK (indices 0/1/2)
    if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*3), np.array([0]*3), np.array([0]*3)
    for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
        # s_ind+= 1
        # print('sample ',s_ind)
        if s_ind< start:continue
        # samples [epochs*k_fold, (epochs*k_fold)+k_fold] considered as dev
        # if human and s_ind in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
        if train_log: print('sample ',s_ind, file = file)
        story_txt = story['story'][0]
        x = 0
        #MLM tasks
        #TODO add batch
        if pretrain == 'mlm':
            tasks_list = ['FB', 'FR', 'CO'] if qtype == 'all' else [qtype]
            model.zero_grad()
            # story_txt = 'The circle is above the triangle and the blue square. the blue square is below the circle.'
            if train_log: print('Story:\n',story_txt, file = file)
            for question in story['questions']:
                q_text, q_emb= '', []
                q_type = question['q_type']
                if q_type in tasks_list :
                    # rewrite the question into a cloze sentence with [MASK]s
                    q_text = question_to_sentence(question['question'], question['q_type'], question['answer'], question['candidate_answers'])
                    # skip multi-answer and otherwise unmaskable questions
                    if len(question['answer'])> 1 or (q_type == 'FB' and question['answer'] == []) or (q_type == 'FR' and 7 in question['answer']) or (q_type == 'CO' and (question['answer'] == [2] or question['answer'] == [3])): continue
                    answer = question['answer'][0] if q_type == 'FB' else question['candidate_answers'][question['answer'][0]]
                    if q_type == 'CO' and ('which' in answer or 'in' in answer or 'that' in answer): continue
                    all_q += 1
                    if train_log: print('Question: ',q_text,'\nAnswer: ', answer, file = file)
                    loss, output, truth = Masked_LM(model, story_txt, q_text, answer, other, device, file)
                    # print("predict: ", output)
                    if train_log: print("truth: ", truth, "\npredict: ", output, file = file)
                    # print("truth: ", truth, "\npredict: ", output)
                    # per-question accuracy = fraction of masked tokens recovered
                    correct_temp = 0
                    for i in range(len(output)):
                        if output[i] == truth[i]: correct_temp+=1
                    correct += correct_temp / len(output)
                    # if correct_temp / len(output) == 1:
                    #     correct += 1
                    if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                    print('total: ', all_q, ' correct: ', correct)
                    print("Loss is ", loss.item(), file = file)
                    losses += [loss.item()]
                    loss.backward()
                    optimizer.step()
        #TODO add batch
        elif pretrain == 'mlmr':
            model.zero_grad()
            # story_txt = 'The circle is above the triangle and the blue square. the blue square is below the circle.'
            if train_log: print('Story:\n',story_txt, file = file)
            all_q += 1
            # randomly-masked LM objective over the raw story text
            loss, output, truth = Masked_LM_random(model, story_txt, s_ind+1, other, device, file)
            # print("predict: ", output)
            if train_log: print("truth: ", truth, "\npredict: ", output, file = file)
            # print("truth: ", truth, "\npredict: ", output)
            correct_temp = 0
            for i in range(len(output)):
                if output[i] == truth[i]: correct_temp+=1
            correct += correct_temp / len(output)
            # if correct_temp / len(output) == 1:
            #     correct += 1
            if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
            print('total: ', all_q, ' correct: ', correct)
            print("Loss is ", loss.item(), file = file)
            losses += [loss.item()]
            loss.backward()
            optimizer.step()
        #QA tasks
        else:
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                if question['q_type'] in [qtype] :
                    if question['q_type'] in ['FA'] and question['start_end_char'] == []: continue
                    if other == 'noDK' and question['answer'] == ['DK']: continue
                    x+=1
                    q_text = question['question']
                    model.zero_grad()
                    all_q += 1
                    print('Story:\n',story_txt, file = file)
                    if train_log: print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    if pretrain == 'bertqa':
                        # map gold char span to token indices in [CLS] q [SEP] story
                        correct_start_end_word = correct_token_id(story_txt, q_text, question['start_end_char'], tokenizing, file)
                        #sent to model
                        loss, output, start, end = question_answering(model, q_text, story_txt, correct_start_end_word, device)
                        if train_log: print("Correct start end: ", correct_start_end_word, "\npredict: ", output, start, end, "\nstart end:", question['start_end_char'], file = file)
                        if question['answer'][0] == output and (start == correct_start_end_word[0] and end == correct_start_end_word[1]):
                            correct+=1
                        if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                        # print('total: ', all_q, ' correct: ', correct)
                    elif pretrain == 'bertmc':
                        loss, output = multiple_classification(model, q_text, story_txt, question['q_type'], question['candidate_answers'], question['answer'], other, device, dataset = data_name)
                        if train_log: print("predict: ", output, file = file)
                        # NOTE(review): aliases question['answer'] — .sort()
                        # mutates the loaded dataset dict in place.
                        correct_answer = question['answer']
                        correct_answer.sort()
                        if correct_answer == output :
                            correct+=1
                        if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                        print('total: ', all_q, ' correct: ', correct)
                        if qtype == 'YN':
                            if correct_answer == ['Yes']: TPFN[0] += 1
                            elif correct_answer == ['No']: TPFN[1] += 1
                            elif correct_answer == ['DK']: TPFN[2] += 1
                            if output == ['Yes']: TPFP[0] += 1
                            elif output == ['No']: TPFP[1] += 1
                            elif output == ['DK']: TPFP[2] += 1
                            if output == correct_answer == ['Yes']: TP[0] += 1
                            elif output == correct_answer == ['No']: TP[1] += 1
                            elif output == correct_answer == ['DK']: TP[2] += 1
                    elif pretrain == 'bertbc':
                        loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], question['candidate_answers'], question['answer'], other, device, dataset = data_name)
                        if train_log: print("predict: ", output, file = file)
                        correct_answer = question['answer']
                        correct_answer.sort()
                        output.sort()
                        if correct_answer == output :
                            correct+=1
                        if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                        # print('total: ', all_q, ' correct: ', correct)
                        if qtype == 'YN':
                            if correct_answer == ['Yes']: TPFN[0] += 1
                            elif correct_answer == ['No']: TPFN[1] += 1
                            elif correct_answer == ['DK']: TPFN[2] += 1
                            if output == ['Yes']: TPFP[0] += 1
                            elif output == ['No']: TPFP[1] += 1
                            elif output == ['DK']: TPFP[2] += 1
                            if output == correct_answer == ['Yes']: TP[0] += 1
                            elif output == correct_answer == ['No']: TP[1] += 1
                            elif output == correct_answer == ['DK']: TP[2] += 1
                    if train_log: print("Loss is ", loss.item(), file = file)
                    losses += [loss.item()]
                    loss.backward()
                    optimizer.step()
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    if qtype == 'YN':
        print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
        # integer 0/0 divisions yield NaN (with a warning); nan_to_num -> 0
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        # macro-F1 over Yes/No only (index 2 = DK is excluded)
        Macro_F1 = np.average(F1[:2])
        print('Train Final Precision: ', Precision, file = file)
        print('Train Final Recall: ', Recall, file = file)
        print('Train Final F1: ', F1, file = file)
        print('Train Final Macro_F1: ', Macro_F1)
        print('Train Final Macro_F1: ', Macro_F1, file = file)
        return losses, (correct/ all_q, Macro_F1)
    return losses, (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Convert the first character span in *start_end* to token indices.

    The returned [start, end] pair indexes the concatenated
    [CLS] + question + [SEP] + story token sequence, which is why the
    question length plus two special tokens is added before returning.
    *file* is accepted for signature compatibility but not used.
    """
    story_tokens = tokenizing(story)
    question_tokens = tokenizing(question)
    token_spans = []
    for char_span in start_end[:1]:
        target_char = char_span[0]
        consumed = 0
        seeking_start, span_start = True, None
        for idx, token in enumerate(story_tokens):
            token_len = len(token)
            if target_char > consumed + token_len:
                consumed += token_len
            elif seeking_start:
                span_start, seeking_start = idx, False
                if char_span[1] - 1 <= consumed + token_len:
                    token_spans.append([span_start, idx])
                    break
                target_char = char_span[1] - 1
            else:
                token_spans.append([span_start, idx])
                break
            # Count the inter-token space, except next to punctuation/apostrophes.
            if idx != len(story_tokens) - 1 and story_tokens[idx + 1] not in ('.', ',', "'") and story_tokens[idx] != "'":
                consumed += 1
    shift = len(question_tokens) + 2  # 2 for [CLS] and [SEP]
    token_spans[-1][0] += shift
    token_spans[-1][1] += shift
    return token_spans[0]
# def train_babi(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
# #import baseline
# if baseline == 'bert':
# from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'xlnet':
# from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'albert':
# from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
# with open('./dataset/babi/train.json') as json_file:
# data = json.load(json_file)
# number_samples = int((num_sample/8)+1) if num_sample else num_sample
# #random sampling or not
# random.seed(1)
# stories = random.sample(data['data'], number_samples) if num_sample in [100, 500, 1000, 2000, 5000] else data['data']
# model.train()
# all_q = 0
# correct = 0
# s_ind = 0
# losses = []
# for story in stories[:number_samples]:
# # if is_DK_babi(story['story'][0]): continue
# s_ind+= 1
# print('sample ',s_ind)
# print('sample ',s_ind, file = file)
# story_txt = story['story'][0]
# x = 0
# # each question (span)
# for question in story['questions']:
# q_text, q_emb= '', []
# if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
# x+=1
# q_text = question['question']
# model.zero_grad()
# all_q += 1
# print('Story:\n',story_txt, file = file)
# print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
# loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
# #print("logit: ", logit , file = file)
# print("predict: ", output, file = file)
# correct_answer = question['answer']
# correct_answer.sort()
# if correct_answer == output :
# correct+=1
# print('total: ', all_q, ' correct: ', correct, file = file)
# print('total: ', all_q, ' correct: ', correct)
# print("Loss is ", loss.item(), file = file)
# losses += [loss.item()]
# loss.backward()
# optimizer.step()
# losses = np.sum(losses)
# print('Train Final accuracy: ', correct/ all_q)
# print('Train Final accuracy: ', correct/ all_q, file = file)
# print('Losses: ', losses)
# print('Losses: ', losses, file = file)
# return losses, correct/ all_q
# def train_boolq(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
# #import baseline
# if baseline == 'bert':
# from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'xlnet':
# from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'albert':
# from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
# with open('./dataset/boolQ/train.json') as json_file:
# data = json.load(json_file)
# model.train()
# all_q = 0
# correct = 0
# s_ind = 0
# losses = []
# x = 0
# for story in data['data'][:num_sample]:
# s_ind+= 1
# print('sample ',s_ind)
# print('sample ',s_ind, file = file)
# story_txt = story['passage'][:1000]
# # print(story_txt)
# # each question (span)
# # for question in story['questions']:
# # q_text, q_emb= '', []
# # if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
# x+=1
# q_text = story['question']
# answer = ['Yes'] if story['answer'] == True else ['No']
# model.zero_grad()
# all_q += 1
# print('Story:\n',story_txt, file = file)
# print('question: ', q_text, '\nanswer: ',answer, file = file)
# loss, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
# print("predict: ", output, file = file)
# correct_answer = answer
# correct_answer.sort()
# if correct_answer == output :
# correct+=1
# print('total: ', all_q, ' correct: ', correct, file = file)
# print('total: ', all_q, ' correct: ', correct)
# print("Loss is ", loss.item(), file = file)
# losses += [loss.item()]
# loss.backward()
# optimizer.step()
# losses = np.sum(losses)
# print('Train Final accuracy: ', correct/ all_q)
# print('Train Final accuracy: ', correct/ all_q, file = file)
# print('Losses: ', losses)
# print('Losses: ', losses, file = file)
# return losses, correct/ all_q
def is_DK_babi(story):
    """Return True when *story* mixes horizontal and vertical relation words.

    A story containing at least one of 'left'/'right' AND at least one of
    'above'/'below' is flagged (the bAbI "don't know" filter). The original
    four overlapping clauses reduce exactly to this conjunction, and the
    redundant ``True if x else False`` ternaries are dropped.
    """
    has_horizontal = 'left' in story or 'right' in story
    has_vertical = 'above' in story or 'below' in story
    return has_horizontal and has_vertical
def question_to_sentence(question, q_type, answer, candidate):
    """Rewrite a question into a declarative cloze sentence with [MASK] slots.

    FB: 'Which/what/What block ...?' -> 'block [MASK] ...'.
    FR: strips the interrogative frame ('What is the relation between',
        'where is', ...) and replaces 'and' / 'regarding to' with
        'is [MASK]' — two [MASK]s when *answer* is [4] or [5]
        (presumably the two-token relations; confirm against the
        candidate-answer list).
    CO: replaces the wh-phrase with as many [MASK]s as the tokenized gold
        answer (candidate[answer[0]]) has tokens; uses the module-level
        `tokenizing`.

    Branch ORDER is load-bearing: 'What' is checked before 'what', and
    'exist' before 'what', so do not reorder. Returns the edited string.
    """
    if q_type == 'FB':
        if 'Which' in question:
            question = question.replace('Which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'which' in question:
            question = question.replace('which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'what' in question:
            question = question.replace('what block', 'block [MASK]').replace('(s)','').replace('?','.')
        else:
            question = question.replace('What block', 'block [MASK]').replace('(s)','').replace('?','.')
    elif q_type == 'FR':
        # print('hi',question)
        if 'What' in question:
            question = question.replace('What is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
            # print(question)
        elif 'exist' in question:
            question = question.replace('what relations exist between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'what' in question:
            question = question.replace('what is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'where' in question:
            question = question.replace('where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
        else:
            question = question.replace('Where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
    elif q_type == 'CO':
        # print(question, answer)
        # one [MASK] per gold-answer token (joined by spaces, trailing space trimmed)
        answer = candidate[answer[0]]
        token_answer = tokenizing(answer)
        mask = ('[MASK] '*len(token_answer))[:-1]
        # print('mask', mask)
        if 'What' in question:
            # drop anything after the first '?'
            question = question[:question.find('?')+1]
            if 'What object' in question:
                question = question.replace('What object',mask).replace('?','.')
            elif 'What thing' in question:
                question = question.replace('What thing',mask).replace('?','.')
            elif 'What square' in question:
                question = question.replace('What square',mask).replace('?','.')
            else:
                question = question.replace('What',mask).replace('?','.')
        elif 'what' in question:
            question = question[:question.find('?')+1]
            if 'what object' in question:
                question = question.replace('what object',mask).replace('?','.')
            elif 'what thing' in question:
                question = question.replace('what thing',mask).replace('?','.')
            else:
                question = question.replace('what',mask).replace('?','.')
        elif 'Which' in question:
            if 'Which object' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which object',mask).replace('?','.')
            elif 'Which square' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which square',mask).replace('?','.')
        elif 'which' in question:
            question = question[:question.find('?')+1]
            question = question.replace('which object',mask).replace('?','.')
    return question
def confusion_matrix(truth, predict, correct, TP, TPFP, TPFN):
    """Accumulate exact-match and per-class counts for one prediction.

    *correct* is incremented when the full label lists match exactly;
    TP / TPFP / TPFN are indexed by class id and updated element-wise.
    Returns the updated (correct, TP, TPFP, TPFN).
    """
    if truth == predict:
        correct += 1
    for pos, gold in enumerate(truth):
        guess = predict[pos]
        if gold == guess:
            TP[gold] += 1   # true positive for this class
        TPFP[guess] += 1    # everything predicted as this class
        TPFN[gold] += 1     # everything labelled as this class
    return correct, TP, TPFP, TPFN
def precision(TP, TPFP):
    """Per-class precision TP/(TP+FP), class 0 excluded; undefined entries become 0."""
    return np.nan_to_num(np.true_divide(TP[1:], TPFP[1:]))
def recall(TP, TPFN):
    """Per-class recall TP/(TP+FN), class 0 excluded; undefined entries become 0."""
    return np.nan_to_num(np.true_divide(TP[1:], TPFN[1:]))
def F1_measure(TP, TPFP, TPFN, macro=False):
    """Per-class F1 with class 0 excluded; NaNs become 0.

    Returns the per-class vector, or its plain mean when *macro* is True.
    """
    prec = np.nan_to_num(TP[1:] / TPFP[1:])
    rec = np.nan_to_num(TP[1:] / TPFN[1:])
    f1 = np.nan_to_num((2 * prec * rec) / (prec + rec))
    if macro:
        return np.average(f1)
    return f1
| 24,398 | 38.867647 | 237 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/testold.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from QA.train import question_to_sentence, F1_measure, precision, recall, confusion_matrix
from BERT import tokenizing
# from ALBERT import tokenizing
# from XLNet import tokenizing
def test(model
         , pretrain = "bertbc"
         , baseline = "bert"
         , test_or_dev = "test"
         ,num_sample = None
         , train_num = None
         ,unseen = False
         , qtype = None
         , other = None
         , data_name = False
         , save_data = False
         , device = "cpu"
         , file = None
         , data = None
         , epochs = 0
         ):
    """Evaluate *model* on the dev/test split selected by the flags.

    Mirrors ``train`` minus the backward pass: questions are routed by
    *pretrain* to masked-LM ('mlm'/'mlmr'), span QA ('bertqa'),
    multiple-choice ('bertmc') or boolean ('bertbc') scoring, inside
    ``torch.no_grad()``. Per-sample details go to *file*; summary lines
    also go to stdout.

    NOTE(review): *train_num*, *save_data*, *epochs* and the *data*
    parameter are accepted but never read (the local ``data`` is always
    re-loaded from disk); ``correct_consistency``/``consistency_total``
    are initialised and never used.

    Returns (accuracy,) or, when qtype == 'YN', (accuracy, Macro_F1).
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, boolean_classification, Masked_LM, Masked_LM_random, token_classification, multiple_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    all_q = 0
    correct = 0
    correct_no_distance = 0
    # s_ind = 0
    correct_consistency, consistency_total =0, 0
    # k_fold = 7
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        # pick the evaluation file from unseen / data_name / test_or_dev
        if unseen:
            with open('./dataset/unseen_test.json') as json_file:
                data = json.load(json_file)
        elif data_name == "human":
            #using k_fold
            # if test_or_dev == 'dev':
            #     with open('./dataset/human_train.json') as json_file:
            #         data = json.load(json_file)
            # else:
            with open('./dataset/human_'+test_or_dev+'.json') as json_file:
                data = json.load(json_file)
        elif data_name == "spartun":
            with open('dataset/SpaRTUN/'+test_or_dev+'.json') as json_file:
                data = json.load(json_file)
        else:
            # with open('./dataset/new_test.json') as json_file:
            with open('dataset/'+test_or_dev+'.json') as json_file:
                data = json.load(json_file)
        # per-class counters for Yes/No/DK (indices 0/1/2)
        if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*3), np.array([0]*3), np.array([0]*3)
        for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
            # s_ind+= 1
            # print('sample ',s_ind)
            #use k_fold for cross_evaluation
            # if human and test_or_dev == 'dev' and s_ind not in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            #MLM tasks
            if pretrain == 'mlm':
                # story_txt = "the square is above the cicle. the circle is above the rectangle. the square is above rectangle."
                print('Story:\n',story_txt, file = file)
                tasks_list = ['FB', 'FR', 'CO'] if qtype == 'all' else [qtype]
                for question in story['questions']:
                    q_text, q_emb= '', []
                    q_type = question['q_type']
                    if q_type in tasks_list :
                        # rewrite the question into a cloze sentence with [MASK]s
                        q_text = question_to_sentence(question['question'], question['q_type'], question['answer'], question['candidate_answers'])
                        # skip multi-answer and otherwise unmaskable questions
                        if len(question['answer'])> 1 or (q_type == 'FB' and question['answer'] == []) or (q_type == 'FR' and 7 in question['answer']) or (q_type == 'CO' and (question['answer'] == [2] or question['answer'] == [3])): continue
                        # print(question['q_id'],q_type, question['candidate_answers'], question['answer'][0])
                        answer = question['answer'][0] if q_type == 'FB' else question['candidate_answers'][question['answer'][0]]
                        if q_type == 'CO' and ('which' in answer or 'in' in answer or 'that' in answer): continue
                        all_q += 1
                        print('Question: ',q_text,'\nAnswer: ', answer, file = file)
                        _, output, truth = Masked_LM(model, story_txt, q_text, answer, other, device, file)
                        # print("predict: ", output)
                        print("truth: ", truth, "\npredict: ", output, file = file)
                        # print("truth: ", truth, "\npredict: ", output)
                        # per-question accuracy = fraction of masked tokens recovered
                        correct_temp = 0
                        for i in range(len(output)):
                            if output[i] == truth[i]: correct_temp+=1
                        correct += correct_temp / len(output)
                        # if correct_temp / len(output) == 1:
                        #     correct += 1
                        print('total: ', all_q, ' correct: ', correct, file = file)
                        print('total: ', all_q, ' correct: ', correct)
            elif pretrain == 'mlmr':
                print('Story:\n',story_txt, file = file)
                all_q += 1
                _, output, truth = Masked_LM_random(model, story_txt, s_ind, other, device, file)
                # print("predict: ", output)
                print("truth: ", truth, "\npredict: ", output, file = file)
                # print("truth: ", truth, "\npredict: ", output)
                correct_temp = 0
                for i in range(len(output)):
                    if output[i] == truth[i]: correct_temp+=1
                correct += correct_temp / len(output)
                # if correct_temp / len(output) == 1:
                #     correct += 1
                print('total: ', all_q, ' correct: ', correct, file = file)
                print('total: ', all_q, ' correct: ', correct)
            #QA tasks
            else:
                # each question (span)
                for question in story['questions']:
                    q_text, q_emb= '', []
                    if question['q_type'] in [qtype]: #and len(question['answer']) == 1: #and x == 0:
                        if question['q_type'] in ['FA'] and question['start_end_char'] == []: continue
                        if other == 'noDK' and question['answer'] == ['DK']: continue
                        q_text = question['question']
                        all_q += 1
                        print('Story:\n',story_txt, file = file)
                        print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                        if pretrain == 'bertqa':
                            # map gold char span to token indices in [CLS] q [SEP] story
                            correct_start_end_word = correct_token_id(story_txt, q_text, question['start_end_char'],tokenizing, file)
                            _, output, start, end = question_answering(model, q_text, story_txt,correct_start_end_word, device)
                            print("Correct start end: ", correct_start_end_word, "\npredict: ", output, start, end, "\nstart end:", question['start_end_char'], file = file)
                            if question['answer'][0] == output and (start == correct_start_end_word[0] and end == correct_start_end_word[1]):
                                correct+=1
                            print('total: ', all_q, ' correct: ', correct, file = file)
                            print('total: ', all_q, ' correct: ', correct)
                        elif pretrain == 'bertmc':
                            _, output = multiple_classification(model, q_text, story_txt, question['q_type'], question['candidate_answers'], question['answer'], other, device, dataset = data_name)
                            print("predict: ", output, file = file)
                            # NOTE(review): aliases question['answer'] — .sort()
                            # mutates the loaded dataset dict in place.
                            correct_answer = question['answer']
                            correct_answer.sort()
                            if correct_answer == output :
                                correct+=1
                            print('total: ', all_q, ' correct: ', correct, file = file)
                            print('total: ', all_q, ' correct: ', correct)
                            if qtype == 'YN':
                                if correct_answer == ['Yes']: TPFN[0] += 1
                                elif correct_answer == ['No']: TPFN[1] += 1
                                elif correct_answer == ['DK']: TPFN[2] += 1
                                if output == ['Yes']: TPFP[0] += 1
                                elif output == ['No']: TPFP[1] += 1
                                elif output == ['DK']: TPFP[2] += 1
                                if output == correct_answer == ['Yes']: TP[0] += 1
                                elif output == correct_answer == ['No']: TP[1] += 1
                                elif output == correct_answer == ['DK']: TP[2] += 1
                            if qtype == 'FR' and data_name == "human":
                                # secondary accuracy ignoring labels 4/5
                                # (presumably the distance relations — confirm)
                                # print(correct_answer, output)
                                if 4 in correct_answer: correct_answer.remove(4)
                                if 5 in correct_answer: correct_answer.remove(5)
                                if 4 in output: output.remove(4)
                                if 5 in output: output.remove(5)
                                if correct_answer == output :
                                    correct_no_distance += 1
                                print('total: ', all_q, ' correct_no_dist: ', correct_no_distance, file = file)
                                # print('total: ', all_q, ' correct_no_dist: ', correct_no_distance)
                        elif pretrain == 'bertbc':
                            _, output = boolean_classification(model, q_text, story_txt, question['q_type'], question['candidate_answers'], question['answer'], other, device, dataset = data_name)
                            print("predict: ", output, file = file)
                            correct_answer = question['answer']
                            correct_answer.sort()
                            output.sort()
                            if correct_answer == output :
                                correct+=1
                            print('total: ', all_q, ' correct: ', correct, file = file)
                            # print('total: ', all_q, ' correct: ', correct)
                            # else: print(s_ind, 'wrong')
                            if qtype == 'YN':
                                if correct_answer == ['Yes']: TPFN[0] += 1
                                elif correct_answer == ['No']: TPFN[1] += 1
                                elif correct_answer == ['DK']: TPFN[2] += 1
                                if output == ['Yes']: TPFP[0] += 1
                                elif output == ['No']: TPFP[1] += 1
                                elif output == ['DK']: TPFP[2] += 1
                                if output == correct_answer == ['Yes']: TP[0] += 1
                                elif output == correct_answer == ['No']: TP[1] += 1
                                elif output == correct_answer == ['DK']: TP[2] += 1
                            if qtype == 'FR' and data_name == "human":
                                # print(correct_answer, output)
                                if 4 in correct_answer: correct_answer.remove(4)
                                if 5 in correct_answer: correct_answer.remove(5)
                                if 4 in output: output.remove(4)
                                if 5 in output: output.remove(5)
                                if correct_answer == output :
                                    correct_no_distance += 1
                                print('total: ', all_q, ' correct_no_dist: ', correct_no_distance, file = file)
                                # print('total: ', all_q, ' correct_no_dist: ', correct_no_distance)
        if unseen:
            print(test_or_dev+ ' unseen Final accuracy: ', correct/ all_q)
            print(test_or_dev+' unseen Final accuracy: ', correct/ all_q, file = file)
        else:
            print(test_or_dev, ' Final accuracy: ', correct/ all_q)
            print(test_or_dev, ' Final accuracy: ', correct/ all_q, file = file)
        if data_name == "human" and qtype == 'FR':
            print(test_or_dev, ' accuracy with no distance: ', correct_no_distance/ all_q, file = file)
        if qtype == 'YN':
            print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
            # integer 0/0 divisions yield NaN (with a warning); nan_to_num -> 0
            Precision = np.nan_to_num(TP / TPFP)
            Recall = np.nan_to_num(TP / TPFN)
            F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
            # macro-F1 over Yes/No only (index 2 = DK is excluded)
            Macro_F1 = np.average(F1[:2])
            print(test_or_dev, ' Final Precision: ', Precision, file = file)
            print(test_or_dev, ' Final Recall: ', Recall, file = file)
            print(test_or_dev, ' Final F1: ', F1, file = file)
            print(test_or_dev, ' Final Macro_F1: ', Macro_F1)
            print(test_or_dev, ' Final Macro_F1: ', Macro_F1, file = file)
            return (correct/ all_q, Macro_F1,)
        return (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Convert the first character span in *start_end* to token indices.

    The returned [start, end] pair indexes the concatenated
    [CLS] + question + [SEP] + story token sequence, which is why the
    question length plus two special tokens is added before returning.
    *file* is accepted for signature compatibility but not used.
    """
    story_tokens = tokenizing(story)
    question_tokens = tokenizing(question)
    token_spans = []
    for char_span in start_end[:1]:
        target_char = char_span[0]
        consumed = 0
        seeking_start, span_start = True, None
        for idx, token in enumerate(story_tokens):
            token_len = len(token)
            if target_char > consumed + token_len:
                consumed += token_len
            elif seeking_start:
                span_start, seeking_start = idx, False
                if char_span[1] - 1 <= consumed + token_len:
                    token_spans.append([span_start, idx])
                    break
                target_char = char_span[1] - 1
            else:
                token_spans.append([span_start, idx])
                break
            # Count the inter-token space, except next to punctuation/apostrophes.
            if idx != len(story_tokens) - 1 and story_tokens[idx + 1] not in ('.', ',', "'") and story_tokens[idx] != "'":
                consumed += 1
    shift = len(question_tokens) + 2  # 2 for [CLS] and [SEP]
    token_spans[-1][0] += shift
    token_spans[-1][1] += shift
    return token_spans[0]
def test_babi(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on the bAbI spatial split and return accuracy.

    Loads dataset/babi/<test_or_dev>.json, runs `boolean_classification` on
    every question whose type equals `qtype`, logs per-sample details to
    `file`, and returns correct / total.
    NOTE(review): `pretrain` and `unseen` are accepted but never used here;
    only `boolean_classification` from the baseline module is actually called.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('dataset/babi/'+test_or_dev+'.json') as json_file:
        data = json.load(json_file)
    all_q = 0
    correct = 0
    s_ind = 0
    #random sampling or not
    random.seed(1)
    stories = data['data'] #if other != 'random' else random.sample(data['data'], num_sample)
    # num_sample counts questions; /8 converts to stories (presumably 8
    # questions per story -- TODO confirm against the dataset)
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in stories[:number_samples]:
            # if is_DK_babi(story['story'][0]): continue
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                if question['q_type'] in [qtype]:
                    q_text = question['question']
                    all_q += 1
                    print('Story:\n',story_txt, file = file)
                    print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    _, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                    #print("logit: ", logit, file = file)
                    print("predict: ", output, file = file)
                    correct_answer = question['answer']
                    correct_answer.sort()
                    if correct_answer == output :
                        correct+=1
                        print('total: ', all_q, ' correct: ', correct, file = file)
                        print('total: ', all_q, ' correct: ', correct)
    print(test_or_dev,' Final accuracy: ', correct/ all_q)
    print(test_or_dev,' Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def test_boolq(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on BoolQ yes/no questions and return accuracy.

    dev:  evaluates the first `num_sample` items of dev.json.
    test: evaluates the LAST `num_sample` items of test_1.json.
    Passages are truncated to their first 1000 characters before being fed
    to `boolean_classification`.
    NOTE(review): `pretrain`, `unseen` and `qtype` are accepted but unused.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    start_number = 0
    if test_or_dev == 'dev':
        with open('dataset/boolQ/dev.json') as json_file:
            data = json.load(json_file)
    else:
        with open('dataset/boolQ/test_1.json') as json_file:
            data = json.load(json_file)
        # take the tail of the test split instead of the head
        start_number = len(data['data']) - num_sample
        num_sample = None
    all_q = 0
    correct = 0
    s_ind = 0
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in data['data'][start_number:num_sample]:
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            story_txt = story['passage'][:1000]
            # each question (span)
            # for question in story['questions']:
            # q_text, q_emb= '', []
            # if question['q_type'] in [qtype]:
            q_text = story['question']+'?'
            answer = ['Yes'] if story['answer'] == True else ['No']
            all_q += 1
            print('Story:\n',story_txt, file = file)
            print('question: ', q_text, '\nanswer: ', answer, file = file)
            _, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
            print("predict: ", output, file = file)
            correct_answer = answer
            correct_answer.sort()
            if correct_answer == output :
                correct+=1
                print('total: ', all_q, ' correct: ', correct, file = file)
                print('total: ', all_q, ' correct: ', correct)
    print('Test Final accuracy: ', correct/ all_q)
    print('Test Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def is_DK_babi(story):
    """Return True when a bAbI story mixes a horizontal relation (left/right)
    with a vertical one (above/below).

    Such stories pin objects down along both axes, so the asked relation may
    be undetermined ("DK").  The original four-branch chain was equivalent to
    (left or right) and (above or below) -- the last two branches were
    redundant -- and used the `True if cond else False` anti-idiom; this is
    the simplified, behavior-identical form.  Substring matching on the raw
    story text is intentional.
    """
    has_horizontal = 'left' in story or 'right' in story
    has_vertical = 'above' in story or 'below' in story
    return has_horizontal and has_vertical
| 20,013 | 40.958071 | 241 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/train.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
# from BERT import tokenizing
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
# from ALBERT import tokenizing
# from XLNet import tokenizing
def train(model
        , criterion
        , optimizer
        , pretrain = "bertbc"
        , baseline = "bert"
        , start = 0
        , num_sample = None
        , train_num = None
        , qtype = None
        , data_name = "spartqa"
        , other = None
        , device = "cpu"
        , train_log = False
        , file = None
        , epochs = 0
        , batch_size = None
        ):
    #import baseline
    # if baseline == 'bert':
    #     initialize_tokenizer(baseline)
    #     from BERT import question_answering, tokenizing, boolean_classification, Masked_LM, Masked_LM_random, token_classification, multiple_classification
    # elif baseline == 'xlnet':
    #     from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    # elif baseline == 'albert':
    #     from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    """
    Train `model` for one epoch over the selected dataset and return
    (summed loss, metrics tuple).

    For QA training (pretrain == 'bertbc'/'bertmc') inputs and answers are
    accumulated into a batch; once the stored batch reaches `batch_size` it
    is passed to the model.  For SpaRTUN with qtype == 'all' (FR and YN) the
    question type alternates after every story.
    All question types: spartqa = [CO, FB, FR, YN]; spartun = [FR, YN].

    NOTE(review): the 'mlm'/'mlmr' branches call Masked_LM / Masked_LM_random,
    which are only available via the commented-out BERT import above -- they
    would raise NameError as the module stands; confirm before using those
    modes.  `criterion`, `train_num` and `epochs` are currently unused.
    """
    model.train()
    all_q = 0      # questions processed
    correct = 0    # questions answered correctly
    s_ind = 0
    losses = []    # per-batch loss values, summed at the end
    # k_fold = 7
    task = [qtype]     # remember the originally requested task(s)
    qtypes = ["YN", "FR"]
    # qtypes = ["YN", "FR", "FB", "CO"] if data_name == "spartqa" else ["YN", "FR"]
    qtype = qtypes if qtype == "all" else [qtype]
    if data_name == "human":
        with open('./dataset/human_train.json') as json_file:
            data = json.load(json_file)
        # human-annotated data follows the spartqa format downstream
        data_name = "spartqa"
    elif data_name == "spartun":
        if other == 'simple':
            with open('./dataset/SpaRTUN/train_simple.json') as json_file:
                data = json.load(json_file)
        elif other == "clock":
            with open('./dataset/SpaRTUN/train_clock.json') as json_file:
                data = json.load(json_file)
        else:
            with open('./dataset/SpaRTUN/train.json') as json_file:
                data = json.load(json_file)
        # multi-task training alternates YN <-> FR per story; start with YN
        if task == ["all"]: qtype = ["YN"]
        # if qtype == "all": qtype = ["YN"] # change alternatively
        # else: qtype = [qtype]
    else:
        with open('dataset/train.json') as json_file:
            data = json.load(json_file)
    # confusion counters for YN: index 0 = Yes, 1 = No, 2 = DK
    if 'YN' in qtype: TPFN, TP, TPFP = np.array([0]*3), np.array([0]*3), np.array([0]*3)
    _temp_batch_input = []
    _temp_batch_answer = []
    for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
        # s_ind+= 1
        # print('sample ',s_ind)
        if s_ind< start:continue
        # samples [epochs*k_fold, (epochs*k_fold)+k_fold] considered as dev
        # if human and s_ind in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
        story_txt = story['story'][0]
        if train_log:
            print('sample ',s_ind, file = file)
            print('Story:\n',story_txt, file = file)
        x = 0
        #MLM tasks
        #TODO add batch, add spartun. Now it is set on spartqa
        if pretrain == 'mlm':
            if data_name == "spartqa": tasks_list = ['FB', 'FR', 'CO'] if qtype == 'all' else [qtype]
            else: tasks_list = ['FR', 'YN'] if qtype == 'all' else [qtype]
            # story_txt = 'The circle is above the triangle and the blue square. the blue square is below the circle.'
            if train_log: print('Story:\n',story_txt, file = file)
            for question in story['questions']:
                q_text, q_emb= '', []
                q_type = question['q_type']
                model.zero_grad()
                if q_type in tasks_list :
                    # rewrite the question as a cloze sentence with [MASK] slots
                    q_text = question_to_sentence(question['question'], question['q_type'], question['answer'], question['candidate_answers'])
                    # skip questions the cloze conversion cannot represent
                    if len(question['answer'])> 1 or (q_type == 'FB' and question['answer'] == []) or (q_type == 'FR' and 7 in question['answer']) or (q_type == 'CO' and (question['answer'] == [2] or question['answer'] == [3])): continue
                    answer = question['answer'][0] if q_type == 'FB' else question['candidate_answers'][question['answer'][0]]
                    if q_type == 'CO' and ('which' in answer or 'in' in answer or 'that' in answer): continue
                    all_q += 1
                    if train_log: print('Question: ',q_text,'\nAnswer: ', answer, file = file)
                    loss, output, truth = Masked_LM(model, story_txt, q_text, answer, other, device, file)
                    # print("predict: ", output)
                    if train_log: print("truth: ", truth, "\npredict: ", output, file = file)
                    # print("truth: ", truth, "\npredict: ", output)
                    # accuracy = fraction of masked tokens predicted correctly
                    correct_temp = 0
                    for i in range(len(output)):
                        if output[i] == truth[i]: correct_temp+=1
                    correct += correct_temp / len(output)
                    # if correct_temp / len(output) == 1:
                    #     correct += 1
                    if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                    print('total: ', all_q, ' correct: ', correct)
                    print("Loss is ", loss.item(), file = file)
                    losses += [loss.item()]
                    loss.backward()
                    optimizer.step()
        #TODO add batch, add spartun. Now it is set on spartqa
        elif pretrain == 'mlmr':
            model.zero_grad()
            # story_txt = 'The circle is above the triangle and the blue square. the blue square is below the circle.'
            if train_log: print('Story:\n',story_txt, file = file)
            all_q += 1
            loss, output, truth = Masked_LM_random(model, story_txt, s_ind+1, other, device, file)
            # print("predict: ", output)
            if train_log: print("truth: ", truth, "\npredict: ", output, file = file)
            # print("truth: ", truth, "\npredict: ", output)
            # accuracy = fraction of masked tokens predicted correctly
            correct_temp = 0
            for i in range(len(output)):
                if output[i] == truth[i]: correct_temp+=1
            correct += correct_temp / len(output)
            # if correct_temp / len(output) == 1:
            #     correct += 1
            if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
            print('total: ', all_q, ' correct: ', correct)
            print("Loss is ", loss.item(), file = file)
            losses += [loss.item()]
            loss.backward()
            optimizer.step()
        #QA tasks
        else:
            """
            based on the batch_size:
            """
            # print('Story:\n',story_txt, file = file)
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                model.zero_grad()
                if question['q_type'] in qtype :
                    if other == 'noDK' and question['answer'] == ['DK']: continue
                    x+=1
                    q_text = question['question']
                    if train_log: print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    all_q += 1
                    """
                    add input and answer to the batch
                    if len(batch) == batch_size pass to the model
                    else: continue
                    """
                    #TODO remove q_text
                    _temp_batch_input += [concate_input_components([q_text, story_txt], baseline)]
                    _temp_batch_answer += [question['answer']]
                    if len(_temp_batch_input) < batch_size : continue
                    #if batch is full it comes here
                    if pretrain == 'bertmc':
                        loss, output = multiple_classification(model, _temp_batch_input, question['q_type'], question['candidate_answers'], _temp_batch_answer, other = other, device = device, dataset = data_name)
                    elif pretrain == 'bertbc':
                        loss, output = boolean_classification(model, _temp_batch_input, question['q_type'], question['candidate_answers'], _temp_batch_answer, other = other, device = device, dataset = data_name, multi_task = True if task == ["all"] else False)
                    if train_log: print("predict: ", output, file = file)
                    # score every (gold, prediction) pair of the flushed batch
                    for ind, correct_answer in enumerate(_temp_batch_answer):
                        # correct_answer = question['answer']
                        if check_answer_equality(correct_answer, output[ind]) :
                            correct+=1
                            if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                        # print('total: ', all_q, ' correct: ', correct)
                        if question['q_type'] in ['YN']:
                            if correct_answer == ['Yes']: TPFN[0] += 1
                            elif correct_answer == ['No']: TPFN[1] += 1
                            elif correct_answer == ['DK']: TPFN[2] += 1
                            if output[ind] == ['Yes']: TPFP[0] += 1
                            elif output[ind] == ['No']: TPFP[1] += 1
                            elif output[ind] == ['DK']: TPFP[2] += 1
                            if output[ind] == correct_answer == ['Yes']: TP[0] += 1
                            elif output[ind] == correct_answer == ['No']: TP[1] += 1
                            elif output[ind] == correct_answer == ['DK']: TP[2] += 1
                    if train_log: print("Loss is ", loss.item(), file = file)
                    losses += [loss.item()]
                    loss.backward()
                    optimizer.step()
                    _temp_batch_answer = []
                    _temp_batch_input = []
        # multi-task SpaRTUN training: flip the active question type per story
        if task == ["all"] and data_name == "spartun":
            if qtype == ["YN"]: qtype = ["FR"]
            else: qtype = ["YN"]
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    if "YN" in task or "all" in task :
        print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        # macro-average over Yes/No only; the DK class is excluded
        Macro_F1 = np.average(F1[:2])
        print('Train Final Precision: ', Precision, file = file)
        print('Train Final Recall: ', Recall, file = file)
        print('Train Final F1: ', F1, file = file)
        print('Train Final Macro_F1: ', Macro_F1)
        print('Train Final Macro_F1: ', Macro_F1, file = file)
        return losses, (correct/ all_q, Macro_F1)
    return losses, (correct/ all_q,)
def check_answer_equality(correct_answer, prediction):
    """Return True when the two answer lists match, ignoring case and order.

    String entries are lower-cased before comparison; non-string entries
    (e.g. integer label ids) are compared as-is.  The inputs themselves are
    not mutated.
    """
    def _canon(answers):
        canon = [item.lower() if type(item) == str else item for item in answers]
        canon.sort()
        return canon

    return _canon(correct_answer) == _canon(prediction)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Translate a character-level answer span into token indices.

    Scans the tokenized story, tracking how many characters have been
    consumed (one extra per inter-token space, except around punctuation
    and apostrophes), to locate the tokens covering the span's start and
    end characters.  The result is offset by len(question tokens) + 2 to
    account for [CLS] and [SEP] ahead of the story in the model input.
    Only the first (start, end) pair in `start_end` is handled.
    """
    story_tokens = tokenizing(story)
    question_tokens = tokenizing(question)
    token_spans = []
    for char_span in start_end[:1]:
        target = char_span[0]      # character offset being searched for
        consumed = 0               # story characters covered so far
        need_start = True
        span_start = None
        for idx, tok in enumerate(story_tokens):
            if target > consumed + len(tok):
                consumed += len(tok)
            elif need_start:
                span_start, need_start = idx, False
                if char_span[1] - 1 <= consumed + len(tok):
                    token_spans += [[span_start, idx]]
                    break
                # start located; continue scanning for the end character
                target = char_span[1] - 1
            else:
                token_spans += [[span_start, idx]]
                break
            # count the separating space unless punctuation is glued on
            if (idx != len(story_tokens) - 1
                    and story_tokens[idx + 1] != '.'
                    and story_tokens[idx + 1] != ','
                    and story_tokens[idx + 1] != "'"
                    and story_tokens[idx] != "'"):
                consumed += 1
    # offset into model-input coordinates ([CLS] question [SEP] story)
    token_spans[-1][0] += len(question_tokens) + 2
    token_spans[-1][1] += len(question_tokens) + 2
    return token_spans[0]
def concate_input_components(all_texts, baseline):
    """Join input segments (e.g. question, story) with the LM's separator.

    For RoBERTa the separator between segments is " </s> <s> " (close the
    previous segment, open the next); for BERT-style models it is " [SEP] ".
    Returns "" for an empty list and the lone text for a single segment.

    Fix: the original decided whether to append a separator by comparing each
    segment against `all_texts[-1]` by VALUE, so a duplicate segment (or a
    middle segment equal to the last one) silently lost its separator.
    Positional joining avoids that while producing identical output for
    distinct segments.
    """
    if baseline == "roberta":
        return " </s> <s> ".join(all_texts)
    return " [SEP] ".join(all_texts)
# def train_babi(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
# #import baseline
# if baseline == 'bert':
# from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'xlnet':
# from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'albert':
# from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
# with open('./dataset/babi/train.json') as json_file:
# data = json.load(json_file)
# number_samples = int((num_sample/8)+1) if num_sample else num_sample
# #random sampling or not
# random.seed(1)
# stories = random.sample(data['data'], number_samples) if num_sample in [100, 500, 1000, 2000, 5000] else data['data']
# model.train()
# all_q = 0
# correct = 0
# s_ind = 0
# losses = []
# for story in stories[:number_samples]:
# # if is_DK_babi(story['story'][0]): continue
# s_ind+= 1
# print('sample ',s_ind)
# print('sample ',s_ind, file = file)
# story_txt = story['story'][0]
# x = 0
# # each question (span)
# for question in story['questions']:
# q_text, q_emb= '', []
# if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
# x+=1
# q_text = question['question']
# model.zero_grad()
# all_q += 1
# print('Story:\n',story_txt, file = file)
# print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
# loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
# #print("logit: ", logit , file = file)
# print("predict: ", output, file = file)
# correct_answer = question['answer']
# correct_answer.sort()
# if correct_answer == output :
# correct+=1
# print('total: ', all_q, ' correct: ', correct, file = file)
# print('total: ', all_q, ' correct: ', correct)
# print("Loss is ", loss.item(), file = file)
# losses += [loss.item()]
# loss.backward()
# optimizer.step()
# losses = np.sum(losses)
# print('Train Final accuracy: ', correct/ all_q)
# print('Train Final accuracy: ', correct/ all_q, file = file)
# print('Losses: ', losses)
# print('Losses: ', losses, file = file)
# return losses, correct/ all_q
# def train_boolq(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
# #import baseline
# if baseline == 'bert':
# from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'xlnet':
# from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'albert':
# from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
# with open('./dataset/boolQ/train.json') as json_file:
# data = json.load(json_file)
# model.train()
# all_q = 0
# correct = 0
# s_ind = 0
# losses = []
# x = 0
# for story in data['data'][:num_sample]:
# s_ind+= 1
# print('sample ',s_ind)
# print('sample ',s_ind, file = file)
# story_txt = story['passage'][:1000]
# # print(story_txt)
# # each question (span)
# # for question in story['questions']:
# # q_text, q_emb= '', []
# # if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
# x+=1
# q_text = story['question']
# answer = ['Yes'] if story['answer'] == True else ['No']
# model.zero_grad()
# all_q += 1
# print('Story:\n',story_txt, file = file)
# print('question: ', q_text, '\nanswer: ',answer, file = file)
# loss, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
# print("predict: ", output, file = file)
# correct_answer = answer
# correct_answer.sort()
# if correct_answer == output :
# correct+=1
# print('total: ', all_q, ' correct: ', correct, file = file)
# print('total: ', all_q, ' correct: ', correct)
# print("Loss is ", loss.item(), file = file)
# losses += [loss.item()]
# loss.backward()
# optimizer.step()
# losses = np.sum(losses)
# print('Train Final accuracy: ', correct/ all_q)
# print('Train Final accuracy: ', correct/ all_q, file = file)
# print('Losses: ', losses)
# print('Losses: ', losses, file = file)
# return losses, correct/ all_q
def is_DK_babi(story):
    """Return True when a bAbI story mixes a horizontal relation (left/right)
    with a vertical one (above/below).

    Such stories pin objects down along both axes, so the asked relation may
    be undetermined ("DK").  The original four-branch chain was equivalent to
    (left or right) and (above or below) -- its last two branches were
    redundant -- and used the `True if cond else False` anti-idiom; this is
    the simplified, behavior-identical form.  Substring matching on the raw
    story text is intentional.
    """
    has_horizontal = 'left' in story or 'right' in story
    has_vertical = 'above' in story or 'below' in story
    return has_horizontal and has_vertical
def question_to_sentence(question, q_type, answer, candidate):
    """Rewrite a QA question as a declarative cloze sentence with [MASK] slots
    for masked-language-model pre-training.

    FB: "Which/What block ...?"  -> "block [MASK] ... ."
    FR: the relation phrase ("and" / "regarding to") becomes "is [MASK]",
        or "is [MASK] [MASK]" when answer is [4] or [5] -- presumably the
        two-token relation labels; confirm against the FR label set.
    CO: the interrogative phrase is replaced by one [MASK] per token of the
        gold candidate answer (uses the module-level `tokenizing`).
    """
    if q_type == 'FB':
        if 'Which' in question:
            question = question.replace('Which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'which' in question:
            question = question.replace('which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'what' in question:
            question = question.replace('what block', 'block [MASK]').replace('(s)','').replace('?','.')
        else:
            question = question.replace('What block', 'block [MASK]').replace('(s)','').replace('?','.')
    elif q_type == 'FR':
        # print('hi',question)
        if 'What' in question:
            question = question.replace('What is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
            # print(question)
        elif 'exist' in question:
            question = question.replace('what relations exist between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'what' in question:
            question = question.replace('what is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'where' in question:
            question = question.replace('where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
        else:
            question = question.replace('Where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
    elif q_type == 'CO':
        # print(question, answer)
        # build one [MASK] per token of the gold answer text
        answer = candidate[answer[0]]
        token_answer = tokenizing(answer)
        mask = ('[MASK] '*len(token_answer))[:-1]
        # print('mask', mask)
        if 'What' in question:
            # drop any trailing text after the question mark first
            question = question[:question.find('?')+1]
            if 'What object' in question:
                question = question.replace('What object',mask).replace('?','.')
            elif 'What thing' in question:
                question = question.replace('What thing',mask).replace('?','.')
            elif 'What square' in question:
                question = question.replace('What square',mask).replace('?','.')
            else:
                question = question.replace('What',mask).replace('?','.')
        elif 'what' in question:
            question = question[:question.find('?')+1]
            if 'what object' in question:
                question = question.replace('what object',mask).replace('?','.')
            elif 'what thing' in question:
                question = question.replace('what thing',mask).replace('?','.')
            else:
                question = question.replace('what',mask).replace('?','.')
        elif 'Which' in question:
            if 'Which object' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which object',mask).replace('?','.')
            elif 'Which square' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which square',mask).replace('?','.')
            elif 'which' in question:
                question = question[:question.find('?')+1]
                question = question.replace('which object',mask).replace('?','.')
    return question
def confusion_matrix(truth, predict, correct, TP, TPFP, TPFN):
    """Accumulate confusion counts for one (truth, predict) label sequence.

    `correct` is incremented when the whole sequence matches exactly.
    For each position: TP[label] grows on a hit, TPFP[pred] counts every
    prediction of that class (TP+FP), and TPFN[gold] counts every gold
    occurrence (TP+FN).  The counter arrays are mutated in place and also
    returned along with the updated `correct`.
    """
    if truth == predict:
        correct += 1
    for position, gold in enumerate(truth):
        pred = predict[position]
        if gold == pred:
            TP[gold] += 1
        TPFP[pred] += 1
        TPFN[gold] += 1
    return correct, TP, TPFP, TPFN
def precision(TP, TPFP):
    """Per-class precision TP/(TP+FP) for classes 1..N (class 0 excluded);
    0/0 divisions yield NaN which nan_to_num maps to 0."""
    per_class = TP[1:] / TPFP[1:]
    return np.nan_to_num(per_class)
def recall(TP, TPFN):
    """Per-class recall TP/(TP+FN) for classes 1..N (class 0 excluded);
    0/0 divisions yield NaN which nan_to_num maps to 0."""
    per_class = TP[1:] / TPFN[1:]
    return np.nan_to_num(per_class)
def F1_measure(TP, TPFP, TPFN, macro=False):
    """Per-class F1 over classes 1..N (class 0 excluded).

    Returns the macro average when `macro` is true, otherwise the per-class
    array.  All 0/0 divisions are mapped to 0 via nan_to_num.
    """
    prec = np.nan_to_num(TP[1:] / TPFP[1:])
    rec = np.nan_to_num(TP[1:] / TPFN[1:])
    f1 = np.nan_to_num(2 * prec * rec / (prec + rec))
    if macro:
        return np.average(f1)
    return f1
| 24,796 | 37.444961 | 260 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/sprlqa/test.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
from QA.train import check_answer_equality, concate_input_components
# from ALBERT import tokenizing
# from XLNet import tokenizing
def test(model
        , pretrain = "bertbc"
        , baseline = "bert"
        , test_or_dev = "test"
        , num_sample = None
        , train_num = None
        , unseen = False
        , qtype = "YN"
        , other = None
        , data_name = "sprlqa"
        , save_data = False
        , device = "cpu"
        , file = None
        , epochs = 0
        ):
    """Evaluate `model` on the SpRL-QA <test_or_dev> split.

    Each question's context is the story's leading sentences up to its
    "num_1st_context_sentences" anchor.  Returns (accuracy,) or, for
    qtype == 'YN', (accuracy, Macro_F1) over the Yes/No classes.
    NOTE(review): `train_num`, `unseen`, `save_data`, `epochs`, and the
    correct_no_distance / consistency counters are unused leftovers here.
    """
    # initialize_tokenizer(baseline)
    all_q = 0
    correct = 0
    correct_no_distance = 0
    # s_ind = 0
    correct_consistency, consistency_total =0, 0
    # k_fold = 7
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        with open('dataset/sprlqa/'+test_or_dev+'_sprlqa.json') as json_file:
            data = json.load(json_file)
        # binary confusion counters: index 0 = Yes, 1 = No
        if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*2), np.array([0]*2), np.array([0]*2)
        for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
            print('sample ',s_ind, file = file)
            # story_txt = story['story'][0]
            #QA tasks
            # each question (span)
            for question in story['questions']:
                # q_text, q_emb= '', []
                # q_text = question['question']
                # context = leading sentences up to the question's anchor sentence
                story_txt = ' '.join(story['story'][:question["num_1st_context_sentences"]+1])
                all_q += 1
                print('Story:\n',story_txt, file = file)
                print('question: ', question['question'], '\nanswer: ',question['answer'], file = file)
                input_text = concate_input_components([question['question'], story_txt], baseline)
                if pretrain == 'bertmc':
                    _, output = multiple_classification(model, input_text, qtype, question['candidate_answers'], [], other=other, device = device, dataset = data_name)
                elif pretrain == 'bertbc':
                    _, output = boolean_classification(model, input_text, qtype, question['candidate_answers'], [], other=other, device = device, dataset = data_name)
                print("predict: ", output, file = file)
                correct_answer = question['answer']
                if check_answer_equality(correct_answer, output[0]):
                    correct+=1
                    print('total: ', all_q, ' correct: ', correct, file = file)
                # print('total: ', all_q, ' correct: ', correct)
                # else: print(s_ind, 'wrong')
                if qtype == 'YN':
                    if correct_answer == ['Yes']: TPFN[0] += 1
                    elif correct_answer == ['No']: TPFN[1] += 1
                    # elif correct_answer == ['DK']: TPFN[2] += 1
                    if output[0] == ['Yes']: TPFP[0] += 1
                    elif output[0] == ['No']: TPFP[1] += 1
                    # elif output == ['DK']: TPFP[2] += 1
                    if output[0] == correct_answer == ['Yes']: TP[0] += 1
                    elif output[0] == correct_answer == ['No']: TP[1] += 1
                    # elif output == correct_answer == ['DK']: TP[2] += 1
    print(test_or_dev, ' Final accuracy: ', correct/ all_q)
    print(test_or_dev, ' Final accuracy: ', correct/ all_q, file = file)
    if qtype == 'YN':
        print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        Macro_F1 = np.average(F1[:2])
        print(test_or_dev, ' Final Precision: ', Precision, file = file)
        print(test_or_dev, ' Final Recall: ', Recall, file = file)
        print(test_or_dev, ' Final F1: ', F1, file = file)
        print(test_or_dev, ' Final Macro_F1: ', Macro_F1)
        print(test_or_dev, ' Final Macro_F1: ', Macro_F1, file = file)
        return (correct/ all_q, Macro_F1,)
    return (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span onto token indices in the model input.

    Walks the tokenized story while accumulating consumed characters (adding
    one for the space that separates most tokens; punctuation and apostrophes
    are treated as glued to their neighbour) until the span's start and end
    characters are covered.  The token span is then shifted by
    len(question tokens) + 2 for the [CLS] and [SEP] tokens that precede the
    story in the model input.  Only the first (start, end) pair is processed.
    """
    story_tokens = tokenizing(story)
    question_tokens = tokenizing(question)
    token_spans = []
    for char_span in start_end[:1]:
        target = char_span[0]      # char offset we are scanning for (start, then end)
        consumed = 0               # story characters accounted for so far
        looking_for_start = True
        span_start = None
        for token_idx, token in enumerate(story_tokens):
            if target > consumed + len(token):
                consumed += len(token)
            elif looking_for_start:
                span_start, looking_for_start = token_idx, False
                if char_span[1] - 1 <= consumed + len(token):
                    token_spans += [[span_start, token_idx]]
                    break
                # start found; keep scanning for the end character
                target = char_span[1] - 1
            else:
                token_spans += [[span_start, token_idx]]
                break
            # plus one for the space, unless the neighbour is glued punctuation
            if (token_idx != len(story_tokens) - 1
                    and story_tokens[token_idx + 1] != '.'
                    and story_tokens[token_idx + 1] != ','
                    and story_tokens[token_idx + 1] != "'"
                    and story_tokens[token_idx] != "'"):
                consumed += 1
    # shift from story-token space into model-input space ([CLS] question [SEP] story)
    token_spans[-1][0] += len(question_tokens) + 2
    token_spans[-1][1] += len(question_tokens) + 2
    return token_spans[0]
def test_babi(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on the bAbI spatial split and return accuracy.

    Loads dataset/babi/<test_or_dev>.json, runs `boolean_classification` on
    every question whose type equals `qtype`, logs per-sample details to
    `file`, and returns correct / total.
    NOTE(review): `pretrain` and `unseen` are accepted but never used here;
    only `boolean_classification` from the baseline module is actually called.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('dataset/babi/'+test_or_dev+'.json') as json_file:
        data = json.load(json_file)
    all_q = 0
    correct = 0
    s_ind = 0
    #random sampling or not
    random.seed(1)
    stories = data['data'] #if other != 'random' else random.sample(data['data'], num_sample)
    # num_sample counts questions; /8 converts to stories (presumably 8
    # questions per story -- TODO confirm against the dataset)
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in stories[:number_samples]:
            # if is_DK_babi(story['story'][0]): continue
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                if question['q_type'] in [qtype]:
                    q_text = question['question']
                    all_q += 1
                    print('Story:\n',story_txt, file = file)
                    print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    _, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                    #print("logit: ", logit, file = file)
                    print("predict: ", output, file = file)
                    correct_answer = question['answer']
                    correct_answer.sort()
                    if correct_answer == output :
                        correct+=1
                        print('total: ', all_q, ' correct: ', correct, file = file)
                        print('total: ', all_q, ' correct: ', correct)
    print(test_or_dev,' Final accuracy: ', correct/ all_q)
    print(test_or_dev,' Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def test_boolq(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on BoolQ yes/no questions and return accuracy.

    dev:  evaluates the first `num_sample` items of dev.json.
    test: evaluates the LAST `num_sample` items of test_1.json.
    Passages are truncated to their first 1000 characters before being fed
    to `boolean_classification`.
    NOTE(review): `pretrain`, `unseen` and `qtype` are accepted but unused.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    start_number = 0
    if test_or_dev == 'dev':
        with open('dataset/boolQ/dev.json') as json_file:
            data = json.load(json_file)
    else:
        with open('dataset/boolQ/test_1.json') as json_file:
            data = json.load(json_file)
        # take the tail of the test split instead of the head
        start_number = len(data['data']) - num_sample
        num_sample = None
    all_q = 0
    correct = 0
    s_ind = 0
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in data['data'][start_number:num_sample]:
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            story_txt = story['passage'][:1000]
            # each question (span)
            # for question in story['questions']:
            # q_text, q_emb= '', []
            # if question['q_type'] in [qtype]:
            q_text = story['question']+'?'
            answer = ['Yes'] if story['answer'] == True else ['No']
            all_q += 1
            print('Story:\n',story_txt, file = file)
            print('question: ', q_text, '\nanswer: ', answer, file = file)
            _, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
            print("predict: ", output, file = file)
            correct_answer = answer
            correct_answer.sort()
            if correct_answer == output :
                correct+=1
                print('total: ', all_q, ' correct: ', correct, file = file)
                print('total: ', all_q, ' correct: ', correct)
    print('Test Final accuracy: ', correct/ all_q)
    print('Test Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def is_DK_babi(story):
    """Return True when *story* mixes a horizontal relation (left/right)
    with a vertical one (above/below).

    The original four-way branch was pairwise-redundant: every branch
    reduces to "some horizontal term AND some vertical term", so the
    whole test collapses to a single conjunction.
    """
    horizontal = 'left' in story or 'right' in story
    vertical = 'above' in story or 'below' in story
    return horizontal and vertical
| 11,190 | 34.86859 | 215 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/sprlqa/train.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
from QA.train import check_answer_equality, concate_input_components
def train(model
          , criterion
          , optimizer
          , pretrain = "bertbc"
          , baseline = "bert"
          , start = 0
          , num_sample = None
          , train_num = None
          , qtype = 'YN'
          , data_name = "sprlqa"
          , other = None
          , device = "cpu"
          , train_log = False
          , file = None
          , epochs = 0
          , batch_size = None
          ):
    """Run one training pass over the SpRL-QA train split.

    Questions are accumulated into mini-batches of ``batch_size`` inputs;
    each full batch is scored ('bertmc' -> multiple_classification,
    'bertbc' -> boolean_classification), the loss is back-propagated and
    the optimizer stepped.  Returns ``(summed_loss, metrics)`` where
    metrics is ``(accuracy, macro_F1)`` for ``qtype == 'YN'`` and
    ``(accuracy,)`` otherwise.

    NOTE(review): a trailing partial batch (fewer than ``batch_size``
    questions left at the end) is never trained on — confirm intentional.
    NOTE(review): ``criterion``, ``train_num`` and ``epochs`` are not read
    in the active code below.
    """
    # initialize_tokenizer(baseline)
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    # k_fold = 7
    with open('dataset/sprlqa/train_sprlqa.json') as json_file:
        data = json.load(json_file)
    # Per-class tallies for Yes/No questions: index 0 = 'Yes', 1 = 'No'.
    if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*2), np.array([0]*2), np.array([0]*2)
    _temp_batch_input = []
    _temp_batch_answer = []
    for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
        # s_ind+= 1
        # print('sample ',s_ind)
        if s_ind< start:continue  # skip stories before the requested offset
        # samples [epochs*k_fold, (epochs*k_fold)+k_fold] considered as dev
        # if human and s_ind in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
        if train_log: print('sample ',s_ind, file = file)
        # story_txt = ' '.join(story['story'])
        #QA tasks
        # each question
        for question in story['questions']:
            q_text, q_emb= '', []
            # if question['q_type'] in ['FA'] and question['start_end_char'] == []: continue
            # if other == 'noDK' and question['answer'] == ['DK']: continue
            # q_text = question['question']
            # Context is the first num_1st_context_sentences+1 story sentences.
            story_txt = ' '.join(story['story'][:question["num_1st_context_sentences"]+1])
            model.zero_grad()
            all_q += 1
            if train_log:
                print('question: ', question['question'], '\nanswer: ',question['answer'], file = file)
                print('Story:\n',story_txt, file = file)
            _temp_batch_input += [concate_input_components([question['question'], story_txt], baseline)]
            _temp_batch_answer += [question['answer']]
            # Keep accumulating until a full batch is available.
            if len(_temp_batch_input) < batch_size : continue
            if pretrain == 'bertmc':
                loss, output = multiple_classification(model, _temp_batch_input, qtype, question['candidate_answers'],_temp_batch_answer, other = other, device = device, dataset = data_name)
            elif pretrain == 'bertbc':
                loss, output = boolean_classification(model, _temp_batch_input, qtype, question['candidate_answers'], _temp_batch_answer, other = other, device = device, dataset = data_name)
            if train_log: print("predict: ", output, file = file)
            # Score every item of the batch against its gold answer.
            for ind, correct_answer in enumerate(_temp_batch_answer):
                if check_answer_equality(correct_answer, output[ind]) :
                    correct+=1
                    if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                # print('total: ', all_q, ' correct: ', correct)
                if qtype == 'YN':
                    if correct_answer == ['Yes']: TPFN[0] += 1
                    elif correct_answer == ['No']: TPFN[1] += 1
                    # elif correct_answer == ['DK']: TPFN[2] += 1
                    if output[ind] == ['Yes']: TPFP[0] += 1
                    elif output[ind] == ['No']: TPFP[1] += 1
                    # elif output == ['DK']: TPFP[2] += 1
                    if output[ind] == correct_answer == ['Yes']: TP[0] += 1
                    elif output[ind] == correct_answer == ['No']: TP[1] += 1
                    # elif output == correct_answer == ['DK']: TP[2] += 1
            if train_log: print("Loss is ", loss.item(), file = file)
            losses += [loss.item()]
            loss.backward()
            optimizer.step()
            _temp_batch_answer = []
            _temp_batch_input = []
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    if qtype == 'YN':
        print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        Macro_F1 = np.average(F1[:2])
        print('Train Final Precision: ', Precision, file = file)
        print('Train Final Recall: ', Recall, file = file)
        print('Train Final F1: ', F1, file = file)
        print('Train Final Macro_F1: ', Macro_F1)
        print('Train Final Macro_F1: ', Macro_F1, file = file)
        return losses, (correct/ all_q, Macro_F1,)
    return losses, (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span onto token indices.

    Only the first span in ``start_end`` (a list of [start_char, end_char]
    pairs) is converted.  The returned [start_tok, end_tok] pair is offset
    by ``len(question tokens) + 2`` so it indexes into a
    ``[CLS] question [SEP] story`` input sequence.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    #finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:
        temp = s_e[0]  # character offset currently being located (start first, then end)
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            if temp > sum_char + len(word) : sum_char += len_word;
            else:
                if is_start:
                    start, is_start = ind , False
                    # If the end character also falls inside this token,
                    # the whole span is a single token.
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1;
                else: start_end_token+=[[start, ind]]; break
            # NOTE(review): heuristic whitespace accounting — a space is
            # assumed between tokens except around '.', ',' and apostrophes;
            # confirm it matches the tokenizer's detokenization rules.
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
    start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
    start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def train_babi(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
    """Fine-tune the boolean classifier on the bAbI train split.

    Only questions whose ``q_type`` equals ``qtype`` are used; each one is
    classified, back-propagated and optimizer-stepped individually.
    Returns ``(summed_loss, accuracy)``.

    NOTE(review): ``criterion``, ``pretrain`` and ``train24k`` are accepted
    but never read in the active code below.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('./dataset/babi/train.json') as json_file:
        data = json.load(json_file)
    # NOTE(review): num_sample is divided by 8 — presumably ~8 questions per
    # story in this dataset; confirm against the data file.
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    #random sampling or not
    random.seed(1)
    stories = random.sample(data['data'], number_samples) if num_sample in [100, 500, 1000, 2000, 5000] else data['data']
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    for story in stories[:number_samples]:
        # if is_DK_babi(story['story'][0]): continue
        s_ind+= 1
        print('sample ',s_ind)
        print('sample ',s_ind, file = file)
        story_txt = story['story'][0]
        x = 0
        # each question (span)
        for question in story['questions']:
            q_text, q_emb= '', []
            if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
                x+=1
                q_text = question['question']
                model.zero_grad()
                all_q += 1
                print('Story:\n',story_txt, file = file)
                print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                #print("logit: ", logit , file = file)
                print("predict: ", output, file = file)
                correct_answer = question['answer']
                correct_answer.sort()
                if correct_answer == output :
                    correct+=1
                    print('total: ', all_q, ' correct: ', correct, file = file)
                    print('total: ', all_q, ' correct: ', correct)
                print("Loss is ", loss.item(), file = file)
                losses += [loss.item()]
                loss.backward()
                optimizer.step()
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, correct/ all_q
def train_boolq(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
    """Fine-tune the yes/no classifier on the BoolQ train split.

    One gradient step per passage/question pair.  Returns
    ``(summed_loss, accuracy)``.

    NOTE(review): ``criterion``, ``pretrain``, ``train24k`` and ``qtype``
    are accepted but never read in the active code below.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('./dataset/boolQ/train.json') as json_file:
        data = json.load(json_file)
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    x = 0
    for story in data['data'][:num_sample]:
        s_ind+= 1
        print('sample ',s_ind)
        print('sample ',s_ind, file = file)
        # Passages are truncated to their first 1000 characters.
        story_txt = story['passage'][:1000]
        # print(story_txt)
        # each question (span)
        # for question in story['questions']:
        # q_text, q_emb= '', []
        # if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
        x+=1
        q_text = story['question']
        answer = ['Yes'] if story['answer'] == True else ['No']
        model.zero_grad()
        all_q += 1
        print('Story:\n',story_txt, file = file)
        print('question: ', q_text, '\nanswer: ',answer, file = file)
        loss, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
        print("predict: ", output, file = file)
        # NOTE(review): correct_answer aliases `answer`; sort() mutates it.
        correct_answer = answer
        correct_answer.sort()
        if correct_answer == output :
            correct+=1
            print('total: ', all_q, ' correct: ', correct, file = file)
            print('total: ', all_q, ' correct: ', correct)
        print("Loss is ", loss.item(), file = file)
        losses += [loss.item()]
        loss.backward()
        optimizer.step()
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, correct/ all_q
def is_DK_babi(story):
    """Return True when *story* mixes a horizontal relation (left/right)
    with a vertical one (above/below).

    The original four-way branch was pairwise-redundant: every branch
    reduces to "some horizontal term AND some vertical term", so the
    whole test collapses to a single conjunction.
    """
    horizontal = 'left' in story or 'right' in story
    vertical = 'above' in story or 'below' in story
    return horizontal and vertical
def question_to_sentence(question, q_type, answer, candidate):
    """Rewrite a question as a masked declarative sentence for MLM-style use.

    FB: "which block ..." -> "block [MASK] ...".
    FR: the relation phrase is replaced by "[MASK]" (or "[MASK] [MASK]" when
        ``answer`` is [4] or [5] — NOTE(review): presumably two-token
        relation labels; confirm against the label inventory).
    CO: the candidate answer ``candidate[answer[0]]`` is replaced by as many
        [MASK] tokens as its tokenization contains.
    """
    if q_type == 'FB':
        if 'Which' in question:
            question = question.replace('Which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'which' in question:
            question = question.replace('which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'what' in question:
            question = question.replace('what block', 'block [MASK]').replace('(s)','').replace('?','.')
        else:
            question = question.replace('What block', 'block [MASK]').replace('(s)','').replace('?','.')
    elif q_type == 'FR':
        # print('hi',question)
        if 'What' in question:
            question = question.replace('What is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
            # print(question)
        elif 'exist' in question:
            question = question.replace('what relations exist between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'what' in question:
            question = question.replace('what is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'where' in question:
            question = question.replace('where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
        else:
            question = question.replace('Where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
    elif q_type == 'CO':
        # print(question, answer)
        answer = candidate[answer[0]]
        token_answer = tokenizing(answer)
        # One [MASK] per answer token, space separated (trailing space cut).
        mask = ('[MASK] '*len(token_answer))[:-1]
        # print('mask', mask)
        if 'What' in question:
            question = question[:question.find('?')+1]
            if 'What object' in question:
                question = question.replace('What object',mask).replace('?','.')
            elif 'What thing' in question:
                question = question.replace('What thing',mask).replace('?','.')
            elif 'What square' in question:
                question = question.replace('What square',mask).replace('?','.')
            else:
                question = question.replace('What',mask).replace('?','.')
        elif 'what' in question:
            question = question[:question.find('?')+1]
            if 'what object' in question:
                question = question.replace('what object',mask).replace('?','.')
            elif 'what thing' in question:
                question = question.replace('what thing',mask).replace('?','.')
            else:
                question = question.replace('what',mask).replace('?','.')
        elif 'Which' in question:
            if 'Which object' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which object',mask).replace('?','.')
            elif 'Which square' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which square',mask).replace('?','.')
        elif 'which' in question:
            question = question[:question.find('?')+1]
            question = question.replace('which object',mask).replace('?','.')
    return question
def confusion_matrix(truth, predict, correct, TP, TPFP, TPFN):
    """Accumulate an exact-match counter and per-label confusion tallies.

    ``truth`` and ``predict`` are parallel label sequences.  ``TP`` gains
    one at each position where the labels agree, ``TPFP`` counts every
    predicted label (TP + FP) and ``TPFN`` every gold label (TP + FN).
    The tally arrays are mutated in place; the (possibly incremented)
    counter and the arrays are returned together.
    """
    if truth == predict:
        correct += 1
    for pos, gold in enumerate(truth):
        pred = predict[pos]
        if gold == pred:
            TP[gold] += 1
        TPFP[pred] += 1
        TPFN[gold] += 1
    return correct, TP, TPFP, TPFN
def precision(TP,TPFP):
    """Per-class precision TP/(TP+FP), skipping the class at index 0.

    Classes with no predictions (0/0) yield 0 via nan_to_num; the errstate
    guard keeps that division from emitting RuntimeWarnings.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.nan_to_num(TP[1:]/TPFP[1:])
def recall(TP,TPFN):
    """Per-class recall TP/(TP+FN), skipping the class at index 0.

    Classes with no gold instances (0/0) yield 0 via nan_to_num; the
    errstate guard keeps that division from emitting RuntimeWarnings.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.nan_to_num(TP[1:]/TPFN[1:])
def F1_measure(TP,TPFP, TPFN,macro= False):
    """Per-class F1 (index 0 excluded), or the macro average when ``macro``.

    Undefined ratios (0/0) are mapped to 0 via nan_to_num; the errstate
    guard silences the corresponding RuntimeWarnings.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        Precision = np.nan_to_num(TP[1:] / TPFP[1:])
        Recall = np.nan_to_num(TP[1:] / TPFN[1:])
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
    return np.average(F1) if macro else F1
| 17,477 | 36.266525 | 215 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/sprlqa/.ipynb_checkpoints/train-checkpoint.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
from QA.train import check_answer_equality, concate_input_components
def train(model
          , criterion
          , optimizer
          , pretrain = "bertbc"
          , baseline = "bert"
          , start = 0
          , num_sample = None
          , train_num = None
          , qtype = 'YN'
          , data_name = "sprlqa"
          , other = None
          , device = "cpu"
          , train_log = False
          , file = None
          , epochs = 0
          , batch_size = None
          ):
    """Run one training pass over the SpRL-QA train split.

    Questions are accumulated into mini-batches of ``batch_size`` inputs;
    each full batch is scored ('bertmc' -> multiple_classification,
    'bertbc' -> boolean_classification), the loss is back-propagated and
    the optimizer stepped.  Returns ``(summed_loss, metrics)`` where
    metrics is ``(accuracy, macro_F1)`` for ``qtype == 'YN'`` and
    ``(accuracy,)`` otherwise.

    NOTE(review): a trailing partial batch (fewer than ``batch_size``
    questions left at the end) is never trained on — confirm intentional.
    NOTE(review): ``criterion``, ``train_num`` and ``epochs`` are not read
    in the active code below.
    """
    # initialize_tokenizer(baseline)
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    # k_fold = 7
    with open('dataset/sprlqa/train_sprlqa.json') as json_file:
        data = json.load(json_file)
    # Per-class tallies for Yes/No questions: index 0 = 'Yes', 1 = 'No'.
    if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*2), np.array([0]*2), np.array([0]*2)
    _temp_batch_input = []
    _temp_batch_answer = []
    for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
        # s_ind+= 1
        # print('sample ',s_ind)
        if s_ind< start:continue  # skip stories before the requested offset
        # samples [epochs*k_fold, (epochs*k_fold)+k_fold] considered as dev
        # if human and s_ind in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
        if train_log: print('sample ',s_ind, file = file)
        # story_txt = ' '.join(story['story'])
        #QA tasks
        # each question
        for question in story['questions']:
            q_text, q_emb= '', []
            # if question['q_type'] in ['FA'] and question['start_end_char'] == []: continue
            # if other == 'noDK' and question['answer'] == ['DK']: continue
            # q_text = question['question']
            # Context is the first num_1st_context_sentences+1 story sentences.
            story_txt = ' '.join(story['story'][:question["num_1st_context_sentences"]+1])
            model.zero_grad()
            all_q += 1
            if train_log:
                print('question: ', question['question'], '\nanswer: ',question['answer'], file = file)
                print('Story:\n',story_txt, file = file)
            _temp_batch_input += [concate_input_components([question['question'], story_txt], baseline)]
            _temp_batch_answer += [question['answer']]
            # Keep accumulating until a full batch is available.
            if len(_temp_batch_input) < batch_size : continue
            if pretrain == 'bertmc':
                loss, output = multiple_classification(model, _temp_batch_input, qtype, question['candidate_answers'],_temp_batch_answer, other = other, device = device, dataset = data_name)
            elif pretrain == 'bertbc':
                loss, output = boolean_classification(model, _temp_batch_input, qtype, question['candidate_answers'], _temp_batch_answer, other = other, device = device, dataset = data_name)
            if train_log: print("predict: ", output, file = file)
            # Score every item of the batch against its gold answer.
            for ind, correct_answer in enumerate(_temp_batch_answer):
                if check_answer_equality(correct_answer, output[ind]) :
                    correct+=1
                    if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                # print('total: ', all_q, ' correct: ', correct)
                if qtype == 'YN':
                    if correct_answer == ['Yes']: TPFN[0] += 1
                    elif correct_answer == ['No']: TPFN[1] += 1
                    # elif correct_answer == ['DK']: TPFN[2] += 1
                    if output[ind] == ['Yes']: TPFP[0] += 1
                    elif output[ind] == ['No']: TPFP[1] += 1
                    # elif output == ['DK']: TPFP[2] += 1
                    if output[ind] == correct_answer == ['Yes']: TP[0] += 1
                    elif output[ind] == correct_answer == ['No']: TP[1] += 1
                    # elif output == correct_answer == ['DK']: TP[2] += 1
            if train_log: print("Loss is ", loss.item(), file = file)
            losses += [loss.item()]
            loss.backward()
            optimizer.step()
            _temp_batch_answer = []
            _temp_batch_input = []
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    if qtype == 'YN':
        print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        Macro_F1 = np.average(F1[:2])
        print('Train Final Precision: ', Precision, file = file)
        print('Train Final Recall: ', Recall, file = file)
        print('Train Final F1: ', F1, file = file)
        print('Train Final Macro_F1: ', Macro_F1)
        print('Train Final Macro_F1: ', Macro_F1, file = file)
        return losses, (correct/ all_q, Macro_F1,)
    return losses, (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span onto token indices.

    Only the first span in ``start_end`` (a list of [start_char, end_char]
    pairs) is converted.  The returned [start_tok, end_tok] pair is offset
    by ``len(question tokens) + 2`` so it indexes into a
    ``[CLS] question [SEP] story`` input sequence.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    #finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:
        temp = s_e[0]  # character offset currently being located (start first, then end)
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            if temp > sum_char + len(word) : sum_char += len_word;
            else:
                if is_start:
                    start, is_start = ind , False
                    # If the end character also falls inside this token,
                    # the whole span is a single token.
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1;
                else: start_end_token+=[[start, ind]]; break
            # NOTE(review): heuristic whitespace accounting — a space is
            # assumed between tokens except around '.', ',' and apostrophes;
            # confirm it matches the tokenizer's detokenization rules.
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
    start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
    start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def train_babi(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
    """Fine-tune the boolean classifier on the bAbI train split.

    Only questions whose ``q_type`` equals ``qtype`` are used; each one is
    classified, back-propagated and optimizer-stepped individually.
    Returns ``(summed_loss, accuracy)``.

    NOTE(review): ``criterion``, ``pretrain`` and ``train24k`` are accepted
    but never read in the active code below.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('./dataset/babi/train.json') as json_file:
        data = json.load(json_file)
    # NOTE(review): num_sample is divided by 8 — presumably ~8 questions per
    # story in this dataset; confirm against the data file.
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    #random sampling or not
    random.seed(1)
    stories = random.sample(data['data'], number_samples) if num_sample in [100, 500, 1000, 2000, 5000] else data['data']
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    for story in stories[:number_samples]:
        # if is_DK_babi(story['story'][0]): continue
        s_ind+= 1
        print('sample ',s_ind)
        print('sample ',s_ind, file = file)
        story_txt = story['story'][0]
        x = 0
        # each question (span)
        for question in story['questions']:
            q_text, q_emb= '', []
            if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
                x+=1
                q_text = question['question']
                model.zero_grad()
                all_q += 1
                print('Story:\n',story_txt, file = file)
                print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                #print("logit: ", logit , file = file)
                print("predict: ", output, file = file)
                correct_answer = question['answer']
                correct_answer.sort()
                if correct_answer == output :
                    correct+=1
                    print('total: ', all_q, ' correct: ', correct, file = file)
                    print('total: ', all_q, ' correct: ', correct)
                print("Loss is ", loss.item(), file = file)
                losses += [loss.item()]
                loss.backward()
                optimizer.step()
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, correct/ all_q
def train_boolq(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
    """Fine-tune the yes/no classifier on the BoolQ train split.

    One gradient step per passage/question pair.  Returns
    ``(summed_loss, accuracy)``.

    NOTE(review): ``criterion``, ``pretrain``, ``train24k`` and ``qtype``
    are accepted but never read in the active code below.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('./dataset/boolQ/train.json') as json_file:
        data = json.load(json_file)
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    x = 0
    for story in data['data'][:num_sample]:
        s_ind+= 1
        print('sample ',s_ind)
        print('sample ',s_ind, file = file)
        # Passages are truncated to their first 1000 characters.
        story_txt = story['passage'][:1000]
        # print(story_txt)
        # each question (span)
        # for question in story['questions']:
        # q_text, q_emb= '', []
        # if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
        x+=1
        q_text = story['question']
        answer = ['Yes'] if story['answer'] == True else ['No']
        model.zero_grad()
        all_q += 1
        print('Story:\n',story_txt, file = file)
        print('question: ', q_text, '\nanswer: ',answer, file = file)
        loss, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
        print("predict: ", output, file = file)
        # NOTE(review): correct_answer aliases `answer`; sort() mutates it.
        correct_answer = answer
        correct_answer.sort()
        if correct_answer == output :
            correct+=1
            print('total: ', all_q, ' correct: ', correct, file = file)
            print('total: ', all_q, ' correct: ', correct)
        print("Loss is ", loss.item(), file = file)
        losses += [loss.item()]
        loss.backward()
        optimizer.step()
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, correct/ all_q
def is_DK_babi(story):
    """Return True when *story* mixes a horizontal relation (left/right)
    with a vertical one (above/below).

    The original four-way branch was pairwise-redundant: every branch
    reduces to "some horizontal term AND some vertical term", so the
    whole test collapses to a single conjunction.
    """
    horizontal = 'left' in story or 'right' in story
    vertical = 'above' in story or 'below' in story
    return horizontal and vertical
def question_to_sentence(question, q_type, answer, candidate):
    """Rewrite a question as a masked declarative sentence for MLM-style use.

    FB: "which block ..." -> "block [MASK] ...".
    FR: the relation phrase is replaced by "[MASK]" (or "[MASK] [MASK]" when
        ``answer`` is [4] or [5] — NOTE(review): presumably two-token
        relation labels; confirm against the label inventory).
    CO: the candidate answer ``candidate[answer[0]]`` is replaced by as many
        [MASK] tokens as its tokenization contains.
    """
    if q_type == 'FB':
        if 'Which' in question:
            question = question.replace('Which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'which' in question:
            question = question.replace('which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'what' in question:
            question = question.replace('what block', 'block [MASK]').replace('(s)','').replace('?','.')
        else:
            question = question.replace('What block', 'block [MASK]').replace('(s)','').replace('?','.')
    elif q_type == 'FR':
        # print('hi',question)
        if 'What' in question:
            question = question.replace('What is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
            # print(question)
        elif 'exist' in question:
            question = question.replace('what relations exist between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'what' in question:
            question = question.replace('what is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'where' in question:
            question = question.replace('where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
        else:
            question = question.replace('Where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
    elif q_type == 'CO':
        # print(question, answer)
        answer = candidate[answer[0]]
        token_answer = tokenizing(answer)
        # One [MASK] per answer token, space separated (trailing space cut).
        mask = ('[MASK] '*len(token_answer))[:-1]
        # print('mask', mask)
        if 'What' in question:
            question = question[:question.find('?')+1]
            if 'What object' in question:
                question = question.replace('What object',mask).replace('?','.')
            elif 'What thing' in question:
                question = question.replace('What thing',mask).replace('?','.')
            elif 'What square' in question:
                question = question.replace('What square',mask).replace('?','.')
            else:
                question = question.replace('What',mask).replace('?','.')
        elif 'what' in question:
            question = question[:question.find('?')+1]
            if 'what object' in question:
                question = question.replace('what object',mask).replace('?','.')
            elif 'what thing' in question:
                question = question.replace('what thing',mask).replace('?','.')
            else:
                question = question.replace('what',mask).replace('?','.')
        elif 'Which' in question:
            if 'Which object' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which object',mask).replace('?','.')
            elif 'Which square' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which square',mask).replace('?','.')
        elif 'which' in question:
            question = question[:question.find('?')+1]
            question = question.replace('which object',mask).replace('?','.')
    return question
def confusion_matrix(truth, predict, correct, TP, TPFP, TPFN):
    """Accumulate an exact-match counter and per-label confusion tallies.

    ``truth`` and ``predict`` are parallel label sequences.  ``TP`` gains
    one at each position where the labels agree, ``TPFP`` counts every
    predicted label (TP + FP) and ``TPFN`` every gold label (TP + FN).
    The tally arrays are mutated in place; the (possibly incremented)
    counter and the arrays are returned together.
    """
    if truth == predict:
        correct += 1
    for pos, gold in enumerate(truth):
        pred = predict[pos]
        if gold == pred:
            TP[gold] += 1
        TPFP[pred] += 1
        TPFN[gold] += 1
    return correct, TP, TPFP, TPFN
def precision(TP,TPFP):
    """Per-class precision TP/(TP+FP), skipping the class at index 0.

    Classes with no predictions (0/0) yield 0 via nan_to_num; the errstate
    guard keeps that division from emitting RuntimeWarnings.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.nan_to_num(TP[1:]/TPFP[1:])
def recall(TP,TPFN):
    """Per-class recall TP/(TP+FN), skipping the class at index 0.

    Classes with no gold instances (0/0) yield 0 via nan_to_num; the
    errstate guard keeps that division from emitting RuntimeWarnings.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.nan_to_num(TP[1:]/TPFN[1:])
def F1_measure(TP,TPFP, TPFN,macro= False):
    """Per-class F1 (index 0 excluded), or the macro average when ``macro``.

    Undefined ratios (0/0) are mapped to 0 via nan_to_num; the errstate
    guard silences the corresponding RuntimeWarnings.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        Precision = np.nan_to_num(TP[1:] / TPFP[1:])
        Recall = np.nan_to_num(TP[1:] / TPFN[1:])
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
    return np.average(F1) if macro else F1
| 17,477 | 36.266525 | 215 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/sprlqa/.ipynb_checkpoints/test-checkpoint.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
from QA.train import check_answer_equality, concate_input_components
# from ALBERT import tokenizing
# from XLNet import tokenizing
def test(model
         , pretrain = "bertbc"
         , baseline = "bert"
         , test_or_dev = "test"
         , num_sample = None
         , train_num = None
         , unseen = False
         , qtype = "YN"
         , other = None
         , data_name = "sprlqa"
         , save_data = False
         , device = "cpu"
         , file = None
         , epochs = 0
         ):
    """Evaluate on the SpRL-QA dev/test split, one question at a time.

    Each question is classified ('bertmc' -> multiple_classification,
    'bertbc' -> boolean_classification) under ``torch.no_grad``.
    Returns ``(accuracy, macro_F1)`` for ``qtype == 'YN'`` and
    ``(accuracy,)`` otherwise; a transcript is written to ``file``.

    NOTE(review): ``train_num``, ``unseen``, ``save_data`` and ``epochs``
    are accepted but never read in the active code below.
    """
    # initialize_tokenizer(baseline)
    all_q = 0
    correct = 0
    correct_no_distance = 0
    # s_ind = 0
    correct_consistency, consistency_total =0, 0
    # k_fold = 7
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        with open('dataset/sprlqa/'+test_or_dev+'_sprlqa.json') as json_file:
            data = json.load(json_file)
        # Per-class tallies for Yes/No questions: index 0 = 'Yes', 1 = 'No'.
        if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*2), np.array([0]*2), np.array([0]*2)
        for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
            print('sample ',s_ind, file = file)
            # story_txt = story['story'][0]
            #QA tasks
            # each question (span)
            for question in story['questions']:
                # q_text, q_emb= '', []
                # q_text = question['question']
                # Context is the first num_1st_context_sentences+1 sentences.
                story_txt = ' '.join(story['story'][:question["num_1st_context_sentences"]+1])
                all_q += 1
                print('Story:\n',story_txt, file = file)
                print('question: ', question['question'], '\nanswer: ',question['answer'], file = file)
                input_text = concate_input_components([question['question'], story_txt], baseline)
                if pretrain == 'bertmc':
                    _, output = multiple_classification(model, input_text, qtype, question['candidate_answers'], [], other=other, device = device, dataset = data_name)
                elif pretrain == 'bertbc':
                    _, output = boolean_classification(model, input_text, qtype, question['candidate_answers'], [], other=other, device = device, dataset = data_name)
                print("predict: ", output, file = file)
                correct_answer = question['answer']
                if check_answer_equality(correct_answer, output[0]):
                    correct+=1
                    print('total: ', all_q, ' correct: ', correct, file = file)
                    # print('total: ', all_q, ' correct: ', correct)
                # else: print(s_ind, 'wrong')
                if qtype == 'YN':
                    if correct_answer == ['Yes']: TPFN[0] += 1
                    elif correct_answer == ['No']: TPFN[1] += 1
                    # elif correct_answer == ['DK']: TPFN[2] += 1
                    if output[0] == ['Yes']: TPFP[0] += 1
                    elif output[0] == ['No']: TPFP[1] += 1
                    # elif output == ['DK']: TPFP[2] += 1
                    if output[0] == correct_answer == ['Yes']: TP[0] += 1
                    elif output[0] == correct_answer == ['No']: TP[1] += 1
                    # elif output == correct_answer == ['DK']: TP[2] += 1
    print(test_or_dev, ' Final accuracy: ', correct/ all_q)
    print(test_or_dev, ' Final accuracy: ', correct/ all_q, file = file)
    if qtype == 'YN':
        print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        Macro_F1 = np.average(F1[:2])
        print(test_or_dev, ' Final Precision: ', Precision, file = file)
        print(test_or_dev, ' Final Recall: ', Recall, file = file)
        print(test_or_dev, ' Final F1: ', F1, file = file)
        print(test_or_dev, ' Final Macro_F1: ', Macro_F1)
        print(test_or_dev, ' Final Macro_F1: ', Macro_F1, file = file)
        return (correct/ all_q, Macro_F1,)
    return (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span onto token indices.

    Only the first span in ``start_end`` (a list of [start_char, end_char]
    pairs) is converted.  The returned [start_tok, end_tok] pair is offset
    by ``len(question tokens) + 2`` so it indexes into a
    ``[CLS] question [SEP] story`` input sequence.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    #finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:
        temp = s_e[0]  # character offset currently being located (start first, then end)
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            if temp > sum_char + len(word) : sum_char += len_word
            else:
                if is_start:
                    start, is_start = ind , False
                    # If the end character also falls inside this token,
                    # the whole span is a single token.
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1
                else: start_end_token+=[[start, ind]]; break
            # NOTE(review): heuristic whitespace accounting — a space is
            # assumed between tokens except around '.', ',' and apostrophes;
            # confirm it matches the tokenizer's detokenization rules.
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
    start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
    start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def test_babi(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on the bAbI split named by `test_or_dev`.

    Loads dataset/babi/<test_or_dev>.json, runs boolean classification on
    every question whose type equals `qtype`, logs per-sample details to
    `file`, and returns overall accuracy (correct / total questions).

    Note: `pretrain` and `unseen` are accepted for a uniform signature but
    are not used in this function.
    """
    #import baseline
    # Import the classification helpers matching the chosen encoder family.
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('dataset/babi/'+test_or_dev+'.json') as json_file:
        data = json.load(json_file)
    all_q = 0
    correct = 0
    s_ind = 0
    #random sampling or not
    random.seed(1)
    stories = data['data'] #if other != 'random' else random.sample(data['data'], num_sample)
    # Each story carries several questions, so divide to get a story budget.
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in stories[:number_samples]:
            # if is_DK_babi(story['story'][0]): continue
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                if question['q_type'] in [qtype]:
                    q_text = question['question']
                    all_q += 1
                    print('Story:\n',story_txt, file = file)
                    print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    _, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                    #print("logit: ", logit, file = file)
                    print("predict: ", output, file = file)
                    # sort so that multi-label answers compare order-independently
                    correct_answer = question['answer']
                    correct_answer.sort()
                    if correct_answer == output :
                        correct+=1
                        print('total: ', all_q, ' correct: ', correct, file = file)
                        print('total: ', all_q, ' correct: ', correct)
    print(test_or_dev,' Final accuracy: ', correct/ all_q)
    print(test_or_dev,' Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def test_boolq(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on BoolQ (dev split or a tail slice of the test file).

    In test mode the last `num_sample` passages of test_1.json are used.
    Each passage is truncated to 1000 characters, classified as Yes/No, and
    per-sample logs are written to `file`. Returns overall accuracy.

    Note: `pretrain`, `unseen`, and `qtype` are accepted for a uniform
    signature but are not used in this function.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    start_number = 0
    if test_or_dev == 'dev':
        with open('dataset/boolQ/dev.json') as json_file:
            data = json.load(json_file)
    else:
        with open('dataset/boolQ/test_1.json') as json_file:
            data = json.load(json_file)
        # test mode: evaluate the LAST num_sample passages of the file
        start_number = len(data['data']) - num_sample
        num_sample = None
    all_q = 0
    correct = 0
    s_ind = 0
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in data['data'][start_number:num_sample]:
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            # truncate long passages so they fit the encoder's input window
            story_txt = story['passage'][:1000]
            # each question (span)
            # for question in story['questions']:
            # q_text, q_emb= '', []
            # if question['q_type'] in [qtype]:
            q_text = story['question']+'?'
            answer = ['Yes'] if story['answer'] == True else ['No']
            all_q += 1
            print('Story:\n',story_txt, file = file)
            print('question: ', q_text, '\nanswer: ', answer, file = file)
            _, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
            print("predict: ", output, file = file)
            correct_answer = answer
            correct_answer.sort()
            if correct_answer == output :
                correct+=1
                print('total: ', all_q, ' correct: ', correct, file = file)
                print('total: ', all_q, ' correct: ', correct)
    print('Test Final accuracy: ', correct/ all_q)
    print('Test Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def is_DK_babi(story):
    """Return True when a bAbI story mixes horizontal and vertical relations.

    A story containing both a horizontal keyword ('left'/'right') and a
    vertical keyword ('above'/'below') is flagged (callers use this to skip
    such stories as "don't know" cases).

    Args:
        story (str): raw story text (plain substring search).

    Returns:
        bool: True when keywords from both axes occur in the story.
    """
    has_horizontal = 'left' in story or 'right' in story
    has_vertical = 'above' in story or 'below' in story
    # The original four-way elif chain fired exactly when one axis' keyword
    # co-occurred with the other axis' keyword; this is the same predicate.
    return has_horizontal and has_vertical
| 11,190 | 34.86859 | 215 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/StepGame/test.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
from QA.train import check_answer_equality, concate_input_components
# from BERT import tokenizing
# from ALBERT import tokenizing
# from XLNet import tokenizing
def test(model
        , pretrain = "bertbc"
        , baseline = "bert"
        , test_or_dev = "test"
        , num_sample = None
        , train_num = '1' #it is the test/dev name
        , unseen = False
        , qtype = None
        , other = None #, sent_num = '1'
        , save_data = False
        , data_name = None
        , device = "cpu"
        , file = None
        , data = None
        , epochs = 0
        ):
    """Evaluate `model` on a StepGame valid/test file selected by `train_num`.

    Loads qa<k>_valid.json (dev; capped at k=5) or qa<k>_test.json (test),
    builds a question+story input per sample, runs multi-class relation
    classification over the 9 StepGame direction labels, and logs details
    to `file`.

    Returns:
        1-tuple (accuracy,) over the first `num_sample` samples.
    """
    # initialize_tokenizer(baseline)
    all_q = 0
    correct = 0
    correct_no_distance = 0
    # s_ind = 0
    correct_consistency, consistency_total =0, 0
    # k_fold = 7
    model.eval()
    # the 9 StepGame relation labels; prediction indices refer to this list
    candidates = ['left', 'right', 'below', 'above', 'lower-left', 'upper-right', 'lower-right', 'upper-left', 'overlap']
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        if test_or_dev == "dev":
            # validation files only exist up to qa5; clamp higher ids
            if int(train_num) <6: dev_num = train_num
            else: dev_num = '5'
            with open('/VL/space/rshnk/SpaRT_models/StepGame/Dataset/TrainVersion/qa'+str(dev_num)+'_valid.json') as json_file:
                data = json.load(json_file)
        else:
            test_num = train_num
            # with open('./dataset/new_test.json') as json_file:
            with open('/VL/space/rshnk/SpaRT_models/StepGame/Dataset/TrainVersion/qa'+str(test_num)+'_test.json') as json_file:
                data = json.load(json_file)
        if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*3), np.array([0]*3), np.array([0]*3)
        for s_ind in tqdm(list(data)[:num_sample]):
            # s_ind+= 1
            # print('sample ',s_ind)
            #use k_fold for cross_evaluation
            # if human and test_or_dev == 'dev' and s_ind not in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
            print('sample ',s_ind, file = file)
            story = data[s_ind]
            story_txt = ' '.join(story['story'])
            # each question (span)
            q_text, q_emb= '', []
            q_text = story['question']
            all_q += 1
            print('Story:\n',story_txt, file = file)
            print('question: ', q_text, '\nanswer: ',story['label'], file = file)
            input_text = concate_input_components([q_text, story_txt], baseline)
            if pretrain == 'bertmc':
                _, output = multiple_classification(model, [input_text], 'FR', candidates, [], other=other, device = device, dataset = "stepgame")
            # elif pretrain == 'bertbc':
            #     _, output = boolean_classification(model, q_text, story_txt, 'FR', candidates, candidates.index(story['label']), other, device)
            print("predict: ", output[0], file = file)
            # gold label expressed as its index in `candidates`
            correct_answer = [candidates.index(story['label'])]
            if check_answer_equality(correct_answer, output[0]) :
                correct+=1
                print('total: ', all_q, ' correct: ', correct, file = file)
                # print('total: ', all_q, ' correct: ', correct)
            # else: print(s_ind, 'wrong')
    print(test_or_dev, ' Final accuracy: ', correct/ all_q)
    print(test_or_dev, ' Final accuracy: ', correct/ all_q, file = file)
    # if qtype == 'YN':
    #     print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
    #     Precision = np.nan_to_num(TP / TPFP)
    #     Recall = np.nan_to_num(TP / TPFN)
    #     F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
    #     Macro_F1 = np.average(F1[:2])
    #     print(test_or_dev, ' Final Precision: ', Precision, file = file)
    #     print(test_or_dev, ' Final Recall: ', Recall, file = file)
    #     print(test_or_dev, ' Final F1: ', F1, file = file)
    #     print(test_or_dev, ' Final Macro_F1: ', Macro_F1)
    #     print(test_or_dev, ' Final Macro_F1: ', Macro_F1, file = file)
    #     return (correct/ all_q, Macro_F1)
    return (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span onto token indices of the LM input.

    Walks the tokenized story while accumulating a character counter to find
    the token range covering the first (start_char, end_char) pair in
    `start_end`, then shifts both indices by the question length plus two so
    they index into the concatenated [CLS] question [SEP] story sequence.

    Args:
        story: raw story text.
        question: raw question text.
        start_end: list of [start_char, end_char] spans; only the first is used.
        tokenizing: callable splitting a string into sub-word tokens.
        file: log file handle (unused here; kept for a uniform signature).

    Returns:
        [start_token, end_token] indices into the question+story token sequence.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    #finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:  # only the first span is converted
        temp = s_e[0]  # character offset currently being located (start first)
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            # target offset not reached yet: advance the character counter
            if temp > sum_char + len(word) : sum_char += len_word
            else:
                if is_start:
                    # found the start token; now look for the end offset
                    start, is_start = ind , False
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1
                else: start_end_token+=[[start, ind]]; break
            # NOTE(review): punctuation tokens appear to be glued to the
            # previous word without a space in the source text — confirm.
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
    # shift from story-local token indices to positions in the full LM input
    start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
    start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def test_babi(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on the bAbI split named by `test_or_dev`.

    Loads dataset/babi/<test_or_dev>.json, runs boolean classification on
    every question whose type equals `qtype`, logs per-sample details to
    `file`, and returns overall accuracy (correct / total questions).

    Note: `pretrain` and `unseen` are accepted for a uniform signature but
    are not used in this function.
    """
    #import baseline
    # Import the classification helpers matching the chosen encoder family.
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('dataset/babi/'+test_or_dev+'.json') as json_file:
        data = json.load(json_file)
    all_q = 0
    correct = 0
    s_ind = 0
    #random sampling or not
    random.seed(1)
    stories = data['data'] #if other != 'random' else random.sample(data['data'], num_sample)
    # Each story carries several questions, so divide to get a story budget.
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in stories[:number_samples]:
            # if is_DK_babi(story['story'][0]): continue
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                if question['q_type'] in [qtype]:
                    q_text = question['question']
                    all_q += 1
                    print('Story:\n',story_txt, file = file)
                    print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    _, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                    #print("logit: ", logit, file = file)
                    print("predict: ", output, file = file)
                    # sort so that multi-label answers compare order-independently
                    correct_answer = question['answer']
                    correct_answer.sort()
                    if correct_answer == output :
                        correct+=1
                        print('total: ', all_q, ' correct: ', correct, file = file)
                        print('total: ', all_q, ' correct: ', correct)
    print(test_or_dev,' Final accuracy: ', correct/ all_q)
    print(test_or_dev,' Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def test_boolq(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on BoolQ (dev split or a tail slice of the test file).

    In test mode the last `num_sample` passages of test_1.json are used.
    Each passage is truncated to 1000 characters, classified as Yes/No, and
    per-sample logs are written to `file`. Returns overall accuracy.

    Note: `pretrain`, `unseen`, and `qtype` are accepted for a uniform
    signature but are not used in this function.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    start_number = 0
    if test_or_dev == 'dev':
        with open('dataset/boolQ/dev.json') as json_file:
            data = json.load(json_file)
    else:
        with open('dataset/boolQ/test_1.json') as json_file:
            data = json.load(json_file)
        # test mode: evaluate the LAST num_sample passages of the file
        start_number = len(data['data']) - num_sample
        num_sample = None
    all_q = 0
    correct = 0
    s_ind = 0
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in data['data'][start_number:num_sample]:
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            # truncate long passages so they fit the encoder's input window
            story_txt = story['passage'][:1000]
            # each question (span)
            # for question in story['questions']:
            # q_text, q_emb= '', []
            # if question['q_type'] in [qtype]:
            q_text = story['question']+'?'
            answer = ['Yes'] if story['answer'] == True else ['No']
            all_q += 1
            print('Story:\n',story_txt, file = file)
            print('question: ', q_text, '\nanswer: ', answer, file = file)
            _, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
            print("predict: ", output, file = file)
            correct_answer = answer
            correct_answer.sort()
            if correct_answer == output :
                correct+=1
                print('total: ', all_q, ' correct: ', correct, file = file)
                print('total: ', all_q, ' correct: ', correct)
    print('Test Final accuracy: ', correct/ all_q)
    print('Test Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def is_DK_babi(story):
    """Return True when a bAbI story mixes horizontal and vertical relations.

    A story containing both a horizontal keyword ('left'/'right') and a
    vertical keyword ('above'/'below') is flagged (callers use this to skip
    such stories as "don't know" cases).

    Args:
        story (str): raw story text (plain substring search).

    Returns:
        bool: True when keywords from both axes occur in the story.
    """
    has_horizontal = 'left' in story or 'right' in story
    has_vertical = 'above' in story or 'below' in story
    # The original four-way elif chain fired exactly when one axis' keyword
    # co-occurred with the other axis' keyword; this is the same predicate.
    return has_horizontal and has_vertical
| 11,232 | 35.235484 | 215 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/StepGame/train.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
from QA.train import check_answer_equality, concate_input_components
def train (model
          , criterion
          , optimizer
          , pretrain = "bertbc"
          , baseline = "bert"
          , start = 0
          , num_sample = None
          , train_num = '12345'
          , qtype = None
          , data_name = "stepgame"
          , other = None
          , device = "cpu"
          , train_log = False
          , file = None
          , epochs = 0
          , batch_size = None
          ):
    """Run one training epoch over a StepGame train file.

    Batches question+story inputs up to `batch_size`, runs multi-class
    relation classification over the 9 StepGame labels, back-propagates the
    batch loss, and optionally logs per-sample details to `file` when
    `train_log` is set.

    Returns:
        (summed_loss, (accuracy,)) for the processed samples.

    Note: `criterion`, `qtype`, `data_name`, and `epochs` are accepted for a
    uniform signature but are not used in this function.
    """
    # initialize_tokenizer(baseline)
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    # k_fold = 7
    _temp_batch_input = []
    _temp_batch_answer = []
    # non-string train_num selects the combined train file; a string selects
    # the per-task file qa<train_num>_train.json
    if type(train_num) != str:
        train_address = '/VL/space/rshnk/SpaRT_models/StepGame/Dataset/TrainVersion/train.json'
    else:
        train_address = '/VL/space/rshnk/SpaRT_models/StepGame/Dataset/json_format/clean/qa'+str(train_num)+'_train.json'
    with open(train_address) as json_file:
        data = json.load(json_file)
    # if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*3), np.array([0]*3), np.array([0]*3)
    # the 9 StepGame relation labels; answers are stored as indices into this
    candidates = ['left', 'right', 'below', 'above', 'lower-left', 'upper-right', 'lower-right', 'upper-left', 'overlap']
    for s_ind in tqdm(list(data)[:num_sample]):
        # s_ind+= 1
        # print('sample ',s_ind)
        if int(s_ind)< start:continue
        # samples [epochs*k_fold, (epochs*k_fold)+k_fold] considered as dev
        # if human and s_ind in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
        if train_log: print('sample ',s_ind, file = file)
        story = data[s_ind]
        story_txt = ' '.join(story['story'])
        x = 1
        #QA tasks
        q_text = story['question']
        model.zero_grad()
        all_q += 1
        if train_log:
            print('question: ', q_text, '\nanswer: ',story['label'], file = file)
            print('Story:\n',story_txt, file = file)
        _temp_batch_input += [concate_input_components([q_text, story_txt], baseline)]
        _temp_batch_answer += [[candidates.index(story['label'])]]
        # accumulate until a full batch is ready, then run one optimizer step
        if len(_temp_batch_input) < batch_size : continue
        if pretrain == 'bertmc':
            loss, output = multiple_classification(model, _temp_batch_input, 'FR', candidates, _temp_batch_answer, other = other, device = device, dataset = "stepgame")
        # elif pretrain == 'bertbc':
        #     loss, output = boolean_classification(model, _temp_batch_input, 'FR', candidates, candidates.index(story['label']), other, device)
        if train_log: print("predict: ", output, file = file)
        for ind, correct_answer in enumerate(_temp_batch_answer):
            if check_answer_equality(correct_answer, output[ind]) :
                correct+=1
                if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                # print('total: ', all_q, ' correct: ', correct)
        if train_log: print("Loss is ", loss.item(), file = file)
        losses += [loss.item()]
        loss.backward()
        optimizer.step()
        _temp_batch_answer = []
        _temp_batch_input = []
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span onto token indices of the LM input.

    Walks the tokenized story while accumulating a character counter to find
    the token range covering the first (start_char, end_char) pair in
    `start_end`, then shifts both indices by the question length plus two so
    they index into the concatenated [CLS] question [SEP] story sequence.

    Args:
        story: raw story text.
        question: raw question text.
        start_end: list of [start_char, end_char] spans; only the first is used.
        tokenizing: callable splitting a string into sub-word tokens.
        file: log file handle (unused here; kept for a uniform signature).

    Returns:
        [start_token, end_token] indices into the question+story token sequence.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    #finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:  # only the first span is converted
        temp = s_e[0]  # character offset currently being located (start first)
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            # target offset not reached yet: advance the character counter
            if temp > sum_char + len(word) : sum_char += len_word;
            else:
                if is_start:
                    # found the start token; now look for the end offset
                    start, is_start = ind , False
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1;
                else: start_end_token+=[[start, ind]]; break
            # NOTE(review): punctuation tokens appear to be glued to the
            # previous word without a space in the source text — confirm.
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
    # shift from story-local token indices to positions in the full LM input
    start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
    start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def train_babi(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
    """Train `model` for one pass over the bAbI train file.

    Runs boolean classification per question of type `qtype`, back-propagates
    each loss immediately (batch size 1), and logs details to `file`.

    Returns:
        (summed_loss, accuracy) over the processed questions.

    Note: `criterion`, `pretrain`, and `train24k` are accepted for a uniform
    signature but are not used in this function.
    """
    #import baseline
    # Import the classification helpers matching the chosen encoder family.
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('./dataset/babi/train.json') as json_file:
        data = json.load(json_file)
    # Each story carries several questions, so divide to get a story budget.
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    #random sampling or not
    random.seed(1)
    stories = random.sample(data['data'], number_samples) if num_sample in [100, 500, 1000, 2000, 5000] else data['data']
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    for story in stories[:number_samples]:
        # if is_DK_babi(story['story'][0]): continue
        s_ind+= 1
        print('sample ',s_ind)
        print('sample ',s_ind, file = file)
        story_txt = story['story'][0]
        x = 0
        # each question (span)
        for question in story['questions']:
            q_text, q_emb= '', []
            if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
                x+=1
                q_text = question['question']
                model.zero_grad()
                all_q += 1
                print('Story:\n',story_txt, file = file)
                print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                #print("logit: ", logit , file = file)
                print("predict: ", output, file = file)
                # sort so that multi-label answers compare order-independently
                correct_answer = question['answer']
                correct_answer.sort()
                if correct_answer == output :
                    correct+=1
                    print('total: ', all_q, ' correct: ', correct, file = file)
                    print('total: ', all_q, ' correct: ', correct)
                print("Loss is ", loss.item(), file = file)
                losses += [loss.item()]
                loss.backward()
                optimizer.step()
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, correct/ all_q
def train_boolq(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
    """Train `model` for one pass over the BoolQ train file.

    Truncates each passage to 1000 characters, runs Yes/No classification,
    back-propagates each loss immediately (batch size 1), and logs details
    to `file`.

    Returns:
        (summed_loss, accuracy) over the processed samples.

    Note: `criterion`, `pretrain`, `train24k`, and `qtype` are accepted for
    a uniform signature but are not used in this function.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('./dataset/boolQ/train.json') as json_file:
        data = json.load(json_file)
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    x = 0
    for story in data['data'][:num_sample]:
        s_ind+= 1
        print('sample ',s_ind)
        print('sample ',s_ind, file = file)
        # truncate long passages so they fit the encoder's input window
        story_txt = story['passage'][:1000]
        # print(story_txt)
        # each question (span)
        # for question in story['questions']:
        # q_text, q_emb= '', []
        # if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
        x+=1
        q_text = story['question']
        answer = ['Yes'] if story['answer'] == True else ['No']
        model.zero_grad()
        all_q += 1
        print('Story:\n',story_txt, file = file)
        print('question: ', q_text, '\nanswer: ',answer, file = file)
        loss, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
        print("predict: ", output, file = file)
        correct_answer = answer
        correct_answer.sort()
        if correct_answer == output :
            correct+=1
            print('total: ', all_q, ' correct: ', correct, file = file)
            print('total: ', all_q, ' correct: ', correct)
        print("Loss is ", loss.item(), file = file)
        losses += [loss.item()]
        loss.backward()
        optimizer.step()
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, correct/ all_q
def is_DK_babi(story):
    """Return True when a bAbI story mixes horizontal and vertical relations.

    A story containing both a horizontal keyword ('left'/'right') and a
    vertical keyword ('above'/'below') is flagged (callers use this to skip
    such stories as "don't know" cases).

    Args:
        story (str): raw story text (plain substring search).

    Returns:
        bool: True when keywords from both axes occur in the story.
    """
    has_horizontal = 'left' in story or 'right' in story
    has_vertical = 'above' in story or 'below' in story
    # The original four-way elif chain fired exactly when one axis' keyword
    # co-occurred with the other axis' keyword; this is the same predicate.
    return has_horizontal and has_vertical
def question_to_sentence(question, q_type, answer, candidate):
    """Rewrite a question into a declarative cloze sentence with [MASK] slots.

    Branches on `q_type`:
      - 'FB' (find block): replaces the "Which/What block" phrase with
        "block [MASK]".
      - 'FR' (find relation): strips the relation-question phrasing and
        inserts one [MASK], or two when the answer index is 4 or 5 —
        presumably two-token relation labels; TODO confirm against the
        label list used by the caller.
      - 'CO' (choose object): replaces the "What/Which object" phrase with
        as many [MASK] tokens as the tokenized answer string has.

    Args:
        question: original question text.
        q_type: one of 'FB', 'FR', 'CO'.
        answer: list with the answer's index into `candidate`.
        candidate: candidate answer strings (used only for 'CO').

    Returns:
        The rewritten sentence (question text unchanged if no branch matches).
    """
    if q_type == 'FB':
        if 'Which' in question:
            question = question.replace('Which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'which' in question:
            question = question.replace('which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'what' in question:
            question = question.replace('what block', 'block [MASK]').replace('(s)','').replace('?','.')
        else:
            question = question.replace('What block', 'block [MASK]').replace('(s)','').replace('?','.')
    elif q_type == 'FR':
        # print('hi',question)
        if 'What' in question:
            question = question.replace('What is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
            # print(question)
        elif 'exist' in question:
            question = question.replace('what relations exist between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'what' in question:
            question = question.replace('what is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'where' in question:
            question = question.replace('where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
        else:
            question = question.replace('Where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
    elif q_type == 'CO':
        # print(question, answer)
        # one [MASK] per sub-word token of the answer string
        answer = candidate[answer[0]]
        token_answer = tokenizing(answer)
        mask = ('[MASK] '*len(token_answer))[:-1]
        # print('mask', mask)
        if 'What' in question:
            question = question[:question.find('?')+1]
            if 'What object' in question:
                question = question.replace('What object',mask).replace('?','.')
            elif 'What thing' in question:
                question = question.replace('What thing',mask).replace('?','.')
            elif 'What square' in question:
                question = question.replace('What square',mask).replace('?','.')
            else:
                question = question.replace('What',mask).replace('?','.')
        elif 'what' in question:
            question = question[:question.find('?')+1]
            if 'what object' in question:
                question = question.replace('what object',mask).replace('?','.')
            elif 'what thing' in question:
                question = question.replace('what thing',mask).replace('?','.')
            else:
                question = question.replace('what',mask).replace('?','.')
        elif 'Which' in question:
            if 'Which object' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which object',mask).replace('?','.')
            elif 'Which square' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which square',mask).replace('?','.')
        elif 'which' in question:
            question = question[:question.find('?')+1]
            question = question.replace('which object',mask).replace('?','.')
    return question
def confusion_matrix(truth, predict,correct, TP,TPFP,TPFN):
    """Accumulate exact-match count and per-class confusion tallies.

    Increments `correct` when the whole prediction equals the whole truth,
    then updates, per position: TP (class predicted correctly), TPFP (count
    of predictions per class, i.e. TP+FP) and TPFN (count of gold labels per
    class, i.e. TP+FN).

    Args:
        truth: gold class indices.
        predict: predicted class indices (indexed in step with `truth`).
        correct: running exact-match counter.
        TP, TPFP, TPFN: per-class tally arrays, updated in place.

    Returns:
        (correct, TP, TPFP, TPFN) with the updated tallies.
    """
    # Exact match: every position must agree for the sample to count.
    if truth == predict:
        correct += 1
    for pos, gold in enumerate(truth):
        pred = predict[pos]
        if gold == pred:
            TP[gold] += 1        # true positive for the gold class
        TPFP[pred] += 1          # this class was predicted once more
        TPFN[gold] += 1          # this class appeared in the gold once more
    return correct, TP, TPFP, TPFN
def precision(TP,TPFP):
    """Per-class precision for classes 1..N (class 0 is excluded).

    0/0 divisions yield NaN, which `nan_to_num` maps to 0.0.
    """
    ratios = TP[1:] / TPFP[1:]
    return np.nan_to_num(ratios)
def recall(TP,TPFN):
    """Per-class recall for classes 1..N (class 0 is excluded).

    0/0 divisions yield NaN, which `nan_to_num` maps to 0.0.
    """
    ratios = TP[1:] / TPFN[1:]
    return np.nan_to_num(ratios)
def F1_measure(TP,TPFP, TPFN,macro= False):
    """Per-class F1 for classes 1..N, or their unweighted mean when `macro`.

    Precision, recall, and the harmonic mean each pass through `nan_to_num`
    so 0/0 divisions contribute 0.0 instead of NaN.
    """
    prec = np.nan_to_num(TP[1:] / TPFP[1:])
    rec = np.nan_to_num(TP[1:] / TPFN[1:])
    f1 = np.nan_to_num((2 * (prec * rec)) / (prec + rec))
    if macro:
        return np.average(f1)
    return f1
| 15,959 | 35.521739 | 215 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/StepGame/.ipynb_checkpoints/train-checkpoint.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
from QA.train import check_answer_equality, concate_input_components
def train (model
          , criterion
          , optimizer
          , pretrain = "bertbc"
          , baseline = "bert"
          , start = 0
          , num_sample = None
          , train_num = '12345'
          , qtype = None
          , data_name = "stepgame"
          , other = None
          , device = "cpu"
          , train_log = False
          , file = None
          , epochs = 0
          , batch_size = None
          ):
    """Run one training epoch over the StepGame file qa<train_num>_train.json.

    Batches question+story inputs up to `batch_size`, runs multi-class
    relation classification over the 9 StepGame labels, back-propagates the
    batch loss, and optionally logs per-sample details to `file` when
    `train_log` is set.

    Returns:
        (summed_loss, (accuracy,)) for the processed samples.

    Note: `criterion`, `qtype`, `data_name`, and `epochs` are accepted for a
    uniform signature but are not used in this function.
    """
    # initialize_tokenizer(baseline)
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    # k_fold = 7
    _temp_batch_input = []
    _temp_batch_answer = []
    with open('/VL/space/rshnk/SpaRT_models/StepGame/Dataset/json_format/clean/qa'+str(train_num)+'_train.json') as json_file:
        data = json.load(json_file)
    # if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*3), np.array([0]*3), np.array([0]*3)
    # the 9 StepGame relation labels; answers are stored as indices into this
    candidates = ['left', 'right', 'below', 'above', 'lower-left', 'upper-right', 'lower-right', 'upper-left', 'overlap']
    for s_ind in tqdm(list(data)[:num_sample]):
        # s_ind+= 1
        # print('sample ',s_ind)
        if int(s_ind)< start:continue
        # samples [epochs*k_fold, (epochs*k_fold)+k_fold] considered as dev
        # if human and s_ind in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
        if train_log: print('sample ',s_ind, file = file)
        story = data[s_ind]
        story_txt = ' '.join(story['story'])
        x = 1
        #QA tasks
        q_text = story['question']
        model.zero_grad()
        all_q += 1
        if train_log:
            print('question: ', q_text, '\nanswer: ',story['label'], file = file)
            print('Story:\n',story_txt, file = file)
        _temp_batch_input += [concate_input_components([q_text, story_txt], baseline)]
        _temp_batch_answer += [candidates.index(story['label'])]
        # accumulate until a full batch is ready, then run one optimizer step
        if len(_temp_batch_input) < batch_size : continue
        if pretrain == 'bertmc':
            loss, output = multiple_classification(model, _temp_batch_input, 'FR', candidates, _temp_batch_answer, other = other, device = device, dataset = "stepgame")
        # elif pretrain == 'bertbc':
        #     loss, output = boolean_classification(model, _temp_batch_input, 'FR', candidates, candidates.index(story['label']), other, device)
        if train_log: print("predict: ", output, file = file)
        for ind, correct_answer in enumerate(_temp_batch_answer):
            if check_answer_equality(correct_answer, output[ind]) :
                correct+=1
                if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                # print('total: ', all_q, ' correct: ', correct)
        if train_log: print("Loss is ", loss.item(), file = file)
        losses += [loss.item()]
        loss.backward()
        optimizer.step()
        _temp_batch_answer = []
        _temp_batch_input = []
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span onto token indices of the LM input.

    Walks the tokenized story while accumulating a character counter to find
    the token range covering the first (start_char, end_char) pair in
    `start_end`, then shifts both indices by the question length plus two so
    they index into the concatenated [CLS] question [SEP] story sequence.

    Args:
        story: raw story text.
        question: raw question text.
        start_end: list of [start_char, end_char] spans; only the first is used.
        tokenizing: callable splitting a string into sub-word tokens.
        file: log file handle (unused here; kept for a uniform signature).

    Returns:
        [start_token, end_token] indices into the question+story token sequence.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    #finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:  # only the first span is converted
        temp = s_e[0]  # character offset currently being located (start first)
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            # target offset not reached yet: advance the character counter
            if temp > sum_char + len(word) : sum_char += len_word;
            else:
                if is_start:
                    # found the start token; now look for the end offset
                    start, is_start = ind , False
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1;
                else: start_end_token+=[[start, ind]]; break
            # NOTE(review): punctuation tokens appear to be glued to the
            # previous word without a space in the source text — confirm.
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
    # shift from story-local token indices to positions in the full LM input
    start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
    start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def train_babi(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
    """Train `model` for one pass over the bAbI train file.

    Runs boolean classification per question of type `qtype`, back-propagates
    each loss immediately (batch size 1), and logs details to `file`.

    Returns:
        (summed_loss, accuracy) over the processed questions.

    Note: `criterion`, `pretrain`, and `train24k` are accepted for a uniform
    signature but are not used in this function.
    """
    #import baseline
    # Import the classification helpers matching the chosen encoder family.
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('./dataset/babi/train.json') as json_file:
        data = json.load(json_file)
    # Each story carries several questions, so divide to get a story budget.
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    #random sampling or not
    random.seed(1)
    stories = random.sample(data['data'], number_samples) if num_sample in [100, 500, 1000, 2000, 5000] else data['data']
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    for story in stories[:number_samples]:
        # if is_DK_babi(story['story'][0]): continue
        s_ind+= 1
        print('sample ',s_ind)
        print('sample ',s_ind, file = file)
        story_txt = story['story'][0]
        x = 0
        # each question (span)
        for question in story['questions']:
            q_text, q_emb= '', []
            if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
                x+=1
                q_text = question['question']
                model.zero_grad()
                all_q += 1
                print('Story:\n',story_txt, file = file)
                print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                #print("logit: ", logit , file = file)
                print("predict: ", output, file = file)
                # sort so that multi-label answers compare order-independently
                correct_answer = question['answer']
                correct_answer.sort()
                if correct_answer == output :
                    correct+=1
                    print('total: ', all_q, ' correct: ', correct, file = file)
                    print('total: ', all_q, ' correct: ', correct)
                print("Loss is ", loss.item(), file = file)
                losses += [loss.item()]
                loss.backward()
                optimizer.step()
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, correct/ all_q
def train_boolq(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
    """Train `model` for one pass over the BoolQ train file.

    Truncates each passage to 1000 characters, runs Yes/No classification,
    back-propagates each loss immediately (batch size 1), and logs details
    to `file`.

    Returns:
        (summed_loss, accuracy) over the processed samples.

    Note: `criterion`, `pretrain`, `train24k`, and `qtype` are accepted for
    a uniform signature but are not used in this function.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('./dataset/boolQ/train.json') as json_file:
        data = json.load(json_file)
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    x = 0
    for story in data['data'][:num_sample]:
        s_ind+= 1
        print('sample ',s_ind)
        print('sample ',s_ind, file = file)
        # truncate long passages so they fit the encoder's input window
        story_txt = story['passage'][:1000]
        # print(story_txt)
        # each question (span)
        # for question in story['questions']:
        # q_text, q_emb= '', []
        # if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
        x+=1
        q_text = story['question']
        answer = ['Yes'] if story['answer'] == True else ['No']
        model.zero_grad()
        all_q += 1
        print('Story:\n',story_txt, file = file)
        print('question: ', q_text, '\nanswer: ',answer, file = file)
        loss, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
        print("predict: ", output, file = file)
        correct_answer = answer
        correct_answer.sort()
        if correct_answer == output :
            correct+=1
            print('total: ', all_q, ' correct: ', correct, file = file)
            print('total: ', all_q, ' correct: ', correct)
        print("Loss is ", loss.item(), file = file)
        losses += [loss.item()]
        loss.backward()
        optimizer.step()
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, correct/ all_q
def is_DK_babi(story):
    """Return True when *story* mentions both a horizontal relation
    ('left'/'right') and a vertical one ('above'/'below').

    The original elif chain reduces exactly to this conjunction: each of its
    four branches pairs one horizontal keyword with one vertical keyword.

    Args:
        story (str): raw story text.

    Returns:
        bool: True iff both axes are mentioned in the text.
    """
    horizontal = 'left' in story or 'right' in story
    vertical = 'above' in story or 'below' in story
    return horizontal and vertical
def question_to_sentence(question, q_type, answer, candidate):
    """Rewrite a question into a declarative sentence with [MASK] slots.

    Used to turn QA items into masked-LM inputs. The rewrite depends on the
    question type:
      - 'FB' (find block): "Which/what block ...?" -> "block [MASK] ...".
      - 'FR' (find relation): drops the interrogative phrase and replaces
        'and' / 'regarding to' with "is [MASK]" (two masks when `answer` is
        [4] or [5] — presumably two-token relations; confirm against the
        label set).
      - 'CO' (choose object): looks up the answer text in `candidate`,
        tokenizes it, and substitutes that many [MASK] tokens for the
        wh-phrase; the question is first cut at its '?'.

    Args:
        question (str): original question text.
        q_type (str): one of 'FB', 'FR', 'CO'.
        answer (list): gold label indices (used to size the mask for FR/CO).
        candidate (list): candidate answer strings (CO only).

    Returns:
        str: the masked sentence (unchanged if no branch matches).
    """
    if q_type == 'FB':
        if 'Which' in question:
            question = question.replace('Which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'which' in question:
            question = question.replace('which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'what' in question:
            question = question.replace('what block', 'block [MASK]').replace('(s)','').replace('?','.')
        else:
            question = question.replace('What block', 'block [MASK]').replace('(s)','').replace('?','.')
    elif q_type == 'FR':
        # print('hi',question)
        if 'What' in question:
            question = question.replace('What is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
            # print(question)
        elif 'exist' in question:
            question = question.replace('what relations exist between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'what' in question:
            question = question.replace('what is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'where' in question:
            question = question.replace('where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
        else:
            question = question.replace('Where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
    elif q_type == 'CO':
        # print(question, answer)
        # One [MASK] per token of the gold answer text.
        answer = candidate[answer[0]]
        token_answer = tokenizing(answer)
        mask = ('[MASK] '*len(token_answer))[:-1]
        # print('mask', mask)
        if 'What' in question:
            question = question[:question.find('?')+1]
            if 'What object' in question:
                question = question.replace('What object',mask).replace('?','.')
            elif 'What thing' in question:
                question = question.replace('What thing',mask).replace('?','.')
            elif 'What square' in question:
                question = question.replace('What square',mask).replace('?','.')
            else:
                question = question.replace('What',mask).replace('?','.')
        elif 'what' in question:
            question = question[:question.find('?')+1]
            if 'what object' in question:
                question = question.replace('what object',mask).replace('?','.')
            elif 'what thing' in question:
                question = question.replace('what thing',mask).replace('?','.')
            else:
                question = question.replace('what',mask).replace('?','.')
        elif 'Which' in question:
            if 'Which object' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which object',mask).replace('?','.')
            elif 'Which square' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which square',mask).replace('?','.')
        elif 'which' in question:
            question = question[:question.find('?')+1]
            question = question.replace('which object',mask).replace('?','.')
    return question
def confusion_matrix(truth, predict, correct, TP, TPFP, TPFN):
    """Accumulate exact-match and per-label confusion counts for one sample.

    Position-wise comparison: `truth[i]` is matched against `predict[i]`,
    so both sequences are expected to be aligned (an IndexError is raised if
    `predict` is shorter than `truth`, as in the original).

    Args:
        truth (list[int]): gold label indices for this sample.
        predict (list[int]): predicted label indices for this sample.
        correct (int): running exact-match counter.
        TP, TPFP, TPFN: per-label accumulators indexed by label id
            (true positives, all predictions = TP+FP, all gold = TP+FN).

    Returns:
        tuple: updated (correct, TP, TPFP, TPFN).
    """
    # Exact match: the whole predicted list must equal the gold list.
    if truth == predict : correct +=1
    for i in range(len(truth)):
        # True positive only when the aligned positions agree.
        if truth[i] == predict[i]: TP[truth[i]] += 1
        # Every prediction counts toward TP+FP for its label.
        TPFP[predict[i]]+= 1
        # Every gold label counts toward TP+FN for its label.
        TPFN[truth[i]] += 1
    return correct, TP, TPFP, TPFN
def precision(TP, TPFP):
    """Per-class precision over classes 1..N (class 0 excluded).

    Undefined ratios (0/0 -> NaN) are mapped to 0 via nan_to_num.
    """
    ratio = TP[1:] / TPFP[1:]
    return np.nan_to_num(ratio)
def recall(TP, TPFN):
    """Per-class recall over classes 1..N (class 0 excluded).

    Undefined ratios (0/0 -> NaN) are mapped to 0 via nan_to_num.
    """
    ratio = TP[1:] / TPFN[1:]
    return np.nan_to_num(ratio)
def F1_measure(TP, TPFP, TPFN, macro=False):
    """Per-class F1 over classes 1..N; arithmetic mean when macro=True.

    NaNs from zero denominators are coerced to 0 at every stage, matching
    the companion precision()/recall() helpers.
    """
    prec = np.nan_to_num(TP[1:] / TPFP[1:])
    rec = np.nan_to_num(TP[1:] / TPFN[1:])
    f1 = np.nan_to_num(2 * prec * rec / (prec + rec))
    if macro:
        return np.average(f1)
    return f1
| 15,774 | 35.431871 | 215 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/StepGame/.ipynb_checkpoints/test-checkpoint.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
from QA.train import check_answer_equality, concate_input_components
# from BERT import tokenizing
# from ALBERT import tokenizing
# from XLNet import tokenizing
def test(model
         , pretrain = "bertbc"
         , baseline = "bert"
         , test_or_dev = "test"
         , num_sample = None
         , train_num = None
         , unseen = False
         , qtype = None
         , other = None
         , sent_num = '0'
         , save_data = False
         , device = "cpu"
         , file = None
         , data = None
         , epochs = 0
        ):
    """Evaluate `model` on the StepGame test/valid split.

    Loads the split from hard-coded absolute paths (keyed by `sent_num`),
    runs multiple_classification per question, and reports accuracy.

    NOTE(review): the `data` argument is always overwritten by the file load
    below. `output` is only assigned in the pretrain == 'bertmc' branch, so
    any other `pretrain` value raises NameError — confirm callers only pass
    'bertmc'. `train_num`, `unseen`, `qtype` (beyond array init),
    `save_data` and `epochs` are effectively unused here.

    Returns:
        tuple: (accuracy,) over all evaluated questions.
    """
    # initialize_tokenizer(baseline)
    all_q = 0
    correct = 0
    correct_no_distance = 0
    # s_ind = 0
    correct_consistency, consistency_total =0, 0
    # k_fold = 7
    model.eval()
    # Fixed label inventory for StepGame relations.
    candidates = ['left', 'right', 'below', 'above', 'lower-left', 'upper-right', 'lower-right', 'upper-left', 'overlap']
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        if test_or_dev == "dev":
            with open('/VL/space/rshnk/SpaRT_models/StepGame/Dataset/json_format/clean/qa'+str(sent_num)+'_valid.json') as json_file:
                data = json.load(json_file)
        else:
            # with open('./dataset/new_test.json') as json_file:
            with open('/VL/space/rshnk/SpaRT_models/StepGame/Dataset/json_format/noise/qa'+str(sent_num)+'_test.json') as json_file:
                data = json.load(json_file)
        if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*3), np.array([0]*3), np.array([0]*3)
        # Samples are keyed by id; one question per sample in StepGame.
        for s_ind in tqdm(list(data)[:num_sample]):
            # s_ind+= 1
            # print('sample ',s_ind)
            #use k_fold for cross_evaluation
            # if human and test_or_dev == 'dev' and s_ind not in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
            print('sample ',s_ind, file = file)
            story = data[s_ind]
            story_txt = ' '.join(story['story'])
            # each question (span)
            q_text, q_emb= '', []
            q_text = story['question']
            all_q += 1
            print('Story:\n',story_txt, file = file)
            print('question: ', q_text, '\nanswer: ',story['label'], file = file)
            input_text = concate_input_components([q_text, story_txt], baseline)
            if pretrain == 'bertmc':
                _, output = multiple_classification(model, [input_text], 'FR', candidates, [], other=other, device = device, dataset = "stepgame")
            # elif pretrain == 'bertbc':
            #     _, output = boolean_classification(model, q_text, story_txt, 'FR', candidates, candidates.index(story['label']), other, device)
            print("predict: ", output[0], file = file)
            # Gold label converted to its candidate index, wrapped in a list.
            correct_answer = [candidates.index(story['label'])]
            if check_answer_equality(correct_answer, output[0]) :
                correct+=1
                print('total: ', all_q, ' correct: ', correct, file = file)
                # print('total: ', all_q, ' correct: ', correct)
            # else: print(s_ind, 'wrong')
    print(test_or_dev, ' Final accuracy: ', correct/ all_q)
    print(test_or_dev, ' Final accuracy: ', correct/ all_q, file = file)
    # if qtype == 'YN':
    #     print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
    #     Precision = np.nan_to_num(TP / TPFP)
    #     Recall = np.nan_to_num(TP / TPFN)
    #     F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
    #     Macro_F1 = np.average(F1[:2])
    #     print(test_or_dev, ' Final Precision: ', Precision, file = file)
    #     print(test_or_dev, ' Final Recall: ', Recall, file = file)
    #     print(test_or_dev, ' Final F1: ', F1, file = file)
    #     print(test_or_dev, ' Final Macro_F1: ', Macro_F1)
    #     print(test_or_dev, ' Final Macro_F1: ', Macro_F1, file = file)
    #     return (correct/ all_q, Macro_F1)
    return (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span onto token indices.

    Walks the tokenized story, accumulating character counts (plus inferred
    inter-token spaces), to convert the first [start_char, end_char] pair in
    `start_end` into [start_token, end_token], then shifts both indices by
    len(question tokens) + 2 because the model input is
    [CLS] question [SEP] story.

    Args:
        story (str): passage text.
        question (str): question text (prepended before the story downstream).
        start_end (list): list of [start_char, end_char] spans; only the
            first span is converted.
        tokenizing (callable): tokenizer returning a list of token strings.
        file: unused here; kept for signature parity with callers.

    Returns:
        list: [start_token_index, end_token_index] into the joined sequence.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    #finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:
        temp = s_e[0]
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            # Skip tokens that end before the target character offset.
            if temp > sum_char + len(word) : sum_char += len_word
            else:
                if is_start:
                    start, is_start = ind , False
                    # Span may start and end inside the same token.
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1
                else: start_end_token+=[[start, ind]]; break
            # Assumes tokens are space-separated except around '.', ',' and
            # "'" — TODO confirm this matches the tokenizer's detokenization.
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
    start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
    start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def test_babi(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on the babi test/dev split for one question type.

    Only questions whose q_type equals `qtype` are scored. `num_sample` is
    divided by 8 to derive the number of stories (presumably 8 questions per
    story — TODO confirm). `pretrain` and `unseen` are unused here.

    Returns:
        float: accuracy over the evaluated questions.
    """
    #import baseline
    # Dynamically pick the baseline implementation module.
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('dataset/babi/'+test_or_dev+'.json') as json_file:
        data = json.load(json_file)
    all_q = 0
    correct = 0
    s_ind = 0
    #random sampling or not
    random.seed(1)
    stories = data['data'] #if other != 'random' else random.sample(data['data'], num_sample)
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in stories[:number_samples]:
            # if is_DK_babi(story['story'][0]): continue
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                if question['q_type'] in [qtype]:
                    q_text = question['question']
                    all_q += 1
                    print('Story:\n',story_txt, file = file)
                    print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    _, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                    #print("logit: ", logit, file = file)
                    print("predict: ", output, file = file)
                    # Sort so comparison is order-insensitive (mutates the
                    # stored answer list in place).
                    correct_answer = question['answer']
                    correct_answer.sort()
                    if correct_answer == output :
                        correct+=1
                        print('total: ', all_q, ' correct: ', correct, file = file)
                        print('total: ', all_q, ' correct: ', correct)
    print(test_or_dev,' Final accuracy: ', correct/ all_q)
    print(test_or_dev,' Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def test_boolq(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on BoolQ dev or held-out test data.

    For the test split, the LAST `num_sample` items of test_1.json are used
    (start offset computed, then num_sample cleared). `pretrain`, `unseen`
    and `qtype` are unused here.

    Returns:
        float: accuracy over the evaluated questions.
    """
    #import baseline
    # Dynamically pick the baseline implementation module.
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    start_number = 0
    if test_or_dev == 'dev':
        with open('dataset/boolQ/dev.json') as json_file:
            data = json.load(json_file)
    else:
        with open('dataset/boolQ/test_1.json') as json_file:
            data = json.load(json_file)
        # Evaluate the tail of the file: last `num_sample` entries.
        start_number = len(data['data']) - num_sample
        num_sample = None
    all_q = 0
    correct = 0
    s_ind = 0
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in data['data'][start_number:num_sample]:
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            # Passage truncated to the first 1000 characters.
            story_txt = story['passage'][:1000]
            # each question (span)
            # for question in story['questions']:
            # q_text, q_emb= '', []
            # if question['q_type'] in [qtype]:
            q_text = story['question']+'?'
            answer = ['Yes'] if story['answer'] == True else ['No']
            all_q += 1
            print('Story:\n',story_txt, file = file)
            print('question: ', q_text, '\nanswer: ', answer, file = file)
            _, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
            print("predict: ", output, file = file)
            correct_answer = answer
            correct_answer.sort()
            if correct_answer == output :
                correct+=1
                print('total: ', all_q, ' correct: ', correct, file = file)
                print('total: ', all_q, ' correct: ', correct)
    print('Test Final accuracy: ', correct/ all_q)
    print('Test Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def is_DK_babi(story):
    """Return True when *story* mentions both a horizontal relation
    ('left'/'right') and a vertical one ('above'/'below').

    The original elif chain reduces exactly to this conjunction: each of its
    four branches pairs one horizontal keyword with one vertical keyword.

    Args:
        story (str): raw story text.

    Returns:
        bool: True iff both axes are mentioned in the text.
    """
    horizontal = 'left' in story or 'right' in story
    vertical = 'above' in story or 'below' in story
    return horizontal and vertical
| 11,087 | 35 | 215 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/babi/test.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
# from QA.babi.train import question_to_sentence, F1_measure, precision, recall, confusion_matrix
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
from QA.train import check_answer_equality, concate_input_components
def test(model
         , pretrain = "bertbc"
         , baseline = "bert"
         , test_or_dev = "test"
         ,num_sample = None
         , train_num = None
         ,unseen = False
         , qtype = None
         , other = None
         , data_name = "babi"
         , save_data = False
         , device = "cpu"
         , file = None
         , epochs = 0
        ):
    """Evaluate `model` on the babi test/dev split (tasks 17/19).

    The split file is selected by `train_num` ('train10k' -> 10k folder,
    anything else -> 1k), `qtype` ('YN' -> task 17, otherwise task 19) and
    `test_or_dev`. For YN questions, per-class confusion counts are
    accumulated and a macro-F1 over Yes/No is reported.

    NOTE(review): `unseen`, `save_data` and `epochs` are unused here —
    presumably kept for signature parity with sibling test() functions.

    Returns:
        tuple: (accuracy, macro_F1) when qtype == 'YN', else (accuracy,).
    """
    # initialize_tokenizer(baseline)
    #import baseline
    # if baseline == 'bert':
    #     from BERT import question_answering, tokenizing, boolean_classification, Masked_LM, Masked_LM_random, token_classification, multiple_classification
    candidate_answer = ["Yes", "No"] if qtype == "YN" else ["left", "right", "above", "below"]
    all_q = 0
    correct = 0
    correct_no_distance = 0
    # s_ind = 0
    correct_consistency, consistency_total =0, 0
    # k_fold = 7
    model.eval()
    if train_num == "train10k":
        training_number = '10k'
    else:
        training_number = '1k'
    # Task 17 holds YN questions; task 19 holds the directional ones.
    address = "dataset/babi/"+training_number+'/'+("17" if qtype == "YN" else "19")+"/"+test_or_dev+".json"
    with open(address) as json_file:
        data = json.load(json_file)
    if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*2), np.array([0]*2), np.array([0]*2)
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            #QA tasks
            # each question (span)
            for question in story['questions']:
                # q_text, q_emb= '', []
                # q_text = question['question']
                all_q += 1
                print('Story:\n',story_txt, file = file)
                print('question: ', question['question'], '\nanswer: ',question['answer'], file = file)
                input_text = concate_input_components([question['question'], story_txt], baseline)
                # Dispatch on head type: multi-class vs boolean classifier.
                if pretrain == 'bertmc':
                    _, output = multiple_classification(model, [input_text], question['q_type'], candidate_answer, [], device = device, dataset = data_name)
                elif pretrain == 'bertbc':
                    _, output = boolean_classification(model, [input_text], question['q_type'], candidate_answer, [], device = device, dataset = data_name)
                print("predict: ", output[0], file = file)
                correct_answer = question['answer']
                if check_answer_equality(correct_answer, output[0]):
                    correct+=1
                    print('total: ', all_q, ' correct: ', correct, file = file)
                    # print('total: ', all_q, ' correct: ', correct)
                # else: print(s_ind, 'wrong')
                # Confusion counts for Yes(index 0) / No(index 1).
                if qtype == 'YN':
                    if correct_answer == ['Yes']: TPFN[0] += 1
                    elif correct_answer == ['No']: TPFN[1] += 1
                    # elif correct_answer == ['DK']: TPFN[2] += 1
                    if output[0] == ['Yes']: TPFP[0] += 1
                    elif output[0] == ['No']: TPFP[1] += 1
                    # elif output == ['DK']: TPFP[2] += 1
                    if output[0] == correct_answer == ['Yes']: TP[0] += 1
                    elif output[0] == correct_answer == ['No']: TP[1] += 1
                    # elif output == correct_answer == ['DK']: TP[2] += 1
            # if qtype == 'FR' and human:
            #     # print(correct_answer, output)
            #     if 4 in correct_answer: correct_answer.remove(4)
            #     if 5 in correct_answer: correct_answer.remove(5)
            #     if 4 in output: output.remove(4)
            #     if 5 in output: output.remove(5)
            #     if correct_answer == output :
            #         correct_no_distance += 1
            #         print('total: ', all_q, ' correct_no_dist: ', correct_no_distance, file = file)
            #         # print('total: ', all_q, ' correct_no_dist: ', correct_no_distance)
    print(test_or_dev, ' Final accuracy: ', correct/ all_q)
    print(test_or_dev, ' Final accuracy: ', correct/ all_q, file = file)
    if qtype == 'YN':
        print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        Macro_F1 = np.average(F1[:2])
        print(test_or_dev, ' Final Precision: ', Precision, file = file)
        print(test_or_dev, ' Final Recall: ', Recall, file = file)
        print(test_or_dev, ' Final F1: ', F1, file = file)
        print(test_or_dev, ' Final Macro_F1: ', Macro_F1)
        print(test_or_dev, ' Final Macro_F1: ', Macro_F1, file = file)
        return (correct/ all_q, Macro_F1,)
    return (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span onto token indices.

    Walks the tokenized story, accumulating character counts (plus inferred
    inter-token spaces), to convert the first [start_char, end_char] pair in
    `start_end` into [start_token, end_token], then shifts both indices by
    len(question tokens) + 2 because the model input is
    [CLS] question [SEP] story.

    Args:
        story (str): passage text.
        question (str): question text (prepended before the story downstream).
        start_end (list): list of [start_char, end_char] spans; only the
            first span is converted.
        tokenizing (callable): tokenizer returning a list of token strings.
        file: unused here; kept for signature parity with callers.

    Returns:
        list: [start_token_index, end_token_index] into the joined sequence.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    #finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:
        temp = s_e[0]
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            # Skip tokens that end before the target character offset.
            if temp > sum_char + len(word) : sum_char += len_word
            else:
                if is_start:
                    start, is_start = ind , False
                    # Span may start and end inside the same token.
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1
                else: start_end_token+=[[start, ind]]; break
            # Assumes tokens are space-separated except around '.', ',' and
            # "'" — TODO confirm this matches the tokenizer's detokenization.
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
    start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
    start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def test_babi(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on the babi test/dev split for one question type.

    Only questions whose q_type equals `qtype` are scored. `num_sample` is
    divided by 8 to derive the number of stories (presumably 8 questions per
    story — TODO confirm). `pretrain` and `unseen` are unused here.

    Returns:
        float: accuracy over the evaluated questions.
    """
    #import baseline
    # Dynamically pick the baseline implementation module.
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('dataset/babi/'+test_or_dev+'.json') as json_file:
        data = json.load(json_file)
    all_q = 0
    correct = 0
    s_ind = 0
    #random sampling or not
    random.seed(1)
    stories = data['data'] #if other != 'random' else random.sample(data['data'], num_sample)
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in stories[:number_samples]:
            # if is_DK_babi(story['story'][0]): continue
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                if question['q_type'] in [qtype]:
                    q_text = question['question']
                    all_q += 1
                    print('Story:\n',story_txt, file = file)
                    print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    _, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                    #print("logit: ", logit, file = file)
                    print("predict: ", output, file = file)
                    # Sort so comparison is order-insensitive (mutates the
                    # stored answer list in place).
                    correct_answer = question['answer']
                    correct_answer.sort()
                    if correct_answer == output :
                        correct+=1
                        print('total: ', all_q, ' correct: ', correct, file = file)
                        print('total: ', all_q, ' correct: ', correct)
    print(test_or_dev,' Final accuracy: ', correct/ all_q)
    print(test_or_dev,' Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def test_boolq(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on BoolQ dev or held-out test data.

    For the test split, the LAST `num_sample` items of test_1.json are used
    (start offset computed, then num_sample cleared). `pretrain`, `unseen`
    and `qtype` are unused here.

    Returns:
        float: accuracy over the evaluated questions.
    """
    #import baseline
    # Dynamically pick the baseline implementation module.
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    start_number = 0
    if test_or_dev == 'dev':
        with open('dataset/boolQ/dev.json') as json_file:
            data = json.load(json_file)
    else:
        with open('dataset/boolQ/test_1.json') as json_file:
            data = json.load(json_file)
        # Evaluate the tail of the file: last `num_sample` entries.
        start_number = len(data['data']) - num_sample
        num_sample = None
    all_q = 0
    correct = 0
    s_ind = 0
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in data['data'][start_number:num_sample]:
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            # Passage truncated to the first 1000 characters.
            story_txt = story['passage'][:1000]
            # each question (span)
            # for question in story['questions']:
            # q_text, q_emb= '', []
            # if question['q_type'] in [qtype]:
            q_text = story['question']+'?'
            answer = ['Yes'] if story['answer'] == True else ['No']
            all_q += 1
            print('Story:\n',story_txt, file = file)
            print('question: ', q_text, '\nanswer: ', answer, file = file)
            _, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
            print("predict: ", output, file = file)
            correct_answer = answer
            correct_answer.sort()
            if correct_answer == output :
                correct+=1
                print('total: ', all_q, ' correct: ', correct, file = file)
                print('total: ', all_q, ' correct: ', correct)
    print('Test Final accuracy: ', correct/ all_q)
    print('Test Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def is_DK_babi(story):
    """Return True when *story* mentions both a horizontal relation
    ('left'/'right') and a vertical one ('above'/'below').

    The original elif chain reduces exactly to this conjunction: each of its
    four branches pairs one horizontal keyword with one vertical keyword.

    Args:
        story (str): raw story text.

    Returns:
        bool: True iff both axes are mentioned in the text.
    """
    horizontal = 'left' in story or 'right' in story
    vertical = 'above' in story or 'below' in story
    return horizontal and vertical
| 12,331 | 36.256798 | 215 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/babi/train.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
from QA.train import check_answer_equality, concate_input_components
# from BERT import tokenizing
# from ALBERT import tokenizing
# from XLNet import tokenizing
def train(model
          , criterion
          , optimizer
          , pretrain = "bertbc"
          , baseline = "bert"
          , start = 0
          , num_sample = None
          , train_num = None
          , qtype = None
          , data_name = "babi"
          , other = None
          , device = "cpu"
          , train_log = False
          , file = None
          , epochs = 0
          , batch_size = None
         ):
    """One training pass over the babi train split (tasks 17/19), batched.

    Questions are buffered until `batch_size` is reached, then a single
    forward/backward step is taken on the batch. For YN questions, Yes/No
    confusion counts are accumulated for a macro-F1.

    NOTE(review): `criterion` and `epochs` are unused; `model.zero_grad()`
    is called per question rather than per batch; the YN confusion block
    compares `output[0]` inside the per-answer loop (not `output[ind]`) —
    looks like an oversight, confirm before relying on the F1 numbers.
    Trailing questions that never fill a batch are silently dropped.

    Returns:
        tuple: (summed loss, (accuracy, macro_F1)) for YN,
               (summed loss, (accuracy,)) otherwise.
    """
    # initialize_tokenizer(baseline)
    candidate_answer = ["Yes", "No"] if qtype == "YN" else ["left", "right", "above", "below"]
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    # k_fold = 7
    if train_num == "train10k":
        training_number = '10k'
    else:
        training_number = '1k'
    # Task 17 holds YN questions; task 19 holds the directional ones.
    address = "dataset/babi/"+training_number+'/'+("17" if qtype == "YN" else "19")+"/train.json"
    with open(address) as json_file:
        data = json.load(json_file)
    if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*2), np.array([0]*2), np.array([0]*2)
    _temp_batch_input = []
    _temp_batch_answer = []
    for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
        # s_ind+= 1
        # print('sample ',s_ind)
        # Allows resuming from a given story index.
        if s_ind< start:continue
        # samples [epochs*k_fold, (epochs*k_fold)+k_fold] considered as dev
        # if human and s_ind in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
        story_txt = story_txt = story['story'][0]
        if train_log:
            print('sample ',s_ind, file = file)
            print('Story:\n',story_txt, file = file)
        #QA tasks
        # each question
        for question in story['questions']:
            q_text, q_emb= '', []
            model.zero_grad()
            all_q += 1
            if train_log: print('question: ', question['question'], '\nanswer: ',question['answer'], file = file)
            # Buffer (input, answer) pairs until a full batch is collected.
            _temp_batch_input += [concate_input_components([question['question'], story_txt], baseline)]
            _temp_batch_answer += [question['answer']]
            if len(_temp_batch_input) < batch_size : continue
            if pretrain == 'bertmc':
                loss, output = multiple_classification(model, _temp_batch_input, question['q_type'], candidate_answer, _temp_batch_answer, other = other, device = device, dataset = data_name)
            elif pretrain == 'bertbc':
                loss, output = boolean_classification(model,_temp_batch_input, question['q_type'], candidate_answer, _temp_batch_answer, other = other, device = device, dataset = data_name)
            if train_log: print("predict: ", output, file = file)
            for ind, correct_answer in enumerate(_temp_batch_answer):
                if check_answer_equality(correct_answer, output[ind]):
                    correct+=1
                    if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                    # print('total: ', all_q, ' correct: ', correct)
                if qtype == 'YN':
                    if correct_answer == ['Yes']: TPFN[0] += 1
                    elif correct_answer == ['No']: TPFN[1] += 1
                    # elif correct_answer == ['DK']: TPFN[2] += 1
                    if output[0] == ['Yes']: TPFP[0] += 1
                    elif output[0] == ['No']: TPFP[1] += 1
                    # elif output == ['DK']: TPFP[2] += 1
                    if output[0] == correct_answer == ['Yes']: TP[0] += 1
                    elif output[0] == correct_answer == ['No']: TP[1] += 1
                    # elif output == correct_answer == ['DK']: TP[2] += 1
            if train_log: print("Loss is ", loss.item(), file = file)
            losses += [loss.item()]
            loss.backward()
            optimizer.step()
            _temp_batch_answer = []
            _temp_batch_input = []
    # Collapse per-batch losses into a single summed scalar.
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    if qtype == 'YN':
        print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        Macro_F1 = np.average(F1[:2])
        print('Train Final Precision: ', Precision, file = file)
        print('Train Final Recall: ', Recall, file = file)
        print('Train Final F1: ', F1, file = file)
        print('Train Final Macro_F1: ', Macro_F1)
        print('Train Final Macro_F1: ', Macro_F1, file = file)
        return losses, (correct/ all_q, Macro_F1,)
    return losses, (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span onto token indices.

    Walks the tokenized story, accumulating character counts (plus inferred
    inter-token spaces), to convert the first [start_char, end_char] pair in
    `start_end` into [start_token, end_token], then shifts both indices by
    len(question tokens) + 2 because the model input is
    [CLS] question [SEP] story.

    Args:
        story (str): passage text.
        question (str): question text (prepended before the story downstream).
        start_end (list): list of [start_char, end_char] spans; only the
            first span is converted.
        tokenizing (callable): tokenizer returning a list of token strings.
        file: unused here; kept for signature parity with callers.

    Returns:
        list: [start_token_index, end_token_index] into the joined sequence.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    #finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:
        temp = s_e[0]
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            # Skip tokens that end before the target character offset.
            if temp > sum_char + len(word) : sum_char += len_word;
            else:
                if is_start:
                    start, is_start = ind , False
                    # Span may start and end inside the same token.
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1;
                else: start_end_token+=[[start, ind]]; break
            # Assumes tokens are space-separated except around '.', ',' and
            # "'" — TODO confirm this matches the tokenizer's detokenization.
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
    start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
    start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def train_babi(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
    """One training pass over the babi train split for one question type.

    Stories may be randomly subsampled (fixed seed) when `num_sample` is one
    of a few preset sizes; only questions matching `qtype` are trained on,
    one backward/step per question.

    NOTE(review): `criterion`, `pretrain` and `train24k` are unused here —
    kept for signature parity with the other train_* helpers.

    Returns:
        tuple: (sum of per-question losses, accuracy).
    """
    #import baseline
    # Dynamically pick the baseline implementation module.
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('./dataset/babi/train.json') as json_file:
        data = json.load(json_file)
    # num_sample counts questions; /8 converts to stories (8 q's per story
    # — presumably; verify against the dataset).
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    #random sampling or not
    random.seed(1)
    stories = random.sample(data['data'], number_samples) if num_sample in [100, 500, 1000, 2000, 5000] else data['data']
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    for story in stories[:number_samples]:
        # if is_DK_babi(story['story'][0]): continue
        s_ind+= 1
        print('sample ',s_ind)
        print('sample ',s_ind, file = file)
        story_txt = story['story'][0]
        x = 0
        # each question (span)
        for question in story['questions']:
            q_text, q_emb= '', []
            if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
                x+=1
                q_text = question['question']
                model.zero_grad()
                all_q += 1
                print('Story:\n',story_txt, file = file)
                print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                #print("logit: ", logit , file = file)
                print("predict: ", output, file = file)
                # Sort so comparison is order-insensitive (mutates the
                # stored answer list in place).
                correct_answer = question['answer']
                correct_answer.sort()
                if correct_answer == output :
                    correct+=1
                    print('total: ', all_q, ' correct: ', correct, file = file)
                    print('total: ', all_q, ' correct: ', correct)
                print("Loss is ", loss.item(), file = file)
                losses += [loss.item()]
                loss.backward()
                optimizer.step()
    # Collapse per-question losses into a single summed scalar.
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, correct/ all_q
def train_boolq(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
    """One training pass over the BoolQ train split.

    Loads ./dataset/boolQ/train.json, runs `boolean_classification` on each
    (question, truncated passage) pair, backpropagates per sample, and logs
    progress both to stdout and to `file`.

    NOTE(review): `criterion`, `pretrain`, `train24k` and `qtype` are unused
    here — presumably kept for signature parity with the other train_*
    helpers; confirm before removing.

    Returns:
        tuple: (sum of per-sample losses, accuracy over all questions).
    """
    #import baseline
    # Dynamically pick the baseline implementation module.
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('./dataset/boolQ/train.json') as json_file:
        data = json.load(json_file)
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    x = 0
    for story in data['data'][:num_sample]:
        s_ind+= 1
        print('sample ',s_ind)
        print('sample ',s_ind, file = file)
        # Passage is truncated to its first 1000 characters.
        story_txt = story['passage'][:1000]
        # print(story_txt)
        # each question (span)
        # for question in story['questions']:
        # q_text, q_emb= '', []
        # if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
        x+=1
        q_text = story['question']
        # Gold label wrapped in a one-element list to match the classifier's
        # output format.
        answer = ['Yes'] if story['answer'] == True else ['No']
        model.zero_grad()
        all_q += 1
        print('Story:\n',story_txt, file = file)
        print('question: ', q_text, '\nanswer: ',answer, file = file)
        loss, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
        print("predict: ", output, file = file)
        # NOTE(review): `correct_answer` aliases `answer`, so sort() mutates
        # it in place — harmless for a single-element list.
        correct_answer = answer
        correct_answer.sort()
        if correct_answer == output :
            correct+=1
            print('total: ', all_q, ' correct: ', correct, file = file)
            print('total: ', all_q, ' correct: ', correct)
        print("Loss is ", loss.item(), file = file)
        losses += [loss.item()]
        loss.backward()
        optimizer.step()
    # Collapse per-sample losses into a single summed scalar.
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, correct/ all_q
def is_DK_babi(story):
    """Return True when `story` mentions both a horizontal relation
    ('left'/'right') and a vertical one ('above'/'below')."""
    mentions_horizontal = 'left' in story or 'right' in story
    mentions_vertical = 'above' in story or 'below' in story
    return mentions_horizontal and mentions_vertical
def question_to_sentence(question, q_type, answer, candidate):
    """Rewrite a question into a declarative sentence containing [MASK]
    slots so a masked-LM can fill in the answer.

    q_type 'FB' (find-block): the block name becomes the mask.
    q_type 'FR' (find-relation): the relation becomes one mask, or two
        masks when the answer id is 4 or 5 — presumably two-token
        relations; TODO confirm against the label map.
    q_type 'CO' (choose-object): the answer phrase, looked up in
        `candidate`, is replaced by one [MASK] per answer token.
    """
    if q_type == 'FB':
        if 'Which' in question:
            question = question.replace('Which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'which' in question:
            question = question.replace('which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'what' in question:
            question = question.replace('what block', 'block [MASK]').replace('(s)','').replace('?','.')
        else:
            question = question.replace('What block', 'block [MASK]').replace('(s)','').replace('?','.')
    elif q_type == 'FR':
        if 'What' in question:
            question = question.replace('What is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'exist' in question:
            question = question.replace('what relations exist between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'what' in question:
            question = question.replace('what is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'where' in question:
            question = question.replace('where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
        else:
            question = question.replace('Where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
    elif q_type == 'CO':
        # the mask length must match the tokenized answer length
        answer = candidate[answer[0]]
        token_answer = tokenizing(answer)
        mask = ('[MASK] '*len(token_answer))[:-1]
        if 'What' in question:
            # keep only the question itself (drop trailing text after '?')
            question = question[:question.find('?')+1]
            if 'What object' in question:
                question = question.replace('What object',mask).replace('?','.')
            elif 'What thing' in question:
                question = question.replace('What thing',mask).replace('?','.')
            elif 'What square' in question:
                question = question.replace('What square',mask).replace('?','.')
            else:
                question = question.replace('What',mask).replace('?','.')
        elif 'what' in question:
            question = question[:question.find('?')+1]
            if 'what object' in question:
                question = question.replace('what object',mask).replace('?','.')
            elif 'what thing' in question:
                question = question.replace('what thing',mask).replace('?','.')
            else:
                question = question.replace('what',mask).replace('?','.')
        elif 'Which' in question:
            if 'Which object' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which object',mask).replace('?','.')
            elif 'Which square' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which square',mask).replace('?','.')
        elif 'which' in question:
            question = question[:question.find('?')+1]
            question = question.replace('which object',mask).replace('?','.')
    return question
def confusion_matrix(truth, predict,correct, TP,TPFP,TPFN):
    """Update exact-match count and per-label tallies.

    `truth` and `predict` are parallel lists of label ids; TP, TPFP and
    TPFN are indexed by label id. Returns the updated accumulators.
    """
    if truth == predict:
        correct += 1
    for pos, gold in enumerate(truth):
        pred = predict[pos]
        if gold == pred:
            TP[gold] += 1    # true positive for this label
        TPFP[pred] += 1      # everything predicted as `pred`
        TPFN[gold] += 1      # everything whose gold label is `gold`
    return correct, TP, TPFP, TPFN
def precision(TP,TPFP):
    """Per-class precision over labels 1..N (index 0 excluded); 0/0 -> 0."""
    ratios = TP[1:] / TPFP[1:]
    return np.nan_to_num(ratios)
def recall(TP,TPFN):
    """Per-class recall over labels 1..N (index 0 excluded); 0/0 -> 0."""
    ratios = TP[1:] / TPFN[1:]
    return np.nan_to_num(ratios)
def F1_measure(TP,TPFP, TPFN,macro= False):
    """Per-class F1 over labels 1..N; macro-averaged when `macro` is True.

    Any 0/0 division (empty class) is mapped to 0 via nan_to_num.
    """
    prec = np.nan_to_num(TP[1:] / TPFP[1:])
    rec = np.nan_to_num(TP[1:] / TPFN[1:])
    f1 = np.nan_to_num(2 * prec * rec / (prec + rec))
    if macro:
        return np.average(f1)
    return f1
| 17,496 | 35.835789 | 215 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/babi/.ipynb_checkpoints/train-checkpoint.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
from QA.train import check_answer_equality, concate_input_components
# from BERT import tokenizing
# from ALBERT import tokenizing
# from XLNet import tokenizing
def train(model
          , criterion
          , optimizer
          , pretrain = "bertbc"
          , baseline = "bert"
          , start = 0
          , num_sample = None
          , train_num = None
          , qtype = None
          , data_name = "babi"
          , other = None
          , device = "cpu"
          , train_log = False
          , file = None
          , epochs = 0
          , batch_size = None
          ):
    """Fine-tune `model` on the bAbI spatial-QA train split in mini-batches.

    Questions are accumulated into batches of `batch_size`; each full batch
    is scored with multiple_classification ('bertmc') or
    boolean_classification ('bertbc'), back-propagated, and stepped with
    `optimizer`. Returns (summed loss, (accuracy,)) — with Macro_F1 added
    to the tuple when qtype == 'YN'. Progress is written to `file` when
    `train_log` is set.

    BUG FIXES vs. previous revision:
      * missing ':' after `if check_answer_equality(...)` (syntax error);
      * per-item Yes/No tallies compared every item to `output[0]`
        instead of `output[ind]`, skewing the train Macro_F1.
    NOTE(review): a trailing partial batch (< batch_size) is collected but
    never trained on — confirm whether that is intended.
    """
    candidate_answer = ["Yes", "No"] if qtype == "YN" else ["left", "right", "above", "below"]
    model.train()
    all_q = 0
    correct = 0
    losses = []
    training_number = '10k' if train_num == "train10k" else '1k'
    # bAbI task 17 holds the yes/no questions, task 19 the relation questions
    address = "dataset/babi/"+training_number+'/'+("17" if qtype == "YN" else "19")+"/train.json"
    with open(address) as json_file:
        data = json.load(json_file)
    if qtype == 'YN':
        TPFN, TP, TPFP = np.array([0]*2), np.array([0]*2), np.array([0]*2)
    _temp_batch_input = []
    _temp_batch_answer = []
    for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
        if s_ind < start: continue
        story_txt = story['story'][0]
        if train_log:
            print('sample ', s_ind, file = file)
            print('Story:\n', story_txt, file = file)
        # each question of the story
        for question in story['questions']:
            model.zero_grad()
            all_q += 1
            if train_log: print('question: ', question['question'], '\nanswer: ', question['answer'], file = file)
            _temp_batch_input += [concate_input_components([question['question'], story_txt], baseline)]
            _temp_batch_answer += [question['answer']]
            # keep accumulating until a full batch is available
            if len(_temp_batch_input) < batch_size: continue
            if pretrain == 'bertmc':
                loss, output = multiple_classification(model, _temp_batch_input, question['q_type'], candidate_answer, _temp_batch_answer, other = other, device = device, dataset = data_name)
            elif pretrain == 'bertbc':
                loss, output = boolean_classification(model, _temp_batch_input, question['q_type'], candidate_answer, _temp_batch_answer, other = other, device = device, dataset = data_name)
            if train_log: print("predict: ", output, file = file)
            for ind, correct_answer in enumerate(_temp_batch_answer):
                # FIX: this `if` previously had no ':' (syntax error)
                if check_answer_equality(correct_answer, output[ind]):
                    correct += 1
                    if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                if qtype == 'YN':
                    if correct_answer == ['Yes']: TPFN[0] += 1
                    elif correct_answer == ['No']: TPFN[1] += 1
                    # FIX: use the prediction for THIS batch item, not output[0]
                    if output[ind] == ['Yes']: TPFP[0] += 1
                    elif output[ind] == ['No']: TPFP[1] += 1
                    if output[ind] == correct_answer == ['Yes']: TP[0] += 1
                    elif output[ind] == correct_answer == ['No']: TP[1] += 1
            if train_log: print("Loss is ", loss.item(), file = file)
            losses += [loss.item()]
            loss.backward()
            optimizer.step()
            _temp_batch_answer = []
            _temp_batch_input = []
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    if qtype == 'YN':
        print('TP:', TP, ' TPFP: ', TPFP, ' TPFN: ', TPFN, file = file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        Macro_F1 = np.average(F1[:2])
        print('Train Final Precision: ', Precision, file = file)
        print('Train Final Recall: ', Recall, file = file)
        print('Train Final F1: ', F1, file = file)
        print('Train Final Macro_F1: ', Macro_F1)
        print('Train Final Macro_F1: ', Macro_F1, file = file)
        return losses, (correct/ all_q, Macro_F1,)
    return losses, (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span to token indices within the
    [CLS] question [SEP] story encoding.

    `start_end` is a list of [char_start, char_end] spans; only the first
    span is converted. Returns [token_start, token_end], shifted by the
    question length plus two special tokens. `file` is unused here.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    #finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:
        temp = s_e[0]
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            # still before the target character offset: keep accumulating
            if temp > sum_char + len(word) : sum_char += len_word;
            else:
                if is_start:
                    # first token that reaches the span start
                    start, is_start = ind , False
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1;
                else: start_end_token+=[[start, ind]]; break
            # account for the inter-token space; punctuation attaches directly
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
    start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
    start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def train_babi(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
    """One training pass over the bAbI train split for questions of `qtype`.

    Dynamically imports the task heads for the chosen `baseline`, scores
    each matching question with boolean_classification, back-propagates per
    question, and returns (summed loss, accuracy). Progress is mirrored to
    stdout and `file`. NOTE(review): `criterion`, `pretrain` and `train24k`
    look unused here — kept for interface parity.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('./dataset/babi/train.json') as json_file:
        data = json.load(json_file)
    # /8 converts a question budget into a story budget — presumably 8
    # questions per story; TODO confirm
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    #random sampling or not
    random.seed(1)
    stories = random.sample(data['data'], number_samples) if num_sample in [100, 500, 1000, 2000, 5000] else data['data']
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    for story in stories[:number_samples]:
        s_ind+= 1
        print('sample ',s_ind)
        print('sample ',s_ind, file = file)
        story_txt = story['story'][0]
        x = 0
        # each question (span)
        for question in story['questions']:
            q_text, q_emb= '', []
            # only train on questions of the requested type
            if question['q_type'] in [qtype] :
                x+=1
                q_text = question['question']
                model.zero_grad()
                all_q += 1
                print('Story:\n',story_txt, file = file)
                print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                print("predict: ", output, file = file)
                correct_answer = question['answer']
                correct_answer.sort()
                if correct_answer == output :
                    correct+=1
                    print('total: ', all_q, ' correct: ', correct, file = file)
                    print('total: ', all_q, ' correct: ', correct)
                print("Loss is ", loss.item(), file = file)
                losses += [loss.item()]
                loss.backward()
                optimizer.step()
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, correct/ all_q
def train_boolq(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
    """One training pass over the BoolQ train split.

    Dynamically imports the task heads for the chosen `baseline`, runs
    boolean_classification on each passage/question pair, back-propagates
    per sample, and returns (summed loss, accuracy). Progress is mirrored
    to stdout and `file`. NOTE(review): `criterion`, `pretrain`,
    `train24k` and `qtype` look unused here — kept for interface parity.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('./dataset/boolQ/train.json') as json_file:
        data = json.load(json_file)
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    x = 0
    for story in data['data'][:num_sample]:
        s_ind+= 1
        print('sample ',s_ind)
        print('sample ',s_ind, file = file)
        # passages are truncated to 1000 characters to fit the encoder input
        story_txt = story['passage'][:1000]
        x+=1
        q_text = story['question']
        # BoolQ stores the gold answer as a bool; map it to the label format
        answer = ['Yes'] if story['answer'] == True else ['No']
        model.zero_grad()
        all_q += 1
        print('Story:\n',story_txt, file = file)
        print('question: ', q_text, '\nanswer: ',answer, file = file)
        loss, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
        print("predict: ", output, file = file)
        correct_answer = answer
        correct_answer.sort()
        if correct_answer == output :
            correct+=1
            print('total: ', all_q, ' correct: ', correct, file = file)
            print('total: ', all_q, ' correct: ', correct)
        print("Loss is ", loss.item(), file = file)
        losses += [loss.item()]
        loss.backward()
        optimizer.step()
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    return losses, correct/ all_q
def is_DK_babi(story):
    """Return True when `story` mentions both a horizontal relation
    ('left'/'right') and a vertical one ('above'/'below')."""
    mentions_horizontal = 'left' in story or 'right' in story
    mentions_vertical = 'above' in story or 'below' in story
    return mentions_horizontal and mentions_vertical
def question_to_sentence(question, q_type, answer, candidate):
    """Rewrite a question into a declarative sentence containing [MASK]
    slots so a masked-LM can fill in the answer.

    q_type 'FB' (find-block): the block name becomes the mask.
    q_type 'FR' (find-relation): the relation becomes one mask, or two
        masks when the answer id is 4 or 5 — presumably two-token
        relations; TODO confirm against the label map.
    q_type 'CO' (choose-object): the answer phrase, looked up in
        `candidate`, is replaced by one [MASK] per answer token.
    """
    if q_type == 'FB':
        if 'Which' in question:
            question = question.replace('Which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'which' in question:
            question = question.replace('which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'what' in question:
            question = question.replace('what block', 'block [MASK]').replace('(s)','').replace('?','.')
        else:
            question = question.replace('What block', 'block [MASK]').replace('(s)','').replace('?','.')
    elif q_type == 'FR':
        if 'What' in question:
            question = question.replace('What is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'exist' in question:
            question = question.replace('what relations exist between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'what' in question:
            question = question.replace('what is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'where' in question:
            question = question.replace('where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
        else:
            question = question.replace('Where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
    elif q_type == 'CO':
        # the mask length must match the tokenized answer length
        answer = candidate[answer[0]]
        token_answer = tokenizing(answer)
        mask = ('[MASK] '*len(token_answer))[:-1]
        if 'What' in question:
            # keep only the question itself (drop trailing text after '?')
            question = question[:question.find('?')+1]
            if 'What object' in question:
                question = question.replace('What object',mask).replace('?','.')
            elif 'What thing' in question:
                question = question.replace('What thing',mask).replace('?','.')
            elif 'What square' in question:
                question = question.replace('What square',mask).replace('?','.')
            else:
                question = question.replace('What',mask).replace('?','.')
        elif 'what' in question:
            question = question[:question.find('?')+1]
            if 'what object' in question:
                question = question.replace('what object',mask).replace('?','.')
            elif 'what thing' in question:
                question = question.replace('what thing',mask).replace('?','.')
            else:
                question = question.replace('what',mask).replace('?','.')
        elif 'Which' in question:
            if 'Which object' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which object',mask).replace('?','.')
            elif 'Which square' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which square',mask).replace('?','.')
        elif 'which' in question:
            question = question[:question.find('?')+1]
            question = question.replace('which object',mask).replace('?','.')
    return question
def confusion_matrix(truth, predict,correct, TP,TPFP,TPFN):
    """Update exact-match count and per-label tallies.

    `truth` and `predict` are parallel lists of label ids; TP, TPFP and
    TPFN are indexed by label id. Returns the updated accumulators.
    """
    if truth == predict:
        correct += 1
    for pos, gold in enumerate(truth):
        pred = predict[pos]
        if gold == pred:
            TP[gold] += 1    # true positive for this label
        TPFP[pred] += 1      # everything predicted as `pred`
        TPFN[gold] += 1      # everything whose gold label is `gold`
    return correct, TP, TPFP, TPFN
def precision(TP,TPFP):
    """Per-class precision over labels 1..N (index 0 excluded); 0/0 -> 0."""
    ratios = TP[1:] / TPFP[1:]
    return np.nan_to_num(ratios)
def recall(TP,TPFN):
    """Per-class recall over labels 1..N (index 0 excluded); 0/0 -> 0."""
    ratios = TP[1:] / TPFN[1:]
    return np.nan_to_num(ratios)
def F1_measure(TP,TPFP, TPFN,macro= False):
    """Per-class F1 over labels 1..N; macro-averaged when `macro` is True.

    Any 0/0 division (empty class) is mapped to 0 via nan_to_num.
    """
    prec = np.nan_to_num(TP[1:] / TPFP[1:])
    rec = np.nan_to_num(TP[1:] / TPFN[1:])
    f1 = np.nan_to_num(2 * prec * rec / (prec + rec))
    if macro:
        return np.average(f1)
    return f1
| 17,481 | 35.959831 | 215 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/babi/.ipynb_checkpoints/test-checkpoint.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
# from QA.babi.train import question_to_sentence, F1_measure, precision, recall, confusion_matrix
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
from QA.train import check_answer_equality, concate_input_components
def test(model
         , pretrain = "bertbc"
         , baseline = "bert"
         , test_or_dev = "test"
         ,num_sample = None
         , train_num = None
         ,unseen = False
         , qtype = None
         , other = None
         , data_name = "babi"
         , save_data = False
         , device = "cpu"
         , file = None
         , epochs = 0
         ):
    """Evaluate `model` on the bAbI `test_or_dev` split, one question at a
    time (batch of 1), without gradient tracking.

    Returns (accuracy,) — or (accuracy, Macro_F1) when qtype == 'YN'.
    NOTE(review): `unseen`, `other`, `save_data` and `epochs` look unused
    in this revision.
    """
    candidate_answer = ["Yes", "No"] if qtype == "YN" else ["left", "right", "above", "below"]
    all_q = 0
    correct = 0
    correct_no_distance = 0
    correct_consistency, consistency_total =0, 0
    model.eval()
    if train_num == "train10k":
        training_number = '10k'
    else:
        training_number = '1k'
    # bAbI task 17 holds the yes/no questions, task 19 the relation questions
    address = "dataset/babi/"+training_number+'/'+("17" if qtype == "YN" else "19")+"/"+test_or_dev+".json"
    with open(address) as json_file:
        data = json.load(json_file)
    if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*2), np.array([0]*2), np.array([0]*2)
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            # each question of the story
            for question in story['questions']:
                all_q += 1
                print('Story:\n',story_txt, file = file)
                print('question: ', question['question'], '\nanswer: ',question['answer'], file = file)
                input_text = concate_input_components([question['question'], story_txt], baseline)
                if pretrain == 'bertmc':
                    _, output = multiple_classification(model, [input_text], question['q_type'], candidate_answer, [], device = device, dataset = data_name)
                elif pretrain == 'bertbc':
                    _, output = boolean_classification(model, [input_text], question['q_type'], candidate_answer, [], device = device, dataset = data_name)
                print("predict: ", output[0], file = file)
                correct_answer = question['answer']
                if check_answer_equality(correct_answer, output[0]):
                    correct+=1
                    print('total: ', all_q, ' correct: ', correct, file = file)
                # yes/no confusion-matrix tallies for macro-F1
                if qtype == 'YN':
                    if correct_answer == ['Yes']: TPFN[0] += 1
                    elif correct_answer == ['No']: TPFN[1] += 1
                    if output[0] == ['Yes']: TPFP[0] += 1
                    elif output[0] == ['No']: TPFP[1] += 1
                    if output[0] == correct_answer == ['Yes']: TP[0] += 1
                    elif output[0] == correct_answer == ['No']: TP[1] += 1
    print(test_or_dev, ' Final accuracy: ', correct/ all_q)
    print(test_or_dev, ' Final accuracy: ', correct/ all_q, file = file)
    if qtype == 'YN':
        print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        Macro_F1 = np.average(F1[:2])
        print(test_or_dev, ' Final Precision: ', Precision, file = file)
        print(test_or_dev, ' Final Recall: ', Recall, file = file)
        print(test_or_dev, ' Final F1: ', F1, file = file)
        print(test_or_dev, ' Final Macro_F1: ', Macro_F1)
        print(test_or_dev, ' Final Macro_F1: ', Macro_F1, file = file)
        return (correct/ all_q, Macro_F1,)
    return (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span to token indices within the
    [CLS] question [SEP] story encoding.

    `start_end` is a list of [char_start, char_end] spans; only the first
    span is converted. Returns [token_start, token_end], shifted by the
    question length plus two special tokens. `file` is unused here.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    #finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:
        temp = s_e[0]
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            # still before the target character offset: keep accumulating
            if temp > sum_char + len(word) : sum_char += len_word
            else:
                if is_start:
                    # first token that reaches the span start
                    start, is_start = ind , False
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1
                else: start_end_token+=[[start, ind]]; break
            # account for the inter-token space; punctuation attaches directly
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
    start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
    start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def test_babi(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on the bAbI `test_or_dev` split for questions of
    `qtype`, without gradient tracking. Returns the accuracy.

    NOTE(review): `pretrain` and `unseen` look unused in this revision.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('dataset/babi/'+test_or_dev+'.json') as json_file:
        data = json.load(json_file)
    all_q = 0
    correct = 0
    s_ind = 0
    #random sampling or not
    random.seed(1)
    stories = data['data'] #if other != 'random' else random.sample(data['data'], num_sample)
    # /8 converts a question budget into a story budget — presumably 8
    # questions per story; TODO confirm
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in stories[:number_samples]:
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                if question['q_type'] in [qtype]:
                    q_text = question['question']
                    all_q += 1
                    print('Story:\n',story_txt, file = file)
                    print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    _, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                    print("predict: ", output, file = file)
                    correct_answer = question['answer']
                    correct_answer.sort()
                    if correct_answer == output :
                        correct+=1
                        print('total: ', all_q, ' correct: ', correct, file = file)
                        print('total: ', all_q, ' correct: ', correct)
    print(test_or_dev,' Final accuracy: ', correct/ all_q)
    print(test_or_dev,' Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def test_boolq(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on the BoolQ dev split, or on the LAST `num_sample`
    entries of test_1.json when `test_or_dev` != 'dev'. No gradient
    tracking. Returns the accuracy.

    NOTE(review): `pretrain`, `unseen` and `qtype` look unused in this
    revision.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    start_number = 0
    if test_or_dev == 'dev':
        with open('dataset/boolQ/dev.json') as json_file:
            data = json.load(json_file)
    else:
        with open('dataset/boolQ/test_1.json') as json_file:
            data = json.load(json_file)
        # take the trailing num_sample entries of the test file
        start_number = len(data['data']) - num_sample
        num_sample = None
    all_q = 0
    correct = 0
    s_ind = 0
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in data['data'][start_number:num_sample]:
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            # passages are truncated to 1000 characters to fit the encoder
            story_txt = story['passage'][:1000]
            q_text = story['question']+'?'
            # BoolQ stores the gold answer as a bool; map it to the label format
            answer = ['Yes'] if story['answer'] == True else ['No']
            all_q += 1
            print('Story:\n',story_txt, file = file)
            print('question: ', q_text, '\nanswer: ', answer, file = file)
            _, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
            print("predict: ", output, file = file)
            correct_answer = answer
            correct_answer.sort()
            if correct_answer == output :
                correct+=1
                print('total: ', all_q, ' correct: ', correct, file = file)
                print('total: ', all_q, ' correct: ', correct)
    print('Test Final accuracy: ', correct/ all_q)
    print('Test Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def is_DK_babi(story):
    """Return True when `story` mentions both a horizontal relation
    ('left'/'right') and a vertical one ('above'/'below')."""
    mentions_horizontal = 'left' in story or 'right' in story
    mentions_vertical = 'above' in story or 'below' in story
    return mentions_horizontal and mentions_vertical
| 12,294 | 36.257576 | 215 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/.ipynb_checkpoints/testold-checkpoint.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from QA.train import question_to_sentence, F1_measure, precision, recall, confusion_matrix
from BERT import tokenizing
# from ALBERT import tokenizing
# from XLNet import tokenizing
def test(model
         , pretrain = "bertbc"
         , baseline = "bert"
         , test_or_dev = "test"
         ,num_sample = None
         , train_num = None
         ,unseen = False
         , qtype = None
         , other = None
         , data_name = False
         , save_data = False
         , device = "cpu"
         , file = None
         , data = None
         , epochs = 0
         ):
    """Evaluate `model` on one test/dev split and print running accuracy.

    Depending on `pretrain`, every story is scored either as a masked-LM
    probe ('mlm' rewrites each question into a cloze sentence, 'mlmr'
    masks random story tokens) or through a QA head ('bertqa' span
    extraction, 'bertmc' multi-label classification, 'bertbc' boolean
    classification).

    Args:
        model: the already-loaded model to evaluate (put into eval mode here).
        pretrain: objective/head selector: 'mlm', 'mlmr', 'bertqa',
            'bertmc' or 'bertbc'.
        baseline: backbone family ('bert', 'xlnet', 'albert'); selects the
            helper module the scoring functions are imported from.
        test_or_dev: split name used to build the dataset file path.
        num_sample: cap on the number of stories evaluated (None = all).
        train_num: unused here; kept for signature parity with train().
        unseen: if True, evaluate on the held-out unseen_test.json file.
        qtype: question type to score ('YN', 'FR', 'FB', 'CO', 'FA'; 'all'
            enables every MLM task).
        other: extra behaviour flags (e.g. 'noDK' skips don't-know items).
        data_name: dataset selector ('human', 'spartun', or falsy for the
            default SpaRTQA files).
        save_data: unused in this function.
        device: torch device string.
        file: open file handle that the detailed per-question log goes to.
        data: ignored; the split is always (re)read from disk below.
        epochs: unused here (kept for the commented-out k-fold logic).

    Returns:
        (accuracy,) as a 1-tuple, or (accuracy, Macro_F1) when
        qtype == 'YN'.
    """
    #import baseline
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, boolean_classification, Masked_LM, Masked_LM_random, token_classification, multiple_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    all_q = 0                # number of questions actually scored
    correct = 0              # exact-match tally (fractional for MLM tasks)
    correct_no_distance = 0  # FR-on-human tally ignoring distance labels 4/5
    # s_ind = 0
    correct_consistency, consistency_total =0, 0
    # k_fold = 7
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        # pick the JSON split to evaluate
        if unseen:
            with open('./dataset/unseen_test.json') as json_file:
                data = json.load(json_file)
        elif data_name == "human":
            #using k_fold
            # if test_or_dev == 'dev':
            #     with open('./dataset/human_train.json') as json_file:
            #         data = json.load(json_file)
            # else:
            with open('./dataset/human_'+test_or_dev+'.json') as json_file:
                data = json.load(json_file)
        elif data_name == "spartun":
            with open('dataset/SpaRTUN/'+test_or_dev+'.json') as json_file:
                data = json.load(json_file)
        else:
            # with open('./dataset/new_test.json') as json_file:
            with open('dataset/'+test_or_dev+'.json') as json_file:
                data = json.load(json_file)
        # per-class Yes/No/DK tallies used for precision/recall/F1 below
        if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*3), np.array([0]*3), np.array([0]*3)
        for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
            # s_ind+= 1
            # print('sample ',s_ind)
            #use k_fold for cross_evaluation
            # if human and test_or_dev == 'dev' and s_ind not in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            #MLM tasks
            if pretrain == 'mlm':
                # story_txt = "the square is above the cicle. the circle is above the rectangle. the square is above rectangle."
                print('Story:\n',story_txt, file = file)
                tasks_list = ['FB', 'FR', 'CO'] if qtype == 'all' else [qtype]
                for question in story['questions']:
                    q_text, q_emb= '', []
                    q_type = question['q_type']
                    if q_type in tasks_list :
                        q_text = question_to_sentence(question['question'], question['q_type'], question['answer'], question['candidate_answers'])
                        # skip questions the cloze rewriting cannot represent
                        # (multi-answer, empty FB, FR "no relation", CO 2/3)
                        if len(question['answer'])> 1 or (q_type == 'FB' and question['answer'] == []) or (q_type == 'FR' and 7 in question['answer']) or (q_type == 'CO' and (question['answer'] == [2] or question['answer'] == [3])): continue
                        # print(question['q_id'],q_type, question['candidate_answers'], question['answer'][0])
                        answer = question['answer'][0] if q_type == 'FB' else question['candidate_answers'][question['answer'][0]]
                        if q_type == 'CO' and ('which' in answer or 'in' in answer or 'that' in answer): continue
                        all_q += 1
                        print('Question: ',q_text,'\nAnswer: ', answer, file = file)
                        _, output, truth = Masked_LM(model, story_txt, q_text, answer, other, device, file)
                        # print("predict: ", output)
                        print("truth: ", truth, "\npredict: ", output, file = file)
                        # print("truth: ", truth, "\npredict: ", output)
                        # fraction of masked tokens recovered correctly
                        correct_temp = 0
                        for i in range(len(output)):
                            if output[i] == truth[i]: correct_temp+=1
                        correct += correct_temp / len(output)
                        # if correct_temp / len(output) == 1:
                        #     correct += 1
                        print('total: ', all_q, ' correct: ', correct, file = file)
                        print('total: ', all_q, ' correct: ', correct)
            elif pretrain == 'mlmr':
                print('Story:\n',story_txt, file = file)
                all_q += 1
                _, output, truth = Masked_LM_random(model, story_txt, s_ind, other, device, file)
                # print("predict: ", output)
                print("truth: ", truth, "\npredict: ", output, file = file)
                # print("truth: ", truth, "\npredict: ", output)
                # fraction of randomly masked tokens recovered correctly
                correct_temp = 0
                for i in range(len(output)):
                    if output[i] == truth[i]: correct_temp+=1
                correct += correct_temp / len(output)
                # if correct_temp / len(output) == 1:
                #     correct += 1
                print('total: ', all_q, ' correct: ', correct, file = file)
                print('total: ', all_q, ' correct: ', correct)
            #QA tasks
            else:
                # each question (span)
                for question in story['questions']:
                    q_text, q_emb= '', []
                    if question['q_type'] in [qtype]: #and len(question['answer']) == 1: #and x == 0:
                        # FA questions need a character span to score against
                        if question['q_type'] in ['FA'] and question['start_end_char'] == []: continue
                        if other == 'noDK' and question['answer'] == ['DK']: continue
                        q_text = question['question']
                        all_q += 1
                        print('Story:\n',story_txt, file = file)
                        print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                        if pretrain == 'bertqa':
                            correct_start_end_word = correct_token_id(story_txt, q_text, question['start_end_char'],tokenizing, file)
                            _, output, start, end = question_answering(model, q_text, story_txt,correct_start_end_word, device)
                            print("Correct start end: ", correct_start_end_word, "\npredict: ", output, start, end, "\nstart end:", question['start_end_char'], file = file)
                            # correct only when both text and exact span match
                            if question['answer'][0] == output and (start == correct_start_end_word[0] and end == correct_start_end_word[1]):
                                correct+=1
                            print('total: ', all_q, ' correct: ', correct, file = file)
                            print('total: ', all_q, ' correct: ', correct)
                        elif pretrain == 'bertmc':
                            _, output = multiple_classification(model, q_text, story_txt, question['q_type'], question['candidate_answers'], question['answer'], other, device, dataset = data_name)
                            print("predict: ", output, file = file)
                            correct_answer = question['answer']
                            correct_answer.sort()
                            if correct_answer == output :
                                correct+=1
                            print('total: ', all_q, ' correct: ', correct, file = file)
                            print('total: ', all_q, ' correct: ', correct)
                            # accumulate per-class Yes/No/DK counts
                            if qtype == 'YN':
                                if correct_answer == ['Yes']: TPFN[0] += 1
                                elif correct_answer == ['No']: TPFN[1] += 1
                                elif correct_answer == ['DK']: TPFN[2] += 1
                                if output == ['Yes']: TPFP[0] += 1
                                elif output == ['No']: TPFP[1] += 1
                                elif output == ['DK']: TPFP[2] += 1
                                if output == correct_answer == ['Yes']: TP[0] += 1
                                elif output == correct_answer == ['No']: TP[1] += 1
                                elif output == correct_answer == ['DK']: TP[2] += 1
                            # human FR: also score with distance labels 4/5 removed
                            if qtype == 'FR' and data_name == "human":
                                # print(correct_answer, output)
                                if 4 in correct_answer: correct_answer.remove(4)
                                if 5 in correct_answer: correct_answer.remove(5)
                                if 4 in output: output.remove(4)
                                if 5 in output: output.remove(5)
                                if correct_answer == output :
                                    correct_no_distance += 1
                                print('total: ', all_q, ' correct_no_dist: ', correct_no_distance, file = file)
                                # print('total: ', all_q, ' correct_no_dist: ', correct_no_distance)
                        elif pretrain == 'bertbc':
                            _, output = boolean_classification(model, q_text, story_txt, question['q_type'], question['candidate_answers'], question['answer'], other, device, dataset = data_name)
                            print("predict: ", output, file = file)
                            correct_answer = question['answer']
                            correct_answer.sort()
                            output.sort()
                            if correct_answer == output :
                                correct+=1
                            print('total: ', all_q, ' correct: ', correct, file = file)
                            # print('total: ', all_q, ' correct: ', correct)
                            # else: print(s_ind, 'wrong')
                            # accumulate per-class Yes/No/DK counts
                            if qtype == 'YN':
                                if correct_answer == ['Yes']: TPFN[0] += 1
                                elif correct_answer == ['No']: TPFN[1] += 1
                                elif correct_answer == ['DK']: TPFN[2] += 1
                                if output == ['Yes']: TPFP[0] += 1
                                elif output == ['No']: TPFP[1] += 1
                                elif output == ['DK']: TPFP[2] += 1
                                if output == correct_answer == ['Yes']: TP[0] += 1
                                elif output == correct_answer == ['No']: TP[1] += 1
                                elif output == correct_answer == ['DK']: TP[2] += 1
                            # human FR: also score with distance labels 4/5 removed
                            if qtype == 'FR' and data_name == "human":
                                # print(correct_answer, output)
                                if 4 in correct_answer: correct_answer.remove(4)
                                if 5 in correct_answer: correct_answer.remove(5)
                                if 4 in output: output.remove(4)
                                if 5 in output: output.remove(5)
                                if correct_answer == output :
                                    correct_no_distance += 1
                                print('total: ', all_q, ' correct_no_dist: ', correct_no_distance, file = file)
                                # print('total: ', all_q, ' correct_no_dist: ', correct_no_distance)
    # final split-level summary
    if unseen:
        print(test_or_dev+ ' unseen Final accuracy: ', correct/ all_q)
        print(test_or_dev+' unseen Final accuracy: ', correct/ all_q, file = file)
    else:
        print(test_or_dev, ' Final accuracy: ', correct/ all_q)
        print(test_or_dev, ' Final accuracy: ', correct/ all_q, file = file)
    if data_name == "human" and qtype == 'FR':
        print(test_or_dev, ' accuracy with no distance: ', correct_no_distance/ all_q, file = file)
    if qtype == 'YN':
        print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        # macro-F1 over Yes/No only (DK excluded)
        Macro_F1 = np.average(F1[:2])
        print(test_or_dev, ' Final Precision: ', Precision, file = file)
        print(test_or_dev, ' Final Recall: ', Recall, file = file)
        print(test_or_dev, ' Final F1: ', F1, file = file)
        print(test_or_dev, ' Final Macro_F1: ', Macro_F1)
        print(test_or_dev, ' Final Macro_F1: ', Macro_F1, file = file)
        return (correct/ all_q, Macro_F1,)
    return (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span onto token indices.

    Only the first span in `start_end` is converted. The returned pair of
    token indices is shifted by ``len(tokenize(question)) + 2`` so it lines
    up with a ``[CLS] question [SEP] story`` model input.

    Args:
        story: the raw story text the span refers to.
        question: the question text (its token count determines the offset).
        start_end: list of [start_char, end_char] spans; only the first is used.
        tokenizing: callable mapping a string to a list of tokens.
        file: unused; kept for call-site compatibility.

    Returns:
        [start_token, end_token] for the first span, offset as above.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    start_end_token = []
    for s_e in start_end[:1]:
        temp = s_e[0]   # character offset currently being located
        sum_char = 0    # characters consumed so far while walking tokens
        is_start, start = True, None
        for ind, word in enumerate(story_tokenized):
            len_word = len(word)
            if temp > sum_char + len_word:
                sum_char += len_word
            else:
                if is_start:
                    # found the start token; now look for the end character
                    start, is_start = ind, False
                    if s_e[1] - 1 <= sum_char + len_word:
                        start_end_token.append([start, ind])
                        break
                    else:
                        # NOTE(review): sum_char is not advanced past the start
                        # token here, so a span ending in a later token may be
                        # matched one token early or not at all — confirm that
                        # real spans never cross a token boundary.
                        temp = s_e[1] - 1
                else:
                    start_end_token.append([start, ind])
                    break
            # plus one for the space separating tokens (no space before
            # punctuation or around apostrophes)
            if (ind != len(story_tokenized) - 1
                    and story_tokenized[ind + 1] not in ('.', ',', "'")
                    and story_tokenized[ind] != "'"):
                sum_char += 1
    # shift into the question+story encoding: 2 extra for [CLS] and [SEP]
    start_end_token[-1][0] += len(q_tokenized) + 2
    start_end_token[-1][1] += len(q_tokenized) + 2
    return start_end_token[0]
def test_babi(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on the bAbI task-17 style split with the boolean head.

    Loads ``dataset/babi/<split>.json``, runs boolean classification on
    every question of the requested `qtype`, logs per-question details to
    `file`, and returns the overall accuracy.
    """
    # backbone-specific scoring helpers
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('dataset/babi/'+test_or_dev+'.json') as json_file:
        stories = json.load(json_file)['data']
    random.seed(1)
    # each bAbI story carries 8 questions, hence the /8 story cap
    n_take = int((num_sample/8)+1) if num_sample else num_sample
    asked, hits = 0, 0
    model.eval()
    # no-grad inference is a bit faster and saves memory
    with torch.no_grad():
        for s_ind, story in enumerate(stories[:n_take], start=1):
            print('sample ', s_ind)
            print('sample ', s_ind, file=file)
            passage = story['story'][0]
            for question in story['questions']:
                if question['q_type'] not in [qtype]:
                    continue
                q_text = question['question']
                asked += 1
                print('Story:\n', passage, file=file)
                print('question: ', q_text, '\nanswer: ', question['answer'], file=file)
                _, output = boolean_classification(model, q_text, passage, question['q_type'], ['babi'], question['answer'], other, device)
                print("predict: ", output, file=file)
                gold = question['answer']
                gold.sort()
                if gold == output:
                    hits += 1
                print('total: ', asked, ' correct: ', hits, file=file)
                print('total: ', asked, ' correct: ', hits)
    print(test_or_dev, ' Final accuracy: ', hits/asked)
    print(test_or_dev, ' Final accuracy: ', hits/asked, file=file)
    return hits/asked
def test_boolq(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate yes/no classification on the BoolQ dataset.

    On 'dev' the first `num_sample` passages of dev.json are used; on test
    the LAST `num_sample` passages of test_1.json are used. Per-example
    logs go to `file`; the overall accuracy is returned.
    """
    # backbone-specific scoring helpers
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    first = 0
    if test_or_dev == 'dev':
        with open('dataset/boolQ/dev.json') as json_file:
            data = json.load(json_file)
    else:
        with open('dataset/boolQ/test_1.json') as json_file:
            data = json.load(json_file)
        # take the tail of the test file instead of the head
        first = len(data['data']) - num_sample
        num_sample = None
    asked = 0
    hits = 0
    model.eval()
    # no-grad inference is a bit faster and saves memory
    with torch.no_grad():
        for s_ind, story in enumerate(data['data'][first:num_sample], start=1):
            print('sample ', s_ind)
            print('sample ', s_ind, file=file)
            passage = story['passage'][:1000]  # truncate long passages
            q_text = story['question'] + '?'
            gold = ['Yes'] if story['answer'] == True else ['No']
            asked += 1
            print('Story:\n', passage, file=file)
            print('question: ', q_text, '\nanswer: ', gold, file=file)
            _, output = boolean_classification(model, q_text, passage, 'YN', ['boolq'], gold, other, device)
            print("predict: ", output, file=file)
            gold.sort()
            if gold == output:
                hits += 1
            print('total: ', asked, ' correct: ', hits, file=file)
            print('total: ', asked, ' correct: ', hits)
    print('Test Final accuracy: ', hits/asked)
    print('Test Final accuracy: ', hits/asked, file=file)
    return hits/asked
def is_DK_babi(story):
    """Return True when `story` mixes a horizontal relation ('left'/'right')
    with a vertical one ('above'/'below').

    Such a mix means the composed relation between entities cannot be
    determined, i.e. the expected answer is "don't know".
    """
    # The original four pairwise checks all reduce to: any horizontal
    # keyword present AND any vertical keyword present.
    horizontal = 'left' in story or 'right' in story
    vertical = 'above' in story or 'below' in story
    return horizontal and vertical
| 20,013 | 40.958071 | 241 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/.ipynb_checkpoints/train_old-checkpoint.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from BERT import tokenizing
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
# from ALBERT import tokenizing
# from XLNet import tokenizing
def train(model
          , criterion
          , optimizer
          , pretrain = "bertbc"
          , baseline = "bert"
          , start = 0
          , num_sample = None
          , train_num = None
          , qtype = None
          , human = False
          , other = None
          , device = "cpu"
          , train_log = False
          , file = None
          , epochs = 0
          , batch_size = None
          , dataset = "spartqa"
          ):
    """Run one training epoch over the selected split and report metrics.

    Performs one optimizer step per question (no batching). Depending on
    `pretrain`, each story is used as a masked-LM objective ('mlm'/'mlmr')
    or a QA objective ('bertqa' span, 'bertmc' multi-label, 'bertbc'
    boolean classification).

    Args:
        model: model to train (put into train mode here).
        criterion: unused; losses come from the scoring helpers themselves.
        optimizer: optimizer stepped after every per-question backward().
        pretrain: objective selector ('mlm', 'mlmr', 'bertqa', 'bertmc',
            'bertbc').
        baseline: backbone family name (see the commented import block).
        start: skip stories with index < start (resume support).
        num_sample: cap on the number of stories used (None = all).
        train_num: training-file selector ('train24k', 'train100k',
            'train500', or falsy for train.json).
        qtype: question type to train on ('YN', 'FR', 'FB', 'CO', 'FA';
            'all' enables every MLM task).
        human: if True, train on the human-annotated split.
        other: extra behaviour flags (e.g. 'noDK', 'unseen').
        device: torch device string.
        train_log: when True, detailed per-question logs are printed to
            `file`.
        file: open file handle receiving the log output.
        epochs: unused here (kept for the commented-out k-fold logic).
        batch_size: unused; training is strictly per-question.
        dataset: unused in this (older) version of the function.

    Returns:
        (summed_loss, (accuracy,)) or, when qtype == 'YN',
        (summed_loss, (accuracy, Macro_F1)).
    """
    #import baseline
    # if baseline == 'bert':
    #     initialize_tokenizer(baseline)
    #     from BERT import question_answering, tokenizing, boolean_classification, Masked_LM, Masked_LM_random, token_classification, multiple_classification
    # elif baseline == 'xlnet':
    #     from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    # elif baseline == 'albert':
    #     from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    # NOTE(review): with the block above commented out, `Masked_LM`,
    # `Masked_LM_random` and `question_answering` are not in scope, so the
    # 'mlm'/'mlmr'/'bertqa' paths below would raise NameError; only the
    # classification paths (imported at module level from
    # Create_LM_input_output) appear wired up — confirm before use.
    model.train()
    all_q = 0       # number of questions trained on
    correct = 0     # running exact-match tally (fractional for MLM)
    s_ind = 0
    losses = []     # per-step loss values, summed at the end
    # k_fold = 7
    # pick the training file
    if human:
        with open('./dataset/human_train.json') as json_file:
            data = json.load(json_file)
    elif train_num == 'train24k':
        with open('./dataset/train_24k.json') as json_file:
            data = json.load(json_file)
    elif train_num == 'train100k':
        with open('./dataset/train_100k.json') as json_file:
            data = json.load(json_file)
    elif train_num == 'train500':
        with open('./dataset/train_500.json') as json_file:
            data = json.load(json_file)
    elif other == 'unseen' :
        with open('./dataset/unseen_test.json') as json_file:
            data = json.load(json_file)
    else:
        with open('dataset/train.json') as json_file:
            data = json.load(json_file)
    # per-class Yes/No/DK tallies used for precision/recall/F1 below
    if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*3), np.array([0]*3), np.array([0]*3)
    for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
        # s_ind+= 1
        # print('sample ',s_ind)
        if s_ind< start:continue
        # samples [epochs*k_fold, (epochs*k_fold)+k_fold] considered as dev
        # if human and s_ind in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
        if train_log: print('sample ',s_ind, file = file)
        story_txt = story['story'][0]
        x = 0   # per-story question counter (only incremented, never read)
        #MLM tasks
        #TODO add batch
        if pretrain == 'mlm':
            tasks_list = ['FB', 'FR', 'CO'] if qtype == 'all' else [qtype]
            model.zero_grad()
            # story_txt = 'The circle is above the triangle and the blue square. the blue square is below the circle.'
            if train_log: print('Story:\n',story_txt, file = file)
            for question in story['questions']:
                q_text, q_emb= '', []
                q_type = question['q_type']
                if q_type in tasks_list :
                    q_text = question_to_sentence(question['question'], question['q_type'], question['answer'], question['candidate_answers'])
                    # skip questions the cloze rewriting cannot represent
                    if len(question['answer'])> 1 or (q_type == 'FB' and question['answer'] == []) or (q_type == 'FR' and 7 in question['answer']) or (q_type == 'CO' and (question['answer'] == [2] or question['answer'] == [3])): continue
                    answer = question['answer'][0] if q_type == 'FB' else question['candidate_answers'][question['answer'][0]]
                    if q_type == 'CO' and ('which' in answer or 'in' in answer or 'that' in answer): continue
                    all_q += 1
                    if train_log: print('Question: ',q_text,'\nAnswer: ', answer, file = file)
                    loss, output, truth = Masked_LM(model, story_txt, q_text, answer, other, device, file)
                    # print("predict: ", output)
                    if train_log: print("truth: ", truth, "\npredict: ", output, file = file)
                    # print("truth: ", truth, "\npredict: ", output)
                    # fraction of masked tokens recovered correctly
                    correct_temp = 0
                    for i in range(len(output)):
                        if output[i] == truth[i]: correct_temp+=1
                    correct += correct_temp / len(output)
                    # if correct_temp / len(output) == 1:
                    #     correct += 1
                    if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                    print('total: ', all_q, ' correct: ', correct)
                    print("Loss is ", loss.item(), file = file)
                    losses += [loss.item()]
                    # one optimizer step per question
                    loss.backward()
                    optimizer.step()
        #TODO add batch
        elif pretrain == 'mlmr':
            model.zero_grad()
            # story_txt = 'The circle is above the triangle and the blue square. the blue square is below the circle.'
            if train_log: print('Story:\n',story_txt, file = file)
            all_q += 1
            loss, output, truth = Masked_LM_random(model, story_txt, s_ind+1, other, device, file)
            # print("predict: ", output)
            if train_log: print("truth: ", truth, "\npredict: ", output, file = file)
            # print("truth: ", truth, "\npredict: ", output)
            # fraction of randomly masked tokens recovered correctly
            correct_temp = 0
            for i in range(len(output)):
                if output[i] == truth[i]: correct_temp+=1
            correct += correct_temp / len(output)
            # if correct_temp / len(output) == 1:
            #     correct += 1
            if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
            print('total: ', all_q, ' correct: ', correct)
            print("Loss is ", loss.item(), file = file)
            losses += [loss.item()]
            loss.backward()
            optimizer.step()
        #QA tasks
        else:
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                if question['q_type'] in [qtype] :
                    # FA questions need a character span to train against
                    if question['q_type'] in ['FA'] and question['start_end_char'] == []: continue
                    if other == 'noDK' and question['answer'] == ['DK']: continue
                    x+=1
                    q_text = question['question']
                    model.zero_grad()
                    all_q += 1
                    print('Story:\n',story_txt, file = file)
                    if train_log: print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    if pretrain == 'bertqa':
                        correct_start_end_word = correct_token_id(story_txt, q_text, question['start_end_char'], tokenizing, file)
                        #sent to model
                        loss, output, start, end = question_answering(model, q_text, story_txt, correct_start_end_word, device)
                        if train_log: print("Correct start end: ", correct_start_end_word, "\npredict: ", output, start, end, "\nstart end:", question['start_end_char'], file = file)
                        # correct only when both text and exact span match
                        if question['answer'][0] == output and (start == correct_start_end_word[0] and end == correct_start_end_word[1]):
                            correct+=1
                        if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                        # print('total: ', all_q, ' correct: ', correct)
                    elif pretrain == 'bertmc':
                        loss, output = multiple_classification(model, q_text, story_txt, question['q_type'], question['candidate_answers'], question['answer'], other, device)
                        if train_log: print("predict: ", output, file = file)
                        correct_answer = question['answer']
                        correct_answer.sort()
                        if correct_answer == output :
                            correct+=1
                        if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                        print('total: ', all_q, ' correct: ', correct)
                        # accumulate per-class Yes/No/DK counts
                        if qtype == 'YN':
                            if correct_answer == ['Yes']: TPFN[0] += 1
                            elif correct_answer == ['No']: TPFN[1] += 1
                            elif correct_answer == ['DK']: TPFN[2] += 1
                            if output == ['Yes']: TPFP[0] += 1
                            elif output == ['No']: TPFP[1] += 1
                            elif output == ['DK']: TPFP[2] += 1
                            if output == correct_answer == ['Yes']: TP[0] += 1
                            elif output == correct_answer == ['No']: TP[1] += 1
                            elif output == correct_answer == ['DK']: TP[2] += 1
                    elif pretrain == 'bertbc':
                        loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], question['candidate_answers'], question['answer'], other, device)
                        if train_log: print("predict: ", output, file = file)
                        correct_answer = question['answer']
                        correct_answer.sort()
                        output.sort()
                        if correct_answer == output :
                            correct+=1
                        if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                        # print('total: ', all_q, ' correct: ', correct)
                        # accumulate per-class Yes/No/DK counts
                        if qtype == 'YN':
                            if correct_answer == ['Yes']: TPFN[0] += 1
                            elif correct_answer == ['No']: TPFN[1] += 1
                            elif correct_answer == ['DK']: TPFN[2] += 1
                            if output == ['Yes']: TPFP[0] += 1
                            elif output == ['No']: TPFP[1] += 1
                            elif output == ['DK']: TPFP[2] += 1
                            if output == correct_answer == ['Yes']: TP[0] += 1
                            elif output == correct_answer == ['No']: TP[1] += 1
                            elif output == correct_answer == ['DK']: TP[2] += 1
                    if train_log: print("Loss is ", loss.item(), file = file)
                    losses += [loss.item()]
                    # one optimizer step per question
                    loss.backward()
                    optimizer.step()
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    if qtype == 'YN':
        print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        # macro-F1 over Yes/No only (DK excluded)
        Macro_F1 = np.average(F1[:2])
        print('Train Final Precision: ', Precision, file = file)
        print('Train Final Recall: ', Recall, file = file)
        print('Train Final F1: ', F1, file = file)
        print('Train Final Macro_F1: ', Macro_F1)
        print('Train Final Macro_F1: ', Macro_F1, file = file)
        return losses, (correct/ all_q, Macro_F1)
    return losses, (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character-level answer span onto token indices.

    Only the first span in `start_end` is converted. The returned pair of
    token indices is shifted by ``len(tokenize(question)) + 2`` so it lines
    up with a ``[CLS] question [SEP] story`` model input.

    Args:
        story: the raw story text the span refers to.
        question: the question text (its token count determines the offset).
        start_end: list of [start_char, end_char] spans; only the first is used.
        tokenizing: callable mapping a string to a list of tokens.
        file: unused; kept for call-site compatibility.

    Returns:
        [start_token, end_token] for the first span, offset as above.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    start_end_token = []
    for s_e in start_end[:1]:
        temp = s_e[0]   # character offset currently being located
        sum_char = 0    # characters consumed so far while walking tokens
        is_start, start = True, None
        for ind, word in enumerate(story_tokenized):
            len_word = len(word)
            if temp > sum_char + len_word:
                sum_char += len_word
            else:
                if is_start:
                    # found the start token; now look for the end character
                    start, is_start = ind, False
                    if s_e[1] - 1 <= sum_char + len_word:
                        start_end_token.append([start, ind])
                        break
                    else:
                        # NOTE(review): sum_char is not advanced past the start
                        # token here, so a span ending in a later token may be
                        # matched one token early or not at all — confirm that
                        # real spans never cross a token boundary.
                        temp = s_e[1] - 1
                else:
                    start_end_token.append([start, ind])
                    break
            # plus one for the space separating tokens (no space before
            # punctuation or around apostrophes)
            if (ind != len(story_tokenized) - 1
                    and story_tokenized[ind + 1] not in ('.', ',', "'")
                    and story_tokenized[ind] != "'"):
                sum_char += 1
    # shift into the question+story encoding: 2 extra for [CLS] and [SEP]
    start_end_token[-1][0] += len(q_tokenized) + 2
    start_end_token[-1][1] += len(q_tokenized) + 2
    return start_end_token[0]
# def train_babi(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
# #import baseline
# if baseline == 'bert':
# from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'xlnet':
# from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'albert':
# from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
# with open('./dataset/babi/train.json') as json_file:
# data = json.load(json_file)
# number_samples = int((num_sample/8)+1) if num_sample else num_sample
# #random sampling or not
# random.seed(1)
# stories = random.sample(data['data'], number_samples) if num_sample in [100, 500, 1000, 2000, 5000] else data['data']
# model.train()
# all_q = 0
# correct = 0
# s_ind = 0
# losses = []
# for story in stories[:number_samples]:
# # if is_DK_babi(story['story'][0]): continue
# s_ind+= 1
# print('sample ',s_ind)
# print('sample ',s_ind, file = file)
# story_txt = story['story'][0]
# x = 0
# # each question (span)
# for question in story['questions']:
# q_text, q_emb= '', []
# if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
# x+=1
# q_text = question['question']
# model.zero_grad()
# all_q += 1
# print('Story:\n',story_txt, file = file)
# print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
# loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
# #print("logit: ", logit , file = file)
# print("predict: ", output, file = file)
# correct_answer = question['answer']
# correct_answer.sort()
# if correct_answer == output :
# correct+=1
# print('total: ', all_q, ' correct: ', correct, file = file)
# print('total: ', all_q, ' correct: ', correct)
# print("Loss is ", loss.item(), file = file)
# losses += [loss.item()]
# loss.backward()
# optimizer.step()
# losses = np.sum(losses)
# print('Train Final accuracy: ', correct/ all_q)
# print('Train Final accuracy: ', correct/ all_q, file = file)
# print('Losses: ', losses)
# print('Losses: ', losses, file = file)
# return losses, correct/ all_q
# def train_boolq(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
# #import baseline
# if baseline == 'bert':
# from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'xlnet':
# from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'albert':
# from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
# with open('./dataset/boolQ/train.json') as json_file:
# data = json.load(json_file)
# model.train()
# all_q = 0
# correct = 0
# s_ind = 0
# losses = []
# x = 0
# for story in data['data'][:num_sample]:
# s_ind+= 1
# print('sample ',s_ind)
# print('sample ',s_ind, file = file)
# story_txt = story['passage'][:1000]
# # print(story_txt)
# # each question (span)
# # for question in story['questions']:
# # q_text, q_emb= '', []
# # if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
# x+=1
# q_text = story['question']
# answer = ['Yes'] if story['answer'] == True else ['No']
# model.zero_grad()
# all_q += 1
# print('Story:\n',story_txt, file = file)
# print('question: ', q_text, '\nanswer: ',answer, file = file)
# loss, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
# print("predict: ", output, file = file)
# correct_answer = answer
# correct_answer.sort()
# if correct_answer == output :
# correct+=1
# print('total: ', all_q, ' correct: ', correct, file = file)
# print('total: ', all_q, ' correct: ', correct)
# print("Loss is ", loss.item(), file = file)
# losses += [loss.item()]
# loss.backward()
# optimizer.step()
# losses = np.sum(losses)
# print('Train Final accuracy: ', correct/ all_q)
# print('Train Final accuracy: ', correct/ all_q, file = file)
# print('Losses: ', losses)
# print('Losses: ', losses, file = file)
# return losses, correct/ all_q
def is_DK_babi(story):
    """Return True when `story` mixes a horizontal relation ('left'/'right')
    with a vertical one ('above'/'below').

    Such a mix means the composed relation between entities cannot be
    determined, i.e. the expected answer is "don't know".
    """
    # The original four pairwise checks all reduce to: any horizontal
    # keyword present AND any vertical keyword present.
    horizontal = 'left' in story or 'right' in story
    vertical = 'above' in story or 'below' in story
    return horizontal and vertical
def question_to_sentence(question, q_type, answer, candidate):
    """Rewrite a question into a declarative cloze sentence with [MASK] slots.

    FB questions mask the block letter, FR questions mask the spatial
    relation (two masks for distance answers 4/5), and CO questions mask
    the answer object itself (one [MASK] per answer token). The rewritten
    sentence ends with '.' instead of '?'.
    """
    if q_type == 'FB':
        # pick the interrogative phrase to turn into "block [MASK]"
        if 'Which' in question:
            lead = 'Which block'
        elif 'which' in question:
            lead = 'which block'
        elif 'what' in question:
            lead = 'what block'
        else:
            lead = 'What block'
        question = question.replace(lead, 'block [MASK]').replace('(s)', '').replace('?', '.')
    elif q_type == 'FR':
        # distance answers (4/5) are two tokens, e.g. "far from"
        slot = 'is [MASK] [MASK]' if answer == [4] or answer == [5] else 'is [MASK]'
        if 'What' in question:
            question = question.replace('What is the relation between', '')
            joint = 'and'
        elif 'exist' in question:
            question = question.replace('what relations exist between', '')
            joint = 'and'
        elif 'what' in question:
            question = question.replace('what is the relation between', '')
            joint = 'and'
        elif 'where' in question:
            question = question.replace('where is', '')
            joint = 'regarding to'
        else:
            question = question.replace('Where is', '')
            joint = 'regarding to'
        question = question.replace('?', '.').replace(joint, slot)
    elif q_type == 'CO':
        # one [MASK] per token of the gold answer phrase
        answer = candidate[answer[0]]
        mask = ' '.join(['[MASK]'] * len(tokenizing(answer)))
        if 'What' in question:
            question = question[:question.find('?') + 1]
            for phrase in ('What object', 'What thing', 'What square'):
                if phrase in question:
                    break
            else:
                phrase = 'What'
            question = question.replace(phrase, mask).replace('?', '.')
        elif 'what' in question:
            question = question[:question.find('?') + 1]
            for phrase in ('what object', 'what thing'):
                if phrase in question:
                    break
            else:
                phrase = 'what'
            question = question.replace(phrase, mask).replace('?', '.')
        elif 'Which' in question:
            # only handled for the "object"/"square" variants
            for phrase in ('Which object', 'Which square'):
                if phrase in question:
                    question = question[:question.find('?') + 1]
                    question = question.replace(phrase, mask).replace('?', '.')
                    break
        elif 'which' in question:
            question = question[:question.find('?') + 1]
            question = question.replace('which object', mask).replace('?', '.')
    return question
def confusion_matrix(truth, predict,correct, TP,TPFP,TPFN):
    """Update running evaluation tallies for one example.

    `correct` is incremented when the full label list matches exactly.
    TP/TPFP/TPFN are per-class counters indexed by label id and are
    mutated in place. Returns the updated (correct, TP, TPFP, TPFN).
    """
    if truth == predict:
        correct += 1
    for pos, gold in enumerate(truth):
        guess = predict[pos]
        if gold == guess:
            TP[gold] += 1     # true positive for this class
        TPFP[guess] += 1      # everything predicted as `guess`
        TPFN[gold] += 1       # everything whose gold label is `gold`
    return correct, TP, TPFP, TPFN
def precision(TP,TPFP):
    """Per-class precision TP/(TP+FP) with class 0 dropped; 0/0 cells become 0."""
    per_class = TP[1:] / TPFP[1:]
    return np.nan_to_num(per_class)
def recall(TP,TPFN):
    """Per-class recall TP/(TP+FN) with class 0 dropped; 0/0 cells become 0."""
    per_class = TP[1:] / TPFN[1:]
    return np.nan_to_num(per_class)
def F1_measure(TP,TPFP, TPFN,macro= False):
    """Per-class F1 over classes 1..N (class 0 dropped).

    When `macro` is truthy, returns the unweighted mean over those
    classes instead of the per-class vector. 0/0 divisions become 0.
    """
    p = np.nan_to_num(TP[1:] / TPFP[1:])
    r = np.nan_to_num(TP[1:] / TPFN[1:])
    per_class = np.nan_to_num((2 * (p * r)) / (p + r))
    if macro:
        return np.average(per_class)
    return per_class
| 24,128 | 38.751236 | 237 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/.ipynb_checkpoints/train-checkpoint.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
# from BERT import tokenizing
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
# from ALBERT import tokenizing
# from XLNet import tokenizing
def train(model
          , criterion
          , optimizer
          , pretrain = "bertbc"
          , baseline = "bert"
          , start = 0
          , num_sample = None
          , train_num = None
          , qtype = None
          , data_name = "spartqa"
          , other = None
          , device = "cpu"
          , train_log = False
          , file = None
          , epochs = 0
          , batch_size = None
         ):
    #import baseline
    # if baseline == 'bert':
    #     initialize_tokenizer(baseline)
    #     from BERT import question_answering, tokenizing, boolean_classification, Masked_LM, Masked_LM_random, token_classification, multiple_classification
    # elif baseline == 'xlnet':
    #     from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    # elif baseline == 'albert':
    #     from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    """
    Run one training epoch over the selected dataset.

    pretrain selects the objective: 'mlm' / 'mlmr' (masked-LM variants, no
    batching) or 'bertmc' / 'bertbc' (multiple / boolean classification,
    batched by `batch_size`). qtype selects the question type ('YN', 'FR',
    ... or 'all'); data_name selects the JSON file ('spartqa', 'human',
    'spartun'). `criterion` is accepted but not used in this body — losses
    come from the classification helpers. Returns (summed loss, metrics
    tuple); the metrics tuple additionally carries macro-F1 when YN
    questions were trained.

    Batching note (original author): each input (question+context) and its
    answer are accumulated until `batch_size` is reached, then passed to
    the model; for SpaRTUN trained on "all", the question type alternates
    between YN and FR after each batch.
    """
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    # k_fold = 7
    # `task` keeps the caller's original qtype; `qtype` becomes the list
    # of types actually trained this pass.
    task = [qtype]
    qtypes = ["YN", "FR"]
    # qtypes = ["YN", "FR", "FB", "CO"] if data_name == "spartqa" else ["YN", "FR"]
    qtype = qtypes if qtype == "all" else [qtype]
    # Load the training split for the requested dataset.
    if data_name == "human":
        with open('./dataset/human_train.json') as json_file:
            data = json.load(json_file)
        data_name = "spartqa"
    elif data_name == "spartun":
        with open('./dataset/SpaRTUN/train.json') as json_file:
            data = json.load(json_file)
        # Start alternation on YN; flipped to FR after each full batch below.
        if task == ["all"]: qtype = ["YN"]
        # if qtype == "all": qtype = ["YN"] # change alternatively
        # else: qtype = [qtype]
    else:
        with open('dataset/train.json') as json_file:
            data = json.load(json_file)
    # Confusion-matrix accumulators for the 3 YN labels (Yes / No / DK).
    if 'YN' in qtype: TPFN, TP, TPFP = np.array([0]*3), np.array([0]*3), np.array([0]*3)
    _temp_batch_input = []
    _temp_batch_answer = []
    for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
        # s_ind+= 1
        # print('sample ',s_ind)
        # `start` allows resuming from a given story index.
        if s_ind< start:continue
        # samples [epochs*k_fold, (epochs*k_fold)+k_fold] considered as dev
        # if human and s_ind in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
        story_txt = story['story'][0]
        if train_log:
            print('sample ',s_ind, file = file)
            print('Story:\n',story_txt, file = file)
        x = 0
        #MLM tasks
        #TODO add batch, add spartun. Now it is set on spartqa
        if pretrain == 'mlm':
            if data_name == "spartqa": tasks_list = ['FB', 'FR', 'CO'] if qtype == 'all' else [qtype]
            else: tasks_list = ['FR', 'YN'] if qtype == 'all' else [qtype]
            # story_txt = 'The circle is above the triangle and the blue square. the blue square is below the circle.'
            if train_log: print('Story:\n',story_txt, file = file)
            for question in story['questions']:
                q_text, q_emb= '', []
                q_type = question['q_type']
                if q_type in tasks_list :
                    # Turn the question into a declarative [MASK] sentence.
                    q_text = question_to_sentence(question['question'], question['q_type'], question['answer'], question['candidate_answers'])
                    # Skip questions the MLM formulation cannot express
                    # (multi-answer, empty FB, FR with relation 7, CO with answers 2/3).
                    if len(question['answer'])> 1 or (q_type == 'FB' and question['answer'] == []) or (q_type == 'FR' and 7 in question['answer']) or (q_type == 'CO' and (question['answer'] == [2] or question['answer'] == [3])): continue
                    answer = question['answer'][0] if q_type == 'FB' else question['candidate_answers'][question['answer'][0]]
                    if q_type == 'CO' and ('which' in answer or 'in' in answer or 'that' in answer): continue
                    all_q += 1
                    if train_log: print('Question: ',q_text,'\nAnswer: ', answer, file = file)
                    loss, output, truth = Masked_LM(model, story_txt, q_text, answer, other, device, file)
                    # print("predict: ", output)
                    if train_log: print("truth: ", truth, "\npredict: ", output, file = file)
                    # print("truth: ", truth, "\npredict: ", output)
                    # Accuracy is the fraction of masked tokens recovered.
                    correct_temp = 0
                    for i in range(len(output)):
                        if output[i] == truth[i]: correct_temp+=1
                    correct += correct_temp / len(output)
                    # if correct_temp / len(output) == 1:
                    #     correct += 1
                    if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                    print('total: ', all_q, ' correct: ', correct)
                    print("Loss is ", loss.item(), file = file)
                    losses += [loss.item()]
                    loss.backward()
                    optimizer.step()
        #TODO add batch, add spartun. Now it is set on spartqa
        elif pretrain == 'mlmr':
            # Random-masking MLM over the story text (one step per story).
            model.zero_grad()
            # story_txt = 'The circle is above the triangle and the blue square. the blue square is below the circle.'
            if train_log: print('Story:\n',story_txt, file = file)
            all_q += 1
            loss, output, truth = Masked_LM_random(model, story_txt, s_ind+1, other, device, file)
            # print("predict: ", output)
            if train_log: print("truth: ", truth, "\npredict: ", output, file = file)
            # print("truth: ", truth, "\npredict: ", output)
            correct_temp = 0
            for i in range(len(output)):
                if output[i] == truth[i]: correct_temp+=1
            correct += correct_temp / len(output)
            # if correct_temp / len(output) == 1:
            #     correct += 1
            if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
            print('total: ', all_q, ' correct: ', correct)
            print("Loss is ", loss.item(), file = file)
            losses += [loss.item()]
            loss.backward()
            optimizer.step()
        #QA tasks
        else:
            """
            based on the batch_size:
            """
            # print('Story:\n',story_txt, file = file)
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                model.zero_grad()
                if question['q_type'] in qtype :
                    if other == 'noDK' and question['answer'] == ['DK']: continue
                    x+=1
                    q_text = question['question']
                    if train_log: print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    all_q += 1
                    """
                    add input and answer to the batch
                    if len(batch) == batch_size pass to the model
                    else: continue
                    """
                    _temp_batch_input += [concate_input_components([q_text, story_txt], baseline)]
                    _temp_batch_answer += [question['answer']]
                    if len(_temp_batch_input) < batch_size : continue
                    #if batch is full it comes here
                    # NOTE(review): q_type/candidate_answers come from the
                    # *last* question of the batch — assumes all batch items
                    # share one question type (holds while qtype is a single
                    # type per batch).
                    if pretrain == 'bertmc':
                        loss, output = multiple_classification(model, _temp_batch_input, question['q_type'], question['candidate_answers'], _temp_batch_answer, other = other, device = device, dataset = data_name)
                    elif pretrain == 'bertbc':
                        loss, output = boolean_classification(model, _temp_batch_input, question['q_type'], question['candidate_answers'], _temp_batch_answer, other = other, device = device, dataset = data_name)
                    if train_log: print("predict: ", output, file = file)
                    for ind, correct_answer in enumerate(_temp_batch_answer):
                        # correct_answer = question['answer']
                        if check_answer_equality(correct_answer, output[ind]) :
                            correct+=1
                            if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                            # print('total: ', all_q, ' correct: ', correct)
                        # YN confusion counts: index 0=Yes, 1=No, 2=DK.
                        if question['q_type'] in ['YN']:
                            if correct_answer == ['Yes']: TPFN[0] += 1
                            elif correct_answer == ['No']: TPFN[1] += 1
                            elif correct_answer == ['DK']: TPFN[2] += 1
                            if output[ind] == ['Yes']: TPFP[0] += 1
                            elif output[ind] == ['No']: TPFP[1] += 1
                            elif output[ind] == ['DK']: TPFP[2] += 1
                            if output[ind] == correct_answer == ['Yes']: TP[0] += 1
                            elif output[ind] == correct_answer == ['No']: TP[1] += 1
                            elif output[ind] == correct_answer == ['DK']: TP[2] += 1
                    if train_log: print("Loss is ", loss.item(), file = file)
                    losses += [loss.item()]
                    loss.backward()
                    optimizer.step()
                    _temp_batch_answer = []
                    _temp_batch_input = []
                    # SpaRTUN "all": alternate YN <-> FR after each batch.
                    if task == ["all"] and data_name == "spartun":
                        if qtype == ["YN"]: qtype = ["FR"]
                        else: qtype = ["YN"]
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    if "YN" in task or "all" in task :
        print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        # Macro-F1 over Yes/No only (DK excluded).
        Macro_F1 = np.average(F1[:2])
        print('Train Final Precision: ', Precision, file = file)
        print('Train Final Recall: ', Recall, file = file)
        print('Train Final F1: ', F1, file = file)
        print('Train Final Macro_F1: ', Macro_F1)
        print('Train Final Macro_F1: ', Macro_F1, file = file)
        return losses, (correct/ all_q, Macro_F1)
    return losses, (correct/ all_q,)
def check_answer_equality(correct_answer, prediction):
    """Compare two answer lists, ignoring order and string case."""
    def _canon(answers):
        # Lower-case string elements, leave others (e.g. ints) untouched,
        # then sort so ordering does not matter.
        return sorted(a.lower() if isinstance(a, str) else a for a in answers)
    return _canon(prediction) == _canon(correct_answer)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character span in `story` to token indices in [CLS] question [SEP] story.

    `start_end` is a list of [start_char, end_char] spans; only the first is
    used. `tokenizing` is the tokenizer function to apply. Returns
    [start_token, end_token] shifted past the question tokens and the two
    special tokens. `file` is unused here.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    #finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:
        # `temp` is the character offset we are currently locating; first the
        # span start, then (once found) the span end.
        temp = s_e[0]
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            if temp > sum_char + len(word) : sum_char += len_word;
            else:
                if is_start:
                    start, is_start = ind , False
                    # If the end offset also falls in this token, emit span now.
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1;
                else: start_end_token+=[[start, ind]]; break
            # Account for the space between word tokens, except around
            # punctuation/apostrophes, which the tokenizer splits without a
            # space. NOTE(review): assumes single-space separation in `story`.
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
        start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
        start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def concate_input_components(all_texts, baseline):
    """Join text segments with the model's separator tokens.

    For "roberta" the segments are joined as `a </s> <s> b`; for every other
    baseline (BERT-style) as `a [SEP] b`. A single segment is returned
    unchanged.

    Bug fix: the previous implementation ended with `new_input[:-1]`, which
    chopped the final character of the last segment (e.g. the story's
    closing period), since no trailing separator was appended after the
    last text. It also detected "last element" by value equality, which
    misfired when an earlier segment happened to equal the final one.
    """
    joint = " </s> <s> " if baseline == "roberta" else " [SEP] "
    return joint.join(all_texts)
# def train_babi(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
# #import baseline
# if baseline == 'bert':
# from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'xlnet':
# from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'albert':
# from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
# with open('./dataset/babi/train.json') as json_file:
# data = json.load(json_file)
# number_samples = int((num_sample/8)+1) if num_sample else num_sample
# #random sampling or not
# random.seed(1)
# stories = random.sample(data['data'], number_samples) if num_sample in [100, 500, 1000, 2000, 5000] else data['data']
# model.train()
# all_q = 0
# correct = 0
# s_ind = 0
# losses = []
# for story in stories[:number_samples]:
# # if is_DK_babi(story['story'][0]): continue
# s_ind+= 1
# print('sample ',s_ind)
# print('sample ',s_ind, file = file)
# story_txt = story['story'][0]
# x = 0
# # each question (span)
# for question in story['questions']:
# q_text, q_emb= '', []
# if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
# x+=1
# q_text = question['question']
# model.zero_grad()
# all_q += 1
# print('Story:\n',story_txt, file = file)
# print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
# loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
# #print("logit: ", logit , file = file)
# print("predict: ", output, file = file)
# correct_answer = question['answer']
# correct_answer.sort()
# if correct_answer == output :
# correct+=1
# print('total: ', all_q, ' correct: ', correct, file = file)
# print('total: ', all_q, ' correct: ', correct)
# print("Loss is ", loss.item(), file = file)
# losses += [loss.item()]
# loss.backward()
# optimizer.step()
# losses = np.sum(losses)
# print('Train Final accuracy: ', correct/ all_q)
# print('Train Final accuracy: ', correct/ all_q, file = file)
# print('Losses: ', losses)
# print('Losses: ', losses, file = file)
# return losses, correct/ all_q
# def train_boolq(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
# #import baseline
# if baseline == 'bert':
# from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'xlnet':
# from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'albert':
# from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
# with open('./dataset/boolQ/train.json') as json_file:
# data = json.load(json_file)
# model.train()
# all_q = 0
# correct = 0
# s_ind = 0
# losses = []
# x = 0
# for story in data['data'][:num_sample]:
# s_ind+= 1
# print('sample ',s_ind)
# print('sample ',s_ind, file = file)
# story_txt = story['passage'][:1000]
# # print(story_txt)
# # each question (span)
# # for question in story['questions']:
# # q_text, q_emb= '', []
# # if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
# x+=1
# q_text = story['question']
# answer = ['Yes'] if story['answer'] == True else ['No']
# model.zero_grad()
# all_q += 1
# print('Story:\n',story_txt, file = file)
# print('question: ', q_text, '\nanswer: ',answer, file = file)
# loss, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
# print("predict: ", output, file = file)
# correct_answer = answer
# correct_answer.sort()
# if correct_answer == output :
# correct+=1
# print('total: ', all_q, ' correct: ', correct, file = file)
# print('total: ', all_q, ' correct: ', correct)
# print("Loss is ", loss.item(), file = file)
# losses += [loss.item()]
# loss.backward()
# optimizer.step()
# losses = np.sum(losses)
# print('Train Final accuracy: ', correct/ all_q)
# print('Train Final accuracy: ', correct/ all_q, file = file)
# print('Losses: ', losses)
# print('Losses: ', losses, file = file)
# return losses, correct/ all_q
def is_DK_babi(story):
    """True when `story` mixes a horizontal and a vertical relation.

    Such bAbI stories make some relations underdetermined ("don't know").
    """
    horizontal = 'left' in story or 'right' in story
    vertical = 'above' in story or 'below' in story
    return horizontal and vertical
def question_to_sentence(question, q_type, answer, candidate):
    """Rewrite a QA question into a declarative sentence with [MASK] slots.

    Used for the MLM objective. `q_type` is 'FB', 'FR' or 'CO'; `answer` is
    the gold answer-index list and `candidate` the candidate-answer list
    (only used for 'CO'). Relies on the external `tokenizing` helper to
    size the CO mask. Unknown q_types return `question` unchanged.
    """
    if q_type == 'FB':
        # "Which/what block ...?" -> "block [MASK] ..."
        if 'Which' in question:
            question = question.replace('Which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'which' in question:
            question = question.replace('which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'what' in question:
            question = question.replace('what block', 'block [MASK]').replace('(s)','').replace('?','.')
        else:
            question = question.replace('What block', 'block [MASK]').replace('(s)','').replace('?','.')
    elif q_type == 'FR':
        # print('hi',question)
        # Relation questions: answers 4/5 are two-token relations, so they
        # get two [MASK] slots; all other relations get one.
        if 'What' in question:
            question = question.replace('What is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
            # print(question)
        elif 'exist' in question:
            question = question.replace('what relations exist between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'what' in question:
            question = question.replace('what is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'where' in question:
            question = question.replace('where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
        else:
            question = question.replace('Where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
    elif q_type == 'CO':
        # print(question, answer)
        # Choose-object: replace the wh-phrase with one [MASK] per token of
        # the gold candidate answer.
        answer = candidate[answer[0]]
        token_answer = tokenizing(answer)
        mask = ('[MASK] '*len(token_answer))[:-1]
        # print('mask', mask)
        if 'What' in question:
            # Drop any trailing text after the question mark first.
            question = question[:question.find('?')+1]
            if 'What object' in question:
                question = question.replace('What object',mask).replace('?','.')
            elif 'What thing' in question:
                question = question.replace('What thing',mask).replace('?','.')
            elif 'What square' in question:
                question = question.replace('What square',mask).replace('?','.')
            else:
                question = question.replace('What',mask).replace('?','.')
        elif 'what' in question:
            question = question[:question.find('?')+1]
            if 'what object' in question:
                question = question.replace('what object',mask).replace('?','.')
            elif 'what thing' in question:
                question = question.replace('what thing',mask).replace('?','.')
            else:
                question = question.replace('what',mask).replace('?','.')
        elif 'Which' in question:
            if 'Which object' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which object',mask).replace('?','.')
            elif 'Which square' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which square',mask).replace('?','.')
        elif 'which' in question:
            question = question[:question.find('?')+1]
            question = question.replace('which object',mask).replace('?','.')
    return question
def confusion_matrix(truth, predict,correct, TP,TPFP,TPFN):
    """Update exact-match count plus per-class TP / predicted / gold tallies.

    `truth` and `predict` are parallel label sequences; the three
    accumulators are indexed by class label and mutated in place.
    """
    # One point of accuracy for a fully matching prediction sequence.
    if truth == predict:
        correct += 1
    for i, gold in enumerate(truth):
        pred = predict[i]
        if gold == pred:
            TP[gold] += 1      # true positive for that class
        TPFP[pred] += 1        # predicted-count denominator (precision)
        TPFN[gold] += 1        # gold-count denominator (recall)
    return correct, TP, TPFP, TPFN
def precision(TP,TPFP):
    """Precision per class 1..N (class 0 skipped); NaN from 0/0 becomes 0."""
    return np.nan_to_num(np.asarray(TP)[1:] / np.asarray(TPFP)[1:])
def recall(TP,TPFN):
    """Recall per class 1..N (class 0 skipped); NaN from 0/0 becomes 0."""
    return np.nan_to_num(np.asarray(TP)[1:] / np.asarray(TPFN)[1:])
def F1_measure(TP,TPFP, TPFN,macro= False):
    """Per-class F1 over classes 1..N, or its macro average when `macro`.

    All 0/0 divisions resolve to 0 through nan_to_num.
    """
    prec = np.nan_to_num(TP[1:] / TPFP[1:])
    rec = np.nan_to_num(TP[1:] / TPFN[1:])
    per_class = np.nan_to_num((2 * (prec * rec)) / (prec + rec))
    return np.average(per_class) if macro else per_class
| 24,362 | 37.246468 | 237 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/.ipynb_checkpoints/test-checkpoint.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from QA.train import question_to_sentence, F1_measure, precision, recall, confusion_matrix, concate_input_components, check_answer_equality
from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
# from BERT import tokenizing
# from ALBERT import tokenizing
# from XLNet import tokenizing
def test(model
         , pretrain = "bertbc"
         , baseline = "bert"
         , test_or_dev = "test"
         ,num_sample = None
         , train_num = None
         , unseen = False
         , qtype = None
         , other = None
         , data_name = "spartqa"
         , save_data = False
         , device = "cpu"
         , file = None
         , epochs = 0
         ):
    """Evaluate `model` on the chosen test/dev split without gradients.

    `pretrain` selects the objective ('mlm', 'mlmr', 'bertmc', 'bertbc');
    `qtype` selects the question type ('YN', 'FR', ... or 'all');
    `data_name`/`unseen`/`other` select which JSON split is loaded. Returns
    a 1-tuple (accuracy,) or, for YN evaluation, (YN accuracy, macro-F1).
    `train_num`, `save_data` and `epochs` are currently unused in this body.
    """
    # initialize_tokenizer(baseline)
    #import baseline
    # if baseline == 'bert':
    #     from BERT import question_answering, tokenizing, boolean_classification, Masked_LM, Masked_LM_random, token_classification, multiple_classification
    # elif baseline == 'xlnet':
    #     from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    # elif baseline == 'albert':
    #     from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    all_q = 0
    all_q_YN = 0
    all_q_FR = 0
    correct = 0
    correct_YN = 0
    correct_FR = 0
    correct_no_distance = 0
    # s_ind = 0
    # `task` remembers the caller's raw qtype; `qtype` becomes the list of
    # types actually evaluated.
    task = [qtype]
    correct_consistency, consistency_total =0, 0
    qtypes = ["YN", "FR"]
    # qtypes = ["YN", "FR", "FB", "CO"] if data_name == "spartqa" else ["YN", "FR"]
    qtype = qtypes if qtype == "all" else [qtype]
    # k_fold = 7
    model.eval()
    is_human = False
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        # Pick the evaluation split for the requested dataset/variant.
        if unseen:
            with open('./dataset/unseen_test.json') as json_file:
                data = json.load(json_file)
        elif data_name == "human":
            with open('./dataset/human_'+test_or_dev+'.json') as json_file:
                data = json.load(json_file)
            is_human = True
            data_name = "spartqa"
        elif data_name == "spartun":
            if other == "simple":
                with open('dataset/SpaRTUN/'+test_or_dev+'_simple.json') as json_file:
                    data = json.load(json_file)
            elif other == "clock":
                with open('dataset/SpaRTUN/'+test_or_dev+'_clock.json') as json_file:
                    data = json.load(json_file)
            else:
                with open('dataset/SpaRTUN/'+test_or_dev+'.json') as json_file:
                    data = json.load(json_file)
        else:
            # with open('./dataset/new_test.json') as json_file:
            with open('dataset/'+test_or_dev+'.json') as json_file:
                data = json.load(json_file)
        # YN confusion accumulators: index 0=Yes, 1=No, 2=DK.
        if 'YN' in task or "all" in task: TPFN, TP, TPFP = np.array([0]*3), np.array([0]*3), np.array([0]*3)
        for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
            # s_ind+= 1
            # print('sample ',s_ind)
            #use k_fold for cross_evaluation
            # if human and test_or_dev == 'dev' and s_ind not in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            #MLM tasks
            if pretrain == 'mlm':
                # story_txt = "the square is above the cicle. the circle is above the rectangle. the square is above rectangle."
                print('Story:\n',story_txt, file = file)
                tasks_list = ['FB', 'FR', 'CO'] if qtype == 'all' else [qtype]
                for question in story['questions']:
                    q_text, q_emb= '', []
                    q_type = question['q_type']
                    if q_type in tasks_list :
                        # Rewrite the question as a masked declarative sentence.
                        q_text = question_to_sentence(question['question'], question['q_type'], question['answer'], question['candidate_answers'])
                        # Skip questions the MLM formulation cannot express.
                        if len(question['answer'])> 1 or (q_type == 'FB' and question['answer'] == []) or (q_type == 'FR' and 7 in question['answer']) or (q_type == 'CO' and (question['answer'] == [2] or question['answer'] == [3])): continue
                        # print(question['q_id'],q_type, question['candidate_answers'], question['answer'][0])
                        answer = question['answer'][0] if q_type == 'FB' else question['candidate_answers'][question['answer'][0]]
                        if q_type == 'CO' and ('which' in answer or 'in' in answer or 'that' in answer): continue
                        all_q += 1
                        print('Question: ',q_text,'\nAnswer: ', answer, file = file)
                        _, output, truth = Masked_LM(model, story_txt, q_text, answer, other, device, file)
                        # print("predict: ", output)
                        print("truth: ", truth, "\npredict: ", output, file = file)
                        # print("truth: ", truth, "\npredict: ", output)
                        # Fraction of masked tokens recovered counts toward accuracy.
                        correct_temp = 0
                        for i in range(len(output)):
                            if output[i] == truth[i]: correct_temp+=1
                        correct += correct_temp / len(output)
                        # if correct_temp / len(output) == 1:
                        #     correct += 1
                        print('total: ', all_q, ' correct: ', correct, file = file)
                        print('total: ', all_q, ' correct: ', correct)
            elif pretrain == 'mlmr':
                # Random-masking MLM over the raw story text.
                print('Story:\n',story_txt, file = file)
                all_q += 1
                _, output, truth = Masked_LM_random(model, story_txt, s_ind, other, device, file)
                # print("predict: ", output)
                print("truth: ", truth, "\npredict: ", output, file = file)
                # print("truth: ", truth, "\npredict: ", output)
                correct_temp = 0
                for i in range(len(output)):
                    if output[i] == truth[i]: correct_temp+=1
                correct += correct_temp / len(output)
                # if correct_temp / len(output) == 1:
                #     correct += 1
                print('total: ', all_q, ' correct: ', correct, file = file)
                print('total: ', all_q, ' correct: ', correct)
            #QA tasks
            else:
                # each question (span)
                for question in story['questions']:
                    q_text, q_emb= '', []
                    if question['q_type'] in qtype: #and len(question['answer']) == 1: #and x == 0:
                        if other == 'noDK' and question['answer'] == ['DK']: continue
                        q_text = question['question']
                        all_q += 1
                        if question['q_type'] == "YN": all_q_YN +=1
                        elif question["q_type"] == "FR": all_q_FR += 1
                        print('Story:\n',story_txt, file = file)
                        print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                        input_text = concate_input_components([q_text, story_txt], baseline)
                        if pretrain == 'bertmc':
                            _, output = multiple_classification(model, [input_text], question['q_type'], question['candidate_answers'], [], other=other, device = device, dataset = data_name)
                        elif pretrain == 'bertbc':
                            _, output = boolean_classification(model, [input_text], question['q_type'], question['candidate_answers'], [], other=other, device = device, dataset = data_name)
                        print("predict: ", output[0], file = file)
                        correct_answer = question["answer"]
                        if check_answer_equality(correct_answer, output[0]) :
                            correct+=1
                            if question["q_type"] == "YN": correct_YN +=1
                            if question["q_type"] == "FR": correct_FR += 1
                            print('total: ', all_q, ' correct: ', correct, file = file)
                            # print('total: ', all_q, ' correct: ', correct)
                        # else: print(s_ind, 'wrong')
                        # YN confusion counts: 0=Yes, 1=No, 2=DK.
                        if question['q_type'] == 'YN':
                            if correct_answer == ['Yes']: TPFN[0] += 1
                            elif correct_answer == ['No']: TPFN[1] += 1
                            elif correct_answer == ['DK']: TPFN[2] += 1
                            if output[0] == ['Yes']: TPFP[0] += 1
                            elif output[0] == ['No']: TPFP[1] += 1
                            elif output[0] == ['DK']: TPFP[2] += 1
                            if output[0] == correct_answer == ['Yes']: TP[0] += 1
                            elif output[0] == correct_answer == ['No']: TP[1] += 1
                            elif output[0] == correct_answer == ['DK']: TP[2] += 1
                        # Human FR: also score with the distance relations
                        # (labels 4 and 5) removed from both sides.
                        if question['q_type'] == 'FR' and is_human:
                            # print(correct_answer, output)
                            if 4 in correct_answer: correct_answer.remove(4)
                            if 5 in correct_answer: correct_answer.remove(5)
                            if 4 in output[0]: output[0].remove(4)
                            if 5 in output[0]: output[0].remove(5)
                            if correct_answer == output[0] :
                                correct_no_distance += 1
                                print('total: ', all_q, ' correct_no_dist: ', correct_no_distance, file = file)
                                # print('total: ', all_q, ' correct_no_dist: ', correct_no_distance)
        print(test_or_dev, ' Final '+'unseen'if unseen else ''+' accuracy: ', correct/ all_q)
        print(test_or_dev, ' Final '+'unseen'if unseen else ''+' accuracy: ', correct/ all_q, file = file)
        # NOTE(review): `task` is a list, so `task == "all"` never holds —
        # the per-type breakdown below looks unreachable; confirm intent.
        if task == "all":
            if all_q_YN:
                print(test_or_dev, ' Final '+'unseen'if unseen else ''+' accuracy on YN: ', correct_YN/ all_q_YN)
                print(test_or_dev, ' Final '+'unseen'if unseen else ''+' accuracy on YN: ', correct_YN/ all_q_YN, file = file)
            if all_q_FR:
                print(test_or_dev, ' Final '+'unseen'if unseen else ''+' accuracy on FR: ', correct_FR/ all_q_FR)
                print(test_or_dev, ' Final '+'unseen'if unseen else ''+' accuracy on FR: ', correct_FR/ all_q_FR, file = file)
        # TODO changed
        if is_human and ('FR' in task or "all" in task):
            print(test_or_dev, ' accuracy with no distance: ', correct_no_distance/ all_q, file = file)
        if 'YN' in task or "all" in task:
            print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
            Precision = np.nan_to_num(TP / TPFP)
            Recall = np.nan_to_num(TP / TPFN)
            F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
            # Macro-F1 over Yes/No only (DK excluded).
            Macro_F1 = np.average(F1[:2])
            print(test_or_dev, ' Final Precision: ', Precision, file = file)
            print(test_or_dev, ' Final Recall: ', Recall, file = file)
            print(test_or_dev, ' Final F1: ', F1, file = file)
            print(test_or_dev, ' Final Macro_F1: ', Macro_F1)
            print(test_or_dev, ' Final Macro_F1: ', Macro_F1, file = file)
            return (correct_YN / all_q_YN, Macro_F1,)
        return (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Map a character span in `story` to token indices in [CLS] question [SEP] story.

    `start_end` holds [start_char, end_char] spans; only the first is used.
    `tokenizing` is the tokenizer function to apply. Returns
    [start_token, end_token] shifted by the question length plus the two
    special tokens. `file` is unused here.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    #finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:
        # `temp` is the character offset currently being located: first the
        # span start, then (after the start token is found) the span end.
        temp = s_e[0]
        sum_char = 0
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            if temp > sum_char + len(word) : sum_char += len_word
            else:
                if is_start:
                    start, is_start = ind , False
                    # Span end falls within the same token: emit and stop.
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1
                else: start_end_token+=[[start, ind]]; break
            # Count the inter-token space, except around punctuation and
            # apostrophes which tokenize without surrounding spaces.
            # NOTE(review): assumes single-space-separated words in `story`.
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
        start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
        start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
def test_babi(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on the bAbI split via boolean classification.

    Loads dataset/babi/<test_or_dev>.json, evaluates questions whose type
    equals `qtype`, and returns overall accuracy. `pretrain` and `unseen`
    are accepted but unused in this body.
    """
    #import baseline
    # Import the model-specific helpers for the chosen architecture.
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    with open('dataset/babi/'+test_or_dev+'.json') as json_file:
        data = json.load(json_file)
    all_q = 0
    correct = 0
    s_ind = 0
    #random sampling or not
    random.seed(1)
    stories = data['data'] #if other != 'random' else random.sample(data['data'], num_sample)
    # bAbI stories carry 8 questions each, hence num_sample/8 stories.
    number_samples = int((num_sample/8)+1) if num_sample else num_sample
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in stories[:number_samples]:
            # if is_DK_babi(story['story'][0]): continue
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            story_txt = story['story'][0]
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                if question['q_type'] in [qtype]:
                    q_text = question['question']
                    all_q += 1
                    print('Story:\n',story_txt, file = file)
                    print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    _, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
                    #print("logit: ", logit, file = file)
                    print("predict: ", output, file = file)
                    # Sorted list equality = order-insensitive exact match.
                    correct_answer = question['answer']
                    correct_answer.sort()
                    if correct_answer == output :
                        correct+=1
                        print('total: ', all_q, ' correct: ', correct, file = file)
                    print('total: ', all_q, ' correct: ', correct)
    print(test_or_dev,' Final accuracy: ', correct/ all_q)
    print(test_or_dev,' Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def test_boolq(model, pretrain, baseline, test_or_dev,num_sample,unseen, qtype, other, device, file):
    """Evaluate `model` on BoolQ yes/no questions via boolean classification.

    For 'dev' the whole dev split is used; otherwise the last `num_sample`
    entries of test_1.json. Returns overall accuracy. `pretrain`, `unseen`
    and `qtype` are accepted but unused in this body.
    """
    #import baseline
    # Import the model-specific helpers for the chosen architecture.
    if baseline == 'bert':
        from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'xlnet':
        from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    elif baseline == 'albert':
        from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    start_number = 0
    if test_or_dev == 'dev':
        with open('dataset/boolQ/dev.json') as json_file:
            data = json.load(json_file)
    else:
        with open('dataset/boolQ/test_1.json') as json_file:
            data = json.load(json_file)
        # Evaluate only the final num_sample entries of the test file.
        start_number = len(data['data']) - num_sample
        num_sample = None
    all_q = 0
    correct = 0
    s_ind = 0
    model.eval()
    # with no auto gradient calculation, torch runs a bit faster
    with torch.no_grad():
        for story in data['data'][start_number:num_sample]:
            s_ind+= 1
            print('sample ',s_ind)
            print('sample ',s_ind, file = file)
            # Passages are truncated to keep within the model's input limit.
            story_txt = story['passage'][:1000]
            # each question (span)
            # for question in story['questions']:
            #     q_text, q_emb= '', []
            #     if question['q_type'] in [qtype]:
            q_text = story['question']+'?'
            answer = ['Yes'] if story['answer'] == True else ['No']
            all_q += 1
            print('Story:\n',story_txt, file = file)
            print('question: ', q_text, '\nanswer: ', answer, file = file)
            _, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
            print("predict: ", output, file = file)
            correct_answer = answer
            correct_answer.sort()
            if correct_answer == output :
                correct+=1
                print('total: ', all_q, ' correct: ', correct, file = file)
            print('total: ', all_q, ' correct: ', correct)
    print('Test Final accuracy: ', correct/ all_q)
    print('Test Final accuracy: ', correct/ all_q, file = file)
    return correct/ all_q
def is_DK_babi(story):
    """Return True when the story mentions relations from both axes.

    A story mixing a left/right relation with an above/below relation
    leaves some spatial questions undetermined ("don't know").
    """
    on_x_axis = ('left' in story) or ('right' in story)
    on_y_axis = ('above' in story) or ('below' in story)
    return on_x_axis and on_y_axis
| 18,533 | 39.734066 | 241 | py |
Spatial-QA-tasks | Spatial-QA-tasks-main/QA/.ipynb_checkpoints/trainold-checkpoint.py | import json
import re
import random
import torch
from tqdm import tqdm
import numpy as np
import torch.nn as nn
from BERT import tokenizing
# from Create_LM_input_output import tokenizing, boolean_classification, multiple_classification, initialize_tokenizer
# from ALBERT import tokenizing
# from XLNet import tokenizing
def train(model
          , criterion
          , optimizer
          , pretrain = "bertbc"
          , baseline = "bert"
          , start = 0
          , num_sample = None
          , train_num = None
          , qtype = None
          , data_name = False
          , other = None
          , device = "cpu"
          , train_log = False
          , file = None
          , epochs = 0
          , batch_size = None
          , dataset = "spartqa"
          ):
    """Run one training epoch over a spatial-QA dataset.

    Dispatches on *pretrain* to one of several objectives:
    'mlm' (masked-LM on question-derived sentences), 'mlmr' (masked-LM on
    random story tokens), 'bertqa' (span prediction), 'bertmc'
    (multi-class) or 'bertbc' (boolean/multi-label classification).

    Args:
        model: the transformer model to train (set to .train() here).
        criterion: unused in this function body; loss comes from the
            task helpers. NOTE(review): consider removing or using it.
        optimizer: optimizer stepped once per processed question/story.
        pretrain (str): objective selector, see above.
        baseline (str): backbone family; only 'bert' imports are active.
        start (int): skip samples with index < start.
        num_sample (int|None): cap on number of stories.
        train_num (str|None): selects an alternative training JSON file.
        qtype (str|None): question type filter ('YN', 'FB', 'FR', 'CO', ...).
        data_name (str|bool): dataset selector ('human', 'spartun', ...).
        other: extra flag forwarded to task helpers (e.g. 'noDK', 'unseen').
        device (str): torch device string forwarded to task helpers.
        train_log (bool): when True, verbose per-sample logging to *file*.
        file: open file handle for log prints.
        epochs, batch_size, dataset: currently unused here (kept for API
            compatibility with callers).

    Returns:
        (float, tuple): summed loss and (accuracy,) — plus Macro-F1 when
        qtype == 'YN'.
    """
    #import baseline
    if baseline == 'bert':
        # initialize_tokenizer(baseline)
        from BERT import question_answering, tokenizing, boolean_classification, Masked_LM, Masked_LM_random, token_classification, multiple_classification
    # elif baseline == 'xlnet':
    #     from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
    # elif baseline == 'albert':
    #     from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
    model.train()
    all_q = 0
    correct = 0
    s_ind = 0
    losses = []
    # k_fold = 7
    # Select the training split; precedence: data_name > train_num > other.
    if data_name == "human":
        with open('./dataset/human_train.json') as json_file:
            data = json.load(json_file)
    elif data_name == "spartun":
        with open('./dataset/SpaRTUN/train.json') as json_file:
            data = json.load(json_file)
        # if task == ["all"]: qtype = ["YN"]
    elif train_num == 'train24k':
        with open('./dataset/train_24k.json') as json_file:
            data = json.load(json_file)
    elif train_num == 'train100k':
        with open('./dataset/train_100k.json') as json_file:
            data = json.load(json_file)
    elif train_num == 'train500':
        with open('./dataset/train_500.json') as json_file:
            data = json.load(json_file)
    elif other == 'unseen' :
        with open('./dataset/unseen_test.json') as json_file:
            data = json.load(json_file)
    else:
        with open('dataset/train.json') as json_file:
            data = json.load(json_file)
    # Per-class tallies for Yes/No/DK confusion stats (indices 0/1/2).
    if qtype == 'YN': TPFN, TP, TPFP = np.array([0]*3), np.array([0]*3), np.array([0]*3)
    for s_ind, story in enumerate(tqdm(data['data'][:num_sample])):
        # s_ind+= 1
        # print('sample ',s_ind)
        if s_ind< start:continue
        # samples [epochs*k_fold, (epochs*k_fold)+k_fold] considered as dev
        # if human and s_ind in range((epochs%6)*k_fold, ((epochs%6)*k_fold)+k_fold): continue
        if train_log: print('sample ',s_ind, file = file)
        story_txt = story['story'][0]
        x = 0
        #MLM tasks
        #TODO add batch
        if pretrain == 'mlm':
            # Masked-LM over declarative sentences derived from questions.
            tasks_list = ['FB', 'FR', 'CO'] if qtype == 'all' else [qtype]
            model.zero_grad()
            # story_txt = 'The circle is above the triangle and the blue square. the blue square is below the circle.'
            if train_log: print('Story:\n',story_txt, file = file)
            for question in story['questions']:
                q_text, q_emb= '', []
                q_type = question['q_type']
                if q_type in tasks_list :
                    q_text = question_to_sentence(question['question'], question['q_type'], question['answer'], question['candidate_answers'])
                    # Skip multi-answer and other unsupported question shapes.
                    if len(question['answer'])> 1 or (q_type == 'FB' and question['answer'] == []) or (q_type == 'FR' and 7 in question['answer']) or (q_type == 'CO' and (question['answer'] == [2] or question['answer'] == [3])): continue
                    answer = question['answer'][0] if q_type == 'FB' else question['candidate_answers'][question['answer'][0]]
                    if q_type == 'CO' and ('which' in answer or 'in' in answer or 'that' in answer): continue
                    all_q += 1
                    if train_log: print('Question: ',q_text,'\nAnswer: ', answer, file = file)
                    loss, output, truth = Masked_LM(model, story_txt, q_text, answer, other, device, file)
                    # print("predict: ", output)
                    if train_log: print("truth: ", truth, "\npredict: ", output, file = file)
                    # print("truth: ", truth, "\npredict: ", output)
                    # Fractional credit: share of masked tokens predicted right.
                    correct_temp = 0
                    for i in range(len(output)):
                        if output[i] == truth[i]: correct_temp+=1
                    correct += correct_temp / len(output)
                    # if correct_temp / len(output) == 1:
                    #     correct += 1
                    if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                    print('total: ', all_q, ' correct: ', correct)
                    print("Loss is ", loss.item(), file = file)
                    losses += [loss.item()]
                    loss.backward()
                    optimizer.step()
        #TODO add batch
        elif pretrain == 'mlmr':
            # Masked-LM over randomly masked story tokens (one pass per story).
            model.zero_grad()
            # story_txt = 'The circle is above the triangle and the blue square. the blue square is below the circle.'
            if train_log: print('Story:\n',story_txt, file = file)
            all_q += 1
            loss, output, truth = Masked_LM_random(model, story_txt, s_ind+1, other, device, file)
            # print("predict: ", output)
            if train_log: print("truth: ", truth, "\npredict: ", output, file = file)
            # print("truth: ", truth, "\npredict: ", output)
            correct_temp = 0
            for i in range(len(output)):
                if output[i] == truth[i]: correct_temp+=1
            correct += correct_temp / len(output)
            # if correct_temp / len(output) == 1:
            #     correct += 1
            if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
            print('total: ', all_q, ' correct: ', correct)
            print("Loss is ", loss.item(), file = file)
            losses += [loss.item()]
            loss.backward()
            optimizer.step()
        #QA tasks
        else:
            # each question (span)
            for question in story['questions']:
                q_text, q_emb= '', []
                if question['q_type'] in [qtype] :
                    # FA questions require a character span; noDK drops DK answers.
                    if question['q_type'] in ['FA'] and question['start_end_char'] == []: continue
                    if other == 'noDK' and question['answer'] == ['DK']: continue
                    x+=1
                    q_text = question['question']
                    model.zero_grad()
                    all_q += 1
                    print('Story:\n',story_txt, file = file)
                    if train_log: print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
                    if pretrain == 'bertqa':
                        # Span-extraction head: char span -> token span first.
                        correct_start_end_word = correct_token_id(story_txt, q_text, question['start_end_char'], tokenizing, file)
                        #sent to model
                        loss, output, start, end = question_answering(model, q_text, story_txt, correct_start_end_word, device)
                        if train_log: print("Correct start end: ", correct_start_end_word, "\npredict: ", output, start, end, "\nstart end:", question['start_end_char'], file = file)
                        if question['answer'][0] == output and (start == correct_start_end_word[0] and end == correct_start_end_word[1]):
                            correct+=1
                        if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                        # print('total: ', all_q, ' correct: ', correct)
                    elif pretrain == 'bertmc':
                        loss, output = multiple_classification(model, q_text, story_txt, question['q_type'], question['candidate_answers'], question['answer'], other, device, dataset = data_name)
                        if train_log: print("predict: ", output, file = file)
                        # NOTE(review): .sort() mutates the answer list in the
                        # loaded dataset dict in place.
                        correct_answer = question['answer']
                        correct_answer.sort()
                        if correct_answer == output :
                            correct+=1
                        if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                        print('total: ', all_q, ' correct: ', correct)
                        if qtype == 'YN':
                            if correct_answer == ['Yes']: TPFN[0] += 1
                            elif correct_answer == ['No']: TPFN[1] += 1
                            elif correct_answer == ['DK']: TPFN[2] += 1
                            if output == ['Yes']: TPFP[0] += 1
                            elif output == ['No']: TPFP[1] += 1
                            elif output == ['DK']: TPFP[2] += 1
                            if output == correct_answer == ['Yes']: TP[0] += 1
                            elif output == correct_answer == ['No']: TP[1] += 1
                            elif output == correct_answer == ['DK']: TP[2] += 1
                    elif pretrain == 'bertbc':
                        # Boolean / multi-label classification head.
                        loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], question['candidate_answers'], question['answer'], other, device, dataset = data_name)
                        if train_log: print("predict: ", output, file = file)
                        correct_answer = question['answer']
                        correct_answer.sort()
                        output.sort()
                        if correct_answer == output :
                            correct+=1
                        if train_log: print('total: ', all_q, ' correct: ', correct, file = file)
                        # print('total: ', all_q, ' correct: ', correct)
                        if qtype == 'YN':
                            if correct_answer == ['Yes']: TPFN[0] += 1
                            elif correct_answer == ['No']: TPFN[1] += 1
                            elif correct_answer == ['DK']: TPFN[2] += 1
                            if output == ['Yes']: TPFP[0] += 1
                            elif output == ['No']: TPFP[1] += 1
                            elif output == ['DK']: TPFP[2] += 1
                            if output == correct_answer == ['Yes']: TP[0] += 1
                            elif output == correct_answer == ['No']: TP[1] += 1
                            elif output == correct_answer == ['DK']: TP[2] += 1
                    # One optimizer step per question.
                    if train_log: print("Loss is ", loss.item(), file = file)
                    losses += [loss.item()]
                    loss.backward()
                    optimizer.step()
    losses = np.sum(losses)
    print('Train Final accuracy: ', correct/ all_q)
    print('Train Final accuracy: ', correct/ all_q, file = file)
    print('Losses: ', losses)
    print('Losses: ', losses, file = file)
    if qtype == 'YN':
        # Macro-F1 averaged over Yes/No only (DK excluded via F1[:2]).
        print('TP:',TP, ' TPFP: ', TPFP,' TPFN: ', TPFN ,file = file)
        Precision = np.nan_to_num(TP / TPFP)
        Recall = np.nan_to_num(TP / TPFN)
        F1 = np.nan_to_num((2 * (Precision * Recall)) / (Precision + Recall))
        Macro_F1 = np.average(F1[:2])
        print('Train Final Precision: ', Precision, file = file)
        print('Train Final Recall: ', Recall, file = file)
        print('Train Final F1: ', F1, file = file)
        print('Train Final Macro_F1: ', Macro_F1)
        print('Train Final Macro_F1: ', Macro_F1, file = file)
        return losses, (correct/ all_q, Macro_F1)
    return losses, (correct/ all_q,)
def correct_token_id(story, question, start_end, tokenizing, file):
    """Convert a character-level answer span into token indices for BERT QA.

    Walks the tokenized story, accumulating character counts (plus one for
    inter-word spaces, except around punctuation/apostrophes) until the
    span's start and end characters are reached. Only the FIRST span in
    *start_end* is converted (note the [:1] slice). The resulting token
    indices are shifted by len(question tokens) + 2 to account for the
    [CLS] and [SEP] tokens preceding the story in the model input.

    Args:
        story (str): passage text.
        question (str): question text (only its token length is used).
        start_end (list[list[int]]): character [start, end) spans;
            assumed non-empty — an empty list raises IndexError at the end.
        tokenizing (callable): tokenizer returning a list of token strings.
        file: log file handle (unused here).

    Returns:
        list[int]: [start_token_idx, end_token_idx] of the first span,
        offset into the [CLS] question [SEP] story sequence.
    """
    story_tokenized = tokenizing(story)
    q_tokenized = tokenizing(question)
    # finding the start and end token based on the characters
    sum_char = 0
    start_end_token = []
    for s_e in start_end[:1]:
        # temp holds the character offset currently being searched for:
        # first the span start, then (after the start token is found) the end.
        temp = s_e[0]
        sum_char = 0
        # NOTE(review): 'end' is assigned here but never used afterwards.
        is_start,start, end = True, None, None
        for ind,word in enumerate(story_tokenized):
            len_word = len(word)
            if temp > sum_char + len(word) : sum_char += len_word;
            else:
                if is_start:
                    start, is_start = ind , False
                    # Span may end inside the same token as it starts.
                    if s_e[1]-1 <= sum_char + len(word): start_end_token+=[[start, ind]];break
                    else: temp = s_e[1]-1;
                else: start_end_token+=[[start, ind]]; break
            # Count a separating space unless next to punctuation/apostrophe.
            if ind != len(story_tokenized)-1 and story_tokenized[ind+1] != '.' and story_tokenized[ind+1] != ',' and story_tokenized[ind+1] != "'" and story_tokenized[ind] != "'": sum_char += 1 # plus one for space
        start_end_token[-1][0] += len(q_tokenized)+2 # 2 for [cls] and [SEP]
        start_end_token[-1][1] += len(q_tokenized)+2
    return start_end_token[0]
# def train_babi(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
# #import baseline
# if baseline == 'bert':
# from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'xlnet':
# from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'albert':
# from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
# with open('./dataset/babi/train.json') as json_file:
# data = json.load(json_file)
# number_samples = int((num_sample/8)+1) if num_sample else num_sample
# #random sampling or not
# random.seed(1)
# stories = random.sample(data['data'], number_samples) if num_sample in [100, 500, 1000, 2000, 5000] else data['data']
# model.train()
# all_q = 0
# correct = 0
# s_ind = 0
# losses = []
# for story in stories[:number_samples]:
# # if is_DK_babi(story['story'][0]): continue
# s_ind+= 1
# print('sample ',s_ind)
# print('sample ',s_ind, file = file)
# story_txt = story['story'][0]
# x = 0
# # each question (span)
# for question in story['questions']:
# q_text, q_emb= '', []
# if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
# x+=1
# q_text = question['question']
# model.zero_grad()
# all_q += 1
# print('Story:\n',story_txt, file = file)
# print('question: ', q_text, '\nanswer: ',question['answer'], file = file)
# loss, output = boolean_classification(model, q_text, story_txt, question['q_type'], ['babi'], question['answer'], other, device)
# #print("logit: ", logit , file = file)
# print("predict: ", output, file = file)
# correct_answer = question['answer']
# correct_answer.sort()
# if correct_answer == output :
# correct+=1
# print('total: ', all_q, ' correct: ', correct, file = file)
# print('total: ', all_q, ' correct: ', correct)
# print("Loss is ", loss.item(), file = file)
# losses += [loss.item()]
# loss.backward()
# optimizer.step()
# losses = np.sum(losses)
# print('Train Final accuracy: ', correct/ all_q)
# print('Train Final accuracy: ', correct/ all_q, file = file)
# print('Losses: ', losses)
# print('Losses: ', losses, file = file)
# return losses, correct/ all_q
# def train_boolq(model, criterion, optimizer,pretrain, baseline, num_sample, train24k, qtype, other, device, file):
# #import baseline
# if baseline == 'bert':
# from BERT import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'xlnet':
# from XLNet import question_answering, tokenizing, multiple_choice, boolean_classification
# elif baseline == 'albert':
# from ALBERT import question_answering, tokenizing, multiple_choice, boolean_classification
# with open('./dataset/boolQ/train.json') as json_file:
# data = json.load(json_file)
# model.train()
# all_q = 0
# correct = 0
# s_ind = 0
# losses = []
# x = 0
# for story in data['data'][:num_sample]:
# s_ind+= 1
# print('sample ',s_ind)
# print('sample ',s_ind, file = file)
# story_txt = story['passage'][:1000]
# # print(story_txt)
# # each question (span)
# # for question in story['questions']:
# # q_text, q_emb= '', []
# # if question['q_type'] in [qtype] : #and len(question['answer']) == 1: #and x == 0:
# x+=1
# q_text = story['question']
# answer = ['Yes'] if story['answer'] == True else ['No']
# model.zero_grad()
# all_q += 1
# print('Story:\n',story_txt, file = file)
# print('question: ', q_text, '\nanswer: ',answer, file = file)
# loss, output = boolean_classification(model, q_text, story_txt, 'YN', ['boolq'], answer, other, device)
# print("predict: ", output, file = file)
# correct_answer = answer
# correct_answer.sort()
# if correct_answer == output :
# correct+=1
# print('total: ', all_q, ' correct: ', correct, file = file)
# print('total: ', all_q, ' correct: ', correct)
# print("Loss is ", loss.item(), file = file)
# losses += [loss.item()]
# loss.backward()
# optimizer.step()
# losses = np.sum(losses)
# print('Train Final accuracy: ', correct/ all_q)
# print('Train Final accuracy: ', correct/ all_q, file = file)
# print('Losses: ', losses)
# print('Losses: ', losses, file = file)
# return losses, correct/ all_q
def is_DK_babi(story):
    """Return True if *story* mixes a horizontal and a vertical relation.

    For the bAbI spatial task, a story that contains both a horizontal
    relation word ('left'/'right') and a vertical one ('above'/'below')
    may have an ambiguous ("don't know") answer, so callers use this to
    filter such samples.

    Args:
        story (str): story text; checked by plain substring containment.

    Returns:
        bool: True when at least one horizontal and at least one
        vertical relation word both occur in the story.
    """
    # The original four-way elif chain (left&vert, right&vert, above&horiz,
    # below&horiz) reduces exactly to this single conjunction; the
    # `True if x else False` wrappers around `in` were redundant.
    has_horizontal = 'left' in story or 'right' in story
    has_vertical = 'above' in story or 'below' in story
    return has_horizontal and has_vertical
def question_to_sentence(question, q_type, answer, candidate):
    """Rewrite a question into a declarative [MASK] sentence for MLM training.

    Transforms the interrogative phrasing of FB (find-block), FR
    (find-relation) and CO (choose-object) questions into statements with
    [MASK] placeholder(s) where the answer belongs, by literal substring
    replacement of the known question templates.

    Args:
        question (str): the original question text.
        q_type (str): one of 'FB', 'FR', 'CO'.
        answer (list): answer indices; for FR, [4]/[5] mean two-token
            relations, so two [MASK]s are emitted; for CO it indexes
            *candidate*.
        candidate (list[str]): candidate answer strings (used for CO to
            size the mask to the answer's token count).

    Returns:
        str: the masked declarative sentence. Questions not matching any
        template are returned unchanged (minus CO truncation).
    """
    if q_type == 'FB':
        # "Which/what block ... ?" -> "block [MASK] ... ."
        if 'Which' in question:
            question = question.replace('Which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'which' in question:
            question = question.replace('which block', 'block [MASK]').replace('(s)','').replace('?','.')
        elif 'what' in question:
            question = question.replace('what block', 'block [MASK]').replace('(s)','').replace('?','.')
        else:
            question = question.replace('What block', 'block [MASK]').replace('(s)','').replace('?','.')
    elif q_type == 'FR':
        # Relation questions: drop the interrogative lead-in, then replace
        # the connective with "is [MASK]" ("is [MASK] [MASK]" for the
        # two-token relations encoded as answer [4] or [5]).
        # print('hi',question)
        if 'What' in question:
            question = question.replace('What is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
            # print(question)
        elif 'exist' in question:
            question = question.replace('what relations exist between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'what' in question:
            question = question.replace('what is the relation between','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('and', 'is [MASK] [MASK]')
            else: question = question.replace('and', 'is [MASK]')
        elif 'where' in question:
            question = question.replace('where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
        else:
            question = question.replace('Where is','').replace('?','.')
            if answer == [4] or answer == [5]: question = question.replace('regarding to', 'is [MASK] [MASK]')
            else: question = question.replace('regarding to', 'is [MASK]')
    elif q_type == 'CO':
        # Choose-object: mask must span as many tokens as the answer text.
        # Relies on the module-level `tokenizing` import (from BERT).
        # print(question, answer)
        answer = candidate[answer[0]]
        token_answer = tokenizing(answer)
        mask = ('[MASK] '*len(token_answer))[:-1]
        # print('mask', mask)
        if 'What' in question:
            # Truncate anything after the first '?' before substituting.
            question = question[:question.find('?')+1]
            if 'What object' in question:
                question = question.replace('What object',mask).replace('?','.')
            elif 'What thing' in question:
                question = question.replace('What thing',mask).replace('?','.')
            elif 'What square' in question:
                question = question.replace('What square',mask).replace('?','.')
            else:
                question = question.replace('What',mask).replace('?','.')
        elif 'what' in question:
            question = question[:question.find('?')+1]
            if 'what object' in question:
                question = question.replace('what object',mask).replace('?','.')
            elif 'what thing' in question:
                question = question.replace('what thing',mask).replace('?','.')
            else:
                question = question.replace('what',mask).replace('?','.')
        elif 'Which' in question:
            if 'Which object' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which object',mask).replace('?','.')
            elif 'Which square' in question:
                question = question[:question.find('?')+1]
                question = question.replace('Which square',mask).replace('?','.')
        elif 'which' in question:
            question = question[:question.find('?')+1]
            question = question.replace('which object',mask).replace('?','.')
    return question
def confusion_matrix(truth, predict, correct, TP, TPFP, TPFN):
    """Accumulate exact-match count and per-class confusion tallies.

    Args:
        truth (list[int]): gold class labels, used as indices into the tallies.
        predict (list[int]): predicted class labels, same length as *truth*.
        correct (int): running exact-match counter (whole list must match).
        TP: per-class true-positive tallies, indexed by class id (mutated).
        TPFP: per-class prediction counts, i.e. TP+FP (mutated).
        TPFN: per-class gold counts, i.e. TP+FN (mutated).

    Returns:
        tuple: (correct, TP, TPFP, TPFN) with the updated values.
    """
    # Whole-sequence exact match counts once toward accuracy.
    if truth == predict:
        correct += 1
    for idx, gold in enumerate(truth):
        pred = predict[idx]
        if gold == pred:
            TP[gold] += 1
        TPFP[pred] += 1
        TPFN[gold] += 1
    return correct, TP, TPFP, TPFN
def precision(TP, TPFP):
    """Per-class precision TP/(TP+FP), skipping class 0; NaN/inf mapped to finite values."""
    ratio = TP[1:] / TPFP[1:]
    return np.nan_to_num(ratio)
def recall(TP, TPFN):
    """Per-class recall TP/(TP+FN), skipping class 0; NaN/inf mapped to finite values."""
    ratio = TP[1:] / TPFN[1:]
    return np.nan_to_num(ratio)
def F1_measure(TP, TPFP, TPFN, macro=False):
    """Per-class (or macro-averaged) F1 over classes 1..N.

    Class 0 is excluded via the [1:] slice; divisions by zero become 0
    through nan_to_num at each stage, matching the precision/recall helpers.

    Args:
        TP, TPFP, TPFN: numpy arrays of per-class tallies.
        macro (bool): when True, return the mean F1 instead of the vector.

    Returns:
        numpy array of per-class F1, or a scalar mean when macro=True.
    """
    prec = np.nan_to_num(TP[1:] / TPFP[1:])
    rec = np.nan_to_num(TP[1:] / TPFN[1:])
    f1 = np.nan_to_num((2 * (prec * rec)) / (prec + rec))
    if macro:
        return np.average(f1)
    return f1
| 24,398 | 38.867647 | 237 | py |
FGI-Matting | FGI-Matting-main/main.py | import os
import toml
import argparse
from pprint import pprint
import torch
from torch.utils.data import DataLoader
import utils
from utils import CONFIG
from tester import Tester
import dataloader
def main():
    """Run the test phase: prepare output dirs, build the Tester, evaluate.

    Expects CONFIG to be fully populated by the __main__ block (toml config
    loaded and paths versioned) before this is called.
    """
    # Test-phase logs go to a separate "<logging_path>_test" directory.
    CONFIG.log.logging_path += "_test"
    if CONFIG.test.alpha_path is not None:
        utils.make_dir(CONFIG.test.alpha_path)
    utils.make_dir(CONFIG.log.logging_path)
    # Create a logger
    # NOTE(review): the local is unused; get_logger is called for its
    # side effect of configuring the "Logger" used inside Tester.
    logger = utils.get_logger(CONFIG.log.logging_path,
                              logging_level=CONFIG.log.logging_level)
    test_dataloader = dataloader.get_Test_dataloader()
    tester = Tester(test_dataloader=test_dataloader)
    tester.test()
if __name__ == '__main__':
    print('Torch Version: ', torch.__version__)
    parser = argparse.ArgumentParser()
    parser.add_argument('--phase', type=str, default='test')
    parser.add_argument('--config', type=str, default='config/FGI_config.toml')
    # Parse configuration
    args = parser.parse_args()
    # Load the toml file into the module-level CONFIG singleton.
    with open(args.config) as f:
        utils.load_config(toml.load(f))
    # Check if toml config file is loaded
    if CONFIG.is_default:
        raise ValueError("No .toml config loaded.")
    CONFIG.phase = args.phase
    # Version-scope the output paths so runs don't overwrite each other.
    CONFIG.log.logging_path = os.path.join(CONFIG.log.logging_path, CONFIG.version)
    if CONFIG.test.alpha_path is not None:
        CONFIG.test.alpha_path = os.path.join(CONFIG.test.alpha_path, CONFIG.version)
    pprint(CONFIG)
    # Test
    main()
| 1,487 | 23 | 85 | py |
FGI-Matting | FGI-Matting-main/tester.py | import os
import cv2
import logging
import numpy as np
import torch
from time import time
import utils
from utils import CONFIG
import networks
from utils import comput_sad_loss, compute_connectivity_error, \
compute_gradient_loss, compute_mse_loss
class Tester(object):
    """Evaluates a matting generator on a test dataloader.

    Builds the generator from CONFIG, restores a checkpoint, then computes
    MSE/SAD (and optionally gradient/connectivity) errors per image,
    logging the averages.
    """

    def __init__(self, test_dataloader):
        self.test_dataloader = test_dataloader
        self.logger = logging.getLogger("Logger")
        self.model_config = CONFIG.model
        self.test_config = CONFIG.test
        self.log_config = CONFIG.log
        self.data_config = CONFIG.data
        self.build_model()
        self.resume_step = None
        utils.print_network(self.G, CONFIG.version)
        if self.test_config.checkpoint:
            self.logger.info('Resume checkpoint: {}'.format(self.test_config.checkpoint))
            self.restore_model(self.test_config.checkpoint)

    def build_model(self):
        """Instantiate the generator network; move it to GPU unless cpu mode."""
        self.G = networks.get_generator(encoder=self.model_config.arch.encoder, decoder=self.model_config.arch.decoder)
        if not self.test_config.cpu:
            self.G.cuda()

    def restore_model(self, resume_checkpoint):
        """
        Restore the trained generator and discriminator.
        :param resume_checkpoint: File name of checkpoint
        :return:
        """
        pth_path = os.path.join(self.log_config.checkpoint_path, '{}.pth'.format(resume_checkpoint))
        print('model path: ', pth_path)
        checkpoint = torch.load(pth_path)
        self.G.load_state_dict(utils.remove_prefix_state_dict(checkpoint['state_dict']), strict=True)

    def test(self):
        """Run inference over the whole test dataloader and log metrics.

        Per batch: pick the guidance input (trimap or guidancemap), run the
        generator, then per image crop to the original alpha shape, optionally
        save the predicted alpha, and accumulate MSE/SAD (+ GRAD/CONN unless
        fast_eval). Averages and timing are written to the logger.
        """
        self.G = self.G.eval()
        mse_loss = 0
        sad_loss = 0
        conn_loss = 0
        grad_loss = 0
        test_num = 0
        test_time = 0
        img_count = 0
        with torch.no_grad():
            for image_dict in self.test_dataloader:
                image, alpha, trimap = image_dict['image'], image_dict['alpha'], image_dict['trimap']
                guidancemap = image_dict['guidancemap']
                alpha_shape, name = image_dict['alpha_shape'], image_dict['image_name']
                # Select which map is fed to the network alongside the image.
                if CONFIG.test.guidancemap_phase == "trimap":
                    inpmap = trimap
                else:
                    inpmap = guidancemap
                if not self.test_config.cpu:
                    image = image.cuda()
                    alpha = alpha.cuda()
                    inpmap = inpmap.cuda()
                start = time()
                alpha_pred, _ = self.G(image, inpmap)
                end = time()
                inference_time = end - start
                test_time+=inference_time
                img_count+=1
                print(inference_time, img_count)
                if self.model_config.trimap_channel == 3:
                    trimap = trimap.argmax(dim=1, keepdim=True)
                # alpha_pred[trimap == 2] = 1
                # alpha_pred[trimap == 0] = 0
                # Remap class ids {0,1,2} -> grayscale {0,128,255} expected
                # by the evaluation helpers.
                trimap[trimap==2] = 255
                trimap[trimap==1] = 128
                for cnt in range(image.shape[0]):
                    # Crop back to the original (pre-padding) resolution.
                    h, w = alpha_shape
                    test_alpha = alpha[cnt, 0, ...].data.cpu().numpy() * 255
                    test_pred = alpha_pred[cnt, 0, ...].data.cpu().numpy() * 255
                    test_pred = test_pred.astype(np.uint8)
                    test_trimap = trimap[cnt, 0, ...].data.cpu().numpy()
                    test_pred = test_pred[:h, :w]
                    test_trimap = test_trimap[:h, :w]
                    if self.test_config.alpha_path is not None:
                        cv2.imwrite(os.path.join(self.test_config.alpha_path, os.path.splitext(name[cnt])[0] + ".png"),
                                    test_pred)
                    mse_loss += compute_mse_loss(test_pred, test_alpha, test_trimap)
                    # NOTE(review): SAD is computed twice (once for printing,
                    # once for accumulation).
                    print(name, comput_sad_loss(test_pred, test_alpha, test_trimap)[0])
                    sad_loss += comput_sad_loss(test_pred, test_alpha, test_trimap)[0]
                    if not self.test_config.fast_eval:
                        # GRAD/CONN are expensive; skipped under fast_eval.
                        conn_loss += compute_connectivity_error(test_pred, test_alpha, test_trimap, 0.1)
                        grad_loss += compute_gradient_loss(test_pred, test_alpha, test_trimap)
                    test_num += 1
        self.logger.info("TEST NUM: \t\t {}".format(test_num))
        self.logger.info("MSE: \t\t {}".format(mse_loss / test_num))
        self.logger.info("SAD: \t\t {}".format(sad_loss / test_num))
        if not self.test_config.fast_eval:
            self.logger.info("GRAD: \t\t {}".format(grad_loss / test_num))
            self.logger.info("CONN: \t\t {}".format(conn_loss / test_num))
        self.logger.info("time: \t\t {}".format(test_time))
        self.logger.info("time_per_image: \t\t {}".format(test_time/ test_num))
| 4,859 | 36.96875 | 119 | py |
FGI-Matting | FGI-Matting-main/paint_board.py | from PyQt5.QtWidgets import QWidget
from PyQt5.Qt import QPixmap, QPainter, QPoint, QPaintEvent, QMouseEvent, QPen,\
QColor, QSize
from PyQt5.QtCore import Qt
from PIL import Image, ImageQt
# from cv2 import findTransformECC
import numpy as np
# from torch._C import _cuda_resetAccumulatedMemoryStats
import copy
class PaintBoard(QWidget):
    """A Qt drawing-board widget that overlays pen strokes on a background image.

    The background image is scaled so its long side is 800 px; strokes are
    drawn onto a master QPixmap (gray = untouched) and blended 50/50 with
    the background for display. The stroke layer can be exported at the
    original input resolution.
    """

    def __init__(self, Parent=None, bg_img = None):  # bg_img is a PIL image
        '''
        Constructor: set up display size from the background image (or a
        blank white 800x800 board when none is given) and initialize the
        painting state.
        '''
        super().__init__(Parent)
        self.bg_img = bg_img
        self.disp_img_longside = 800
        if(self.bg_img!=None):
            # Scale so the longer side becomes 800 px, preserving aspect ratio.
            self.inp_img_w, self.inp_img_h = self.bg_img.size
            if(self.inp_img_w >= self.inp_img_h):
                self.disp_img_w = 800
                self.disp_img_h = int(self.inp_img_h*(800.0/self.inp_img_w))
            else:
                self.disp_img_h = 800
                self.disp_img_w = int(self.inp_img_w*(800.0/self.inp_img_h))
            self.bg_img = self.bg_img.resize((self.disp_img_w, self.disp_img_h))
        else:
            # No background given: use a blank white 800x800 canvas.
            self.inp_img_w, self.inp_img_h = 800, 800
            self.disp_img_w = 800
            self.disp_img_h = 800
            self.bg_img = np.ones([self.disp_img_h, self.disp_img_w, 3]).astype(np.uint8)*255
            self.bg_img = Image.fromarray(self.bg_img).convert('RGB')  # convert to PIL format
        self.__InitData()  # initialize data first, then the view
        self.__InitView()
        # Painter-related state.
        self.__painter = QPainter()  # the QPainter used for all drawing
        self.__thickness = 10  # default pen thickness: 10 px
        self.__penColor = QColor("black")  # default pen color: black
        self.__colorList = QColor.colorNames()  # list of available color names

    def __InitData(self):
        self.__size = QSize(self.disp_img_w, self.disp_img_h)
        # Create a QPixmap as the drawing board, sized __size.
        self.__board = QPixmap(self.__size)  # master board; all scaled drawing is synced onto this pixmap
        self.__board.fill(Qt.gray)  # fill the board with gray (gray = untouched)
        self.__IsEmpty = True  # board starts out empty
        self.EraserMode = False  # eraser mode disabled by default
        self.__lastPos = QPoint(0,0)  # previous mouse position
        self.__currentPos = QPoint(0,0)  # current mouse position

    def __InitView(self):
        # Fix the widget size to __size.
        self.setFixedSize(self.__size)

    def Reset_paintboard(self, bg_img):  # load a new image onto the board
        """Replace the background image, rescale, and reset all stroke data."""
        self.bg_img = bg_img
        self.inp_img_w, self.inp_img_h = self.bg_img.size
        if(self.inp_img_w >= self.inp_img_h):
            self.disp_img_w = 800
            self.disp_img_h = int(self.inp_img_h*(800.0/self.inp_img_w))
        else:
            self.disp_img_h = 800
            self.disp_img_w = int(self.inp_img_w*(800.0/self.inp_img_h))
        self.bg_img = self.bg_img.resize((self.disp_img_w, self.disp_img_h))
        self.__InitData()  # initialize data first, then the view
        self.__InitView()
        self.update()

    def Clear(self):
        # Clear the board back to gray.
        self.__board.fill(Qt.gray)
        self.update()
        self.__IsEmpty = True

    def ChangePenColor(self, color="black"):
        # Change the pen color.
        self.__penColor = QColor(color)

    def ChangePenThickness(self, thickness=10):
        # Change the pen thickness.
        self.__thickness = thickness

    def IsEmpty(self):
        # Return whether the board is still empty.
        return self.__IsEmpty

    def GetContentAsQImage(self):
        # Get the board content (returns a QImage of the stroke layer only).
        image = self.__board.toImage()
        return image

    def GetContentAsPILImage(self):
        # Get the board content (returns a PIL image at the ORIGINAL input
        # resolution, nearest-neighbor resized so stroke labels stay crisp).
        image = self.__board.toImage()
        image_PIL = ImageQt.fromqimage(image)
        image_array = np.array(image_PIL).astype(np.uint8)[:,:,::-1]  # RGB to BGR
        import cv2
        image_array = cv2.resize(image_array, (self.inp_img_w, self.inp_img_h), interpolation=cv2.INTER_NEAREST)
        image_PIL_resized = Image.fromarray(image_array[:,:,::-1]).convert('RGB')
        # print("in", (self.inp_img_w, self.inp_img_h))
        # image_PIL.resize((self.inp_img_w, self.inp_img_h), Image.NEAREST)
        # print("out", image_PIL.size)
        return image_PIL_resized

    def paintEvent(self, paintEvent):
        # Paint event: called whenever the widget needs repainting (update()
        # triggers it). Painting must happen between QPainter.begin()/end(),
        # with begin() given the paint device (here: the widget itself).
        self.__painter.begin(self)  # paint onto the whole widget
        # Convert the stroke board to an array...
        bg_qimage = self.__board.toImage()
        bg_PIL = ImageQt.fromqimage(bg_qimage)
        bg_array = np.array(bg_PIL)
        # ...blend 50/50 with the background image...
        fuse = 0.5*bg_array+0.5*np.array(self.bg_img)
        fuse_PIL = Image.fromarray(fuse.astype('uint8')).convert('RGB')
        fuse_qimage = ImageQt.ImageQt(fuse_PIL)
        # ...and display the result as a QPixmap.
        fuse_qpixmap = QPixmap.fromImage(fuse_qimage).scaled(self.disp_img_w, self.disp_img_h)
        self.__painter.drawPixmap(0,0,fuse_qpixmap)
        self.__painter.end()

    def mousePressEvent(self, mouseEvent):
        # On mouse press: record the position and draw a single point.
        self.__currentPos = mouseEvent.pos()
        self.__painter.begin(self.__board)  # paint onto the master pixmap
        if self.EraserMode == False:
            # Normal mode: use the configured pen color and thickness.
            self.__painter.setPen(QPen(self.__penColor,self.__thickness, cap = Qt.RoundCap))
        else:
            # Eraser mode: paint gray (the board's blank color), 20 px wide.
            self.__painter.setPen(QPen(Qt.gray, 20, cap = Qt.RoundCap))
        # Draw the point.
        self.__painter.drawPoints(self.__currentPos)
        self.__painter.end()
        self.__lastPos = self.__currentPos
        self.update()  # refresh the display

    def mouseMoveEvent(self, mouseEvent):
        # On mouse move: draw a line from the previous to the current position.
        self.__currentPos = mouseEvent.pos()
        self.__painter.begin(self.__board)  # paint onto the master pixmap
        if self.EraserMode == False:
            # Normal mode: use the configured pen color and thickness.
            self.__painter.setPen(QPen(self.__penColor,self.__thickness, cap = Qt.RoundCap))
        else:
            # Eraser mode: paint gray (the board's blank color), 20 px wide.
            self.__painter.setPen(QPen(Qt.gray, 20, cap = Qt.RoundCap))
        # Draw the line segment.
        self.__painter.drawLine(self.__lastPos, self.__currentPos)
        self.__painter.end()
        self.__lastPos = self.__currentPos
        self.update()  # refresh the display

    def Set_paint_image(self, paintImg_PIL):  # load an already-drawn trimap
        # Draw the given trimap image onto the master board.
        # NOTE(review): the resize() return value is discarded; scaling is
        # effectively done by .scaled() on the QPixmap below — confirm intent.
        paintImg_PIL.resize((self.disp_img_w, self.disp_img_h), Image.NEAREST)
        paintImg_qimage = ImageQt.ImageQt(paintImg_PIL)
        paintImg_qpixmap = QPixmap.fromImage(paintImg_qimage).scaled(self.disp_img_w, self.disp_img_h)
        self.__painter.begin(self.__board)
        self.__painter.drawPixmap(0,0,paintImg_qpixmap)
        self.__painter.end()
        self.update()

    def mouseReleaseEvent(self, mouseEvent):
        self.__IsEmpty = False  # the board is no longer empty
| 6,878 | 27.192623 | 112 | py |
FGI-Matting | FGI-Matting-main/test_one_img.py | import cv2
import numpy as np
import torch
from torch.nn import functional as F
import networks
import utils
import os
from time import time
class Tester_one_image(object):
    """Single-image inference wrapper around the matting generator.

    Unlike the dataloader-driven Tester, architecture names are hard-coded
    here and the checkpoint path comes from a plain dict config. Runs on
    GPU when available, CPU otherwise.
    """

    def __init__(self, test_config):
        # Fixed architecture matching the shipped Weight_qt_in_use checkpoint.
        self.model_config = {'encoder': "res_shortcut_encoder_29_spatial_attn", 'decoder': "res_shortcut_decoder_22_spatial_attn", 'trimap_channel':1}
        self.test_config = test_config
        self.build_model()
        self.resume_step = None
        if self.test_config['checkpoint']:
            self.restore_model(self.test_config['checkpoint'])

    def build_model(self):
        """Instantiate the generator; move to GPU if one is available."""
        self.G = networks.get_generator(encoder=self.model_config['encoder'], decoder=self.model_config['decoder'])
        if torch.cuda.is_available():
            self.G.cuda()

    def restore_model(self, resume_checkpoint):
        """
        Restore the trained generator and discriminator.
        :param resume_checkpoint: File name of checkpoint
        :return:
        """
        pth_path = os.path.join(self.test_config['checkpoint_path'], '{}.pth'.format(resume_checkpoint))
        # map_location='cpu' so the checkpoint loads even without CUDA.
        checkpoint = torch.load(pth_path, map_location=torch.device('cpu'))
        self.G.load_state_dict(utils.remove_prefix_state_dict(checkpoint['state_dict']), strict=True)

    def test(self, img, trimap):
        """Predict an alpha matte for one image/trimap pair.

        Args:
            img (torch.Tensor): 3xHxW normalized image (batched here).
            trimap (torch.Tensor): 1xHxW trimap with class ids {0,1,2}.

        Returns:
            torch.Tensor: HxW alpha prediction on CPU, with known
            foreground/background regions clamped to 1/0 from the trimap.
        """
        self.G = self.G.eval()
        with torch.no_grad():
            alpha_shape = img.shape[1:3]
            img = img.unsqueeze(0)
            trimap = trimap.unsqueeze(0)
            if torch.cuda.is_available():
                img = img.cuda()
                trimap = trimap.cuda()
                print("Using GPU")
            else:
                print("Using CPU")
            start = time()
            alpha_pred, _ = self.G(img, trimap)
            end = time()
            inference_time = end - start
            print('inference_time:', inference_time)
            if self.model_config['trimap_channel'] == 3:
                trimap = trimap.argmax(dim=1, keepdim=True)
            # Force known regions: trimap 2 = foreground, 0 = background.
            alpha_pred[trimap == 2] = 1
            alpha_pred[trimap == 0] = 0
            alpha_pred = alpha_pred[0][0]
            alpha_pred = alpha_pred.cpu()
        return alpha_pred
def inference(img_ori, trimap):
    """Run single-image alpha matting and compose the cut-out foreground.

    Pads height/width up to multiples of 32 (network stride), quantizes the
    trimap to class ids, normalizes the image with ImageNet statistics,
    runs the model, then crops the prediction back to the original size.

    Args:
        img_ori (np.ndarray): HxWx3 image array.
            NOTE(review): assumed uint8 in [0,255] — confirm with callers.
        trimap (np.ndarray): HxW trimap; values <20 -> background,
            >230 -> foreground, otherwise unknown.

    Returns:
        tuple: (alpha, fg) — HxW uint8 alpha in [0,255] and the HxWx3
        uint8 foreground composite (image * alpha).
    """
    test_config = {'checkpoint_path':"./checkpoints", 'checkpoint':"Weight_qt_in_use"}
    # Pad H/W to multiples of 32; remember originals so we can crop back.
    h_ori, w_ori = trimap.shape
    target_h = 32 * ((h_ori - 1) // 32 + 1)
    target_w = 32 * ((w_ori - 1) // 32 + 1)
    pad_h = target_h - h_ori
    pad_w = target_w - w_ori
    img = np.pad(img_ori, ((0,pad_h), (0, pad_w), (0,0)), mode="reflect")
    trimap = np.pad(trimap, ((0,pad_h), (0, pad_w)), mode="reflect")
    # Quantize the trimap to class ids: 0=background, 1=unknown, 2=foreground.
    trimap[trimap < 20] = 0
    trimap[trimap > 230] = 2
    trimap[(trimap>=20) & (trimap<=230)] = 1
    trimap_inp = trimap.copy()
    trimap_inp = torch.from_numpy(trimap_inp).float().unsqueeze(0)
    # ImageNet mean/std normalization after scaling to [0,1].
    mean = torch.tensor([0.485, 0.456, 0.406]).view(3,1,1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(3,1,1)
    img = img.transpose((2, 0, 1)).astype(np.float32)
    img /= 255.
    img = torch.from_numpy(img)
    img = img.sub_(mean).div_(std)
    tester = Tester_one_image(test_config)
    alpha_pred = tester.test(img, trimap_inp)
    # Crop the prediction back to the original (pre-padding) resolution.
    test_pred = alpha_pred.data.cpu().numpy() * 255
    test_pred = test_pred.astype(np.uint8)
    test_pred = test_pred[:h_ori, :w_ori]
    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # np.float64 is the exact equivalent, so this no longer crashes on
    # modern NumPy. Normalize alpha to [0,1] for compositing.
    test_pred = test_pred.astype(np.float64)/255
    fg = img_ori * test_pred[:, :, None]
    fg = fg.astype(np.uint8)
    test_pred*=255
    test_pred = test_pred.astype(np.uint8)
    return test_pred, fg
if __name__=='__main__':
pass | 4,037 | 26.100671 | 150 | py |
FGI-Matting | FGI-Matting-main/networks/generators.py | import os
import sys
sys.path.append(os.getcwd())
import torch
import torch.nn as nn
from utils import CONFIG
from networks import encoders, decoders
class Generator(nn.Module):
    """Matting generator: a configurable encoder/decoder pair.

    Encoder and decoder are looked up by name in the project's registries;
    an unknown name raises NotImplementedError.
    """

    def __init__(self, encoder, decoder):
        super(Generator, self).__init__()
        if encoder not in encoders.__all__:
            raise NotImplementedError("Unknown Encoder {}".format(encoder))
        self.encoder = encoders.__dict__[encoder]()
        if decoder not in decoders.__all__:
            raise NotImplementedError("Unknown Decoder {}".format(decoder))
        self.decoder = decoders.__dict__[decoder]()

    def forward(self, image, trimap):
        """Return (alpha, info_dict) for an image/trimap batch.

        The trimap is concatenated to the image along the channel axis
        before entering the encoder.
        """
        encoder_input = torch.cat((image, trimap), dim=1)
        embedding, mid_fea = self.encoder(encoder_input)
        return self.decoder(embedding, mid_fea, trimap)
def get_generator(encoder, decoder):
    """Factory wrapper: build a Generator from encoder/decoder names."""
    return Generator(encoder=encoder, decoder=decoder)
if __name__=="__main__":
    # Smoke test / benchmark: build the spatial-attention generator and time
    # 50 forward passes on random 1024x1024 inputs. Requires a CUDA device.
    import time
    generator = get_generator(encoder="res_shortcut_encoder_29_spatial_attn", decoder="res_shortcut_decoder_22_spatial_attn").eval()
    generator = generator.cuda()
    time_all = 0
    with torch.no_grad():
        for i in range(50):
            # Random image (3ch) and trimap (1ch) batch of size 1.
            inp1 = torch.rand([1,3,1024,1024]).float().cuda()
            inp2 = torch.rand([1,1,1024,1024]).float().cuda()
            t1 = time.time()
            oup = generator(inp1, inp2)
            time_p = time.time()-t1
            time_all+= time_p
            print("time:", time_p)
            print(oup[0].size())
    # NOTE(review): the first iteration includes CUDA warm-up, so the
    # average below slightly overstates steady-state latency.
    print("avg time:", time_all/50)
| 1,612 | 28.327273 | 132 | py |
FGI-Matting | FGI-Matting-main/networks/fpemjpu.py | # -*- coding: utf-8 -*-
# @Time : 2019/8/23 21:55
# @Author : zhoujun
import torch
import torch.nn as nn
from torchvision.models.utils import load_state_dict_from_url
from torch.nn import functional as F
from torch.nn import Module, Sequential, Conv2d, ReLU, AdaptiveAvgPool2d, BCELoss, CrossEntropyLoss
from networks.ops import SpectralNorm
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution; padding equals dilation (shape-preserving at stride 1), no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free pointwise (1x1) convolution."""
    return nn.Conv2d(
        in_channels=in_planes,
        out_channels=out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """ResNet basic residual block using spectrally-normalized 3x3 convs."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and `downsample`, when given) carries the spatial stride.
        self.conv1 = SpectralNorm(conv3x3(inplanes, planes, stride))
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = SpectralNorm(conv3x3(planes, planes))
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward: conv-bn-relu-conv-bn + shortcut, then relu."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> SN 3x3 -> 1x1 expand (x4)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and `downsample`, when given) carries the spatial stride.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = SpectralNorm(conv3x3(width, width, stride, groups, dilation))
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward through the three-conv bottleneck path."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """Torchvision-style ResNet backbone adapted for matting: the stem conv
    takes 4 input channels (RGB + trimap) and ``forward`` returns every
    intermediate feature level instead of classification logits."""

    def __init__(self, block, layers, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # 4 input channels: RGB image concatenated with a 1-channel trimap.
        self.conv1 = nn.Conv2d(4, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        # NOTE(review): the standard torchvision weight initialization is
        # deliberately commented out here — presumably because weights are
        # loaded from a pretrained checkpoint; confirm before re-enabling.
        # for m in self.modules():
        #     if isinstance(m, nn.Conv2d):
        #         nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        #     elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
        #         nn.init.constant_(m.weight, 1)
        #         nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        # if zero_init_residual:
        #     for m in self.modules():
        #         if isinstance(m, Bottleneck):
        #             nn.init.constant_(m.bn3.weight, 0)
        #         elif isinstance(m, BasicBlock):
        #             nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack `blocks` residual blocks; the first block may downsample
        (or dilate) and adapts the running `self.inplanes` channel count."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation to preserve spatial resolution.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x_in):
        """Return all feature levels: (input, stem output, c2, c3, c4, c5)."""
        x = self.conv1(x_in)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        c2 = self.layer1(x)
        c3 = self.layer2(c2)
        c4 = self.layer3(c3)
        c5 = self.layer4(c4)
        return x_in, x, c2, c3, c4, c5
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet and optionally load ImageNet weights for `arch`.

    `strict=False` because the 4-channel stem and SN wrappers differ from
    the published checkpoints.
    """
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained models from imagenet')
    return model
def resnet18(pretrained=False, progress=True, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet(
        'resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs
    )
class FPEM_FFM(nn.Module):
    """FPEM stack followed by a Feature Fusion Module (FFM) head: all levels
    are upsampled to c2 resolution, concatenated, and projected to 6 channels."""

    def __init__(self, backbone_out_channels, **kwargs):
        """
        PANnet
        :param backbone_out_channels: output channel counts of the backbone stages (c2..c5)
        """
        super().__init__()
        fpem_repeat = kwargs.get('fpem_repeat', 2)
        conv_out = 128
        # reduce layers: project each backbone stage to a common 128 channels
        self.reduce_conv_c2 = nn.Sequential(
            nn.Conv2d(in_channels=backbone_out_channels[0], out_channels=conv_out, kernel_size=1),
            nn.BatchNorm2d(conv_out),
            nn.ReLU()
        )
        self.reduce_conv_c3 = nn.Sequential(
            nn.Conv2d(in_channels=backbone_out_channels[1], out_channels=conv_out, kernel_size=1),
            nn.BatchNorm2d(conv_out),
            nn.ReLU()
        )
        self.reduce_conv_c4 = nn.Sequential(
            nn.Conv2d(in_channels=backbone_out_channels[2], out_channels=conv_out, kernel_size=1),
            nn.BatchNorm2d(conv_out),
            nn.ReLU()
        )
        self.reduce_conv_c5 = nn.Sequential(
            nn.Conv2d(in_channels=backbone_out_channels[3], out_channels=conv_out, kernel_size=1),
            nn.BatchNorm2d(conv_out),
            nn.ReLU()
        )
        self.fpems = nn.ModuleList()
        for i in range(fpem_repeat):
            self.fpems.append(FPEM(conv_out))
        self.out_conv = nn.Conv2d(in_channels=conv_out * 4, out_channels=6, kernel_size=1)

    def forward(self, x):
        c2, c3, c4, c5 = x
        # reduce channel
        c2 = self.reduce_conv_c2(c2)
        c3 = self.reduce_conv_c3(c3)
        c4 = self.reduce_conv_c4(c4)
        c5 = self.reduce_conv_c5(c5)
        # FPEM: run each enhancement pass and accumulate outputs per level
        for i, fpem in enumerate(self.fpems):
            c2, c3, c4, c5 = fpem(c2, c3, c4, c5)
            if i == 0:
                c2_ffm = c2
                c3_ffm = c3
                c4_ffm = c4
                c5_ffm = c5
            else:
                c2_ffm += c2
                c3_ffm += c3
                c4_ffm += c4
                c5_ffm += c5
        # FFM: upsample every level to c2 resolution and concatenate
        c5 = F.interpolate(c5_ffm, c2_ffm.size()[-2:], mode='bilinear')
        c4 = F.interpolate(c4_ffm, c2_ffm.size()[-2:], mode='bilinear')
        c3 = F.interpolate(c3_ffm, c2_ffm.size()[-2:], mode='bilinear')
        Fy = torch.cat([c2_ffm, c3, c4, c5], dim=1)
        y = self.out_conv(Fy)
        return y
class FPEM_FUSION(nn.Module):
    """FPEM stack followed by either an FFM or a JPU fusion head, producing a
    64-channel fused feature map at c2 resolution."""

    def __init__(self, backbone_out_channels=[64,128,256,512], fpem_repeat=2,fusion_type='JPU'):
        """
        PANnet
        :param backbone_out_channels: output channel counts of the backbone stages (c2..c5)
        """
        super(FPEM_FUSION, self).__init__()
        # super().__init__()
        self.fpem_repeat = fpem_repeat
        self.fusion_type = fusion_type
        self.jpu = JPU(in_channels=[128, 128, 128], width=128, norm_layer=nn.BatchNorm2d)
        conv_out = 128
        # reduce layers: project each backbone stage to a common 128 channels
        self.reduce_conv_c2 = nn.Sequential(
            SpectralNorm(nn.Conv2d(in_channels=backbone_out_channels[0], out_channels=conv_out, kernel_size=1)),
            nn.BatchNorm2d(conv_out),
            nn.ReLU()
        )
        self.reduce_conv_c3 = nn.Sequential(
            SpectralNorm(nn.Conv2d(in_channels=backbone_out_channels[1], out_channels=conv_out, kernel_size=1)),
            nn.BatchNorm2d(conv_out),
            nn.ReLU()
        )
        self.reduce_conv_c4 = nn.Sequential(
            SpectralNorm(nn.Conv2d(in_channels=backbone_out_channels[2], out_channels=conv_out, kernel_size=1)),
            nn.BatchNorm2d(conv_out),
            nn.ReLU()
        )
        self.reduce_conv_c5 = nn.Sequential(
            SpectralNorm(nn.Conv2d(in_channels=backbone_out_channels[3], out_channels=conv_out, kernel_size=1)),
            nn.BatchNorm2d(conv_out),
            nn.ReLU()
        )
        self.fpems = nn.ModuleList()
        for i in range(fpem_repeat):
            self.fpems.append(FPEM(conv_out))
        # out_conv is used by the FFM path (4 concatenated levels),
        # out_conv_jpu by the JPU path (c2 + 4*width JPU output).
        self.out_conv = nn.Conv2d(in_channels=conv_out * 4, out_channels=64, kernel_size=1)
        self.out_conv_jpu = nn.Conv2d(in_channels=conv_out * 5, out_channels=64, kernel_size=1)

    def forward(self, c2_in, c3, c4, c5):
        # reduce channel
        c2 = self.reduce_conv_c2(c2_in)
        c3 = self.reduce_conv_c3(c3)
        c4 = self.reduce_conv_c4(c4)
        c5 = self.reduce_conv_c5(c5)
        # FPEM: run each enhancement pass and accumulate outputs per level
        for i, fpem in enumerate(self.fpems):
            c2, c3, c4, c5 = fpem(c2, c3, c4, c5)
            if i == 0:
                c2_ffm = c2
                c3_ffm = c3
                c4_ffm = c4
                c5_ffm = c5
            else:
                c2_ffm += c2
                c3_ffm += c3
                c4_ffm += c4
                c5_ffm += c5
        # c2_ffm = c2
        # c3_ffm = c3
        # c4_ffm = c4
        # c5_ffm = c5
        # FFM
        if self.fusion_type=='FFM':
            # Upsample all levels to c2 resolution and concatenate.
            c5 = F.interpolate(c5_ffm, c2_ffm.size()[-2:], mode='bilinear')
            c4 = F.interpolate(c4_ffm, c2_ffm.size()[-2:], mode='bilinear')
            c3 = F.interpolate(c3_ffm, c2_ffm.size()[-2:], mode='bilinear')
            Fy = torch.cat([c2_ffm, c3, c4, c5], dim=1)
            y_2 = self.out_conv(Fy)
        elif self.fusion_type=='JPU':
            # print('c2_ffm', c2_ffm.size())
            # print('c5_ffm,c4_ffm,c3_ffm', c5_ffm.size(),c4_ffm.size(),c3_ffm.size())
            # JPU merges c3..c5; its fused output is then upsampled to c2.
            _,_,_,Fy = self.jpu(c5_ffm,c4_ffm,c3_ffm)
            # print('Fy', Fy.size())
            c3 = F.interpolate(Fy, c2_ffm.size()[-2:], mode='bilinear')
            Fy = torch.cat([c2_ffm, c3], dim=1)
            y_2 = self.out_conv_jpu(Fy)
        return y_2
class FPEM(nn.Module):
    """Feature Pyramid Enhancement Module (PAN): one top-down (upscale)
    enhancement pass followed by one bottom-up (downscale) pass over c2..c5."""

    def __init__(self, in_channels=128):
        super().__init__()
        self.up_add1 = SeparableConv2d(in_channels, in_channels, 1)
        self.up_add2 = SeparableConv2d(in_channels, in_channels, 1)
        self.up_add3 = SeparableConv2d(in_channels, in_channels, 1)
        self.down_add1 = SeparableConv2d(in_channels, in_channels, 2)
        self.down_add2 = SeparableConv2d(in_channels, in_channels, 2)
        self.down_add3 = SeparableConv2d(in_channels, in_channels, 2)

    def _upsample_add(self, x, y):
        """Bilinearly resize x to y's spatial size, then add y."""
        return F.interpolate(x, size=y.size()[2:], mode='bilinear') + y

    def forward(self, c2, c3, c4, c5):
        # Top-down (upscale) enhancement.
        c4 = self.up_add1(self._upsample_add(c5, c4))
        c3 = self.up_add2(self._upsample_add(c4, c3))
        c2 = self.up_add3(self._upsample_add(c3, c2))
        # Bottom-up (downscale) enhancement.
        c3 = self.down_add1(self._upsample_add(c3, c2))
        c4 = self.down_add2(self._upsample_add(c4, c3))
        c5 = self.down_add3(self._upsample_add(c5, c4))
        return c2, c3, c4, c5
class JPU_SeparableConv2d(nn.Module):
    """Depthwise-separable conv used inside JPU: depthwise -> norm -> pointwise."""

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=1,
                 dilation=1, bias=False, norm_layer=nn.BatchNorm2d):
        super(JPU_SeparableConv2d, self).__init__()
        # Depthwise: one filter per input channel (groups == inplanes).
        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding,
                               dilation, groups=inplanes, bias=bias)
        self.bn = norm_layer(inplanes)
        # Pointwise 1x1 conv mixes channels up to `planes`.
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        return self.pointwise(self.bn(self.conv1(x)))
class SeparableConv2d(nn.Module):
    """Depthwise-separable 3x3 conv followed by BatchNorm and ReLU."""

    def __init__(self, in_channels, out_channels, stride=1):
        super(SeparableConv2d, self).__init__()
        # Depthwise 3x3 (one filter per channel), then pointwise 1x1 mixing.
        self.depthwise_conv = nn.Conv2d(in_channels=in_channels, out_channels=in_channels,
                                        kernel_size=3, padding=1, stride=stride,
                                        groups=in_channels)
        self.pointwise_conv = nn.Conv2d(in_channels=in_channels,
                                        out_channels=out_channels, kernel_size=1)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.depthwise_conv(x)
        out = self.pointwise_conv(out)
        return self.relu(self.bn(out))
class JPU(nn.Module):
    """Joint Pyramid Upsampling: projects three feature levels to `width`
    channels, upsamples them to the finest of the three, concatenates, and
    applies four parallel dilated separable convs (dilation 1/2/4/8) whose
    outputs are concatenated into a 4*width-channel feature map."""

    def __init__(self, in_channels=[128,128,128], width=128, norm_layer=nn.BatchNorm2d):
        super(JPU, self).__init__()
        # self.up_kwargs = up_kwargs
        self.conv5 = nn.Sequential(
            SpectralNorm(nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False)),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(
            SpectralNorm(nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False)),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.conv3 = nn.Sequential(
            SpectralNorm(nn.Conv2d(in_channels[-3], width, 3, padding=1, bias=False)),
            norm_layer(width),
            nn.ReLU(inplace=True))
        # self.conv2 = nn.Sequential(
        #     SpectralNorm(nn.Conv2d(in_channels[-4], width, 3, padding=1, bias=False)),
        #     norm_layer(width),
        #     nn.ReLU(inplace=True))
        # Parallel dilated separable convs over the 3*width concatenation.
        self.dilation1 = nn.Sequential(JPU_SeparableConv2d(3*width, width, kernel_size=3, padding=1, dilation=1, bias=False),
                                       norm_layer(width),
                                       nn.ReLU(inplace=True))
        self.dilation2 = nn.Sequential(JPU_SeparableConv2d(3*width, width, kernel_size=3, padding=2, dilation=2, bias=False),
                                       norm_layer(width),
                                       nn.ReLU(inplace=True))
        self.dilation3 = nn.Sequential(JPU_SeparableConv2d(3*width, width, kernel_size=3, padding=4, dilation=4, bias=False),
                                       norm_layer(width),
                                       nn.ReLU(inplace=True))
        self.dilation4 = nn.Sequential(JPU_SeparableConv2d(3*width, width, kernel_size=3, padding=8, dilation=8, bias=False),
                                       norm_layer(width),
                                       nn.ReLU(inplace=True))

    def forward(self, *inputs):
        """inputs are ordered deepest-first; returns the three inputs
        unchanged plus the fused 4*width-channel feature map."""
        #print(inputs[-1].size())
        feats = [self.conv5(inputs[-1]), self.conv4(inputs[-2]), self.conv3(inputs[-3])]# deepest (smallest) -> shallowest (largest)
        _, _, h, w = feats[-3].size()
        #print(h, w)
        feats[-1] = F.interpolate(feats[-1], (h, w), mode='bilinear')
        feats[-2] = F.interpolate(feats[-2], (h, w), mode='bilinear')
        feat = torch.cat(feats, dim=1)
        feat = torch.cat([self.dilation1(feat), self.dilation2(feat), self.dilation3(feat), self.dilation4(feat)], dim=1)
        #print('feat:', feat.size())
        return inputs[0], inputs[1], inputs[2], feat
if __name__ == '__main__':
    # Smoke test: push a zero 4-channel (RGB + trimap) input through the
    # backbone and the FPEM fusion neck and print the fused feature shape.
    import torch
    x = torch.zeros(1, 4, 512, 512)
    backbone = resnet18(pretrained=False)
    fpem_jpu = FPEM_FUSION(backbone_out_channels=[64,128,256,512],fpem_repeat=2)
    x_in,c1,c2_in, c3, c4, c5 = backbone(x)
    y = fpem_jpu(c2_in, c3, c4, c5)
    print(y.shape)
FGI-Matting | FGI-Matting-main/networks/ops.py | import torch
from torch import nn
from torch.nn import Parameter
from torch.autograd import Variable
from torch.nn import functional as F
def l2normalize(v, eps=1e-12):
    """Return `v` scaled to unit L2 norm; `eps` guards against division by zero."""
    denom = v.norm() + eps
    return v / denom
class SpectralNorm(nn.Module):
    """
    Based on https://github.com/heykeetae/Self-Attention-GAN/blob/master/spectral.py
    and add _noupdate_u_v() for evaluation

    Wraps a module and rescales its `weight` by an estimate of its largest
    singular value (power iteration). In training mode the u/v estimates are
    refreshed on every forward pass; in eval mode the cached estimates are
    reused unchanged.
    """
    def __init__(self, module, name='weight', power_iterations=1):
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        if not self._made_params():
            self._make_params()

    def _update_u_v(self):
        # Run power-iteration step(s) to refine u and v, then set
        # module.<name> = weight_bar / sigma (sigma ~= largest singular value).
        u = getattr(self.module, self.name + "_u")
        v = getattr(self.module, self.name + "_v")
        w = getattr(self.module, self.name + "_bar")
        height = w.data.shape[0]
        for _ in range(self.power_iterations):
            v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
            u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))
        sigma = u.dot(w.view(height, -1).mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _noupdate_u_v(self):
        # Same normalization but without refreshing u/v (eval-mode path).
        u = getattr(self.module, self.name + "_u")
        v = getattr(self.module, self.name + "_v")
        w = getattr(self.module, self.name + "_bar")
        height = w.data.shape[0]
        sigma = u.dot(w.view(height, -1).mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _made_params(self):
        # True if the wrapped module already carries u/v/weight_bar params
        # (e.g. when loading a checkpoint into an already-wrapped module).
        try:
            u = getattr(self.module, self.name + "_u")
            v = getattr(self.module, self.name + "_v")
            w = getattr(self.module, self.name + "_bar")
            return True
        except AttributeError:
            return False

    def _make_params(self):
        # Replace module.<name> with a raw parameter <name>_bar plus fixed
        # (non-trainable) power-iteration vectors <name>_u and <name>_v.
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + "_u", u)
        self.module.register_parameter(self.name + "_v", v)
        self.module.register_parameter(self.name + "_bar", w_bar)

    def forward(self, *args):
        # if torch.is_grad_enabled() and self.module.training:
        if self.module.training:
            self._update_u_v()
        else:
            self._noupdate_u_v()
        return self.module.forward(*args)
class GuidedCxtAtten(nn.Module):
    # based on https://github.com/nbei/Deep-Flow-Guided-Video-Inpainting/blob/a6fe298fec502bfd9cbc64eb01e39f78a3262a59/models/DeepFill_Models/ops.py#L210
    """Guided contextual attention: matches patches of a guidance feature map
    `f` against themselves and uses the resulting attention to propagate
    `alpha` features from known regions into the unknown (trimap) region.
    The learned residual head `W` (BN weight initialized near zero) makes the
    module start close to an identity mapping on `alpha`."""

    def __init__(self, out_channels, guidance_channels, rate=2):
        super(GuidedCxtAtten, self).__init__()
        self.rate = rate
        self.padding = nn.ReflectionPad2d(1)
        self.up_sample = nn.Upsample(scale_factor=self.rate, mode='nearest')
        # Halve the guidance channels before matching.
        self.guidance_conv = nn.Conv2d(in_channels=guidance_channels, out_channels=guidance_channels//2,
                                       kernel_size=1, stride=1, padding=0)
        self.W = nn.Sequential(
            nn.Conv2d(in_channels=out_channels, out_channels=out_channels,
                      kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(out_channels)
        )
        nn.init.xavier_uniform_(self.guidance_conv.weight)
        nn.init.constant_(self.guidance_conv.bias, 0)
        nn.init.xavier_uniform_(self.W[0].weight)
        # Near-zero BN weight: the residual branch starts ~disabled.
        nn.init.constant_(self.W[1].weight, 1e-3)
        nn.init.constant_(self.W[1].bias, 0)

    def forward(self, f, alpha, unknown=None, ksize=3, stride=1, fuse_k=3, softmax_scale=1., training=True):
        """Args:
            f: guidance feature map (B, guidance_channels, H, W).
            alpha: feature map to be propagated (B, out_channels, H, W).
            unknown: optional (B, 1, H, W) mask of the unknown region.
        Returns:
            (out, (offsets, softmax_scale)): attended features (residual on
            alpha) and debug info for visualizing attention offsets.
        """
        f = self.guidance_conv(f)
        # get shapes
        raw_int_fs = list(f.size())      # N x 64 x 64 x 64
        raw_int_alpha = list(alpha.size())  # N x 128 x 64 x 64
        # extract patches from background with stride and rate
        kernel = 2*self.rate
        alpha_w = self.extract_patches(alpha, kernel=kernel, stride=self.rate)
        alpha_w = alpha_w.permute(0, 2, 3, 4, 5, 1)
        alpha_w = alpha_w.contiguous().view(raw_int_alpha[0], raw_int_alpha[2] // self.rate, raw_int_alpha[3] // self.rate, -1)
        alpha_w = alpha_w.contiguous().view(raw_int_alpha[0], -1, kernel, kernel, raw_int_alpha[1])
        alpha_w = alpha_w.permute(0, 1, 4, 2, 3)
        # Match at reduced resolution (1/rate).
        f = F.interpolate(f, scale_factor=1/self.rate, mode='nearest')
        fs = f.size()  # B x 64 x 32 x 32
        f_groups = torch.split(f, 1, dim=0)  # Split tensors by batch dimension; tuple is returned
        # from b(B*H*W*C) to w(b*k*k*c*h*w)
        int_fs = list(fs)
        w = self.extract_patches(f)
        w = w.permute(0, 2, 3, 4, 5, 1)
        w = w.contiguous().view(raw_int_fs[0], raw_int_fs[2] // self.rate, raw_int_fs[3] // self.rate, -1)
        w = w.contiguous().view(raw_int_fs[0], -1, ksize, ksize, raw_int_fs[1])
        w = w.permute(0, 1, 4, 2, 3)
        # process mask: derive per-image softmax scales for known/unknown areas
        if unknown is not None:
            unknown = unknown.clone()
            unknown = F.interpolate(unknown, scale_factor=1/self.rate, mode='nearest')
            assert unknown.size(2) == f.size(2), "mask should have same size as f at dim 2,3"
            unknown_mean = unknown.mean(dim=[2,3])
            known_mean = 1 - unknown_mean
            unknown_scale = torch.clamp(torch.sqrt(unknown_mean / known_mean), 0.1, 10).to(alpha)
            known_scale = torch.clamp(torch.sqrt(known_mean / unknown_mean), 0.1, 10).to(alpha)
            softmax_scale = torch.cat([unknown_scale, known_scale], dim=1)
        else:
            unknown = torch.ones([fs[0], 1, fs[2], fs[3]]).to(alpha)
            softmax_scale = torch.FloatTensor([softmax_scale, softmax_scale]).view(1,2).repeat(fs[0],1).to(alpha)
        m = self.extract_patches(unknown)
        m = m.permute(0, 2, 3, 4, 5, 1)
        m = m.contiguous().view(raw_int_fs[0], raw_int_fs[2]//self.rate, raw_int_fs[3]//self.rate, -1)
        m = m.contiguous().view(raw_int_fs[0], -1, ksize, ksize)
        m = self.reduce_mean(m)  # smoothing, maybe
        # mask out the
        mm = m.gt(0.).float()  # (N, 32*32, 1, 1)
        # the correlation with itself should be 0
        self_mask = F.one_hot(torch.arange(fs[2] * fs[3]).view(fs[2], fs[3]).contiguous().to(alpha).long(),
                              num_classes=int_fs[2] * int_fs[3])
        self_mask = self_mask.permute(2, 0, 1).view(1, fs[2] * fs[3], fs[2], fs[3]).float() * (-1e4)
        w_groups = torch.split(w, 1, dim=0)  # Split tensors by batch dimension; tuple is returned
        alpha_w_groups = torch.split(alpha_w, 1, dim=0)  # Split tensors by batch dimension; tuple is returned
        mm_groups = torch.split(mm, 1, dim=0)
        scale_group = torch.split(softmax_scale, 1, dim=0)
        y = []
        offsets = []
        k = fuse_k
        y_test = []
        # Process each batch element independently.
        for xi, wi, alpha_wi, mmi, scale in zip(f_groups, w_groups, alpha_w_groups, mm_groups, scale_group):
            # conv for compare
            wi = wi[0]
            escape_NaN = Variable(torch.FloatTensor([1e-4])).to(alpha)
            wi_normed = wi / torch.max(self.l2_norm(wi), escape_NaN)
            xi = F.pad(xi, (1,1,1,1), mode='reflect')
            yi = F.conv2d(xi, wi_normed, stride=1, padding=0)  # yi => (B=1, C=32*32, H=32, W=32)
            y_test.append(yi)
            # conv implementation for fuse scores to encourage large patches
            yi = yi.permute(0, 2, 3, 1)
            yi = yi.contiguous().view(1, fs[2], fs[3], fs[2] * fs[3])
            yi = yi.permute(0, 3, 1, 2)  # (B=1, C=32*32, H=32, W=32)
            # softmax to match
            # scale the correlation with predicted scale factor for known and unknown area
            yi = yi * (scale[0,0] * mmi.gt(0.).float() + scale[0,1] * mmi.le(0.).float())  # mmi => (1, 32*32, 1, 1)
            # mask itself, self-mask only applied to unknown area
            yi = yi + self_mask * mmi  # self_mask: (1, 32*32, 32, 32)
            # for small input inference
            yi = F.softmax(yi, dim=1)
            _, offset = torch.max(yi, dim=1)  # argmax; index
            offset = torch.stack([offset // fs[3], offset % fs[3]], dim=1)
            wi_center = alpha_wi[0]
            if self.rate == 1:
                left = (kernel) // 2
                right = (kernel - 1) // 2
                yi = F.pad(yi, (left, right, left, right), mode='reflect')
                wi_center = wi_center.permute(1, 0, 2, 3)
                yi = F.conv2d(yi, wi_center, padding=0) / 4.  # (B=1, C=128, H=64, W=64)
            else:
                # Deconv with the alpha patches reconstructs at full resolution.
                yi = F.conv_transpose2d(yi, wi_center, stride=self.rate, padding=1) / 4.  # (B=1, C=128, H=64, W=64)
            y.append(yi)
            offsets.append(offset)
        y = torch.cat(y, dim=0)  # back to the mini-batch
        # NOTE(review): the result of this view is discarded (not assigned);
        # likely intended `y = y.contiguous().view(raw_int_alpha)`. The shapes
        # appear to already match, so it is a no-op — confirm before changing.
        y.contiguous().view(raw_int_alpha)
        offsets = torch.cat(offsets, dim=0)
        offsets = offsets.view([int_fs[0]] + [2] + int_fs[2:])
        # # case1: visualize optical flow: minus current position
        # h_add = Variable(torch.arange(0,float(fs[2]))).to(alpha).view([1, 1, fs[2], 1])
        # h_add = h_add.expand(fs[0], 1, fs[2], fs[3])
        # w_add = Variable(torch.arange(0,float(fs[3]))).to(alpha).view([1, 1, 1, fs[3]])
        # w_add = w_add.expand(fs[0], 1, fs[2], fs[3])
        #
        # offsets = offsets - torch.cat([h_add, w_add], dim=1).long()
        # case2: visualize absolute position
        offsets = offsets - torch.Tensor([fs[2]//2, fs[3]//2]).view(1,2,1,1).to(alpha).long()
        # Residual head on top of the original alpha features.
        y = self.W(y) + alpha
        return y, (offsets, softmax_scale)

    @staticmethod
    def extract_patches(x, kernel=3, stride=1):
        # Reflect-pad so every position yields a full kernel x kernel patch.
        left =(kernel - stride + 1) // 2
        right =(kernel - stride) // 2
        x = F.pad(x, (left, right, left, right), mode='reflect')
        all_patches = x.unfold(2, kernel, stride).unfold(3, kernel, stride)
        return all_patches

    @staticmethod
    def reduce_mean(x):
        # Mean over the last two (spatial) dims, keeping dims.
        for i in range(4):
            if i <= 1:
                continue
            x = torch.mean(x, dim=i, keepdim=True)
        return x

    @staticmethod
    def l2_norm(x):
        # Per-filter L2 norm over all dims except dim 0, keeping dims.
        def reduce_sum(x):
            for i in range(4):
                if i == 0:
                    continue
                x = torch.sum(x, dim=i, keepdim=True)
            return x
        x = x**2
        x = reduce_sum(x)
        return torch.sqrt(x)
FGI-Matting | FGI-Matting-main/networks/decoders/res_shortcut_dec_lfm.py | from networks.decoders.resnet_dec import ResNet_D_Dec
from .self_attention import Self_Attn_trimap, Self_Attn
from networks.ops import SpectralNorm
import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution whose padding equals the dilation; no bias."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=dilation,
                     groups=groups,
                     dilation=dilation,
                     bias=False)
class SpatialAttention(nn.Module):
    """CBAM-style spatial attention: conv over the channel-wise [avg, max]
    maps, then sigmoid, yielding a (B, 1, H, W) attention mask."""

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7)
        # "same" padding for odd kernel sizes.
        self.conv = nn.Conv2d(2, 1, kernel_size,
                              padding=(kernel_size - 1) // 2, bias=False)

    def forward(self, x):
        mean_map = torch.mean(x, dim=1, keepdim=True)
        max_map = torch.max(x, dim=1, keepdim=True)[0]
        attn = self.conv(torch.cat([mean_map, max_map], dim=1))
        return torch.sigmoid(attn)
class ResShortCut_D_Dec_lfm(ResNet_D_Dec):
    """Shortcut decoder that predicts foreground (Fp) and background (Bp)
    probability maps plus a blending mask, and fuses them LFM-style into the
    final alpha: alpha = blend * Fp + (1 - blend) * (1 - Bp)."""

    def __init__(self, block, layers, norm_layer=None, large_kernel=False, late_downsample=False):
        super(ResShortCut_D_Dec_lfm, self).__init__(block, layers, norm_layer, large_kernel,
                                                    late_downsample=late_downsample)
        # BUGFIX: the default norm_layer=None would crash below; mirror the
        # usual fallback used elsewhere in this codebase.
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # --- foreground-probability branch ---
        self.layer4_1 = self._make_layer(block, 64, 2, stride=2)
        self.conv1_1 = SpectralNorm(nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1, bias=False))
        self.bn1_1 = norm_layer(32)
        self.leaky_relu_1 = nn.LeakyReLU(0.2, inplace=True)
        self.conv2_1 = conv3x3(32, 1, stride=1)
        # --- background-probability branch ---
        self.layer4_2 = self._make_layer(block, 64, 2, stride=2)
        self.conv1_2 = SpectralNorm(nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1, bias=False))
        self.bn1_2 = norm_layer(32)
        self.leaky_relu_2 = nn.LeakyReLU(0.2, inplace=True)
        self.conv2_2 = conv3x3(32, 1, stride=1)
        # --- fusion (blending-mask) branch ---
        # BUGFIX: the norm size must match each conv's output channels; the
        # original referenced an undefined `planes * block.expansion` here.
        self.conv_bn_relu_96_32 = nn.Sequential(
            SpectralNorm(conv3x3(96, 32, stride=1)),
            norm_layer(32),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.conv_bn_relu_32_16 = nn.Sequential(
            SpectralNorm(conv3x3(32, 16, stride=1)),
            norm_layer(16),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.conv_bn_relu_16 = nn.Sequential(
            SpectralNorm(conv3x3(16, 16, stride=1)),
            norm_layer(16),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.conv_bn_relu_16_8 = nn.Sequential(
            SpectralNorm(conv3x3(16, 8, stride=1)),
            norm_layer(8),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.conv_blend = conv3x3(8, 1, stride=1)

    def forward(self, x, mid_fea):
        """Decode to (alpha, Fp_out, Bp_out, None).

        Args:
            x: bottleneck feature — assumed [B, 64, H/4, W/4]; TODO confirm.
            mid_fea: dict with 'shortcut' = (fea1, fea2, fea3) encoder
                features at full, 1/2 and 1/4 resolution, and 'trimap'.
        """
        fea1, fea2, fea3 = mid_fea['shortcut']  # fea1: full-resolution features
        trimap = mid_fea['trimap']
        x = x + fea3
        # Foreground probability head.
        Fp = self.layer4_1(x)
        Fp = Fp + fea2
        Fp = self.conv1_1(Fp)
        Fp = self.bn1_1(Fp)
        Fp = self.leaky_relu_1(Fp)
        Fp_out = torch.sigmoid(self.conv2_1(Fp))
        # Background probability head.
        Bp = self.layer4_2(x)
        Bp = Bp + fea2
        Bp = self.conv1_2(Bp)
        Bp = self.bn1_2(Bp)
        Bp = self.leaky_relu_2(Bp)
        # BUGFIX: use the background head conv2_2 (was conv2_1, leaving
        # conv2_2 dead and reusing the foreground head's weights).
        Bp_out = torch.sigmoid(self.conv2_2(Bp))
        # Blending mask predicted from both branches plus raw features.
        fusion = torch.cat([Fp, Bp, fea1], dim=1)
        blend = self.conv_bn_relu_96_32(fusion)
        blend = self.conv_bn_relu_32_16(blend)
        blend = self.conv_bn_relu_16(blend)
        blend = self.conv_bn_relu_16_8(blend)
        blend = torch.sigmoid(self.conv_blend(blend))
        # BUGFIX: the original was missing the `*` before (1 - Bp_out),
        # which raised "Tensor object is not callable" at runtime.
        alpha = blend * Fp_out + (1 - blend) * (1 - Bp_out)
        return alpha, Fp_out, Bp_out, None
| 4,263 | 33.387097 | 111 | py |
FGI-Matting | FGI-Matting-main/networks/decoders/self_attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Self_Attn(nn.Module):
    """SAGAN-style self-attention with a learnable residual gate.

    `gamma` is initialized to zero, so the layer starts as an identity
    mapping and learns how much attention output to mix in.
    """

    def __init__(self, in_dim, with_attention=False):
        super(Self_Attn, self).__init__()
        self.chanel_in = in_dim
        self.with_attention = with_attention
        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """
        inputs :
            x : input feature maps( B X C X H X W)
        returns :
            out : self attention value + input feature
            attention: B X N X N (N is Width*Height), only when
            `with_attention` is True
        """
        batch, channels, width, height = x.size()
        n = width * height
        queries = self.query_conv(x).view(batch, -1, n).permute(0, 2, 1)  # B x N x C'
        keys = self.key_conv(x).view(batch, -1, n)                        # B x C' x N
        attention = self.softmax(torch.bmm(queries, keys))                # B x N x N
        values = self.value_conv(x).view(batch, -1, n)                    # B x C x N
        attended = torch.bmm(values, attention.permute(0, 2, 1))
        attended = attended.view(batch, channels, width, height)
        out = self.gamma * attended + x
        if self.with_attention:
            return out, attention
        return out
class Self_Attn_trimap(nn.Module):
    def __init__(self, in_dim, with_attention=False):
        """Trimap-guided self-attention: separate query/key/value 1x1 convs
        and residual gates (gamma_*) for the foreground, background and
        transition regions of the trimap.

        Args:
            in_dim (int): number of input feature channels.
            with_attention (bool): stored flag; controls whether attention
                maps are also returned.
        """
        super (Self_Attn_trimap, self).__init__ ()
        self.chanel_in = in_dim
        # self.activation = activation
        self.with_attention = with_attention
        # Foreground-region projections; gamma_fg gates the residual mix-in.
        self.query_conv_fg = nn.Conv2d (in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.key_conv_fg = nn.Conv2d (in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.value_conv_fg = nn.Conv2d (in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma_fg = nn.Parameter(torch.zeros (1))
        # self.sigmoid_fg = nn.Sigmoid()
        # Background-region projections.
        self.query_conv_bg = nn.Conv2d (in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.key_conv_bg = nn.Conv2d (in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.value_conv_bg = nn.Conv2d (in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma_bg = nn.Parameter(torch.zeros (1))
        # self.sigmoid_bg = nn.Sigmoid()
        # Transition (unknown) region projections.
        self.query_conv_transition = nn.Conv2d (in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.key_conv_transition = nn.Conv2d (in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.value_conv_transition = nn.Conv2d (in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma_transition = nn.Parameter(torch.zeros (1))
        # self.sigmoid_transition = nn.Sigmoid()
def fg_attention(self, x, trimap_fg):
m_batchsize, C, width, height = x.size()
x = x * trimap_fg
proj_query = self.query_conv_fg(x).view (m_batchsize, -1, width * height).permute (0, 2, 1) # B X N X C
proj_key = self.key_conv_fg(x).view(m_batchsize, -1, width * height) # B X C x (*W*H)
energy = torch.bmm(proj_query, proj_key) # transpose check
attention = torch.sigmoid(energy) # BX (N) X (N)
proj_value = self.value_conv_fg(x).view (m_batchsize, -1, width * height) # B X C X N
out = torch.bmm (proj_value, attention.permute (0, 2, 1))
out = out.view (m_batchsize, C, width, height)#B X C X H X W
return out
def transition_attention(self, x, trimap_transition):
m_batchsize, C, width, height = x.size()
x = x * trimap_transition
proj_query = self.query_conv_transition(x).view (m_batchsize, -1, width * height).permute (0, 2, 1) # B X N X C
proj_key = self.key_conv_transition(x).view(m_batchsize, -1, width * height) # B X C x (*W*H)
energy = torch.bmm(proj_query, proj_key) # transpose check
attention = torch.sigmoid(energy) # BX (N) X (N)
proj_value = self.value_conv_transition(x).view (m_batchsize, -1, width * height) # B X C X N
out = torch.bmm (proj_value, attention.permute (0, 2, 1))
out = out.view (m_batchsize, C, width, height)#B X C X H X W
return out
def bg_attention(self, x, trimap_bg):
m_batchsize, C, width, height = x.size()
x = x * trimap_bg
proj_query = self.query_conv_bg(x).view (m_batchsize, -1, width * height).permute (0, 2, 1) # B X N X C
proj_key = self.key_conv_bg(x).view(m_batchsize, -1, width * height) # B X C x (*W*H)
energy = torch.bmm(proj_query, proj_key) # transpose check
attention = torch.sigmoid(energy) # BX (N) X (N)
proj_value = self.value_conv_bg(x).view (m_batchsize, -1, width * height) # B X C X N
out = torch.bmm (proj_value, attention.permute (0, 2, 1))
out = out.view (m_batchsize, C, width, height)#B X C X H X W
return out
def forward(self, x, trimap):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
trimap :
通道 0:背景 1:过渡 2:前景
"""
N,C,H,W = x.size()
x = F.interpolate(x, (H//4,W//4), mode="bilinear", align_corners=False)
trimap = F.interpolate(trimap, (H//4,W//4), mode="bilinear", align_corners=False)
trimap_bg = trimap[:,0:1,:,:]
trimap_transition = trimap[:,1:2,:,:]
trimap_fg = trimap[:,2:3,:,:]
out_fg = self.fg_attention(x, trimap_fg)
out_transition = self.transition_attention(x, trimap_transition)
out_bg = self.bg_attention(x, trimap_bg)
out = self.gamma_fg * out_fg+self.gamma_transition * out_transition + self.gamma_bg * out_bg + x
out = F.interpolate(out, (H,W), mode="bilinear", align_corners=False)
if self.with_attention:
return out, attention
else:
return out
if __name__ == "__main__":
    # Quick GPU latency smoke test for Self_Attn_trimap.
    import time
    import os

    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    model = Self_Attn_trimap(64).eval().cuda()
    with torch.no_grad():
        for _ in range(50):
            feats = torch.rand([10, 64, 128, 128]).float().cuda()
            tri = torch.rand([10, 3, 128, 128]).float().cuda()
            start = time.time()
            pred = model(feats, tri)
            print("time:", time.time() - start)
        print(pred.size())
| 6,353 | 34.3 | 114 | py |
FGI-Matting | FGI-Matting-main/networks/decoders/resnet_dec.py | import logging
import torch.nn as nn
from networks.ops import SpectralNorm
def conv5x5(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """5x5 convolution, bias-free; padding=2 preserves spatial size at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=5,
        stride=stride,
        padding=2,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution, bias-free; padding tracks dilation to preserve size."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution without bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1,
                     stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Decoder residual block.

    With stride > 1 the first conv is a 4x4 transposed conv that doubles the
    spatial size; otherwise both convs are plain 3x3 (or 5x5 when
    `large_kernel` is set). All convs are spectral-normalised.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, upsample=None, norm_layer=None, large_kernel=False):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self.stride = stride
        conv = conv5x5 if large_kernel else conv3x3
        if self.stride > 1:
            # Upsampling path: both this conv and `upsample` enlarge the input.
            self.conv1 = SpectralNorm(
                nn.ConvTranspose2d(inplanes, inplanes, kernel_size=4,
                                   stride=2, padding=1, bias=False))
        else:
            self.conv1 = SpectralNorm(conv(inplanes, inplanes))
        self.bn1 = norm_layer(inplanes)
        self.activation = nn.LeakyReLU(0.2, inplace=True)
        self.conv2 = SpectralNorm(conv(inplanes, planes))
        self.bn2 = norm_layer(planes)
        self.upsample = upsample

    def forward(self, x):
        # Shortcut branch (resized/projected when shapes differ).
        skip = x if self.upsample is None else self.upsample(x)
        y = self.activation(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + skip
        return self.activation(y)
class ResNet_D_Dec(nn.Module):
    """ResNet-style alpha-matte decoder.

    Four residual stages built from `block` (each upsampling 2x via the
    block's stride), then a transposed conv and a final conv producing a
    1-channel output mapped to [0, 1] with (tanh + 1) / 2 in forward().
    """

    def __init__(self, block, layers, norm_layer=None, large_kernel=False, late_downsample=False):
        """
        Args:
            block: residual block class (must expose `.expansion`).
            layers: blocks per stage; a stage with 0 blocks becomes Identity.
            norm_layer: normalization layer class, defaults to BatchNorm2d.
            large_kernel: use 5x5 instead of 3x3 convs for the final layer
                (and inside blocks, via the block's `large_kernel` flag).
            late_downsample: widens the last stage / stem width to 64.
        """
        super(ResNet_D_Dec, self).__init__()
        self.logger = logging.getLogger("Logger")
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.large_kernel = large_kernel
        self.kernel_size = 5 if self.large_kernel else 3
        # When the first stage is skipped the decoder expects 256-ch input.
        self.inplanes = 512 if layers[0] > 0 else 256
        self.late_downsample = late_downsample
        self.midplanes = 64 if late_downsample else 32
        self.conv1 = SpectralNorm(nn.ConvTranspose2d(self.midplanes, 32, kernel_size=4, stride=2, padding=1, bias=False))
        self.bn1 = norm_layer(32)
        self.leaky_relu = nn.LeakyReLU(0.2, inplace=True)
        # Final 1-channel alpha logits.
        self.conv2 = nn.Conv2d(32, 1, kernel_size=self.kernel_size, stride=1, padding=self.kernel_size//2)
        # NOTE(review): self.upsample is never used in this class's forward();
        # possibly kept for subclasses -- confirm before removing.
        self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
        self.tanh = nn.Tanh()
        self.layer1 = self._make_layer(block, 256, layers[0], stride=2)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
        self.layer4 = self._make_layer(block, self.midplanes, layers[3], stride=2)
        # Xavier init; spectral-normalized convs expose their raw weight as
        # `weight_bar`, plain convs as `weight`.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if hasattr(m, "weight_bar"):
                    nn.init.xavier_uniform_(m.weight_bar)
                else:
                    nn.init.xavier_uniform_(m.weight)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        for m in self.modules():
            if isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)
        self.logger.debug(self)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one decoder stage of `blocks` residual blocks.

        The first block carries the stride (upsampling) and, when required,
        an `upsample` branch so the residual shortcut matches in shape.
        """
        if blocks == 0:
            return nn.Sequential(nn.Identity())
        norm_layer = self._norm_layer
        upsample = None
        if stride != 1:
            upsample = nn.Sequential(
                nn.UpsamplingNearest2d(scale_factor=2),
                SpectralNorm(conv1x1(self.inplanes, planes * block.expansion)),
                norm_layer(planes * block.expansion),
            )
        elif self.inplanes != planes * block.expansion:
            upsample = nn.Sequential(
                SpectralNorm(conv1x1(self.inplanes, planes * block.expansion)),
                norm_layer(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, stride, upsample, norm_layer, self.large_kernel)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, norm_layer=norm_layer, large_kernel=self.large_kernel))
        return nn.Sequential(*layers)

    def forward(self, x, mid_fea):
        """Decode bottleneck features to an alpha matte in [0, 1].

        `mid_fea` is accepted but unused here; subclasses (e.g. the
        shortcut decoders) override forward() to consume encoder features.
        Returns (alpha, None).
        """
        x = self.layer1(x) # N x 256 x 32 x 32
        x = self.layer2(x) # N x 128 x 64 x 64
        x = self.layer3(x) # N x 64 x 128 x 128
        x = self.layer4(x) # N x 32 x 256 x 256
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.leaky_relu(x)
        x = self.conv2(x)
        # Map tanh's (-1, 1) range to (0, 1).
        alpha = (self.tanh(x) + 1.0) / 2.0
        return alpha, None
| 5,600 | 37.895833 | 125 | py |
FGI-Matting | FGI-Matting-main/networks/decoders/res_shortcut_dec_spatial_attn.py | from networks.decoders.resnet_dec import ResNet_D_Dec
from .self_attention import Self_Attn_trimap, Self_Attn
import torch
import torch.nn as nn
class SpatialAttention(nn.Module):
    """CBAM-style spatial attention.

    Produces a sigmoid gate map (B x 1 x H x W) from the channel-wise mean
    and max of the input, mixed by a single bias-free conv.
    """

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7)
        # "Same" padding for the two supported kernel sizes.
        self.conv = nn.Conv2d(2, 1, kernel_size,
                              padding=(kernel_size - 1) // 2, bias=False)

    def forward(self, x):
        mean_map = torch.mean(x, dim=1, keepdim=True)
        max_map = torch.max(x, dim=1, keepdim=True)[0]
        stats = torch.cat([mean_map, max_map], dim=1)
        return torch.sigmoid(self.conv(stats))
class ResShortCut_D_Dec_spatial_attn(ResNet_D_Dec):
    """Shortcut decoder that gates encoder skip features with spatial attention.

    Only the three shallow encoder shortcut features are consumed; the two
    shallower ones are weighted by per-position SpatialAttention gates
    computed from the decoder features.
    """

    def __init__(self, block, layers, norm_layer=None, large_kernel=False, late_downsample=False):
        super(ResShortCut_D_Dec_spatial_attn, self).__init__(
            block, layers, norm_layer, large_kernel, late_downsample=late_downsample)
        self.spa1 = SpatialAttention()
        self.spa2 = SpatialAttention()

    def forward(self, x, mid_fea, trimap):
        # `trimap` is accepted for interface compatibility but currently unused.
        fea1, fea2, fea3 = mid_fea['shortcut']
        x = x + fea3                                  # B x 64 x H/4 x W/4
        x = self.layer4(x)                            # B x 32 x H/2 x W/2
        x = x + self.spa1(x) * fea2
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.leaky_relu(x) + self.spa2(x) * fea1
        x = self.conv2(x)
        alpha = (self.tanh(x) + 1.0) / 2.0
        return alpha, None
| 2,206 | 30.528571 | 101 | py |
FGI-Matting | FGI-Matting-main/networks/encoders/res_shortcut_enc.py | import torch.nn as nn
from utils import CONFIG
from networks.encoders.resnet_enc import ResNet_D
from networks.ops import SpectralNorm
from networks.fpemjpu import FPEM_FUSION
class ResShortCut_D(ResNet_D):
    """Shortcut encoder: ResNet_D backbone + per-stage shortcut features
    for the decoder, with an FPEM module fusing the multi-scale outputs."""

    def __init__(self, block, layers, norm_layer=None, late_downsample=False):
        super(ResShortCut_D, self).__init__(block, layers, norm_layer,
                                            late_downsample=late_downsample)
        first_inplane = 3 + CONFIG.model.trimap_channel
        # In/out channel counts of the shortcut branches (one per stage).
        self.shortcut_inplane = [first_inplane, self.midplanes, 64, 128, 256]
        self.shortcut_plane = [32, self.midplanes, 64, 128, 256]
        self.shortcut = nn.ModuleList(
            self._make_shortcut(in_ch, out_ch)
            for in_ch, out_ch in zip(self.shortcut_inplane, self.shortcut_plane)
        )
        self.fpem = FPEM_FUSION(backbone_out_channels=[64, 128, 256, 512], fpem_repeat=1)

    def _make_shortcut(self, inplane, planes):
        """Two spectral-normalised 3x3 convs (ReLU applied before the norm,
        matching the other encoders in this package)."""
        return nn.Sequential(
            SpectralNorm(nn.Conv2d(inplane, planes, kernel_size=3, padding=1, bias=False)),
            nn.ReLU(inplace=True),
            self._norm_layer(planes),
            SpectralNorm(nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)),
            nn.ReLU(inplace=True),
            self._norm_layer(planes)
        )

    def forward(self, x):
        feat = self.activation(self.bn1(self.conv1(x)))
        feat = self.bn2(self.conv2(feat))
        x1 = self.activation(feat)                  # N x 32 x 256 x 256
        feat = self.activation(self.bn3(self.conv3(x1)))
        x2 = self.layer1(feat)                      # N x 64 x 128 x 128
        x3 = self.layer2(x2)                        # N x 128 x 64 x 64
        x4 = self.layer3(x3)                        # N x 256 x 32 x 32
        bottleneck = self.layer_bottleneck(x4)      # N x 512 x 16 x 16
        fea1 = self.shortcut[0](x)                  # encoded input image + trimap
        fea2 = self.shortcut[1](x1)
        fea3 = self.shortcut[2](x2)
        fused = self.fpem(x2, x3, x4, bottleneck)   # N x 64 x 128 x 128
        return fused, {'shortcut': (fea1, fea2, fea3), 'image': x[:, :3, ...]}
FGI-Matting | FGI-Matting-main/networks/encoders/__init__.py | import logging
from .resnet_enc import ResNet_D, BasicBlock
from .res_shortcut_enc import ResShortCut_D
from .res_gca_enc import ResGuidedCxtAtten
from .res_shortcut_enc_spatial_attn import ResShortCut_D_spatial_attn
# Public factory functions exported via `from networks.encoders import *`.
# Fix: 'res_shortcut_encoder_29_lfm' was listed here but is not defined
# anywhere in this module, so `import *` raised AttributeError; removed.
__all__ = ['res_shortcut_encoder_29', 'resnet_gca_encoder_29', 'res_shortcut_encoder_29_spatial_attn']
def _res_shortcut_D(block, layers, **kwargs):
    """Factory for the plain shortcut encoder."""
    return ResShortCut_D(block, layers, **kwargs)
def _res_gca_D(block, layers, **kwargs):
    """Factory for the guided-contextual-attention encoder."""
    return ResGuidedCxtAtten(block, layers, **kwargs)
def resnet_gca_encoder_29(**kwargs):
    """Construct the 29-layer guided-contextual-attention encoder
    (ResGuidedCxtAtten with stage depths [3, 4, 4, 2])."""
    return _res_gca_D(BasicBlock, [3, 4, 4, 2], **kwargs)
def res_shortcut_encoder_29(**kwargs):
    """Constructs the 29-layer shortcut encoder (ResShortCut_D) with stage
    depths [3, 4, 4, 2].

    Note: the previous docstring said "resnet_encoder_25", which did not
    match this function's name or configuration.
    """
    return _res_shortcut_D(BasicBlock, [3, 4, 4, 2], **kwargs)
# Variant with spatial attention added
def res_shortcut_encoder_29_spatial_attn(**kwargs):
    """Construct the 29-layer shortcut encoder with spatial attention."""
    return _res_shortcut_D_spatial_attn(BasicBlock, [3, 4, 4, 2], **kwargs)
def _res_shortcut_D_spatial_attn(block, layers, **kwargs):
    """Factory for the spatial-attention shortcut encoder."""
    return ResShortCut_D_spatial_attn(block, layers, **kwargs)
if __name__ == "__main__":
    # Smoke test: build the shortcut encoder and run a dummy
    # image+trimap batch through it.
    import torch

    logging.basicConfig(level=logging.DEBUG,
                        format='[%(asctime)s] %(levelname)s: %(message)s',
                        datefmt='%m-%d %H:%M:%S')
    encoder = res_shortcut_encoder_29()
    dummy = torch.randn(4, 6, 512, 512)
    out = encoder(dummy)
    print(out[0].shape)
| 1,501 | 25.821429 | 131 | py |
FGI-Matting | FGI-Matting-main/networks/encoders/res_gca_enc.py | import torch.nn as nn
import torch.nn.functional as F
from utils import CONFIG
from networks.encoders.resnet_enc import ResNet_D
from networks.ops import GuidedCxtAtten, SpectralNorm
class ResGuidedCxtAtten(ResNet_D):
    """Shortcut encoder with a Guided Contextual Attention (GCA) module.

    On top of the ResNet_D backbone it adds: per-stage "shortcut" feature
    extractors for the decoder, a low-level guidance head over the RGB
    image, and a GCA block applied at stride 8 using the trimap's
    unknown region as the attention mask.
    """

    def __init__(self, block, layers, norm_layer=None, late_downsample=False):
        super(ResGuidedCxtAtten, self).__init__(block, layers, norm_layer, late_downsample=late_downsample)
        first_inplane = 3 + CONFIG.model.trimap_channel
        # In/out channel counts of the five shortcut branches; each branch
        # consumes one encoder stage's activation (see forward()).
        self.shortcut_inplane = [first_inplane, self.midplanes, 64, 128, 256]
        self.shortcut_plane = [32, self.midplanes, 64, 128, 256]
        self.shortcut = nn.ModuleList()
        for stage, inplane in enumerate(self.shortcut_inplane):
            self.shortcut.append(self._make_shortcut(inplane, self.shortcut_plane[stage]))
        # Guidance head: three stride-2 convs take the RGB image to
        # stride-8, 128-channel features that guide the attention.
        self.guidance_head = nn.Sequential(
            nn.ReflectionPad2d(1),
            SpectralNorm(nn.Conv2d(3, 16, kernel_size=3, padding=0, stride=2, bias=False)),
            nn.ReLU(inplace=True),
            self._norm_layer(16),
            nn.ReflectionPad2d(1),
            SpectralNorm(nn.Conv2d(16, 32, kernel_size=3, padding=0, stride=2, bias=False)),
            nn.ReLU(inplace=True),
            self._norm_layer(32),
            nn.ReflectionPad2d(1),
            SpectralNorm(nn.Conv2d(32, 128, kernel_size=3, padding=0, stride=2, bias=False)),
            nn.ReLU(inplace=True),
            self._norm_layer(128)
        )
        self.gca = GuidedCxtAtten(128, 128)
        # initialize guidance head
        # NOTE(review): the loop variable shadows the `layers` argument
        # (harmless here since `layers` is not used afterwards, but worth
        # renaming).
        for layers in range(len(self.guidance_head)):
            m = self.guidance_head[layers]
            if isinstance(m, nn.Conv2d):
                if hasattr(m, "weight_bar"):
                    nn.init.xavier_uniform_(m.weight_bar)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_shortcut(self, inplane, planes):
        """Two spectral-normalized 3x3 convs (ReLU applied before the norm,
        consistent with the other encoders in this package)."""
        return nn.Sequential(
            SpectralNorm(nn.Conv2d(inplane, planes, kernel_size=3, padding=1, bias=False)),
            nn.ReLU(inplace=True),
            self._norm_layer(planes),
            SpectralNorm(nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)),
            nn.ReLU(inplace=True),
            self._norm_layer(planes)
        )

    def forward(self, x):
        """Return (bottleneck features, dict of decoder inputs).

        The dict carries the five shortcut features, the guidance-head image
        features, the stride-8 unknown-region mask, and the attention offset
        returned by the GCA module.
        """
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.activation(out)
        out = self.conv2(out)
        out = self.bn2(out)
        x1 = self.activation(out) # N x 32 x 256 x 256
        out = self.conv3(x1)
        out = self.bn3(out)
        out = self.activation(out)
        im_fea = self.guidance_head(x[:,:3,...]) # downsample origin image and extract features
        # Unknown-region mask at stride 8. Input channel 4 is the trimap's
        # second channel when trimap_channel == 3 (presumably the
        # transition/unknown region -- confirm against the trimap layout).
        if CONFIG.model.trimap_channel == 3:
            unknown = F.interpolate(x[:,4:5,...], scale_factor=1/8, mode='nearest')
        else:
            unknown = F.interpolate(x[:,3:,...].eq(1.).float(), scale_factor=1/8, mode='nearest')
        x2 = self.layer1(out) # N x 64 x 128 x 128
        x3= self.layer2(x2) # N x 128 x 64 x 64
        x3, offset = self.gca(im_fea, x3, unknown) # contextual attention
        x4 = self.layer3(x3) # N x 256 x 32 x 32
        out = self.layer_bottleneck(x4) # N x 512 x 16 x 16
        fea1 = self.shortcut[0](x) # input image and trimap
        fea2 = self.shortcut[1](x1)
        fea3 = self.shortcut[2](x2)
        fea4 = self.shortcut[3](x3)
        fea5 = self.shortcut[4](x4)
        return out, {'shortcut':(fea1, fea2, fea3, fea4, fea5),
                     'image_fea':im_fea,
                     'unknown':unknown,
                     'offset_1':offset}
if __name__ == "__main__":
    # Smoke test: build the GCA encoder and print every submodule.
    from networks.encoders.resnet_enc import BasicBlock
    net = ResGuidedCxtAtten(BasicBlock, [3, 4, 4, 2])
    for module in net.modules():
        print(module)
| 3,850 | 37.89899 | 107 | py |
FGI-Matting | FGI-Matting-main/networks/encoders/resnet_enc.py | import logging
import torch.nn as nn
from utils import CONFIG
from networks.ops import SpectralNorm
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Bias-free 3x3 convolution; padding equals dilation so spatial size is
    preserved at stride 1."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=dilation,
                     dilation=dilation,
                     groups=groups,
                     bias=False)
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Standard ResNet basic block with spectral-normalised 3x3 convs.

    conv1 (and the optional `downsample` shortcut) carry the stride when the
    block reduces spatial size.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self.conv1 = SpectralNorm(conv3x3(inplanes, planes, stride))
        self.bn1 = norm_layer(planes)
        self.activation = nn.ReLU(inplace=True)
        self.conv2 = SpectralNorm(conv3x3(planes, planes))
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch (projected when shapes differ).
        skip = x if self.downsample is None else self.downsample(x)
        y = self.activation(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return self.activation(y + skip)
class ResNet_D(nn.Module):
    """
    Implement and pre-train on ImageNet with the tricks from
    https://arxiv.org/abs/1812.01187
    without the mix-up part.

    Matting encoder backbone: a three-conv stem over the concatenated
    image + trimap input (3 + CONFIG.model.trimap_channel channels),
    followed by four residual stages. All convs are spectral-normalized.
    """
    def __init__(self, block, layers, norm_layer=None, late_downsample=False):#layers [3,4,4,2]
        """
        Args:
            block: residual block class (e.g. BasicBlock above).
            layers: number of blocks in each of the four stages.
            norm_layer: normalization layer class, defaults to BatchNorm2d.
            late_downsample: if True, shift the stem's stride-2 convs later
                and widen the stem to 64 channels.
        """
        super(ResNet_D, self).__init__()
        self.logger = logging.getLogger("Logger")
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.late_downsample = late_downsample
        self.midplanes = 64 if late_downsample else 32
        # Stride pattern of the three stem convs plus the first stage.
        self.start_stride = [1, 2, 1, 2] if late_downsample else [2, 1, 2, 1]
        self.conv1 = SpectralNorm(nn.Conv2d(3 + CONFIG.model.trimap_channel, 32, kernel_size=3,
                                            stride=self.start_stride[0], padding=1, bias=False))
        self.conv2 = SpectralNorm(nn.Conv2d(32, self.midplanes, kernel_size=3, stride=self.start_stride[1], padding=1,
                                            bias=False))
        self.conv3 = SpectralNorm(nn.Conv2d(self.midplanes, self.inplanes, kernel_size=3, stride=self.start_stride[2],
                                            padding=1, bias=False))
        self.bn1 = norm_layer(32)
        self.bn2 = norm_layer(self.midplanes)
        self.bn3 = norm_layer(self.inplanes)
        self.activation = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=self.start_stride[3])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer_bottleneck = self._make_layer(block, 512, layers[3], stride=2)
        # Xavier init on the spectral norm's raw weight (`weight_bar`).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight_bar)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        for m in self.modules():
            if isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)
        self.logger.debug("encoder conv1 weight shape: {}".format(str(self.conv1.module.weight_bar.data.shape)))
        # Zero the weights of the extra (non-RGB/trimap) input channels so
        # that, at initialization, conv1 matches an RGB-only pretrained conv.
        self.conv1.module.weight_bar.data[:,3:,:,:] = 0
        self.logger.debug(self)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one encoder stage of `blocks` residual blocks.

        A stride-2 stage downsamples through an AvgPool + 1x1 conv shortcut
        branch in its first block; a stage with 0 blocks becomes Identity.
        """
        if blocks == 0:
            return nn.Sequential(nn.Identity())
        norm_layer = self._norm_layer
        downsample = None
        if stride != 1:
            downsample = nn.Sequential(
                nn.AvgPool2d(2, stride),
                SpectralNorm(conv1x1(self.inplanes, planes * block.expansion)),
                norm_layer(planes * block.expansion),
            )
        elif self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                SpectralNorm(conv1x1(self.inplanes, planes * block.expansion, stride)),
                norm_layer(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, stride, downsample, norm_layer)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return (bottleneck, (x1..x5)) per-stage features.

        Sizes in the comments assume a 512x512 input.
        """
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.activation(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x1 = self.activation(x) # N x 32 x 256 x 256
        x = self.conv3(x1)
        x = self.bn3(x)
        x2 = self.activation(x) # N x 64 x 128 x 128
        x3 = self.layer1(x2) # N x 64 x 128 x 128
        x4 = self.layer2(x3) # N x 128 x 64 x 64
        x5 = self.layer3(x4) # N x 256 x 32 x 32
        x = self.layer_bottleneck(x5) # N x 512 x 16 x 16
        return x, (x1, x2, x3, x4, x5)
if __name__ == "__main__":
    # Smoke test: build the backbone and print each submodule's class name.
    net = ResNet_D(BasicBlock, [3, 4, 4, 2])
    for module in net.modules():
        print(module._get_name())
| 5,836 | 36.902597 | 118 | py |
FGI-Matting | FGI-Matting-main/networks/encoders/res_shortcut_enc_spatial_attn.py | import torch.nn as nn
from utils import CONFIG
from networks.encoders.resnet_enc import ResNet_D
from networks.ops import SpectralNorm
from networks.fpemjpu import FPEM_FUSION
class ResShortCut_D_spatial_attn(ResNet_D):
    """Shortcut encoder (spatial-attention decoder variant).

    Same staging as ResShortCut_D: three shallow shortcut features for the
    decoder plus an FPEM-fused multi-scale output.
    """

    def __init__(self, block, layers, norm_layer=None, late_downsample=False):
        super(ResShortCut_D_spatial_attn, self).__init__(
            block, layers, norm_layer, late_downsample=late_downsample)
        first_inplane = 3 + CONFIG.model.trimap_channel
        # In/out channel counts of the shortcut branches (one per stage).
        self.shortcut_inplane = [first_inplane, self.midplanes, 64, 128, 256]
        self.shortcut_plane = [32, self.midplanes, 64, 128, 256]
        self.shortcut = nn.ModuleList(
            self._make_shortcut(in_ch, out_ch)
            for in_ch, out_ch in zip(self.shortcut_inplane, self.shortcut_plane)
        )
        self.fpem = FPEM_FUSION(backbone_out_channels=[64, 128, 256, 512], fpem_repeat=1)

    def _make_shortcut(self, inplane, planes):
        """Two spectral-normalised 3x3 convs (ReLU applied before the norm,
        matching the other encoders in this package)."""
        return nn.Sequential(
            SpectralNorm(nn.Conv2d(inplane, planes, kernel_size=3, padding=1, bias=False)),
            nn.ReLU(inplace=True),
            self._norm_layer(planes),
            SpectralNorm(nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)),
            nn.ReLU(inplace=True),
            self._norm_layer(planes)
        )

    def forward(self, x):
        feat = self.activation(self.bn1(self.conv1(x)))
        feat = self.bn2(self.conv2(feat))
        x1 = self.activation(feat)                  # N x 32 x 256 x 256
        feat = self.activation(self.bn3(self.conv3(x1)))
        x2 = self.layer1(feat)                      # N x 64 x 128 x 128
        x3 = self.layer2(x2)                        # N x 128 x 64 x 64
        x4 = self.layer3(x3)                        # N x 256 x 32 x 32
        bottleneck = self.layer_bottleneck(x4)      # N x 512 x 16 x 16
        fea1 = self.shortcut[0](x)                  # encoded input image + trimap
        fea2 = self.shortcut[1](x1)
        fea3 = self.shortcut[2](x2)
        fused = self.fpem(x2, x3, x4, bottleneck)   # N x 64 x 128 x 128
        return fused, {'shortcut': (fea1, fea2, fea3), 'image': x[:, :3, ...]}
FGI-Matting | FGI-Matting-main/utils/logger.py | import os
import cv2
import torch
import logging
import datetime
import numpy as np
from pprint import pprint
from utils import util
from utils.config import CONFIG
# Level-name strings accepted by get_logger(), mapped to logging constants.
LEVELS = {
    name: getattr(logging, name)
    for name in ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL")
}
def make_color_wheel():
    # from https://github.com/JiahuiYu/generative_inpainting/blob/master/inpaint_ops.py
    """Build the 55 x 3 optical-flow color wheel (RY/YG/GC/CB/BM/MR arcs).

    In each arc one channel is held at 255 while another ramps up or down,
    tracing the hue circle row by row.
    """
    # (arc length, saturated channel, ramping channel, ramp descends?)
    segments = [
        (15, 0, 1, False),  # RY
        (6, 1, 0, True),    # YG
        (4, 1, 2, False),   # GC
        (11, 2, 1, True),   # CB
        (13, 2, 0, False),  # BM
        (6, 0, 2, True),    # MR
    ]
    ncols = sum(length for length, _, _, _ in segments)
    colorwheel = np.zeros([ncols, 3])
    row = 0
    for length, full_ch, ramp_ch, descending in segments:
        ramp = np.floor(255 * np.arange(0, length) / length)
        colorwheel[row:row + length, full_ch] = 255
        colorwheel[row:row + length, ramp_ch] = 255 - ramp if descending else ramp
        row += length
    return colorwheel
# Module-level cache so compute_color() does not rebuild the wheel per call.
COLORWHEEL = make_color_wheel()
def compute_color(u,v):
    # from https://github.com/JiahuiYu/generative_inpainting/blob/master/inpaint_ops.py
    """Map normalized flow components (u, v), each H x W, to an H x W x 3
    color image: direction selects the hue via the color wheel, magnitude
    sets the saturation.

    NOTE: `u` and `v` are modified in place (their NaN entries are zeroed).
    """
    h, w = u.shape
    img = np.zeros([h, w, 3])
    # NaN flow pixels are zeroed here and blacked out by (1 - nanIdx) below.
    nanIdx = np.isnan(u) | np.isnan(v)
    u[nanIdx] = 0
    v[nanIdx] = 0
    colorwheel = COLORWHEEL
    # colorwheel = make_color_wheel()
    ncols = np.size(colorwheel, 0)
    rad = np.sqrt(u**2+v**2)
    # Flow angle in (-1, 1], mapped to a fractional wheel index fk in [1, ncols].
    a = np.arctan2(-v, -u) / np.pi
    fk = (a+1) / 2 * (ncols - 1) + 1
    k0 = np.floor(fk).astype(int)
    k1 = k0 + 1
    k1[k1 == ncols+1] = 1  # wrap around the wheel
    f = fk - k0
    for i in range(np.size(colorwheel,1)):
        tmp = colorwheel[:, i]
        col0 = tmp[k0-1] / 255
        col1 = tmp[k1-1] / 255
        # Linear interpolation between the two neighboring wheel entries.
        col = (1-f) * col0 + f * col1
        idx = rad <= 1
        # In-range magnitudes desaturate toward white as they shrink.
        col[idx] = 1-rad[idx]*(1-col[idx])
        notidx = np.logical_not(idx)
        # Out-of-range magnitudes are darkened.
        col[notidx] *= 0.75
        img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))
    return img
def flow_to_image(flow):
    # part from https://github.com/JiahuiYu/generative_inpainting/blob/master/inpaint_ops.py
    """Visualise a 2 x H x W optical-flow field as an H x W x 3 color image."""
    u = flow[0, :, :]
    v = flow[1, :, :]
    # Normalise by the largest magnitude so the full color wheel is used.
    rad = np.sqrt(u ** 2 + v ** 2)
    maxrad = max(-1, np.max(rad))
    scale = maxrad + np.finfo(float).eps
    return compute_color(u / scale, v / scale)
def put_text(image, text, position=(10, 20)):
    """Resize a CHW image to 512x512, draw `text` at `position`, return CHW."""
    hwc = cv2.resize(image.transpose([1, 2, 0]), (512, 512),
                     interpolation=cv2.INTER_NEAREST)
    labelled = cv2.putText(hwc, text, position, cv2.FONT_HERSHEY_SIMPLEX,
                           0.8, 0, thickness=2)
    return labelled.transpose([2, 0, 1])
class MyLogger(logging.Logger):
    """
    Logger that only emits records in the first (rank-0) subprocess.

    Installed via logging.setLoggerClass() so that, under multi-process
    training, duplicate log lines from non-zero ranks are suppressed.
    """

    def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False, **kwargs):
        # Forward any newer keyword arguments (e.g. `stacklevel`, added to
        # Logger._log in Python 3.8) -- the old fixed signature crashed on
        # calls like logger.info(..., stacklevel=2).
        if CONFIG.local_rank == 0:
            super()._log(level, msg, args, exc_info, extra, stack_info, **kwargs)
def get_logger(log_dir=None, tb_log_dir=None, logging_level="DEBUG"):
    """
    Return a default build-in logger if log_dir=None and tb_log_dir=None
    Return a build-in logger which also dumps to a timestamped .log file in
    `log_dir` if log_dir is assigned (rank-0 process only)
    Return a build-in logger and tensorboard summary writer if tb_log_dir is assigned
    :param log_dir: directory for the .log file dumped from stdout
    :param tb_log_dir: tensorboard dir
    :param logging_level: one of the keys of LEVELS (case-insensitive)
    :return: Logger or [Logger, TensorBoardLogger]
    """
    level = LEVELS[logging_level.upper()]
    # Timestamp shared by the log file name and the tensorboard run.
    exp_string = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    # Install MyLogger so only the rank-0 process emits records.
    logging.setLoggerClass(MyLogger)
    logger = logging.getLogger('Logger')
    logger.setLevel(level)
    # create formatter
    formatter = logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s', datefmt='%m-%d %H:%M:%S')
    # create console handler
    ch = logging.StreamHandler()
    ch.setLevel(level)
    ch.setFormatter(formatter)
    # add the handlers to logger
    logger.addHandler(ch)
    # create file handler (rank-0 only, to avoid concurrent writers)
    if log_dir is not None and CONFIG.local_rank == 0:
        log_file = os.path.join(log_dir, exp_string)
        fh = logging.FileHandler(log_file+'.log', mode='w')
        fh.setLevel(level)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        # Dump the full config into the log file for reproducibility.
        pprint(CONFIG, stream=fh.stream)
    # create tensorboard summary writer
    if tb_log_dir is not None:
        # NOTE(review): TensorBoardLogger is not defined or imported in the
        # visible part of this module -- confirm where it comes from.
        tb_logger = TensorBoardLogger(tb_log_dir=tb_log_dir, exp_string=exp_string)
        return logger, tb_logger
    else:
        return logger
def normalize_image(image):
    """
    Min-max normalize a (C, H, W) image tensor to [0, 1] per channel.

    Generalized from the previous version, which hard-coded a 3-channel
    reshape (`.view(3, 1, 1)`) and therefore failed on e.g. 1-channel
    inputs; the channel count is now taken from the input itself. The
    small epsilon guards against division by zero on constant channels.
    """
    num_channels = image.shape[0]
    image_flat = torch.flatten(image, start_dim=1)
    channel_min = image_flat.min(dim=1, keepdim=False)[0].view(num_channels, 1, 1)
    channel_max = image_flat.max(dim=1, keepdim=False)[0].view(num_channels, 1, 1)
    return (image - channel_min) / (channel_max - channel_min + 1e-8)
| 5,334 | 30.382353 | 129 | py |
FGI-Matting | FGI-Matting-main/utils/util.py | import os
import cv2
import torch
import logging
import numpy as np
from utils.config import CONFIG
import torch.distributed as dist
def make_dir(target_dir):
    """
    Create `target_dir` (including missing parents) if it does not exist.

    Uses os.makedirs(..., exist_ok=True) instead of the previous
    check-then-create sequence, which was racy: another process could
    create the directory between the exists() check and makedirs(),
    raising FileExistsError.
    """
    os.makedirs(target_dir, exist_ok=True)
def print_network(model, name):
    """
    Log the model's structure, its name, and its total parameter count.
    """
    logger = logging.getLogger("Logger")
    total_params = sum(p.numel() for p in model.parameters())
    logger.info(model)
    logger.info(name)
    logger.info("Number of parameters: {}".format(total_params))
def update_lr(lr, optimizer):
    """
    Set every parameter group of `optimizer` to learning rate `lr`.
    """
    for group in optimizer.param_groups:
        group['lr'] = lr
def warmup_lr(init_lr, step, iter_num):
    """
    Linearly ramp the learning rate from 0 to `init_lr` over `iter_num` steps.
    """
    fraction = step / iter_num
    return fraction * init_lr
def add_prefix_state_dict(state_dict, prefix="module"):
    """
    Prefix every key of a pretrained state dict with `prefix.` (for
    Data-Parallel models), casting every value to float.

    If the keys already start with `prefix`, only the float cast is applied
    (whether keys are prefixed is decided from the first key, matching the
    original behavior). Fix: an empty state dict previously crashed with
    IndexError when peeking at the first key; it now returns {}.
    """
    if not state_dict:
        return {}
    first_key = next(iter(state_dict))
    needs_prefix = not first_key.startswith(prefix)
    new_state_dict = {}
    for key, value in state_dict.items():
        new_key = prefix + "." + key if needs_prefix else key
        new_state_dict[new_key] = value.float()
    return new_state_dict
def remove_prefix_state_dict(state_dict, prefix="module"):
    """
    Strip `prefix.` from every key of a Data-Parallel state dict, casting
    every value to float.

    If the keys are not prefixed, only the float cast is applied (whether
    keys are prefixed is decided from the first key, matching the original
    behavior). Fix: an empty state dict previously crashed with IndexError
    when peeking at the first key; it now returns {}.
    """
    if not state_dict:
        return {}
    first_key = next(iter(state_dict))
    has_prefix = first_key.startswith(prefix)
    offset = len(prefix) + 1  # also drop the '.' after the prefix
    new_state_dict = {}
    for key, value in state_dict.items():
        new_key = key[offset:] if has_prefix else key
        new_state_dict[new_key] = value.float()
    return new_state_dict
def load_imagenet_pretrain(model, checkpoint_file):
    """
    Load imagenet pretrained resnet
    Add zeros channel to the first convolution layer
    Since we have the spectral normalization, we need to do a little more
    """
    # checkpoint = torch.load(checkpoint_file)
    checkpoint = torch.load(checkpoint_file, map_location = lambda storage, loc: storage.cuda(CONFIG.gpu))
    # checkpoint = torch.load(checkpoint_file, map_location = lambda storage, loc: storage)
    state_dict = remove_prefix_state_dict(checkpoint['state_dict'])
    for key, value in state_dict.items():
        state_dict[key] = state_dict[key].float()
    logger = logging.getLogger("Logger")
    logger.debug("Imagenet pretrained keys:")
    logger.debug(state_dict.keys())
    logger.debug("Generator keys:")
    logger.debug(model.module.encoder.state_dict().keys())
    logger.debug("Intersection keys:")
    logger.debug(set(model.module.encoder.state_dict().keys())&set(state_dict.keys()))
    # Spectral norm stores conv1's weight as (weight_u, weight_v, weight_bar);
    # all three must stay consistent when the input channel count changes.
    weight_u = state_dict["conv1.module.weight_u"]
    weight_v = state_dict["conv1.module.weight_v"]
    weight_bar = state_dict["conv1.module.weight_bar"]
    logger.debug("weight_v: {}".format(weight_v))
    logger.debug("weight_bar: {}".format(weight_bar.view(32, -1)))
    logger.debug("sigma: {}".format(weight_u.dot(weight_bar.view(32, -1).mv(weight_v))))
    # Grow the 3-channel RGB conv to 3 + trimap_channel inputs; the new
    # (trimap) channels are zero so the pretrained RGB behavior is kept.
    new_weight_v = torch.zeros((3+CONFIG.model.trimap_channel), 3, 3).cuda()
    new_weight_bar = torch.zeros(32, (3+CONFIG.model.trimap_channel), 3, 3).cuda()
    new_weight_v[:3, :, :].copy_(weight_v.view(3, 3, 3))
    new_weight_bar[:, :3, :, :].copy_(weight_bar)
    logger.debug("new weight_v: {}".format(new_weight_v.view(-1)))
    logger.debug("new weight_bar: {}".format(new_weight_bar.view(32, -1)))
    logger.debug("new sigma: {}".format(weight_u.dot(new_weight_bar.view(32, -1).mv(new_weight_v.view(-1)))))
    state_dict["conv1.module.weight_v"] = new_weight_v.view(-1)
    state_dict["conv1.module.weight_bar"] = new_weight_bar
    model_dict = model.module.encoder.state_dict()
    # 1. filter out unnecessary keys
    pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict}
    # 2. overwrite entries in the existing state dict
    model_dict.update(pretrained_dict)  # add missing entries, overwrite existing ones
    model.module.encoder.load_state_dict(model_dict, strict=False)
def load_VGG_pretrain(model, checkpoint_file):
    """Load a pretrained VGG backbone checkpoint into the encoder.

    The checkpoint is mapped onto the current CUDA device, its
    ``module.`` prefixes are stripped, and the weights are loaded
    non-strictly so keys missing from the encoder are ignored.
    """
    ckpt = torch.load(checkpoint_file,
                      map_location=lambda storage, loc: storage.cuda())
    encoder_weights = remove_prefix_state_dict(ckpt['state_dict'])
    model.module.encoder.load_state_dict(encoder_weights, strict=False)
def get_unknown_tensor(trimap):
    """Return a 1-channel float mask of the unknown (transition) region.

    Supports both trimap encodings used in this project: for a 3-channel
    one-hot trimap the middle channel marks the transition region; for a
    1-channel trimap the transition region is labelled 1.
    """
    if CONFIG.model.trimap_channel == 3:
        return trimap[:, 1:2, :, :].float()
    return trimap.eq(1).float()
def get_known_tensor(trimap):
    """Return a 1-channel float mask of the known (fg + bg) region.

    The known mask is the complement of the unknown/transition mask,
    for both the 3-channel one-hot and the 1-channel trimap encodings.
    """
    if CONFIG.model.trimap_channel == 3:
        unknown = trimap[:, 1:2, :, :].float()
    else:
        unknown = trimap.eq(1).float()
    return unknown.eq(0).float()
def get_gaborfilter(angles):
    """Build a bank of Gabor kernels usable as a conv weight.

    :param angles: number of kernels to generate. The orientation step is
        fixed at pi/8, so passing 8 covers the half circle.
        NOTE(review): the step does not scale with ``angles`` -- confirm
        whether ``angle * np.pi / angles`` was intended for other values.
    :return: float32 array of shape (angles, 1, 5, 5)
    """
    kernels = [
        cv2.getGaborKernel(ksize=(5, 5), sigma=0.5, theta=k * np.pi / 8,
                           lambd=5, gamma=0.5)
        for k in range(angles)
    ]
    bank = np.expand_dims(np.array(kernels), axis=1)
    return bank.astype(np.float32)
def get_gradfilter():
    """Build Sobel gradient kernels as a conv weight.

    :return: float32 array of shape (2, 1, 3, 3): vertical-gradient
        (Sobel-y) kernel first, horizontal-gradient (Sobel-x) second.
    """
    sobel_y = [[-1, -2, -1], [0, 0, 0], [1, 2, 1]]
    sobel_x = [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]
    kernels = np.array([sobel_y, sobel_x])
    return np.expand_dims(kernels, axis=1).astype(np.float32)
def reduce_tensor_dict(tensor_dict, mode='mean'):
    """Reduce every tensor in *tensor_dict* across GPUs, in place.

    ``None`` entries are left untouched. Returns the same dict object.
    """
    for name, value in tensor_dict.items():
        if value is not None:
            tensor_dict[name] = reduce_tensor(value, mode)
    return tensor_dict
def reduce_tensor(tensor, mode='mean'):
    """All-reduce *tensor* over the process group.

    mode 'sum' returns the element-wise sum across workers; 'mean'
    divides the sum by ``CONFIG.world_size``. The input tensor itself is
    not modified -- the reduction runs on a clone.
    """
    reduced = tensor.clone()
    dist.all_reduce(reduced, op=dist.ReduceOp.SUM)
    if mode == 'mean':
        reduced /= CONFIG.world_size
    elif mode != 'sum':
        raise NotImplementedError("reduce mode can only be 'mean' or 'sum'")
    return reduced
if __name__ == "__main__":
    # Smoke test: build the generator, load the ImageNet-pretrained weights
    # (with the first-conv channel surgery performed above), then run one
    # forward pass on dummy inputs.
    import networks
    logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] %(levelname)s: %(message)s',
                        datefmt='%m-%d %H:%M:%S')
    G = networks.get_generator().cuda()
    load_imagenet_pretrain(G, CONFIG.model.imagenet_pretrain_path)
    x = torch.randn(4,3,512,512).cuda()  # dummy image batch
    # presumably the second generator input is trimap-like -- confirm against
    # the networks.get_generator() signature
    y = torch.randn(4,3,512,512).cuda()
    z = G(x, y)
| 7,224 | 31.254464 | 112 | py |
FGI-Matting | FGI-Matting-main/dataloader/Test_dataset/data_generator.py | import cv2
import os
import math
import numbers
import random
import logging
import copy
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.nn import functional as F
from torchvision import transforms
trimap_channel = 1   # 1: single-channel label trimap; 3: one-hot trimap (see ToTensor)
random_interp = False  # if True, maybe_random_interp picks a random cv2 interpolation
crop_size = 512      # square crop / rescale size used by the transforms below
augmentation = True  # enable the training-time augmentation pipeline
radius = 15          # click radius in px (only the commented-out clickmap drawing uses it)
radius_extend = 50   # exclusion radius in px around an already-sampled click point
num_points =10       # total number of guidance clicks (fg + bg) sampled by Genclickmap
interp_list = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4]
def maybe_random_interp(cv2_interp):
    """Return *cv2_interp* unchanged, or a random interpolation flag from
    ``interp_list`` when the module-level ``random_interp`` switch is on.
    """
    return np.random.choice(interp_list) if random_interp else cv2_interp
class ToTensor(object):
    """
    Convert ndarrays in sample to Tensors with normalization.

    Images are flipped BGR -> RGB, moved from HWC to CHW, scaled to [0, 1]
    and normalised in place with the ImageNet mean/std. Trimaps (and, in
    the test phase, guidance maps) are quantised from greyscale {0,128,255}
    to class labels {0: bg, 1: unknown, 2: fg} and emitted either one-hot
    (module flag ``trimap_channel == 3``) or as a single channel.
    """
    def __init__(self, phase="test"):
        # ImageNet statistics, shaped (3, 1, 1) so they broadcast over CHW.
        self.mean = torch.tensor([0.485, 0.456, 0.406]).view(3,1,1)
        self.std = torch.tensor([0.229, 0.224, 0.225]).view(3,1,1)
        self.phase = phase
    def __call__(self, sample):
        if(self.phase == "train"):
            # convert BGR images to RGB
            image, alpha, trimap = sample['image'][:,:,::-1], sample['alpha'], sample['trimap']
            # convert HWC to CHW
            image = image.transpose((2, 0, 1)).astype(np.float32)
            # normalize image
            image /= 255.
            alpha = np.expand_dims(alpha.astype(np.float32), axis=0)
            trimap = trimap.astype(np.float32)
            # Quantise 0/128/255 greyscale to labels 0/1/2. Order matters:
            # the >=170 assignment must run before the >=85 one.
            trimap[trimap < 85] = 0
            trimap[trimap >= 170] = 2
            trimap[trimap >= 85] = 1
            # convert BGR fg and bg to RGB
            fg = sample['fg'][:,:,::-1].transpose((2, 0, 1)).astype(np.float32) / 255.
            sample['fg'] = torch.from_numpy(fg).sub_(self.mean).div_(self.std)
            bg = sample['bg'][:,:,::-1].transpose((2, 0, 1)).astype(np.float32) / 255.
            sample['bg'] = torch.from_numpy(bg).sub_(self.mean).div_(self.std)
            sample['image'], sample['alpha'], sample['trimap'] = \
                torch.from_numpy(image), torch.from_numpy(alpha), torch.from_numpy(trimap)
            sample['image'] = sample['image'].sub_(self.mean).div_(self.std)
            # NOTE(review): trimap is a float tensor here, but F.one_hot
            # requires an integer tensor -- the 3-channel path in the train
            # and val branches likely fails (the test branch casts to long);
            # confirm trimap_channel is always 1 for these phases.
            if trimap_channel == 3:
                sample['trimap'] = F.one_hot(sample['trimap'], num_classes=3).permute(2,0,1).float()
            elif trimap_channel == 1:
                sample['trimap'] = sample['trimap'][None,...].float()
            else:
                raise NotImplementedError("trimap_channel can only be 3 or 1")
        elif(self.phase == "val"):
            # convert BGR images to RGB
            image, alpha, trimap = sample['image'][:,:,::-1], sample['alpha'], sample['trimap']
            # convert HWC to CHW
            image = image.transpose((2, 0, 1)).astype(np.float32)
            # normalize image
            image /= 255.
            alpha = np.expand_dims(alpha.astype(np.float32), axis=0)
            trimap = trimap.astype(np.float32)
            # same greyscale -> label quantisation as the train branch
            trimap[trimap < 85] = 0
            trimap[trimap >= 170] = 2
            trimap[trimap >= 85] = 1
            sample['image'], sample['alpha'], sample['trimap'] = \
                torch.from_numpy(image), torch.from_numpy(alpha), torch.from_numpy(trimap)
            sample['image'] = sample['image'].sub_(self.mean).div_(self.std)
            if trimap_channel == 3:
                sample['trimap'] = F.one_hot(sample['trimap'], num_classes=3).permute(2,0,1).float()
            elif trimap_channel == 1:
                sample['trimap'] = sample['trimap'][None,...].float()
            else:
                raise NotImplementedError("trimap_channel can only be 3 or 1")
        else:#test
            # convert BGR images to RGB
            image, alpha, trimap = sample['image'][:,:,::-1], sample['alpha'], sample['trimap']
            guidancemap_ = sample["guidancemap"]
            # convert HWC to CHW
            image = image.transpose((2, 0, 1)).astype(np.float32)
            # normalize image
            image /= 255.
            alpha = np.expand_dims(alpha.astype(np.float32), axis=0)
            guidancemap_ = guidancemap_.astype(np.float32)
            guidancemap_[guidancemap_ < 85] = 0 #0 bg
            guidancemap_[guidancemap_ >= 170] = 2 #255 fg
            guidancemap_[guidancemap_ >= 85] = 1 #128 tr
            trimap = trimap.astype(np.float32)
            trimap[trimap < 85] = 0
            trimap[trimap >= 170] = 2
            trimap[trimap >= 85] = 1
            # trimap cast to long so F.one_hot below can be applied
            sample['image'], sample['alpha'], sample['trimap'] = \
                torch.from_numpy(image), torch.from_numpy(alpha), torch.from_numpy(trimap).to(torch.long)
            sample['image'] = sample['image'].sub_(self.mean).div_(self.std)
            sample["guidancemap"] = torch.from_numpy(guidancemap_ ).to(torch.long)# cast to long so the 3-channel (one-hot) case below works
            if trimap_channel == 3:
                sample['guidancemap'] = F.one_hot(sample['guidancemap'], num_classes=3).permute(2,0,1).float()
                sample['trimap'] = F.one_hot(sample['trimap'], num_classes=3).permute(2,0,1).float()
            elif trimap_channel == 1:
                sample['guidancemap'] = sample['guidancemap'][None,...].float()
                sample['trimap'] = sample['trimap'][None,...].float()
            else:
                raise NotImplementedError("trimap_channel can only be 3 or 1")
        return sample
class RandomAffine(object):
    """
    Random affine transform (rotation, translation, scale, shear, flip)
    applied jointly to the foreground image and its alpha matte.

    Images whose longer side is below 1024 px have the rotation disabled
    (angle forced to 0) but keep the other components.
    """
    def __init__(self, degrees, translate=None, scale=None, shear=None, flip=None, resample=False, fillcolor=0):
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError("If degrees is a single number, it must be positive.")
            self.degrees = (-degrees, degrees)
        else:
            assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \
                "degrees should be a list or tuple and it must be of length 2."
            self.degrees = degrees
        if translate is not None:
            assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
                "translate should be a list or tuple and it must be of length 2."
            for t in translate:
                if not (0.0 <= t <= 1.0):
                    raise ValueError("translation values should be between 0 and 1")
        self.translate = translate
        if scale is not None:
            assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
                "scale should be a list or tuple and it must be of length 2."
            for s in scale:
                if s <= 0:
                    raise ValueError("scale values should be positive")
        self.scale = scale
        if shear is not None:
            if isinstance(shear, numbers.Number):
                if shear < 0:
                    raise ValueError("If shear is a single number, it must be positive.")
                self.shear = (-shear, shear)
            else:
                assert isinstance(shear, (tuple, list)) and len(shear) == 2, \
                    "shear should be a list or tuple and it must be of length 2."
                self.shear = shear
        else:
            self.shear = shear
        self.resample = resample
        self.fillcolor = fillcolor
        self.flip = flip
    @staticmethod
    def get_params(degrees, translate, scale_ranges, shears, flip, img_size):
        """Sample one set of affine transformation parameters.

        Returns
        -------
        (angle, (dx, dy), (scale_x, scale_y), shear, flip) where flip is a
        length-2 array of +/-1 multipliers for x and y (or None).
        """
        angle = random.uniform(degrees[0], degrees[1])
        if translate is not None:
            max_dx = translate[0] * img_size[0]
            max_dy = translate[1] * img_size[1]
            translations = (np.round(random.uniform(-max_dx, max_dx)),
                            np.round(random.uniform(-max_dy, max_dy)))
        else:
            translations = (0, 0)
        if scale_ranges is not None:
            scale = (random.uniform(scale_ranges[0], scale_ranges[1]),
                     random.uniform(scale_ranges[0], scale_ranges[1]))
        else:
            scale = (1.0, 1.0)
        if shears is not None:
            shear = random.uniform(shears[0], shears[1])
        else:
            shear = 0.0
        if flip is not None:
            # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is
            # the equivalent dtype here.
            flip = (np.random.rand(2) < flip).astype(int) * 2 - 1
        return angle, translations, scale, shear, flip
    def __call__(self, sample):
        fg, alpha = sample['fg'], sample['alpha']
        rows, cols, ch = fg.shape
        # BUG FIX: get_params expects an indexable (width, height) pair, but
        # fg.size on a numpy array is the total element count (a scalar),
        # which raised TypeError whenever translate was set.
        if np.maximum(rows, cols) < 1024:
            # Small images: disable rotation, keep the other components.
            params = self.get_params((0, 0), self.translate, self.scale, self.shear, self.flip, (cols, rows))
        else:
            params = self.get_params(self.degrees, self.translate, self.scale, self.shear, self.flip, (cols, rows))
        center = (cols * 0.5 + 0.5, rows * 0.5 + 0.5)
        M = self._get_inverse_affine_matrix(center, *params)
        M = np.array(M).reshape((2, 3))
        # WARP_INVERSE_MAP: M maps output -> input, matching the inverse
        # matrix computed above.
        fg = cv2.warpAffine(fg, M, (cols, rows),
                            flags=maybe_random_interp(cv2.INTER_NEAREST) + cv2.WARP_INVERSE_MAP)
        alpha = cv2.warpAffine(alpha, M, (cols, rows),
                               flags=maybe_random_interp(cv2.INTER_NEAREST) + cv2.WARP_INVERSE_MAP)
        sample['fg'], sample['alpha'] = fg, alpha
        return sample
    @staticmethod
    def _get_inverse_affine_matrix(center, angle, translate, scale, shear, flip):
        # Helper method to compute inverse matrix for affine transformation
        # As it is explained in PIL.Image.rotate
        # We need compute INVERSE of affine transformation matrix: M = T * C * RSS * C^-1
        # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
        # C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
        # RSS is rotation with scale and shear matrix
        # It is different from the original function in torchvision
        # The order are changed to flip -> scale -> rotation -> shear
        # x and y have different scale factors
        # RSS(shear, a, scale, f) = [ cos(a + shear)*scale_x*f -sin(a + shear)*scale_y     0]
        # [ sin(a)*scale_x*f          cos(a)*scale_y             0]
        # [     0                       0                        1]
        # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1
        angle = math.radians(angle)
        shear = math.radians(shear)
        scale_x = 1.0 / scale[0] * flip[0]
        scale_y = 1.0 / scale[1] * flip[1]
        # Inverted rotation matrix with scale and shear
        d = math.cos(angle + shear) * math.cos(angle) + math.sin(angle + shear) * math.sin(angle)
        matrix = [
            math.cos(angle) * scale_x, math.sin(angle + shear) * scale_x, 0,
            -math.sin(angle) * scale_y, math.cos(angle + shear) * scale_y, 0
        ]
        matrix = [m / d for m in matrix]
        # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
        matrix[2] += matrix[0] * (-center[0] - translate[0]) + matrix[1] * (-center[1] - translate[1])
        matrix[5] += matrix[3] * (-center[0] - translate[0]) + matrix[4] * (-center[1] - translate[1])
        # Apply center translation: C * RSS^-1 * C^-1 * T^-1
        matrix[2] += center[0]
        matrix[5] += center[1]
        return matrix
class RandomJitter(object):
    """Randomly jitter hue, saturation and value of the foreground.

    The jitter happens in HSV space on a float image; samples whose alpha
    is identically zero are returned untouched.
    """
    def __call__(self, sample):
        fg, alpha = sample['fg'], sample['alpha']
        # Nothing to jitter when there is no foreground at all.
        if np.all(alpha == 0):
            return sample
        # float32 keeps precision through the colour-space round trip
        hsv = cv2.cvtColor(fg.astype(np.float32) / 255.0, cv2.COLOR_BGR2HSV)
        # hue: shift by a random offset, wrapping around 360 degrees
        hsv[:, :, 0] = np.remainder(
            hsv[:, :, 0].astype(np.float32) + np.random.randint(-40, 40), 360)
        # saturation: perturb around the foreground mean, mirror overflow
        sat_mean = hsv[:, :, 1][alpha > 0].mean()
        sat_jitter = np.random.rand() * (1.1 - sat_mean) / 5 - (1.1 - sat_mean) / 10
        sat = np.abs(hsv[:, :, 1] + sat_jitter)
        sat[sat > 1] = 2 - sat[sat > 1]
        hsv[:, :, 1] = sat
        # value: same scheme as saturation
        val_mean = hsv[:, :, 2][alpha > 0].mean()
        val_jitter = np.random.rand() * (1.1 - val_mean) / 5 - (1.1 - val_mean) / 10
        val = np.abs(hsv[:, :, 2] + val_jitter)
        val[val > 1] = 2 - val[val > 1]
        hsv[:, :, 2] = val
        # back to BGR and to the 0..255 range
        sample['fg'] = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) * 255
        return sample
class RandomHorizontalFlip(object):
    """Mirror foreground and alpha left-right with probability *prob*."""
    def __init__(self, prob=0.5):
        self.prob = prob
    def __call__(self, sample):
        if np.random.uniform(0, 1) < self.prob:
            sample['fg'] = cv2.flip(sample['fg'], 1)
            sample['alpha'] = cv2.flip(sample['alpha'], 1)
        return sample
class RandomCrop(object):
    """
    Crop a window of size 'output_size' whose top-left corner is sampled
    from the trimap's unknown (128) region when possible, so the crop
    contains transition pixels. Images smaller than the target are first
    upscaled; if the trimap has no unknown region at all, everything is
    resized to the target size instead of cropped.
    :param output_size (tuple or int): Desired output size. If int, square crop
    is made.
    """
    def __init__(self, output_size=( crop_size, crop_size)):
        # NOTE(review): self.margin indexes output_size before the int case
        # is normalised to a tuple, so an int argument would raise here --
        # confirm only tuples are passed in practice.
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size
        self.margin = output_size[0] // 2
        self.logger = logging.getLogger("Logger")
    def __call__(self, sample):
        fg, alpha, trimap, name = sample['fg'], sample['alpha'], sample['trimap'], sample['image_name']
        bg = sample['bg']
        h, w = trimap.shape
        bg = cv2.resize(bg, (w, h), interpolation=maybe_random_interp(cv2.INTER_CUBIC))# resize bg to match the alpha/trimap size
        if w < self.output_size[0]+1 or h < self.output_size[1]+1:
            ratio = 1.1*self.output_size[0]/h if h < w else 1.1*self.output_size[1]/w# ratio that brings the short side up to 1.1 * output size
            # self.logger.warning("Size of {} is {}.".format(name, (h, w)))
            while h < self.output_size[0]+1 or w < self.output_size[1]+1:# upscale fg/alpha/trimap/bg by ratio until both sides are large enough
                fg = cv2.resize(fg, (int(w*ratio), int(h*ratio)), interpolation=maybe_random_interp(cv2.INTER_NEAREST))
                alpha = cv2.resize(alpha, (int(w*ratio), int(h*ratio)),
                                   interpolation=maybe_random_interp(cv2.INTER_NEAREST))
                trimap = cv2.resize(trimap, (int(w*ratio), int(h*ratio)), interpolation=cv2.INTER_NEAREST)
                bg = cv2.resize(bg, (int(w*ratio), int(h*ratio)), interpolation=maybe_random_interp(cv2.INTER_CUBIC))
                h, w = trimap.shape
        small_trimap = cv2.resize(trimap, (w//4, h//4), interpolation=cv2.INTER_NEAREST)
        unknown_list = list(zip(*np.where(small_trimap[self.margin//4:(h-self.margin)//4,# margin is half the output size: keep only unknown pixels away from the borders (at quarter resolution)
                                                       self.margin//4:(w-self.margin)//4] == 128)))
        unknown_num = len(unknown_list)
        if len(unknown_list) < 10:
            # self.logger.warning("{} does not have enough unknown area for crop.".format(name))
            left_top = (np.random.randint(0, h-self.output_size[0]+1), np.random.randint(0, w-self.output_size[1]+1))# too few unknown pixels: pick a random top-left anywhere
        else:
            idx = np.random.randint(unknown_num)# enough unknown pixels: pick one (x4, back to full resolution) as the top-left corner
            left_top = (unknown_list[idx][0]*4, unknown_list[idx][1]*4)
        fg_crop = fg[left_top[0]:left_top[0]+self.output_size[0], left_top[1]:left_top[1]+self.output_size[1],:]
        alpha_crop = alpha[left_top[0]:left_top[0]+self.output_size[0], left_top[1]:left_top[1]+self.output_size[1]]
        bg_crop = bg[left_top[0]:left_top[0]+self.output_size[0], left_top[1]:left_top[1]+self.output_size[1],:]
        trimap_crop = trimap[left_top[0]:left_top[0]+self.output_size[0], left_top[1]:left_top[1]+self.output_size[1]]
        if len(np.where(trimap==128)[0]) == 0:# no unknown region anywhere in the trimap: resize everything to the target size instead of cropping
            self.logger.error("{} does not have enough unknown area for crop. Resized to target size."
                              "left_top: {}".format(name, left_top))
            fg_crop = cv2.resize(fg, self.output_size[::-1], interpolation=maybe_random_interp(cv2.INTER_NEAREST))
            alpha_crop = cv2.resize(alpha, self.output_size[::-1], interpolation=maybe_random_interp(cv2.INTER_NEAREST))
            trimap_crop = cv2.resize(trimap, self.output_size[::-1], interpolation=cv2.INTER_NEAREST)
            bg_crop = cv2.resize(bg, self.output_size[::-1], interpolation=maybe_random_interp(cv2.INTER_CUBIC))
            # cv2.imwrite('../tmp/tmp.jpg', fg.astype(np.uint8))
            # cv2.imwrite('../tmp/tmp.png', (alpha*255).astype(np.uint8))
            # cv2.imwrite('../tmp/tmp2.png', trimap.astype(np.uint8))
            # raise ValueError("{} does not have enough unknown area for crop.".format(name))
        sample['fg'], sample['alpha'], sample['trimap'] = fg_crop, alpha_crop, trimap_crop
        sample['bg'] = bg_crop
        return sample
class ResizeAug(object):
    """Resize augmentation: scale the image up by 1.5x and back down with
    one randomly chosen interpolation method, leaving a slight resampling
    artefact. Only ``sample['image']`` is touched.
    """
    def __init__(self):
        pass
    def __call__(self, sample):
        interp = (cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC)[np.random.randint(3)]
        image = sample['image']
        h, w = image.shape[:2]
        upscaled = cv2.resize(image, (int(1.5 * w), int(1.5 * h)), interpolation=interp)
        sample['image'] = cv2.resize(upscaled, (w, h), interpolation=interp)
        return sample
class Rescale(object):
    """
    Rescale image, alpha and trimap to a given size.

    :param output_size (tuple or int): exact (h, w) target when a tuple;
        when an int, the shorter image edge is matched to it and the
        aspect ratio is preserved. The trimap uses nearest-neighbour
        interpolation so its labels stay discrete.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size
    def __call__(self, sample):
        image, alpha, trimap = sample['image'], sample['alpha'], sample['trimap']
        h, w = image.shape[:2]
        if not isinstance(self.output_size, int):
            new_h, new_w = self.output_size
        elif h > w:
            new_h, new_w = self.output_size * h / w, self.output_size
        else:
            new_h, new_w = self.output_size, self.output_size * w / h
        new_h, new_w = int(new_h), int(new_w)
        sample['image'] = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
        sample['trimap'] = cv2.resize(trimap, (new_w, new_h), interpolation=cv2.INTER_NEAREST)
        sample['alpha'] = cv2.resize(alpha, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
        return sample
class OriginScale(object):
    """Reflect-pad the sample so both sides are multiples of 32.

    The target size is derived from ``sample['alpha_shape']``; already
    aligned samples are returned untouched. 'image' and 'trimap' are
    always padded; 'guidancemap' is padded too, except in the "val"
    phase where the sample carries no guidance map.
    """
    def __init__(self, phase = "test"):
        self.phase = phase
    def __call__(self, sample):
        h, w = sample["alpha_shape"]
        if h % 32 == 0 and w % 32 == 0:
            return sample
        # round each side up to the next multiple of 32
        pad_h = 32 * ((h - 1) // 32 + 1) - h
        pad_w = 32 * ((w - 1) // 32 + 1) - w
        sample['image'] = np.pad(sample['image'], ((0, pad_h), (0, pad_w), (0, 0)), mode="reflect")
        sample['trimap'] = np.pad(sample['trimap'], ((0, pad_h), (0, pad_w)), mode="reflect")
        if self.phase != "val":
            sample['guidancemap'] = np.pad(sample['guidancemap'], ((0, pad_h), (0, pad_w)), mode="reflect")
        return sample
class GenTrimap(object):
    """Generate a trimap (0 bg / 128 unknown / 255 fg) from the alpha matte
    by eroding the binary foreground and background masks with random-width
    elliptical kernels (1..29 px), as in the Adobe-1k protocol.
    """
    def __init__(self):
        # erosion_kernels[k] is a k x k ellipse; index 0 is a placeholder so
        # a random width can index the list directly.
        self.erosion_kernels = [None] + [cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size)) for size in range(1,30)]
    def __call__(self, sample):
        alpha = sample['alpha']
        # Adobe 1K
        fg_width = np.random.randint(1, 30)
        bg_width = np.random.randint(1, 30)
        # BUG FIX: np.int was removed in NumPy 1.24 -- the builtin int is the
        # equivalent dtype (the 1e-5 nudge keeps exact 1.0 alphas in the mask).
        fg_mask = (alpha + 1e-5).astype(int).astype(np.uint8)
        bg_mask = (1 - alpha + 1e-5).astype(int).astype(np.uint8)
        fg_mask = cv2.erode(fg_mask, self.erosion_kernels[fg_width])
        bg_mask = cv2.erode(bg_mask, self.erosion_kernels[bg_width])
        trimap = np.ones_like(alpha) * 128  # default: unknown
        trimap[fg_mask == 1] = 255          # confident foreground
        trimap[bg_mask == 1] = 0            # confident background
        sample['trimap'] = trimap
        return sample
class Genclickmap(object):
    """Sample simulated user clicks from a trimap.

    A total of *point_num* clicks is split randomly between foreground
    (trimap == 255) and background (trimap == 0). After each click a disc
    of radius *r_extend* is painted with 128 on a working copy so later
    clicks keep their distance. The sampled (row, col) coordinates are
    returned as two fixed-size (10, 2) long tensors padded with 9999.
    NOTE(review): the tensors are hard-coded to 10 rows, so point_num > 10
    would overflow them -- confirm point_num <= 10 at call sites.
    """
    def __init__(self, r=15, r_extend = 50, point_num=10):
        self.r = r                # click radius (only the disabled clickmap drawing used it)
        self.r_extend = r_extend  # exclusion radius between sampled clicks
        self.m = point_num        # total number of clicks to distribute
    def _sample_points(self, trimap, label, budget):
        """Draw up to *budget* points from pixels equal to *label*."""
        work = copy.deepcopy(trimap)
        points = []
        while budget > 0:
            candidates = list(zip(*np.where(work == label)))
            if not candidates:
                break  # region exhausted; leftover budget is dropped
            chosen = candidates[np.random.randint(len(candidates))]
            points.append(chosen)
            # blank out a disc around the chosen pixel on the working copy
            cv2.circle(work, (chosen[1], chosen[0]), self.r_extend, 128, -1)
            budget -= 1
        return points
    def __call__(self, sample):
        trimap = sample['trimap']
        # at least 1 and at most m-1 foreground clicks
        fg_budget = np.random.randint(1, self.m)
        fg_points = self._sample_points(trimap, 255, fg_budget)
        bg_points = self._sample_points(trimap, 0, self.m - fg_budget)
        # pack coordinates into fixed-size tensors, 9999 marking unused slots
        fg_tensor = torch.ones([10, 2]) * 9999
        bg_tensor = torch.ones([10, 2]) * 9999
        for i, point in enumerate(fg_points):
            fg_tensor[i] = torch.tensor(point)
        for i, point in enumerate(bg_points):
            bg_tensor[i] = torch.tensor(point)
        sample["fg_points"] = fg_tensor.long()
        sample["bg_points"] = bg_tensor.long()
        return sample
class Composite(object):
    """Blend ``image = fg * alpha + bg * (1 - alpha)``.

    fg/bg are clamped to [0, 255] and alpha to [0, 1] in place before
    blending, so the arrays inside *sample* are modified as well.
    """
    def __call__(self, sample):
        fg, bg, alpha = sample['fg'], sample['bg'], sample['alpha']
        np.clip(alpha, 0, 1, out=alpha)
        np.clip(fg, 0, 255, out=fg)
        np.clip(bg, 0, 255, out=bg)
        weight = alpha[:, :, None]
        sample['image'] = fg * weight + bg * (1 - weight)
        return sample
class DataGenerator(Dataset):
    """Matting dataset.

    phase "train": composites a foreground/alpha pair onto a background on
    the fly, with optional geometric/colour augmentation.
    phase "val": loads pre-merged image, alpha and trimap from disk.
    phase "test": additionally loads a guidance map (trimap / click map /
    scribble map); when none is configured, a constant all-128 (all
    unknown) map is substituted.
    """
    def __init__(self, data, phase="train", test_scale="resize"):
        self.phase = phase
        self.crop_size = crop_size
        self.alpha = data.alpha
        if self.phase == "train":
            self.fg = data.fg
            self.bg = data.bg
            self.merged = []
            self.trimap = []
        else:
            self.fg = []
            self.bg = []
            self.merged = data.merged
            self.trimap = data.trimap
            self.guidancemap = data.guidancemap
        if augmentation:
            train_trans = [
                RandomAffine(degrees=30, scale=[0.8, 1.25], shear=10, flip=0.5),  # affine-transform fg and alpha together
                GenTrimap(),  # derive the trimap from alpha (same spatial size)
                RandomCrop((self.crop_size, self.crop_size)),
                RandomJitter(),
                Genclickmap(radius, radius_extend, num_points),
                Composite(),  # blend fg/bg/alpha into the merged image
                ResizeAug(),  # resize augmentation, applied to the image only
                ToTensor(phase="train")]
        else:
            train_trans = [ GenTrimap(),
                            RandomCrop((self.crop_size, self.crop_size)),
                            Composite(),
                            ResizeAug(),
                            ToTensor(phase="train")]
        if test_scale.lower() == "origin":
            test_trans = [ OriginScale(), ToTensor(phase="test") ]
        elif test_scale.lower() == "resize":
            test_trans = [ Rescale((self.crop_size, self.crop_size)), ToTensor(phase="test") ]
        elif test_scale.lower() == "crop":
            test_trans = [ RandomCrop((self.crop_size, self.crop_size)), ToTensor(phase="test") ]
        else:
            raise NotImplementedError("test_scale {} not implemented".format(test_scale))
        self.transform = {
            'train':
                transforms.Compose(train_trans),
            'val':
                transforms.Compose([
                    OriginScale(phase="val"),
                    ToTensor(phase="val")
                ]),
            'test':
                transforms.Compose(test_trans)
        }[phase]
        self.fg_num = len(self.fg)
        self.erosion_kernels = [None] + [cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size)) for size in range(1,20)]
    def __getitem__(self, idx):
        if self.phase == "train":
            # foreground/alpha lists are indexed modulo so the (longer)
            # background list drives the epoch length
            fg = cv2.imread(self.fg[idx % self.fg_num])
            alpha = cv2.imread(self.alpha[idx % self.fg_num], 0).astype(np.float32)/255
            bg = cv2.imread(self.bg[idx], 1)
            if augmentation:
                # randomly merge a second foreground to enrich compositions
                fg, alpha = self._composite_fg(fg, alpha, idx)
            image_name = os.path.split(self.fg[idx % self.fg_num])[-1]
            sample = {'fg': fg, 'alpha': alpha, 'bg': bg, 'image_name': image_name}
        elif self.phase == "val":
            image = cv2.imread(self.merged[idx])
            alpha = cv2.imread(self.alpha[idx], 0)/255.
            trimap = cv2.imread(self.trimap[idx], 0)
            image_name = os.path.split(self.merged[idx])[-1]
            sample = {'image': image, 'alpha': alpha, 'trimap': trimap, 'image_name': image_name, 'alpha_shape': alpha.shape}
        else:  # test
            image = cv2.imread(self.merged[idx])
            alpha = cv2.imread(self.alpha[idx], 0)/255.
            # `is None` (identity), not `== None`: the idiomatic check for a
            # missing guidance-map list
            if self.guidancemap is None:
                # no guidance configured: constant 128 means "all unknown"
                guidancemap_ = np.ones_like(alpha).astype(np.uint8)*128
            else:
                guidancemap_ = cv2.imread(self.guidancemap[idx], 0)
            trimap = cv2.imread(self.trimap[idx], 0)
            image_name = os.path.split(self.merged[idx])[-1]
            sample = {'image': image, 'alpha': alpha, 'trimap': trimap, 'guidancemap': guidancemap_, 'image_name': image_name, 'alpha_shape': alpha.shape}
        sample = self.transform(sample)
        return sample
    def _composite_fg(self, fg, alpha, idx):
        """With probability 0.5, alpha-merge a second random foreground under
        the current one; with probability 0.25, resize the pair to 640x640."""
        if np.random.rand() < 0.5:
            idx2 = np.random.randint(self.fg_num) + idx
            fg2 = cv2.imread(self.fg[idx2 % self.fg_num])
            alpha2 = cv2.imread(self.alpha[idx2 % self.fg_num], 0).astype(np.float32)/255.
            h, w = alpha.shape
            fg2 = cv2.resize(fg2, (w, h), interpolation=maybe_random_interp(cv2.INTER_NEAREST))
            alpha2 = cv2.resize(alpha2, (w, h), interpolation=maybe_random_interp(cv2.INTER_NEAREST))
            # The overlap of two 50% transparency should be 25%
            alpha_tmp = 1 - (1 - alpha) * (1 - alpha2)
            if np.any(alpha_tmp < 1):
                fg = fg.astype(np.float32) * alpha[:,:,None] + fg2.astype(np.float32) * (1 - alpha[:,:,None])
                alpha = alpha_tmp
                fg = fg.astype(np.uint8)
        if np.random.rand() < 0.25:
            fg = cv2.resize(fg, (640, 640), interpolation=maybe_random_interp(cv2.INTER_NEAREST))
            alpha = cv2.resize(alpha, (640, 640), interpolation=maybe_random_interp(cv2.INTER_NEAREST))
        return fg, alpha
    def __len__(self):
        # training length follows the background list; otherwise the alphas
        if self.phase == "train":
            return len(self.bg)
        else:
            return len(self.alpha)
if __name__ == '__main__':
    # No standalone behaviour; this module is meant to be imported.
    pass
| 31,357 | 40.699468 | 155 | py |
FGI-Matting | FGI-Matting-main/dataloader/Test_dataset/prefetcher.py | import torch
class Prefetcher():
    """
    Modified from the data_prefetcher in https://github.com/NVIDIA/apex/blob/master/examples/imagenet/main_amp.py

    Wraps a DataLoader so the *next* batch's tensors are copied to the GPU
    on a side CUDA stream, letting the host-to-device transfer overlap with
    compute on the current batch.
    """
    def __init__(self, loader):
        # Keep the underlying loader; a fresh iterator is created in __iter__.
        # NOTE(review): calling __next__/preload before __iter__ would fail on
        # the missing self.loader attribute -- confirm usage is always
        # `iter(Prefetcher(loader))` first.
        self.orig_loader = loader
        # Dedicated stream on which the asynchronous copies are issued.
        self.stream = torch.cuda.Stream()
        self.next_sample = None
    def preload(self):
        # Fetch the next batch and start moving its tensors to the GPU.
        try:
            self.next_sample = next(self.loader)
        except StopIteration:
            # Loader exhausted; __next__ will raise StopIteration next call.
            self.next_sample = None
            return
        with torch.cuda.stream(self.stream):
            for key, value in self.next_sample.items():
                if isinstance(value, torch.Tensor):
                    # non_blocking=True lets the copy run async on self.stream
                    self.next_sample[key] = value.cuda(non_blocking=True)
    def __next__(self):
        # Wait until the prefetch copies issued on the side stream are done.
        torch.cuda.current_stream().wait_stream(self.stream)
        sample = self.next_sample
        if sample is not None:
            for key, value in sample.items():
                if isinstance(value, torch.Tensor):
                    # Mark the tensor as used on the compute stream
                    # (see torch.Tensor.record_stream) before prefetching more.
                    sample[key].record_stream(torch.cuda.current_stream())
            self.preload()
        else:
            # throw stop exception if there is no more data to perform as a default dataloader
            raise StopIteration("No samples in loader. example: `iterator = iter(Prefetcher(loader)); "
                                "data = next(iterator)`")
        return sample
    def __iter__(self):
        self.loader = iter(self.orig_loader)
        self.preload()
        return self
| 1,461 | 33 | 113 | py |
FGI-Matting | FGI-Matting-main/dataloader/Test_dataset/Test_dataset.py | import torch
from torch.utils.data import DataLoader
import cv2
import numpy as np
from .image_file import ImageFileTrain, ImageFileTest
from .data_generator import DataGenerator
from .prefetcher import Prefetcher
from utils import CONFIG
def get_Test_dataloader():
    """Build the test-set DataLoader described by ``CONFIG.test``.

    The guidance map handed to the model is selected by
    ``CONFIG.test.guidancemap_phase``: one of ``trimap``, ``clickmap``,
    ``scribblemap`` or ``No_guidance`` (no guidance map at all).

    Returns:
        A batch-size-1, non-shuffled DataLoader over the test images.

    Raises:
        NotImplementedError: for an unrecognised guidancemap_phase.
    """
    test_merged = CONFIG.test.test_merged
    test_alpha = CONFIG.test.test_alpha
    test_trimap = CONFIG.test.test_trimap
    test_clickmap = CONFIG.test.test_clickmap
    test_scribblemap = CONFIG.test.test_scribblemap
    if CONFIG.test.guidancemap_phase == "trimap":
        test_guidancemap = test_trimap
    elif CONFIG.test.guidancemap_phase == "clickmap":
        test_guidancemap = test_clickmap
    elif CONFIG.test.guidancemap_phase == "scribblemap":
        test_guidancemap = test_scribblemap
    elif CONFIG.test.guidancemap_phase == "No_guidance":
        test_guidancemap = None
    else:
        # BUG FIX: the exception was previously instantiated but never
        # raised, silently falling back to no guidance for unknown phases.
        raise NotImplementedError("Unknown guidancemap type")
    print(test_merged)
    print(test_alpha)
    print(test_trimap)
    test_image_file = ImageFileTest(alpha_dir=test_alpha,
                                    merged_dir=test_merged,
                                    trimap_dir=test_trimap,
                                    guidancemap_dir=test_guidancemap)
    test_dataset = DataGenerator(test_image_file, phase='test', test_scale="origin")
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=4,
                                 drop_last=False)
    return test_dataloader
if __name__=="__main__":
    # NOTE(review): get_DIM_click_gradual_change_dataloader is not defined or
    # imported in this module, so this smoke-test block would raise NameError
    # if executed -- confirm the intended entry point.
    train_loader, testloader = get_DIM_click_gradual_change_dataloader(2,4)
    for i, data in enumerate(train_loader):
        image = data['image'][0]
        clickmap = data['clickmap'][0]
        alpha = data['alpha'][0]
        # cv2.imshow('image', image.numpy().transpose(1,2,0))
        # cv2.imshow('trimap', trimap.numpy())
        # cv2.imshow('alpha', alpha.numpy())
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
        print('image', image.size())
        print('clickmap', clickmap.size())
        print('alpha', alpha.size())
| 2,266 | 28.828947 | 85 | py |
arc | arc-master/third_party/cqr/torch_models.py |
import sys
import copy
import torch
import numpy as np
import torch.nn as nn
from cqr import helper
from sklearn.model_selection import train_test_split
# Run on the first GPU when available, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = "cuda:0"
else:
    device = "cpu"
###############################################################################
# Helper functions
###############################################################################
def epoch_internal_train(model, loss_func, x_train, y_train, batch_size, optimizer, cnt=0, best_cnt=np.inf):
    """ Sweep once over the data and update the model's parameters

    Parameters
    ----------
    model : class of neural net model
    loss_func : class of loss function
    x_train : pytorch tensor, n training features, each of dimension p (nXp)
    y_train : pytorch tensor, n training targets
    batch_size : integer, size of the mini-batch
    optimizer : class of SGD solver
    cnt : integer, counting the gradient steps taken so far
    best_cnt : integer, stop the sweep once cnt reaches this value.
        BUG FIX: the default was np.Inf, an alias removed in NumPy 2.0;
        np.inf is the supported spelling.

    Returns
    -------
    epoch_loss : mean loss value over the processed mini-batches
    cnt : integer, cumulative number of gradient steps
    """
    model.train()
    # Shuffle features and targets with the same random permutation.
    shuffle_idx = np.arange(x_train.shape[0])
    np.random.shuffle(shuffle_idx)
    x_train = x_train[shuffle_idx]
    y_train = y_train[shuffle_idx]
    epoch_losses = []
    for idx in range(0, x_train.shape[0], batch_size):
        cnt = cnt + 1
        optimizer.zero_grad()
        batch_x = x_train[idx : min(idx + batch_size, x_train.shape[0]),:]
        batch_y = y_train[idx : min(idx + batch_size, y_train.shape[0])]
        preds = model(batch_x)
        loss = loss_func(preds, batch_y)
        loss.backward()
        optimizer.step()
        epoch_losses.append(loss.cpu().detach().numpy())
        if cnt >= best_cnt:
            break  # early stop: gradient-step budget exhausted
    epoch_loss = np.mean(epoch_losses)
    return epoch_loss, cnt
def rearrange(all_quantiles, quantile_low, quantile_high, test_preds):
    """ Produce monotonic quantiles

    Parameters
    ----------
    all_quantiles : numpy array (q), grid of quantile levels in the range (0,1)
    quantile_low : float, desired low quantile in the range (0,1)
    quantile_high : float, desired high quantile in the range (0,1)
    test_preds : numpy array of predicted quantile (nXq)

    Returns
    -------
    q_fixed : numpy array (nX2), containing the rearranged estimates of the
              desired low and high quantile

    References
    ----------
    .. [1]  Chernozhukov, Victor, Ivan Fernandez-Val, and Alfred Galichon.
            "Quantile and probability curves without crossing."
            Econometrica 78.3 (2010): 1093-1125.
    """
    # Rescale the desired levels to positions inside the estimated grid
    scaling = all_quantiles[-1] - all_quantiles[0]
    low_val = (quantile_low - all_quantiles[0])/scaling
    high_val = (quantile_high - all_quantiles[0])/scaling
    # Taking empirical quantiles of the per-row predictions enforces
    # monotonicity (rearrangement). Fix: the deprecated/removed
    # `interpolation='linear'` keyword is dropped; linear is the default.
    q_fixed = np.quantile(test_preds, (low_val, high_val), axis=1)
    return q_fixed.T
###############################################################################
# Deep conditional mean regression
# Minimizing MSE loss
###############################################################################
# Define the network
class mse_model(nn.Module):
    """ Conditional mean estimator, formulated as a two-hidden-layer MLP.

    Maps a p-dimensional feature vector to a single scalar prediction.
    """
    def __init__(self,
                 in_shape=1,
                 hidden_size=64,
                 dropout=0.5):
        """ Initialization

        Parameters
        ----------
        in_shape : integer, input signal dimension (p)
        hidden_size : integer, hidden layer dimension
        dropout : float, dropout rate
        """
        super().__init__()
        self.in_shape = in_shape
        self.out_shape = 1
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.build_model()
        self.init_weights()

    def build_model(self):
        """ Assemble the network: Linear -> ReLU -> Dropout, twice, then a
        final Linear head producing one output.
        """
        layers = [
            nn.Linear(self.in_shape, self.hidden_size),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Linear(self.hidden_size, 1),
        ]
        self.base_model = nn.Sequential(*layers)

    def init_weights(self):
        """ Orthogonal weights and zero biases for every linear layer. """
        for layer in self.base_model:
            if isinstance(layer, nn.Linear):
                nn.init.orthogonal_(layer.weight)
                nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        """ Run forward pass and drop the trailing singleton dimension. """
        return torch.squeeze(self.base_model(x))
# Define the training procedure
class LearnerOptimized:
    """ Fit a neural network (conditional mean) to training data.

    Two-phase training: a train/validation split first selects the best epoch
    (and cumulative gradient-step budget), then the stored model is re-fitted
    on all the data for that budget.
    """
    def __init__(self, model, optimizer_class, loss_func, device='cpu', test_ratio=0.2, random_state=0):
        """ Initialization

        Parameters
        ----------
        model : class of neural network model
        optimizer_class : class of SGD optimizer (e.g. Adam)
        loss_func : loss to minimize
        device : string, "cuda:0" or "cpu"
        test_ratio : float, test size used in cross-validation (CV)
        random_state : int, seed to be used in CV when splitting to train-test
        """
        self.model = model.to(device)
        self.optimizer_class = optimizer_class
        self.optimizer = optimizer_class(self.model.parameters())
        self.loss_func = loss_func.to(device)
        self.device = device
        self.test_ratio = test_ratio
        self.random_state = random_state
        self.loss_history = []
        self.test_loss_history = []
        self.full_loss_history = []

    def fit(self, x, y, epochs, batch_size, verbose=False):
        """ Fit the model to data

        Parameters
        ----------
        x : numpy array, containing the training features (nXp)
        y : numpy array, containing the training labels (n)
        epochs : integer, maximal number of epochs
        batch_size : integer, mini-batch size for SGD
        verbose : boolean, print progress every 100 epochs if True
        """
        sys.stdout.flush()
        # Phase 1: train a throwaway copy on a train/validation split to pick
        # the best epoch and cumulative step count.
        model = copy.deepcopy(self.model)
        # Bug fix: honor the device chosen at construction time (self.device)
        # instead of the module-level global `device`.
        model = model.to(self.device)
        optimizer = self.optimizer_class(model.parameters())
        best_epoch = epochs
        x_train, xx, y_train, yy = train_test_split(x, y, test_size=self.test_ratio,random_state=self.random_state)
        x_train = torch.from_numpy(x_train).float().to(self.device).requires_grad_(False)
        xx = torch.from_numpy(xx).float().to(self.device).requires_grad_(False)
        y_train = torch.from_numpy(y_train).float().to(self.device).requires_grad_(False)
        yy = torch.from_numpy(yy).float().to(self.device).requires_grad_(False)
        best_cnt = 1e10
        best_test_epoch_loss = 1e10
        cnt = 0
        for e in range(epochs):
            epoch_loss, cnt = epoch_internal_train(model, self.loss_func, x_train, y_train, batch_size, optimizer, cnt)
            self.loss_history.append(epoch_loss)
            # Evaluate on the held-out split
            model.eval()
            preds = model(xx)
            test_preds = preds.cpu().detach().numpy()
            test_preds = np.squeeze(test_preds)
            test_epoch_loss = self.loss_func(preds, yy).cpu().detach().numpy()
            self.test_loss_history.append(test_epoch_loss)
            # Track the epoch/step budget with the lowest validation loss
            if (test_epoch_loss <= best_test_epoch_loss):
                best_test_epoch_loss = test_epoch_loss
                best_epoch = e
                best_cnt = cnt
            if (e+1) % 100 == 0 and verbose:
                print("CV: Epoch {}: Train {}, Test {}, Best epoch {}, Best loss {}".format(e+1, epoch_loss, test_epoch_loss, best_epoch, best_test_epoch_loss))
                sys.stdout.flush()
        # Phase 2: use all the data to train the stored model, for best_cnt steps
        x = torch.from_numpy(x).float().to(self.device).requires_grad_(False)
        y = torch.from_numpy(y).float().to(self.device).requires_grad_(False)
        cnt = 0
        for e in range(best_epoch+1):
            if cnt > best_cnt:
                break
            epoch_loss, cnt = epoch_internal_train(self.model, self.loss_func, x, y, batch_size, self.optimizer, cnt, best_cnt)
            self.full_loss_history.append(epoch_loss)
            if (e+1) % 100 == 0 and verbose:
                print("Full: Epoch {}: {}, cnt {}".format(e+1, epoch_loss, cnt))
                sys.stdout.flush()

    def predict(self, x):
        """ Estimate the label given the features

        Parameters
        ----------
        x : numpy array of training features (nXp)

        Returns
        -------
        ret_val : numpy array of predicted labels (n)
        """
        self.model.eval()
        # NOTE(review): x is not cast to float32 here, unlike in fit;
        # callers appear to pass float arrays already -- confirm.
        ret_val = self.model(torch.from_numpy(x).to(self.device).requires_grad_(False)).cpu().detach().numpy()
        return ret_val
##############################################################################
# Quantile regression
# Implementation inspired by:
# https://github.com/ceshine/quantile-regression-tensorflow
##############################################################################
class AllQuantileLoss(nn.Module):
    """ Pinball (quantile) loss: summed over quantile levels, averaged over
    samples.
    """
    def __init__(self, quantiles):
        """ Initialize

        Parameters
        ----------
        quantiles : pytorch vector of quantile levels, each in the range (0,1)
        """
        super().__init__()
        self.quantiles = quantiles

    def forward(self, preds, target):
        """ Compute the pinball loss

        Parameters
        ----------
        preds : pytorch tensor of estimated labels (n)
        target : pytorch tensor of true labels (n)

        Returns
        -------
        loss : cost function value
        """
        assert not target.requires_grad
        assert preds.size(0) == target.size(0)
        per_level = []
        for col, level in enumerate(self.quantiles):
            # Asymmetric absolute loss: under-prediction is weighted by the
            # level, over-prediction by (1 - level).
            diff = target - preds[:, col]
            pinball = torch.max((level - 1) * diff, level * diff)
            per_level.append(pinball.unsqueeze(1))
        stacked = torch.cat(per_level, dim=1)
        return torch.mean(torch.sum(stacked, dim=1))
class all_q_model(nn.Module):
    """ Conditional quantile estimator: an MLP with one output per requested
    quantile level.
    """
    def __init__(self,
                 quantiles,
                 in_shape=1,
                 hidden_size=64,
                 dropout=0.5):
        """ Initialization

        Parameters
        ----------
        quantiles : numpy array of quantile levels (q), each in the range (0,1)
        in_shape : integer, input signal dimension (p)
        hidden_size : integer, hidden layer dimension
        dropout : float, dropout rate
        """
        super().__init__()
        self.quantiles = quantiles
        self.num_quantiles = len(quantiles)
        self.hidden_size = hidden_size
        self.in_shape = in_shape
        self.out_shape = len(quantiles)
        self.dropout = dropout
        self.build_model()
        self.init_weights()

    def build_model(self):
        """ Assemble the network: two Linear -> ReLU -> Dropout stages and a
        final Linear head with one column per quantile level.
        """
        layers = [
            nn.Linear(self.in_shape, self.hidden_size),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Linear(self.hidden_size, self.num_quantiles),
        ]
        self.base_model = nn.Sequential(*layers)

    def init_weights(self):
        """ Orthogonal weights and zero biases for every linear layer. """
        for layer in self.base_model:
            if isinstance(layer, nn.Linear):
                nn.init.orthogonal_(layer.weight)
                nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        """ Run forward pass, returning an (n, q) tensor of quantiles. """
        return self.base_model(x)
class LearnerOptimizedCrossing:
    """ Fit a neural network (conditional quantile) to training data.

    Two-phase training: a train/validation split first selects the epoch/step
    budget whose prediction band attains the target coverage with the shortest
    average length, then the stored model is re-fitted on all the data for
    that budget.
    """
    def __init__(self, model, optimizer_class, loss_func, device='cpu', test_ratio=0.2, random_state=0,
                 qlow=0.05, qhigh=0.95, use_rearrangement=False):
        """ Initialization

        Parameters
        ----------
        model : class of neural network model
        optimizer_class : class of SGD optimizer (e.g. pytorch's Adam)
        loss_func : loss to minimize
        device : string, "cuda:0" or "cpu"
        test_ratio : float, test size used in cross-validation (CV)
        random_state : integer, seed used in CV when splitting to train-test
        qlow : float, low quantile level in the range (0,1)
        qhigh : float, high quantile level in the range (0,1)
        use_rearrangement : boolean, use the rearrangement algorithm (True)
                            of not (False)
        """
        self.model = model.to(device)
        self.use_rearrangement = use_rearrangement
        self.compute_coverage = True
        self.quantile_low = qlow
        self.quantile_high = qhigh
        self.target_coverage = 100.0*(self.quantile_high - self.quantile_low)
        self.all_quantiles = loss_func.quantiles
        self.optimizer_class = optimizer_class
        self.optimizer = optimizer_class(self.model.parameters())
        self.loss_func = loss_func.to(device)
        self.device = device
        self.test_ratio = test_ratio
        self.random_state = random_state
        self.loss_history = []
        self.test_loss_history = []
        self.full_loss_history = []

    def fit(self, x, y, epochs, batch_size, verbose=False):
        """ Fit the model to data

        Parameters
        ----------
        x : numpy array of training features (nXp)
        y : numpy array of training labels (n)
        epochs : integer, maximal number of epochs
        batch_size : integer, mini-batch size used in SGD solver
        verbose : boolean, print progress every 100 epochs if True
        """
        sys.stdout.flush()
        # Phase 1: train a throwaway copy on a train/validation split to pick
        # the best epoch and cumulative step count.
        model = copy.deepcopy(self.model)
        # Bug fix: honor the device chosen at construction time (self.device)
        # instead of the module-level global `device`.
        model = model.to(self.device)
        optimizer = self.optimizer_class(model.parameters())
        best_epoch = epochs
        x_train, xx, y_train, yy = train_test_split(x,
                                                    y,
                                                    test_size=self.test_ratio,
                                                    random_state=self.random_state)
        x_train = torch.from_numpy(x_train).float().to(self.device).requires_grad_(False)
        xx = torch.from_numpy(xx).float().to(self.device).requires_grad_(False)
        y_train = torch.from_numpy(y_train).float().to(self.device).requires_grad_(False)
        # Keep a numpy copy of the validation labels for coverage computation
        yy_cpu = yy
        yy = torch.from_numpy(yy).float().to(self.device).requires_grad_(False)
        best_avg_length = 1e10
        best_coverage = 0
        best_cnt = 1e10
        cnt = 0
        for e in range(epochs):
            model.train()
            epoch_loss, cnt = epoch_internal_train(model, self.loss_func, x_train, y_train, batch_size, optimizer, cnt)
            self.loss_history.append(epoch_loss)
            # Evaluate the band on the held-out split
            model.eval()
            preds = model(xx)
            test_epoch_loss = self.loss_func(preds, yy).cpu().detach().numpy()
            self.test_loss_history.append(test_epoch_loss)
            test_preds = preds.cpu().detach().numpy()
            test_preds = np.squeeze(test_preds)
            if self.use_rearrangement:
                test_preds = rearrange(self.all_quantiles, self.quantile_low, self.quantile_high, test_preds)
            # NOTE(review): without rearrangement this assumes the low/high
            # quantiles sit in columns 0 and 1 of the network output -- confirm
            # when more than two quantile levels are used.
            y_lower = test_preds[:,0]
            y_upper = test_preds[:,1]
            coverage, avg_length = helper.compute_coverage_len(yy_cpu, y_lower, y_upper)
            # Keep the shortest band among those that reach target coverage
            if (coverage >= self.target_coverage) and (avg_length < best_avg_length):
                best_avg_length = avg_length
                best_coverage = coverage
                best_epoch = e
                best_cnt = cnt
            if (e+1) % 100 == 0 and verbose:
                print("CV: Epoch {}: Train {}, Test {}, Best epoch {}, Best Coverage {} Best Length {} Cur Coverage {}".format(e+1, epoch_loss, test_epoch_loss, best_epoch, best_coverage, best_avg_length, coverage))
                sys.stdout.flush()
        # Phase 2: re-train the stored model on all the data for best_cnt steps
        x = torch.from_numpy(x).float().to(self.device).requires_grad_(False)
        y = torch.from_numpy(y).float().to(self.device).requires_grad_(False)
        cnt = 0
        for e in range(best_epoch+1):
            if cnt > best_cnt:
                break
            epoch_loss, cnt = epoch_internal_train(self.model, self.loss_func, x, y, batch_size, self.optimizer, cnt, best_cnt)
            self.full_loss_history.append(epoch_loss)
            if (e+1) % 100 == 0 and verbose:
                print("Full: Epoch {}: {}, cnt {}".format(e+1, epoch_loss, cnt))
                sys.stdout.flush()

    def predict(self, x):
        """ Estimate the conditional low and high quantile given the features

        Parameters
        ----------
        x : numpy array of training features (nXp)

        Returns
        -------
        test_preds : numpy array of predicted low and high quantiles (nX2)
        """
        self.model.eval()
        test_preds = self.model(torch.from_numpy(x).to(self.device).requires_grad_(False)).cpu().detach().numpy()
        if self.use_rearrangement:
            test_preds = rearrange(self.all_quantiles, self.quantile_low, self.quantile_high, test_preds)
        else:
            # Crossing correction: force column 0 <= column 1 per row
            test_preds[:,0] = np.min(test_preds,axis=1)
            test_preds[:,1] = np.max(test_preds,axis=1)
        return test_preds
| 17,313 | 33.217391 | 215 | py |
arc | arc-master/third_party/cqr/helper.py |
import sys
import torch
import numpy as np
from cqr import torch_models
from functools import partial
from cqr import tune_params_cv
from nonconformist.cp import IcpRegressor
from nonconformist.base import RegressorAdapter
from skgarden import RandomForestQuantileRegressor
if torch.cuda.is_available():
device = "cuda:0"
else:
device = "cpu"
def compute_coverage_len(y_test, y_lower, y_upper):
    """ Compute average coverage and length of prediction intervals

    Parameters
    ----------
    y_test : numpy array, true labels (n)
    y_lower : numpy array, estimated lower bound for the labels (n)
    y_upper : numpy array, estimated upper bound for the labels (n)

    Returns
    -------
    coverage : float, average coverage (percentage)
    avg_length : float, average interval length
    """
    covered = (y_test >= y_lower) & (y_test <= y_upper)
    coverage = np.sum(covered) / len(y_test) * 100
    avg_length = np.mean(np.abs(y_upper - y_lower))
    return coverage, avg_length
def run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance, condition=None):
    """ Run split conformal method

    Parameters
    ----------
    nc : class of nonconformist object
    X_train : numpy array, training features (n1Xp)
    y_train : numpy array, training labels (n1)
    X_test : numpy array, testing features (n2Xp)
    idx_train : numpy array, indices of proper training set examples
    idx_cal : numpy array, indices of calibration set examples
    significance : float, significance level (e.g. 0.1)
    condition : function, mapping feature vector to group id

    Returns
    -------
    y_lower : numpy array, estimated lower bound for the labels (n2)
    y_upper : numpy array, estimated upper bound for the labels (n2)
    """
    icp = IcpRegressor(nc, condition=condition)
    # Train the underlying model on the proper training subset
    icp.fit(X_train[idx_train, :], y_train[idx_train])
    # Compute nonconformity scores on the held-out calibration subset
    icp.calibrate(X_train[idx_cal, :], y_train[idx_cal])
    # Build prediction intervals for the test points at the given level
    intervals = icp.predict(X_test, significance=significance)
    return intervals[:, 0], intervals[:, 1]
def run_icp_sep(nc, X_train, y_train, X_test, idx_train, idx_cal, significance, condition):
    """ Run split conformal method, train a separate regressor for each group

    Parameters
    ----------
    nc : list/sequence of nonconformist objects, one per group (ordered to
         match the sorted group ids found in the proper training set)
    X_train : numpy array, training features (n1Xp)
    y_train : numpy array, training labels (n1)
    X_test : numpy array, testing features (n2Xp)
    idx_train : numpy array, indices of proper training set examples
    idx_cal : numpy array, indices of calibration set examples
    significance : float, significance level (e.g. 0.1)
    condition : function, mapping a (features, label) pair to a group id

    Returns
    -------
    y_lower : numpy array, estimated lower bound for the labels (n2)
    y_upper : numpy array, estimated upper bound for the labels (n2)
    """
    X_proper_train = X_train[idx_train,:]
    y_proper_train = y_train[idx_train]
    X_calibration = X_train[idx_cal,:]
    y_calibration = y_train[idx_cal]
    # Group id of every example; test labels are unknown, so the condition
    # receives (x, None) for test points.
    category_map_proper_train = np.array([condition((X_proper_train[i, :], y_proper_train[i])) for i in range(y_proper_train.size)])
    category_map_calibration = np.array([condition((X_calibration[i, :], y_calibration[i])) for i in range(y_calibration.size)])
    category_map_test = np.array([condition((X_test[i, :], None)) for i in range(X_test.shape[0])])
    # Groups are taken from the proper training set; np.unique returns them
    # sorted, and nc[cnt] is consumed in that same order.
    categories = np.unique(category_map_proper_train)
    y_lower = np.zeros(X_test.shape[0])
    y_upper = np.zeros(X_test.shape[0])
    cnt = 0
    for cond in categories:
        # One independent conformal regressor per group
        icp = IcpRegressor(nc[cnt])
        idx_proper_train_group = category_map_proper_train == cond
        # Fit the ICP using the proper training set
        icp.fit(X_proper_train[idx_proper_train_group,:], y_proper_train[idx_proper_train_group])
        idx_calibration_group = category_map_calibration == cond
        # Calibrate the ICP using the calibration set
        icp.calibrate(X_calibration[idx_calibration_group,:], y_calibration[idx_calibration_group])
        idx_test_group = category_map_test == cond
        # Produce predictions for the test points belonging to this group
        predictions = icp.predict(X_test[idx_test_group,:], significance=significance)
        y_lower[idx_test_group] = predictions[:,0]
        y_upper[idx_test_group] = predictions[:,1]
        cnt = cnt + 1
    return y_lower, y_upper
def compute_coverage(y_test,y_lower,y_upper,significance,name=""):
    """ Compute average coverage and length, and print results

    Parameters
    ----------
    y_test : numpy array, true labels (n)
    y_lower : numpy array, estimated lower bound for the labels (n)
    y_upper : numpy array, estimated upper bound for the labels (n)
    significance : float, desired significance level
    name : string, optional output string (e.g. the method name)

    Returns
    -------
    coverage : float, average coverage (percentage)
    avg_length : float, average interval length
    """
    in_the_range = np.sum((y_test >= y_lower) & (y_test <= y_upper))
    coverage = in_the_range / len(y_test) * 100
    print("%s: Percentage in the range (expecting %.2f): %f" % (name, 100 - significance*100, coverage))
    sys.stdout.flush()
    # Bug fix: average the per-interval absolute lengths, matching
    # compute_coverage_len. The previous abs(np.mean(y_lower - y_upper))
    # collapsed the sign before averaging and under-reported the average
    # length whenever any interval was inverted (lower > upper).
    avg_length = np.mean(np.abs(y_upper - y_lower))
    print("%s: Average length: %f" % (name, avg_length))
    sys.stdout.flush()
    return coverage, avg_length
def compute_coverage_per_sample(y_test,y_lower,y_upper,significance,name="",x_test=None,condition=None):
    """ Compute per-sample coverage and length (optionally per group), and
    print summary results

    Parameters
    ----------
    y_test : numpy array, true labels (n)
    y_lower : numpy array, estimated lower bound for the labels (n)
    y_upper : numpy array, estimated upper bound for the labels (n)
    significance : float, desired significance level
    name : string, optional output string (e.g. the method name)
    x_test : numpy array, test features (required when condition is given)
    condition : function, mapping a (features, label) pair to a group id

    Returns
    -------
    coverage : boolean numpy array (n) of per-sample hits, or an object array
               with one boolean array per group when condition is given
    length : numpy array (n) of per-sample lengths, or an object array with
             one length array per group when condition is given
    """
    if condition is not None:
        # Per-group results: one entry per unique group id
        category_map = np.array([condition((x_test[i, :], y_test[i])) for i in range(y_test.size)])
        categories = np.unique(category_map)
        # Bug fix: the np.object alias was removed in NumPy 1.24; the builtin
        # `object` is the supported spelling for ragged object arrays.
        coverage = np.empty(len(categories), dtype=object)
        length = np.empty(len(categories), dtype=object)
        cnt = 0
        for cond in categories:
            idx = category_map == cond
            coverage[cnt] = (y_test[idx] >= y_lower[idx]) & (y_test[idx] <= y_upper[idx])
            coverage_avg = np.sum( coverage[cnt] ) / len(y_test[idx]) * 100
            print("%s: Group %d : Percentage in the range (expecting %.2f): %f" % (name, cond, 100 - significance*100, coverage_avg))
            sys.stdout.flush()
            length[cnt] = abs(y_upper[idx] - y_lower[idx])
            print("%s: Group %d : Average length: %f" % (name, cond, np.mean(length[cnt])))
            sys.stdout.flush()
            cnt = cnt + 1
    else:
        # Marginal (ungrouped) results
        coverage = (y_test >= y_lower) & (y_test <= y_upper)
        coverage_avg = np.sum(coverage) / len(y_test) * 100
        print("%s: Percentage in the range (expecting %.2f): %f" % (name, 100 - significance*100, coverage_avg))
        sys.stdout.flush()
        length = abs(y_upper - y_lower)
        print("%s: Average length: %f" % (name, np.mean(length)))
        sys.stdout.flush()
    return coverage, length
def plot_func_data(y_test,y_lower,y_upper,name=""):
    """ Plot the test labels along with the constructed prediction band

    Produces two figures: first with samples ordered by interval length and
    each interval centered at zero, then with samples ordered by the response
    value (uncentered).

    Parameters
    ----------
    y_test : numpy array, true labels (n)
    y_lower : numpy array, estimated lower bound for the labels (n)
    y_upper : numpy array, estimated upper bound for the labels (n)
    name : string, optional output string (e.g. the method name)
    """
    # allowed to import graphics
    import matplotlib.pyplot as plt
    # Figure 1: order samples by interval length
    interval = y_upper - y_lower
    sort_ind = np.argsort(interval)
    # Fancy indexing returns copies, so the in-place centering below does not
    # modify the caller's arrays.
    y_test_sorted = y_test[sort_ind]
    upper_sorted = y_upper[sort_ind]
    lower_sorted = y_lower[sort_ind]
    mean = (upper_sorted + lower_sorted) / 2
    # Center such that the mean of the prediction interval is at 0.0
    y_test_sorted -= mean
    upper_sorted -= mean
    lower_sorted -= mean
    plt.plot(y_test_sorted, "ro")
    plt.fill_between(
        np.arange(len(upper_sorted)), lower_sorted, upper_sorted, alpha=0.2, color="r",
        label="Pred. interval")
    plt.xlabel("Ordered samples")
    plt.ylabel("Values and prediction intervals")
    plt.title(name)
    plt.show()
    # Figure 2: order samples by the response value, no centering
    interval = y_upper - y_lower
    sort_ind = np.argsort(y_test)
    y_test_sorted = y_test[sort_ind]
    upper_sorted = y_upper[sort_ind]
    lower_sorted = y_lower[sort_ind]
    plt.plot(y_test_sorted, "ro")
    plt.fill_between(
        np.arange(len(upper_sorted)), lower_sorted, upper_sorted, alpha=0.2, color="r",
        label="Pred. interval")
    plt.xlabel("Ordered samples by response")
    plt.ylabel("Values and prediction intervals")
    plt.title(name)
    plt.show()
###############################################################################
# Deep conditional mean regression
# Minimizing MSE loss
###############################################################################
class MSENet_RegressorAdapter(RegressorAdapter):
    """ Conditional mean estimator, formulated as neural net

    Adapts torch_models.mse_model / LearnerOptimized to the nonconformist
    RegressorAdapter interface so it can be used inside conformal prediction.
    """
    def __init__(self,
                 model,
                 fit_params=None,
                 in_shape=1,
                 hidden_size=1,
                 learn_func=torch.optim.Adam,
                 epochs=1000,
                 batch_size=10,
                 dropout=0.1,
                 lr=0.01,
                 wd=1e-6,
                 test_ratio=0.2,
                 random_state=0):
        """ Initialization

        Parameters
        ----------
        model : unused parameter (for compatibility with nc class)
        fit_params : unused parameter (for compatibility with nc class)
        in_shape : integer, input signal dimension
        hidden_size : integer, hidden layer dimension
        learn_func : class of Pytorch's SGD optimizer
        epochs : integer, maximal number of epochs
        batch_size : integer, mini-batch size for SGD
        dropout : float, dropout rate
        lr : float, learning rate for SGD
        wd : float, weight decay
        test_ratio : float, ratio of held-out data, used in cross-validation
        random_state : integer, seed for splitting the data in cross-validation
        """
        super(MSENet_RegressorAdapter, self).__init__(model, fit_params)
        # Instantiate model
        self.epochs = epochs
        self.batch_size = batch_size
        self.dropout = dropout
        self.lr = lr
        self.wd = wd
        self.test_ratio = test_ratio
        self.random_state = random_state
        self.model = torch_models.mse_model(in_shape=in_shape, hidden_size=hidden_size, dropout=dropout)
        self.loss_func = torch.nn.MSELoss()
        # `device` is the module-level global selected at import time
        # ("cuda:0" when available, otherwise "cpu")
        self.learner = torch_models.LearnerOptimized(self.model,
                                                     partial(learn_func, lr=lr, weight_decay=wd),
                                                     self.loss_func,
                                                     device=device,
                                                     test_ratio=self.test_ratio,
                                                     random_state=self.random_state)

    def fit(self, x, y):
        """ Fit the model to data

        Parameters
        ----------
        x : numpy array of training features (nXp)
        y : numpy array of training labels (n)
        """
        self.learner.fit(x, y, self.epochs, batch_size=self.batch_size)

    def predict(self, x):
        """ Estimate the label given the features

        Parameters
        ----------
        x : numpy array of training features (nXp)

        Returns
        -------
        ret_val : numpy array of predicted labels (n)
        """
        return self.learner.predict(x)
###############################################################################
# Deep neural network for conditional quantile regression
# Minimizing pinball loss
###############################################################################
class AllQNet_RegressorAdapter(RegressorAdapter):
    """ Conditional quantile estimator, formulated as neural net

    Adapts torch_models.all_q_model / LearnerOptimizedCrossing to the
    nonconformist RegressorAdapter interface.
    """
    def __init__(self,
                 model,
                 fit_params=None,
                 in_shape=1,
                 hidden_size=1,
                 quantiles=[.05, .95],
                 learn_func=torch.optim.Adam,
                 epochs=1000,
                 batch_size=10,
                 dropout=0.1,
                 lr=0.01,
                 wd=1e-6,
                 test_ratio=0.2,
                 random_state=0,
                 use_rearrangement=False):
        """ Initialization

        Parameters
        ----------
        model : None, unused parameter (for compatibility with nc class)
        fit_params : None, unused parameter (for compatibility with nc class)
        in_shape : integer, input signal dimension
        hidden_size : integer, hidden layer dimension
        quantiles : numpy array, low and high quantile levels in range (0,1)
        learn_func : class of Pytorch's SGD optimizer
        epochs : integer, maximal number of epochs
        batch_size : integer, mini-batch size for SGD
        dropout : float, dropout rate
        lr : float, learning rate for SGD
        wd : float, weight decay
        test_ratio : float, ratio of held-out data, used in cross-validation
        random_state : integer, seed for splitting the data in cross-validation
        use_rearrangement : boolean, use the rearrangement algorithm (True)
                            of not (False). See reference [1].

        References
        ----------
        .. [1]  Chernozhukov, Victor, Ivan Fernandez-Val, and Alfred Galichon.
                "Quantile and probability curves without crossing."
                Econometrica 78.3 (2010): 1093-1125.
        """
        super(AllQNet_RegressorAdapter, self).__init__(model, fit_params)
        # Instantiate model
        self.quantiles = quantiles
        # Rearrangement needs a dense grid of quantile levels to rearrange
        # over; otherwise only the requested low/high levels are estimated.
        if use_rearrangement:
            self.all_quantiles = torch.from_numpy(np.linspace(0.01,0.99,99)).float()
        else:
            self.all_quantiles = self.quantiles
        self.epochs = epochs
        self.batch_size = batch_size
        self.dropout = dropout
        self.lr = lr
        self.wd = wd
        self.test_ratio = test_ratio
        self.random_state = random_state
        self.model = torch_models.all_q_model(quantiles=self.all_quantiles,
                                              in_shape=in_shape,
                                              hidden_size=hidden_size,
                                              dropout=dropout)
        self.loss_func = torch_models.AllQuantileLoss(self.all_quantiles)
        # `device` is the module-level global selected at import time
        self.learner = torch_models.LearnerOptimizedCrossing(self.model,
                                                             partial(learn_func, lr=lr, weight_decay=wd),
                                                             self.loss_func,
                                                             device=device,
                                                             test_ratio=self.test_ratio,
                                                             random_state=self.random_state,
                                                             qlow=self.quantiles[0],
                                                             qhigh=self.quantiles[1],
                                                             use_rearrangement=use_rearrangement)

    def fit(self, x, y):
        """ Fit the model to data

        Parameters
        ----------
        x : numpy array of training features (nXp)
        y : numpy array of training labels (n)
        """
        self.learner.fit(x, y, self.epochs, self.batch_size)

    def predict(self, x):
        """ Estimate the conditional low and high quantiles given the features

        Parameters
        ----------
        x : numpy array of training features (nXp)

        Returns
        -------
        ret_val : numpy array of estimated conditional quantiles (nX2)
        """
        return self.learner.predict(x)
###############################################################################
# Quantile random forests model
###############################################################################
class QuantileForestRegressorAdapter(RegressorAdapter):
    """ Conditional quantile estimator, defined as quantile random forests (QRF)

    Note: unlike the neural-net adapters, the quantile levels here are
    percentages in (0,100), as expected by skgarden's predict(quantile=...).

    References
    ----------
    .. [1]  Meinshausen, Nicolai. "Quantile regression forests."
            Journal of Machine Learning Research 7.Jun (2006): 983-999.
    """
    def __init__(self,
                 model,
                 fit_params=None,
                 quantiles=[5, 95],
                 params=None):
        """ Initialization

        Parameters
        ----------
        model : None, unused parameter (for compatibility with nc class)
        fit_params : None, unused parameter (for compatibility with nc class)
        quantiles : numpy array, low and high quantile levels in range (0,100)
        params : dictionary of parameters
                params["random_state"] : integer, seed for splitting the data
                                         in cross-validation. Also used as the
                                         seed in quantile random forests (QRF)
                params["min_samples_leaf"] : integer, parameter of QRF
                params["n_estimators"] : integer, parameter of QRF
                params["max_features"] : integer, parameter of QRF
                params["CV"] : boolean, use cross-validation (True) or
                               not (False) to tune the two QRF quantile levels
                               to obtain the desired coverage
                params["test_ratio"] : float, ratio of held-out data, used
                                       in cross-validation
                params["coverage_factor"] : float, to avoid too conservative
                                            estimation of the prediction band,
                                            when tuning the two QRF quantile
                                            levels in cross-validation one may
                                            ask for prediction intervals with
                                            reduced average coverage, equal to
                                            coverage_factor*(q_high - q_low).
                params["range_vals"] : float, determines the lowest and highest
                                       quantile level parameters when tuning
                                       the quanitle levels bt cross-validation.
                                       The smallest value is equal to
                                       quantiles[0] - range_vals.
                                       Similarly, the largest is equal to
                                       quantiles[1] + range_vals.
                params["num_vals"] : integer, when tuning QRF's quantile
                                     parameters, sweep over a grid of length
                                     num_vals.
        """
        super(QuantileForestRegressorAdapter, self).__init__(model, fit_params)
        # Instantiate model
        self.quantiles = quantiles
        # cv_quantiles may be replaced by CV-tuned levels in fit()
        self.cv_quantiles = self.quantiles
        self.params = params
        self.rfqr = RandomForestQuantileRegressor(random_state=params["random_state"],
                                                  min_samples_leaf=params["min_samples_leaf"],
                                                  n_estimators=params["n_estimators"],
                                                  max_features=params["max_features"])

    def fit(self, x, y):
        """ Fit the model to data

        Parameters
        ----------
        x : numpy array of training features (nXp)
        y : numpy array of training labels (n)
        """
        if self.params["CV"]:
            # Tune the two QRF quantile levels by cross-validation so that the
            # resulting band attains (a possibly relaxed) target coverage.
            target_coverage = self.quantiles[1] - self.quantiles[0]
            coverage_factor = self.params["coverage_factor"]
            range_vals = self.params["range_vals"]
            num_vals = self.params["num_vals"]
            # Candidate grid: the low level moves up, the high level moves down
            grid_q_low = np.linspace(self.quantiles[0],self.quantiles[0]+range_vals,num_vals).reshape(-1,1)
            grid_q_high = np.linspace(self.quantiles[1],self.quantiles[1]-range_vals,num_vals).reshape(-1,1)
            grid_q = np.concatenate((grid_q_low,grid_q_high),1)
            self.cv_quantiles = tune_params_cv.CV_quntiles_rf(self.params,
                                                              x,
                                                              y,
                                                              target_coverage,
                                                              grid_q,
                                                              self.params["test_ratio"],
                                                              self.params["random_state"],
                                                              coverage_factor)
        self.rfqr.fit(x, y)

    def predict(self, x):
        """ Estimate the conditional low and high quantiles given the features

        Parameters
        ----------
        x : numpy array of training features (nXp)

        Returns
        -------
        ret_val : numpy array of estimated conditional quantiles (nX2)
        """
        lower = self.rfqr.predict(x, quantile=self.cv_quantiles[0])
        upper = self.rfqr.predict(x, quantile=self.cv_quantiles[1])
        ret_val = np.zeros((len(lower),2))
        ret_val[:,0] = lower
        ret_val[:,1] = upper
        return ret_val
| 22,414 | 36.927242 | 133 | py |
arc | arc-master/third_party/cqr_comparison/qr_net.py | import numpy as np
import torch
from functools import partial
import pdb
import os, sys
sys.path.insert(0, os.path.abspath("../third_party/"))
from cqr import torch_models
from nonconformist.base import RegressorAdapter
if torch.cuda.is_available():
device = "cuda:0"
else:
device = "cpu"
class NeuralNetworkQR:
    """ Conditional quantile estimator, formulated as neural net

    Thin wrapper around torch_models.all_q_model / LearnerOptimizedCrossing
    driven by a single `params` dictionary.
    """
    def __init__(self, params, quantiles, verbose=False):
        """ Initialization

        Parameters
        ----------
        params : dictionary with keys 'epochs', 'batch_size', 'dropout',
                 'lr', 'wd', 'test_ratio', 'random_state', 'in_shape',
                 'hidden_size' (see usage below)
        quantiles : numpy array of quantile levels, each in range (0,1);
                    the first and last levels define the prediction band
        verbose : boolean, print training progress if True

        References
        ----------
        .. [1]  Chernozhukov, Victor, Ivan Fernandez-Val, and Alfred Galichon.
                "Quantile and probability curves without crossing."
                Econometrica 78.3 (2010): 1093-1125.
        """
        # Store parameters
        self.params = params
        self.quantiles = quantiles
        self.verbose = verbose
        # Instantiate model
        self.epochs = params['epochs']
        self.batch_size = params['batch_size']
        dropout = params['dropout']
        lr = params['lr']
        wd = params['wd']
        self.test_ratio = params['test_ratio']
        self.random_state = params['random_state']
        use_rearrangement = False
        in_shape = params['in_shape']
        hidden_size = params['hidden_size']
        learn_func = torch.optim.Adam
        self.model = torch_models.all_q_model(quantiles=self.quantiles,
                                              in_shape=in_shape,
                                              hidden_size=hidden_size,
                                              dropout=dropout)
        self.loss_func = torch_models.AllQuantileLoss(self.quantiles)
        # `device` is the module-level global selected at import time
        self.learner = torch_models.LearnerOptimizedCrossing(self.model,
                                                             partial(learn_func, lr=lr, weight_decay=wd),
                                                             self.loss_func,
                                                             device=device,
                                                             test_ratio=self.test_ratio,
                                                             random_state=self.random_state,
                                                             qlow=self.quantiles[0],
                                                             qhigh=self.quantiles[-1],
                                                             use_rearrangement=use_rearrangement)

    def fit(self, X, y, cv=False):
        """ Fit the model to data

        Parameters
        ----------
        X : numpy array of training features (nXp)
        y : numpy array of training labels (n)
        cv : boolean, currently unused (kept for interface compatibility)
        """
        self.learner.fit(X, y, self.epochs, self.batch_size, verbose=self.verbose)

    def predict(self, X, quantiles=None):
        """ Estimate the conditional quantiles given the features

        Parameters
        ----------
        X : numpy array of test features (nXp)
        quantiles : currently unused (kept for interface compatibility)

        Returns
        -------
        ret_val : numpy array of estimated conditional quantiles, one column
                  per fitted quantile level; after the learner's crossing
                  correction, columns 0 and 1 hold the lower and upper band
                  (presumably the only columns callers use -- confirm)
        """
        return self.learner.predict(X)
| 3,909 | 36.961165 | 105 | py |
arc | arc-master/experiments_real_data/run_experiment_real_data.py | import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
import os.path
from os import path
from datasets import GetDataset
import random
import torch
import sys
sys.path.insert(0, '..')
import arc
def assess_predictions(S, X, y):
    """Summarize a list of prediction sets S against the true labels y.

    Returns a one-row DataFrame with marginal coverage, estimated worst-slab
    conditional coverage, average set size, and average set size restricted
    to the covered points.
    """
    n = len(y)
    hits = [y[i] in S[i] for i in range(n)]
    sizes = [len(S[i]) for i in range(n)]
    # Marginal coverage and average cardinality of the prediction sets
    coverage = np.mean(hits)
    length = np.mean(sizes)
    # Average cardinality restricted to points whose set contains the label
    idx_cover = np.where(hits)[0]
    length_cover = np.mean([sizes[i] for i in idx_cover])
    # Worst-slab (WSC) estimate of conditional coverage
    cond_coverage = arc.coverage.wsc_unbiased(X, y, S)
    return pd.DataFrame({'Coverage': [coverage], 'Conditional coverage': [cond_coverage],
                         'Length': [length], 'Length cover': [length_cover]})
def collect_predictions(S, X, y, condition_on):
    """Return a per-sample DataFrame with coverage indicator, set size and label.

    X and condition_on are accepted for interface compatibility but unused.
    """
    hits = np.array([label in prediction_set for prediction_set, label in zip(S, y)])
    sizes = np.array([len(prediction_set) for prediction_set in S])
    frame = pd.DataFrame({'Cover': hits, 'Length': sizes})
    frame["y"] = y
    return frame
def run_experiment(out_dir, dataset_name, dataset_base_path, n_train, alpha, experiment):
    """Run one round of the calibration-method x black-box comparison.

    Results are appended to <out_dir>/summary.csv and <out_dir>/full.csv;
    (method, black box, experiment, ...) combinations already present in
    those files are skipped.

    Parameters
    ----------
    out_dir : string, output directory for the CSV result files
    dataset_name : string, dataset name understood by GetDataset
    dataset_base_path : string, base path of the datasets
    n_train : integer, number of training samples
    alpha : float, nominal miscoverage level in (0, 1)
    experiment : integer, experiment id (also seeds the random state)

    Returns
    -------
    (results, results_full) DataFrames, or None if no test data is left.
    """
    # load dataset
    X, y = GetDataset(dataset_name, dataset_base_path)
    # Fix: np.long was removed from NumPy (1.24); plain int is equivalent
    y = y.astype(int)
    # Determine output files
    out_file_1 = out_dir + "/summary.csv"
    out_file_2 = out_dir + "/full.csv"
    out_files = [out_file_1, out_file_2]
    print(out_files)
    # Random state for this experiment
    random_state = 2020 + experiment
    # Set every relevant random seed for reproducibility
    random.seed(random_state)
    np.random.seed(random_state)
    torch.manual_seed(random_state)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(random_state)
    # List of calibration methods to be compared; jackknife+ is only run
    # for small training sets (it refits the model n times)
    if n_train <= 1000:
        methods = {
            'CCC': arc.methods.SplitConformal,
            'CV+': arc.methods.CVPlus,
            'JK+': arc.methods.JackknifePlus,
            'HCC': arc.others.SplitConformalHomogeneous,
            'CQC': arc.others.CQC,
            'CQCRF': arc.others.CQCRF
        }
    else:
        methods = {
            'CCC': arc.methods.SplitConformal,
            'CV+': arc.methods.CVPlus,
            'HCC': arc.others.SplitConformalHomogeneous,
            'CQC': arc.others.CQC,
            'CQCRF': arc.others.CQCRF
        }
    # List of black boxes to be compared
    black_boxes = {
        'SVC': arc.black_boxes.SVC(random_state=random_state),
        'RFC': arc.black_boxes.RFC(n_estimators=100,
                                   criterion="gini",
                                   max_depth=None,
                                   max_features="auto",
                                   min_samples_leaf=3,
                                   random_state=random_state),
        'NNet': arc.black_boxes.NNet(hidden_layer_sizes=64,
                                     batch_size=128,
                                     learning_rate_init=0.01,
                                     max_iter=20,
                                     random_state=random_state)
    }
    # Which special variables should we condition on to compute conditional coverage?
    condition_on = [0]
    # Cap the test set at 5000 samples
    n_test = min(X.shape[0] - n_train, 5000)
    if n_test <= 0:
        # Not enough data for a test set
        return
    # Split data into train/test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=n_test, random_state=random_state)
    X_train = X_train[:n_train]
    y_train = y_train[:n_train]
    # Load pre-computed results, if both output files already exist
    if path.exists(out_files[0]) & path.exists(out_files[1]):
        results = pd.read_csv(out_files[0])
        results_full = pd.read_csv(out_files[1])
    else:
        results = pd.DataFrame()
        results_full = pd.DataFrame()
    for box_name in black_boxes:
        black_box = black_boxes[box_name]
        for method_name in methods:
            # Skip if this experiment has already been run
            if results.shape[0] > 0:
                found = (results['Method'] == method_name)
                found &= (results['Black box'] == box_name)
                found &= (results['Experiment'] == experiment)
                found &= (results['Nominal'] == 1-alpha)
                found &= (results['n_train'] == n_train)
                found &= (results['n_test'] == n_test)
                found &= (results['dataset'] == dataset_name)
            else:
                found = 0
            if np.sum(found) > 0:
                print("Skipping experiment with black-box {} and method {}...".format(box_name, method_name))
                sys.stdout.flush()
                continue
            print("Running experiment with black-box {} and method {}...".format(box_name, method_name))
            sys.stdout.flush()
            # Train classification method
            method = methods[method_name](X_train, y_train, black_box, alpha, random_state=random_state,
                                          verbose=True)
            # Apply classification method
            S = method.predict(X_test)
            # Evaluate results
            res = assess_predictions(S, X_test, y_test)
            # Add information about this experiment
            res['Method'] = method_name
            res['Black box'] = box_name
            res['Experiment'] = experiment
            res['Nominal'] = 1-alpha
            res['n_train'] = n_train
            res['n_test'] = n_test
            res['dataset'] = dataset_name
            # Evaluate results (conditional)
            res_full = collect_predictions(S, X_test, y_test, condition_on)
            # Add information about this experiment
            res_full['Method'] = method_name
            res_full['Black box'] = box_name
            res_full['Experiment'] = experiment
            res_full['Nominal'] = 1-alpha
            res_full['n_train'] = n_train
            res_full['n_test'] = n_test
            res_full['dataset'] = dataset_name
            # Fix: DataFrame.append was removed in pandas 2.0; use pd.concat
            results = pd.concat([results, res], ignore_index=True)
            results_full = pd.concat([results_full, res_full], ignore_index=True)
            # Write results on output files
            if len(out_files) == 2:
                if not os.path.exists(out_dir):
                    os.mkdir(out_dir)
                results.to_csv(out_files[0], index=False, float_format="%.4f")
                print("Updated summary of results on\n {}".format(out_files[0]))
                results_full.to_csv(out_files[1], index=False, float_format="%.4f")
                print("Updated full results on\n {}".format(out_files[1]))
            sys.stdout.flush()
    return results, results_full
| 6,950 | 37.192308 | 109 | py |
arc | arc-master/experiments_real_data/datasets.py |
import numpy as np
import pandas as pd
import torch
from torchvision import transforms
import torchvision.datasets as datasets
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def _flatten_loader(train_loader, dim):
    """Stack a batch_size=1 DataLoader into X (n x dim) and 0-based integer labels y."""
    # Fix: np.int was removed from NumPy (1.24); plain int is equivalent
    X = np.zeros((dim, len(train_loader)), dtype=np.float32)
    y = np.zeros(len(train_loader), dtype=int)
    for i, (data, target) in enumerate(train_loader):
        X[:, i] = data.view(-1)
        y[i] = target
    # Shift labels so they start at 0
    y = y - min(y)
    return X.T, y


def _pca_standardize(X, n_components=50):
    """Project X onto its top principal components, then standardize each feature."""
    pca = PCA(n_components=n_components)
    X = pca.fit_transform(X)
    scaler = StandardScaler()
    return scaler.fit_transform(X)


def GetDataset(name, base_path):
    """ Load a dataset
    Parameters
    ----------
    name : string, dataset name
    base_path : string, e.g. "path/to/datasets/directory/"
    Returns
    -------
    X : features (nXp)
    y : labels (n)
    Raises
    ------
    ValueError : if the dataset name is not recognized
    """
    if name == 'mnist':
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(base_path, train=True, download=True,
                           transform=transforms.ToTensor()), batch_size=1, shuffle=False)
        X, y = _flatten_loader(train_loader, 28*28)
        X = _pca_standardize(X)
    elif name == 'svhn':
        train_loader = torch.utils.data.DataLoader(
            datasets.SVHN(base_path, download=True,
                          transform=transforms.ToTensor()), batch_size=1, shuffle=False)
        X, y = _flatten_loader(train_loader, 32*32*3)
        X = _pca_standardize(X)
    elif name == 'fashion':
        train_loader = torch.utils.data.DataLoader(
            datasets.FashionMNIST(base_path + 'fashion/', train=True, download=True,
                                  transform=transforms.ToTensor()), batch_size=1, shuffle=False)
        X, y = _flatten_loader(train_loader, 28*28)
        X = _pca_standardize(X)
    elif name == 'cifar10':
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10(base_path, train=True, download=True,
                             transform=transforms.ToTensor()), batch_size=1, shuffle=False)
        X, y = _flatten_loader(train_loader, 32*32*3)
        X = _pca_standardize(X)
    elif name == 'cifar100':
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100(base_path, train=True, download=True,
                              transform=transforms.ToTensor()), batch_size=1, shuffle=False)
        X, y = _flatten_loader(train_loader, 32*32*3)
        X = _pca_standardize(X)
    elif name == 'ecoli':
        data = pd.read_csv(base_path + 'ecoli.csv')
        X = data.iloc[:, 1:-1]
        X = pd.get_dummies(X).values
        y = data.iloc[:, -1]
        y = y.astype('category').cat.codes.values
        y = y - min(y)
        scaler = StandardScaler()
        X = scaler.fit_transform(X)
    elif name == 'mice':
        try:
            import xlrd
        except ImportError:
            # Fix: was a bare `except:` that masked every error as a missing dependency
            raise ImportError("To load this dataset, you need the library 'xlrd'. Try installing: pip install xlrd")
        url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00342/Data_Cortex_Nuclear.xls"
        data = pd.read_excel(url, header=0, na_values=['', ' '])
        features = data.iloc[:, 1:-4]
        features = features.fillna(value=0)
        X = pd.get_dummies(features).values
        labels = data.iloc[:, -1]
        y = labels.astype('category').cat.codes.values
        scaler = StandardScaler()
        X = scaler.fit_transform(X)
    else:
        # Fix: unknown names previously fell through to a NameError on X/y
        raise ValueError("Unknown dataset: {}".format(name))
    X = X.astype(np.float32)
    y = y.astype(np.float32)
    return X, y
| 4,903 | 31.263158 | 116 | py |
arc | arc-master/arc/others.py | import numpy as np
import sys

from scipy.stats.mstats import mquantiles
from sklearn.model_selection import train_test_split
# Note: skgarden has recent compatibility issues
#from skgarden import RandomForestQuantileRegressor
# Note: skgarden has recent compatibility issues
#import sys
#sys.path.insert(0, '../third_party')
#from cqr_comparison import NeuralNetworkQR
import torch
from arc.classification import ProbabilityAccumulator as ProbAccum
# Note: skgarden has recent compatibility issues
# class NeuralQuantileRegressor:
# def __init__(self, p, alpha, random_state=2020, verbose=True):
# # Parameters of the neural network
# params = dict()
# params['in_shape'] = p
# params['epochs'] = 1000
# params['lr'] = 0.0005
# params['hidden_size'] = 64
# params['batch_size'] = 64
# params['dropout'] = 0.1
# params['wd'] = 1e-6
# params['test_ratio'] = 0.05
# params['random_state'] = random_state
# # Which quantiles to estimate
# quantiles_net = [alpha, 1-alpha]
# np.random.seed(random_state)
# torch.manual_seed(random_state)
# self.model = NeuralNetworkQR(params, quantiles_net, verbose=verbose)
# def fit(self, X, y):
# # Reshape the data
# X = np.asarray(X)
# y = np.asarray(y)
# self.model.fit(X, y)
# def predict(self, X):
# y = self.model.predict(X)
# return y
# Note: skgarden has recent compatibility issues
# class ForestQuantileRegressor:
# def __init__(self, p, alpha, random_state=2020, verbose=True):
# # Parameters of the random forest
# self.alpha = 100*alpha
# self.model = RandomForestQuantileRegressor(random_state=random_state,
# min_samples_split=3,
# n_estimators=100)
# def fit(self, X, y):
# # Reshape the data
# X = np.asarray(X)
# y = np.asarray(y)
# self.model.fit(X, y)
# def predict(self, X):
# lower = self.model.predict(X, quantile=self.alpha)
# y = np.concatenate((lower[:,np.newaxis], self.model.predict(X, quantile=100.0-self.alpha)[:,np.newaxis]),1)
# return y
class SplitConformalHomogeneous:
    """Split-conformal classification with a single homogeneous threshold.

    Half of the data trains the black-box classifier; the other half is used
    to calibrate one global probability threshold at the finite-sample
    corrected 1-alpha level.
    """

    def __init__(self, X, Y, black_box, alpha, random_state=2020, allow_empty=True, verbose=False):
        # Split data into training/calibration halves
        X_train, X_calib, Y_train, Y_calib = train_test_split(X, Y, test_size=0.5, random_state=random_state)
        self.black_box = black_box
        self.alpha = alpha
        self.allow_empty = allow_empty
        # Fit the classifier on the training half
        self.black_box.fit(X_train, Y_train)
        # Class-probability estimates on the calibration half
        probs = self.black_box.predict_proba(X_calib)
        # Tiny random perturbation to break ties, then renormalize the rows
        rng = np.random.default_rng(random_state)
        probs += 1e-9 * rng.uniform(low=-1.0, high=1.0, size=probs.shape)
        probs = probs / probs.sum(axis=1)[:, None]
        # Probability assigned to the true class of every calibration point
        scores = np.array([probs[i, Y_calib[i]] for i in range(len(Y_calib))])
        # Finite-sample-corrected quantile gives the calibrated threshold
        n_calib = X_calib.shape[0]
        level_adjusted = (1.0 - alpha) * (1.0 + 1.0 / float(n_calib))
        self.threshold_calibrated = mquantiles(scores, prob=1.0 - level_adjusted)

    def predict(self, X, random_state=2020):
        """Return a list of prediction sets, one per row of X."""
        probs = self.black_box.predict_proba(X)
        # Same tie-breaking perturbation as in calibration
        rng = np.random.default_rng(random_state)
        probs += 1e-9 * rng.uniform(low=-1.0, high=1.0, size=probs.shape)
        probs = probs / probs.sum(axis=1)[:, None]
        prediction_sets = []
        for row in probs:
            members = np.where(row >= self.threshold_calibrated)[0]
            if (not self.allow_empty) and (len(members) == 0):
                # Fall back to the single most likely class
                members = [np.argmax(row)]
            prediction_sets.append(members)
        return prediction_sets
class BaseCQC:
    """Conformalized Quantile Classification (CQC).

    Splits the data three ways: one part trains the probability black box,
    one part trains a quantile regressor on the true-class probabilities,
    and one part conformalizes the estimated quantiles.
    """

    def __init__(self, X, y, black_box, alpha, qr_method, random_state=2020, allow_empty=True, verbose=False):
        self.allow_empty = allow_empty
        # Problem dimensions
        self.p = X.shape[1]
        # Alpha for conformal prediction intervals
        self.alpha = alpha
        # Black box for probability estimates
        self.black_box = black_box
        if qr_method == "NNet":
            # Quantiles estimated by a neural network.
            # NOTE(review): NeuralQuantileRegressor is commented out above
            # (skgarden compatibility); this branch fails until it is restored.
            self.quantile_black_box = NeuralQuantileRegressor(self.p,
                                                              self.alpha,
                                                              random_state=random_state,
                                                              verbose=verbose)
        # elif qr_method == "RF":
        #     self.quantile_black_box = ForestQuantileRegressor(self.p,
        #                                                       self.alpha,
        #                                                       random_state=random_state,
        #                                                       verbose=verbose)
        else:
            # Fix: was a bare `raise` outside an except clause, which would
            # itself fail with "RuntimeError: No active exception to re-raise"
            raise ValueError("Unknown quantile regression method: {}".format(qr_method))
        # Split data into training and calibration sets
        X_train, X_calibration, y_train, y_calibration = train_test_split(X, y, test_size=0.333,
                                                                          random_state=random_state)
        # Further split training data
        X_train_1, X_train_2, y_train_1, y_train_2 = train_test_split(X_train, y_train, test_size=0.5,
                                                                      random_state=random_state)
        # Estimate probabilities with the black box on the first training set
        n1 = X_train_1.shape[0]
        if verbose:
            print("Training the black-box classifier with {} samples...". format(n1))
            sys.stdout.flush()
        self.black_box.fit(X_train_1, y_train_1)
        p_hat_2 = self.black_box.predict_proba(X_train_2)
        sys.stdout.flush()
        # Compute scores (true-class probabilities) on the second training set
        n2 = X_train_2.shape[0]
        scores_2 = np.array([p_hat_2[i, int(y_train_2[i])] for i in range(n2)])
        # Train the quantile estimator on the above scores
        if verbose:
            print("Training the quantile regression black box with {} samples...". format(n2))
            sys.stdout.flush()
        self.quantile_black_box.fit(X_train_2, scores_2)
        # Estimate the quantiles on the calibration data (keep only the upper quantiles)
        q_hat = self.quantile_black_box.predict(X_calibration)[:, 1]
        sys.stdout.flush()
        # Compute conformity scores on the calibration set
        n3 = X_calibration.shape[0]
        p_hat_3 = self.black_box.predict_proba(X_calibration)
        scores_3 = np.array([p_hat_3[i, int(y_calibration[i])] for i in range(n3)])
        conf_scores = q_hat - scores_3
        # Finite-sample-corrected quantile of the conformity scores
        level_adjusted = (1.0-self.alpha)*(1.0+1.0/float(n3))
        self.score_correction = mquantiles(conf_scores, prob=level_adjusted)

    def predict(self, X):
        """Return conformal prediction sets for the rows of X."""
        n = X.shape[0]
        p_hat = self.black_box.predict_proba(X)
        q_hat = self.quantile_black_box.predict(X)[:, 1]
        S = [None]*n
        for i in range(n):
            # Keep every class whose probability clears the corrected quantile
            S[i] = np.where(p_hat[i, :] >= q_hat[i] - self.score_correction)[0]
            if (not self.allow_empty) and (len(S[i]) == 0):
                S[i] = [np.argmax(p_hat[i, :])]
        return S
class CQC:
    """Conformalized Quantile Classification with a neural-network quantile regressor."""

    def __init__(self, X, y, black_box, alpha, random_state=2020, allow_empty=True, verbose=False):
        self.base_cqc = BaseCQC(X, y, black_box, alpha, "NNet",
                                random_state=random_state,
                                allow_empty=allow_empty, verbose=verbose)

    def predict(self, X):
        """Delegate prediction-set construction to the underlying BaseCQC."""
        return self.base_cqc.predict(X)
# class CQCRF:
# def __init__(self, X, y, black_box, alpha, random_state=2020, allow_empty=True, verbose=False):
# self.base_cqc = BaseCQC(X, y, black_box, alpha, "RF", random_state=random_state, allow_empty=allow_empty, verbose=verbose)
# def predict(self, X):
# return self.base_cqc.predict(X)
class Oracle:
    """Prediction sets built from the true class probabilities of a data model."""

    def __init__(self, data_model, alpha, random_state=2020, allow_empty=True, verbose=True):
        self.data_model = data_model
        self.alpha = alpha
        self.allow_empty = allow_empty

    def predict(self, X, randomize=True, random_state=2020):
        """Return oracle prediction sets for the rows of X.

        When randomize is True, per-sample uniform noise is drawn to
        randomize the set construction; otherwise it is deterministic.
        """
        epsilon = None
        if randomize:
            rng = np.random.default_rng(random_state)
            epsilon = rng.uniform(low=0.0, high=1.0, size=X.shape[0])
        prob_y = self.data_model.compute_prob(X)
        accumulator = ProbAccum(prob_y)
        return accumulator.predict_sets(self.alpha, epsilon=epsilon, allow_empty=self.allow_empty)
| 9,005 | 40.311927 | 132 | py |
RefVAE | RefVAE-main/main_GAN.py | from __future__ import print_function
import argparse
from math import log10
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from laploss import LapLoss
from torch.utils.data import DataLoader
import torch.nn.functional as F
from model import *
from network import encoder4, decoder4
from image_utils import TVLoss
from data import get_training_set
import numpy as np
from pytorch_msssim import SSIM as pytorch_ssim
import time
from lpips import lpips
# Training settings: command-line configuration for the GAN training script
parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
# Model / optimization hyper-parameters
parser.add_argument('--up_factor', type=int, default=4, help='upsampling factor')
parser.add_argument('--batchSize', type=int, default=8, help='training batch size')
parser.add_argument('--nEpochs', type=int, default=500, help='number of epochs to train for')
parser.add_argument('--snapshots', type=int, default=5, help='Snapshots')
parser.add_argument('--start_iter', type=int, default=1, help='Starting Epoch')
parser.add_argument('--lr', type=float, default=1e-4, help='Learning Rate. Default=0.0001')
parser.add_argument('--gpu_mode', type=bool, default=True)
parser.add_argument('--threads', type=int, default=6, help='number of threads for data loader to use')
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
parser.add_argument('--gpus', default=2, type=int, help='number of gpu')
# Dataset locations
parser.add_argument('--data_dir', type=str, default='/home/server2/ZSLiu/NTIRE2021/data/SR/')
parser.add_argument("--ref_dir", type=str, default="/home/server2/ZSLiu/style_transfer/Data/wikiart",
                    help='path to wikiArt dataset')
parser.add_argument('--data_augmentation', type=bool, default=True)
parser.add_argument('--model_type', type=str, default='GAN')
parser.add_argument('--patch_size', type=int, default=64, help='Size of cropped LR image')
# Pre-trained checkpoints to resume from
parser.add_argument('--pretrained_G_model', default='GAN_generator_v18.pth', help='pretrained G model')
parser.add_argument('--pretrained_D_model', default='GAN_discriminator_0.pth', help='pretrained D model')
parser.add_argument('--pretrained', type=bool, default=True)
parser.add_argument('--save_folder', default='models/', help='Location to save checkpoint models')
parser.add_argument("--encoder_dir", default='models/vgg_r41.pth', help='pre-trained encoder path')
parser.add_argument("--decoder_dir", default='models/dec_r41.pth', help='pre-trained encoder path')
opt = parser.parse_args()
# Use the first GPU if available, otherwise fall back to CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def train(epoch):
    """Run one epoch of adversarial training.

    Alternates, per mini-batch, a generator (G) step against the frozen
    discriminator and a discriminator (D) step against the detached generator
    output. Uses the module-level globals: G, D, enc, dec, the optimizers,
    the loss modules and training_data_loader.
    """
    G_epoch_loss = 0
    D_epoch_loss = 0
    G.train()
    D.train()
    # The VGG encoder/decoder are frozen feature extractors
    enc.eval()
    dec.eval()
    for iteration, batch in enumerate(training_data_loader, 1):
        # batch = (LR input, HR target, reference image)
        input, target, ref = batch[0], batch[1], batch[2]
        minibatch = input.size()[0]
        # Flat per-patch labels for the discriminator output
        # (1344 matches discriminator_v2's output size — TODO confirm)
        real_label = torch.ones((minibatch, 1344))
        fake_label = torch.zeros((minibatch, 1344))
        input = input.to(device)
        target = target.to(device)
        ref = ref.to(device)
        real_label = real_label.to(device)
        fake_label = fake_label.to(device)
        # Reset gradient
        # Freeze D while updating G
        for p in D.parameters():
            p.requires_grad = False
        G_optimizer.zero_grad()
        # encoder
        # VGG features of the bicubic upsampling and of the HR target
        bic = F.interpolate(input, scale_factor=opt.up_factor, mode='bicubic')
        # ref_feat = enc(ref)
        tar_feat = enc(target)
        LR_feat = enc(bic)
        # Generator produces the SR image and a KL term (VAE branch)
        predict, KL = G(input, LR_feat['r41'], tar_feat['r41'])
        # predict = dec(LR_feat)
        pre_LR = F.interpolate(predict, scale_factor=1.0 / opt.up_factor, mode='bicubic')
        # Reconstruction loss
        # LR-consistency: re-downsampled SR must match the LR input
        LR_loss = L1_criterion(pre_LR, input)
        # pre_feat = enc(predict)
        # tar_feat = enc(target)
        # SR_loss = L1_criterion(pre_feat['r41'], tar_feat['r41'])
        # Multi-scale L1 between prediction and target (1x, 0.5x, 0.25x)
        SR_L1 = L1_criterion(predict, target) + \
                L1_criterion(F.interpolate(predict, scale_factor=0.5, mode='bicubic'), F.interpolate(target, scale_factor=0.5, mode='bicubic')) + \
                L1_criterion(F.interpolate(predict, scale_factor=0.25, mode='bicubic'), F.interpolate(target, scale_factor=0.25, mode='bicubic'))
        # SR_lap = lap_loss(2*predict-1, 2*target-1)
        KL_loss = KL.mean()
        # RE = log_Logistic_256(target, pre_mean, pre_logvar, average=False, dim=1)
        # RE = RE.mean()
        # ssim_loss = 1 - ssim(predict, target)
        # lap_recon = lap_loss(predict, target)
        # TV_loss = TV(predict)
        # PD_feat, mean_var_loss = PD_loss(predict, target)
        # VGG-19 perceptual (content) and style losses
        ContentLoss, StyleLoss = VGG_feat(predict, target)
        # VGG_loss = L1_criterion(pre_feat4, tar_feat4)
        # Spatial LPIPS expects inputs in [-1, 1]
        lpips_sp = loss_fn_alex_sp(2 * predict - 1, 2 * target - 1)
        lpips_sp = lpips_sp.mean()
        # Adversarial terms: match D's decision and intermediate features
        D_fake_feat, D_fake_decision = D(predict)
        D_real_feat, D_real_decision = D(target)
        GAN_feat_loss = L1_criterion(D_fake_feat, D_real_feat)
        GAN_loss = L1_criterion(D_fake_decision, real_label)
        # Weighted sum of all generator objectives
        G_loss = 100 * LR_loss + 1 * SR_L1 + 0.01 * GAN_feat_loss + 1 * GAN_loss + 0.01 * ContentLoss + 10*StyleLoss + 1*KL_loss + 1e4*lpips_sp
        G_loss.backward()
        G_optimizer.step()
        # Reset gradient
        # Unfreeze D for its own update
        for p in D.parameters():
            p.requires_grad = True
        D_optimizer.zero_grad()
        # detach() stops discriminator gradients from reaching G
        _, D_fake_decision = D(predict.detach())
        _, D_real_decision = D(target)
        # Label smoothing/noise for the discriminator targets
        real = real_label * np.random.uniform(0.7, 1.2)
        fake = fake_label + np.random.uniform(0.0, 0.3)
        Dis_loss = (L1_criterion(D_real_decision, real)
                    + L1_criterion(D_fake_decision, fake)) / 2.0
        # Back propagation
        D_loss = Dis_loss
        D_loss.backward()
        D_optimizer.step()
        G_epoch_loss += G_loss.data
        D_epoch_loss += D_loss.data
        print("===> Epoch[{}]({}/{}): G_loss: {:.4f} || "
              "D_loss: {:.4f} || "
              "LR_loss: {:.4f} || "
              "SR_L1: {:.4f} || "
              "GAN_loss: {:.4f} || "
              "KL_loss: {:.4f} || "
              "ContentLoss: {:.4f} || "
              "StyleLoss: {:.4f} ||"
              "lpips_sp: {:.4f} ||"
              "GAN_feat_loss: {:.4f} ||".format(epoch, iteration,
                                                len(training_data_loader),
                                                G_loss.data,
                                                D_loss.data,
                                                LR_loss.data,
                                                SR_L1.data,
                                                GAN_loss.data,
                                                KL_loss.data,
                                                ContentLoss.data,
                                                StyleLoss.data,
                                                lpips_sp.data,
                                                GAN_feat_loss.data))
    print("===> Epoch {} Complete: Avg. G Loss: {:.4f} || D Loss: {:.4f}".format(epoch, G_epoch_loss / len(training_data_loader),
                                                                                 D_epoch_loss / len(training_data_loader)))
def print_network(net):
    """Print a network's structure followed by its total parameter count."""
    total = sum(param.numel() for param in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
def checkpoint(epoch):
    """Persist the generator and discriminator weights for the given epoch."""
    prefix = opt.save_folder + opt.model_type
    G_model_out_path = prefix + "_generator_{}.pth".format(epoch)
    D_model_out_path = prefix + "_discriminator_{}.pth".format(epoch)
    torch.save(G.state_dict(), G_model_out_path)
    torch.save(D.state_dict(), D_model_out_path)
    print("Checkpoint saved to {} and {}".format(G_model_out_path, D_model_out_path))
print('===> Loading datasets')
# Paired LR/HR (+reference) training patches
train_set = get_training_set(opt.data_dir, opt.patch_size, opt.up_factor,
                             opt.data_augmentation)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
print('===> Building model ', opt.model_type)
# model = RCAN(num_in_ch=3, num_out_ch=3, up_factor=opt.up_factor)
# Frozen VGG encoder/decoder pair plus the VAE generator and discriminator
enc = encoder4()
dec = decoder4()
G = VAE_v3_4x(up_factor=opt.up_factor)
D = discriminator_v2(num_channels=3, base_filter=32)
# Loss modules (size_average=False sums instead of averaging)
L1_criterion = nn.L1Loss(size_average=False)
L2_criterion = nn.MSELoss(size_average=False)
TV = TVLoss()
ssim = pytorch_ssim()
lap_loss = LapLoss(max_levels=5, k_size=5, sigma=2.0)
# PD_loss = PDLoss(device, l1_lambda=1.5, w_lambda=0.01)
VGG_feat = Vgg19_feat(device)
loss_fn_alex_sp = lpips.LPIPS(spatial=True)
print('---------- Generator architecture -------------')
print_network(G)
print('----------------------------------------------')
print('---------- Discriminator architecture -------------')
print_network(D)
print('----------------------------------------------')
# Load the pre-trained VGG encoder/decoder weights, then freeze them
if os.path.exists(opt.encoder_dir):
    enc.load_state_dict(torch.load(opt.encoder_dir))
    print('encoder model is loaded!')
if os.path.exists(opt.decoder_dir):
    dec.load_state_dict(torch.load(opt.decoder_dir))
    print('decoder model is loaded!')
for param in enc.parameters():
    param.requires_grad = False
for param in dec.parameters():
    param.requires_grad = False
if opt.pretrained:
    G_model_name = os.path.join(opt.save_folder + opt.pretrained_G_model)
    D_model_name = os.path.join(opt.save_folder + opt.pretrained_D_model)
    if os.path.exists(G_model_name):
        # Partial load: keep only the checkpoint entries whose keys exist in G
        pretrained_dict = torch.load(G_model_name, map_location=lambda storage, loc: storage)
        model_dict = G.state_dict()
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        G.load_state_dict(model_dict)
        # G.load_state_dict(torch.load(G_model_name, map_location=lambda storage, loc: storage))
        print('Pre-trained Generator is loaded.')
    if os.path.exists(D_model_name):
        D.load_state_dict(torch.load(D_model_name, map_location=lambda storage, loc: storage))
        print('Pre-trained Discriminator is loaded.')
# if torch.cuda.device_count() > 1:
#     G = torch.nn.DataParallel(G)
#     D = torch.nn.DataParallel(D)
# Move every module to the training device
enc = enc.to(device)
dec = dec.to(device)
G = G.to(device)
D = D.to(device)
VGG_feat = VGG_feat.to(device)
L1_criterion = L1_criterion.to(device)
ssim = ssim.to(device)
lap_loss = lap_loss.to(device)
TV = TV.to(device)
loss_fn_alex_sp = loss_fn_alex_sp.to(device)
G_optimizer = optim.Adam(G.parameters(), lr=opt.lr, betas=(0.9, 0.999), eps=1e-8)
D_optimizer = optim.Adam(D.parameters(), lr=opt.lr, betas=(0.9, 0.999), eps=1e-8)
# Training loop: decay the G learning rate once at the halfway point and
# checkpoint every `snapshots` epochs
for epoch in range(opt.start_iter, opt.nEpochs + 1):
    train(epoch)
    if epoch % (opt.nEpochs / 2) == 0:
        for param_group in G_optimizer.param_groups:
            param_group['lr'] /= 10.0
        print('Learning rate decay: lr={}'.format(G_optimizer.param_groups[0]['lr']))
    if epoch % (opt.snapshots) == 0:
        checkpoint(epoch)
| 10,971 | 37.633803 | 147 | py |
RefVAE | RefVAE-main/test.py | from __future__ import print_function
import argparse
import os
import torch
import cv2
from model import *
import torchvision.transforms as transforms
from collections import OrderedDict
import numpy as np
from os.path import join
import time
from network import encoder4, decoder4
import numpy
from dataset import is_image_file
from image_utils import *
from PIL import Image, ImageOps
from os import listdir
import torch.utils.data as utils
import os
# Training settings: command-line configuration for the evaluation script
parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
parser.add_argument('--testBatchSize', type=int, default=8, help='testing batch size')
parser.add_argument('--up_factor', type=int, default=4, help="super resolution upscale factor")
parser.add_argument('--gpu_mode', type=bool, default=True)
parser.add_argument('--chop_forward', type=bool, default=True)
parser.add_argument('--patch_size', type=int, default=64, help='0 to use original frame size')
parser.add_argument('--stride', type=int, default=4, help='0 to use original patch size')
parser.add_argument('--threads', type=int, default=6, help='number of threads for data loader to use')
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
parser.add_argument('--gpus', default=1, type=int, help='number of gpu')
parser.add_argument('--image_dataset', type=str, default='data/SR/Set5/')
parser.add_argument('--model_type', type=str, default='VAE')
parser.add_argument('--distortion', type=int, default=1)
parser.add_argument('--model', default='GAN_generator_50.pth', help='sr pretrained base model')
parser.add_argument("--encoder_dir", default='models/vgg_r41.pth', help='pre-trained encoder path')
parser.add_argument("--decoder_dir", default='models/dec_r41.pth', help='pre-trained encoder path')
opt = parser.parse_args()
print(opt)
# Use the first GPU if available, otherwise fall back to CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('===> Building model ', opt.model_type)
# def apply_dropout(m):
#     if type(m) == nn.Dropout:
#         m.train()
# Generator plus frozen VGG encoder/decoder used as feature extractors
model = VAE_v3_4x(up_factor=opt.up_factor)
enc = encoder4()
dec = decoder4()
if os.path.exists(opt.encoder_dir):
    enc.load_state_dict(torch.load(opt.encoder_dir))
    print('encoder model is loaded!')
if os.path.exists(opt.decoder_dir):
    dec.load_state_dict(torch.load(opt.decoder_dir))
    print('decoder model is loaded!')
for param in enc.parameters():
    param.requires_grad = False
for param in dec.parameters():
    param.requires_grad = False
# model_name = 'models/' + opt.model
# if os.path.exists(model_name):
#     model.load_state_dict(torch.load(model_name, map_location=lambda storage, loc: storage))
#     print(model_name)
# model = torch.nn.DataParallel(model, device_ids=gpus_list)
# mat_ncc = mat_ncc.to(device)
model = model.to(device)
enc = enc.to(device)
dec = dec.to(device)
print('===> Loading datasets')
def eval(i):
    """Evaluate the generator checkpoint saved at epoch i on the test set.

    Loads models/GAN_generator_<i>.pth (if present), super-resolves every HR
    image in opt.image_dataset after downscaling it by opt.up_factor, saves
    the SR and re-downsampled outputs, and prints average PSNR/SSIM on the
    Y channel.

    NOTE(review): this shadows the builtin `eval`; it is called by this name
    at module level below, so renaming would require changing the caller too.
    """
    model.eval()
    enc.eval()
    dec.eval()
    model_name = 'models/GAN_generator_'+str(i)+'.pth'
    if os.path.exists(model_name):
        model.load_state_dict(torch.load(model_name, map_location=lambda storage, loc: storage))
        print(model_name)
    # Input/output directory layout inside the test dataset folder
    HR_filename = os.path.join(opt.image_dataset, 'HR')
    Ref_filename = os.path.join(opt.image_dataset, 'Ref')
    # LR_filename = os.path.join(opt.image_dataset, 'hazy')
    SR_filename = os.path.join(opt.image_dataset, 'SR')
    SLR_filename = os.path.join(opt.image_dataset, 'SLR')
    gt_image = [join(HR_filename, x) for x in listdir(HR_filename) if is_image_file(x)]
    ref_image = [join(Ref_filename, x) for x in listdir(Ref_filename) if is_image_file(x)]
    ref_image = sorted(ref_image)
    output_image = [join(SR_filename, x) for x in listdir(HR_filename) if is_image_file(x)]
    slr_output_image = [join(SLR_filename, x) for x in listdir(HR_filename) if is_image_file(x)]
    count = 0
    avg_psnr_predicted = 0.0
    avg_ssim_predicted = 0.0
    avg_psnr_LR = 0.0
    avg_ssim_LR = 0.0
    t0 = time.time()
    # ran_patch = torch.randint(896, (2,))
    # NOTE(review): the loop variable `i` shadows the checkpoint index argument
    for i in range(gt_image.__len__()):
        HR = Image.open(gt_image[i]).convert('RGB')
        HR = modcrop(HR, opt.up_factor)
        # NOTE(review): always uses the third reference image for every test
        # image — presumably intentional (fixed reference); confirm
        Ref = Image.open(ref_image[2]).convert('RGB')
        Ref = modcrop(Ref, opt.up_factor)
        # Synthesize the LR input by downscaling the ground truth
        LR = rescale_img(HR, 1.0/opt.up_factor)
        with torch.no_grad():
            pre_LR, prediction = chop_forward(Ref, LR)
        # print("===> Processing: %s || Timer: %.4f sec." % (str(i), (t1 - t0)))
        # Convert CHW tensors in [0, 1] to HWC uint8-range arrays
        prediction = prediction.data[0].cpu().permute(1, 2, 0)
        pre_LR = pre_LR.data[0].cpu().permute(1, 2, 0)
        prediction = prediction * 255.0
        pre_LR = pre_LR * 255.0
        prediction = prediction.clamp(0, 255)
        pre_LR = pre_LR.clamp(0, 255)
        Image.fromarray(np.uint8(prediction)).save(output_image[i])
        Image.fromarray(np.uint8(pre_LR)).save(slr_output_image[i])
        # PSNR/SSIM are computed on the luminance (Y) channel
        GT = np.array(HR).astype(np.float32)
        GT_Y = rgb2ycbcr(GT)
        LR = np.array(LR).astype(np.float32)
        LR_Y = rgb2ycbcr(LR)
        prediction = np.array(prediction).astype(np.float32)
        pre_LR = np.array(pre_LR).astype(np.float32)
        prediction_Y = rgb2ycbcr(prediction)
        pre_LR_Y = rgb2ycbcr(pre_LR)
        # SR quality against the ground truth
        psnr_predicted = PSNR(prediction_Y, GT_Y, shave_border=opt.up_factor)
        ssim_predicted = SSIM(prediction_Y, GT_Y, shave_border=opt.up_factor)
        avg_psnr_predicted += psnr_predicted
        avg_ssim_predicted += ssim_predicted
        # LR-consistency of the re-downsampled SR output
        psnr_predicted = PSNR(pre_LR_Y, LR_Y, shave_border=1)
        ssim_predicted = SSIM(pre_LR_Y, LR_Y, shave_border=1)
        avg_psnr_LR += psnr_predicted
        avg_ssim_LR += ssim_predicted
        count += 1
    t1 = time.time()
    avg_psnr_predicted = avg_psnr_predicted / count
    avg_ssim_predicted = avg_ssim_predicted / count
    avg_psnr_LR = avg_psnr_LR / count
    avg_ssim_LR = avg_ssim_LR / count
    avg_time_predicted = t1 - t0
    print("PSNR_predicted= {:.4f} || "
          "SSIM_predicted= {:.4f} || "
          "PSNR_LR= {:.4f} || "
          "SSIM_LR= {:.4f} || Time= {:.4f} ".format(
        avg_psnr_predicted,
        avg_ssim_predicted,
        avg_psnr_LR,
        avg_ssim_LR,
        avg_time_predicted))
# Shared preprocessing: PIL image -> float tensor in [0, 1]
transform = transforms.Compose([
    transforms.ToTensor(), # range [0, 255] -> [0.0,1.0]
    ]
)
def chop_forward(ref, img):
    """Super-resolve ``img`` guided by the reference image ``ref``.

    Both arguments are PIL images. They are converted to normalized 4-D
    tensors, the bicubically upscaled LR image and the reference are encoded
    with the VGG encoder ``enc``, and the SR ``model`` predicts the output
    conditioned on both relu4_1 feature maps.

    Returns:
        (LR, SR): the SR prediction re-downscaled by ``1/opt.up_factor``
        (for LR-consistency evaluation) and the SR prediction itself, both
        tensors on ``device``.
    """
    img = transform(img).unsqueeze(0)
    ref = transform(ref).unsqueeze(0)
    testset = utils.TensorDataset(ref, img)
    test_dataloader = utils.DataLoader(testset, num_workers=opt.threads,
                                       drop_last=False, batch_size=opt.testBatchSize,
                                       shuffle=False)
    for batch in test_dataloader:
        ref_t, lr_input = batch[0].to(device), batch[1].to(device)
        # Encode the bicubically upscaled LR image and the reference image.
        LR_feat = enc(F.interpolate(lr_input, scale_factor=opt.up_factor, mode='bicubic'))
        ref_feat = enc(ref_t)
        SR, _ = model(lr_input, LR_feat['r41'], ref_feat['r41'])
        # Re-downscale the SR output for the LR-consistency metric.
        LR = F.interpolate(SR, scale_factor=1 / opt.up_factor, mode='bicubic')
        # The dataset holds a single (ref, img) pair, so the first batch is
        # the only one; return immediately (matches the original behavior).
        return LR, SR
## Eval Start!!!! — evaluate every saved checkpoint epoch: 5, 10, ..., 470.
for checkpoint_epoch in range(5, 475, 5):
    eval(checkpoint_epoch)
| 8,446 | 35.5671 | 102 | py |
RefVAE | RefVAE-main/image_utils.py | import torch
import numpy as np
from PIL import Image
import math
import cv2
class TVLoss(torch.nn.Module):
    """Total-variation loss: 2 * (sum of squared vertical and horizontal
    differences between adjacent pixels).

    Note: the result is intentionally NOT normalized by batch size or pixel
    count — the normalized variant is kept below as a disabled comment.
    """

    def __init__(self):
        super(TVLoss, self).__init__()

    def forward(self, x):
        """Return the unnormalized total variation of a 4-D tensor (N, C, H, W)."""
        h_x = x.size()[2]
        w_x = x.size()[3]
        # Squared differences between vertically / horizontally adjacent pixels.
        # (The original also computed batch_size, count_h and count_w here on
        # every call, but they were only needed by the disabled normalized
        # return below, so they have been removed.)
        h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2).sum()
        w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2).sum()
        # return 2*(h_tv/count_h+w_tv/count_w)/batch_size
        return 2 * (h_tv + w_tv)

    def _tensor_size(self, t):
        """Number of elements per sample (C * H * W); kept for the disabled
        normalized variant above."""
        return t.size()[1] * t.size()[2] * t.size()[3]
def log_Logistic_256(x, mean, logvar, average=False, reduce=True, dim=None):
    """Negative log-likelihood of ``x`` under a discretized (256-bin)
    logistic distribution parameterized by ``mean`` and ``logvar``.

    Inputs are flattened to (batch, -1). When ``reduce`` is True the
    per-element terms are summed (or averaged, if ``average``) over ``dim``.
    """
    batch = x.size(0)
    x = x.view(batch, -1)
    mean = mean.view(batch, -1)
    logvar = logvar.view(batch, -1)
    bin_size = 1. / 256.
    # implementation like https://github.com/openai/iaf/blob/master/tf_utils/distributions.py#L28
    scale = torch.exp(logvar)
    # Snap each value to its bin's lower edge, then standardize.
    x = (torch.floor(x / bin_size) * bin_size - mean) / scale
    cdf_plus = torch.sigmoid(x + bin_size / scale)
    cdf_minus = torch.sigmoid(x)
    # Per-element NLL of the probability mass inside the bin.
    log_logist_256 = -torch.log(cdf_plus - cdf_minus + 1.e-7)
    if not reduce:
        return log_logist_256
    return torch.mean(log_logist_256, dim) if average else torch.sum(log_logist_256, dim)
def reduce_image(img, scale):
    """Space-to-depth: pack each (scale x scale) pixel block of ``img`` into
    the channel dimension.

    Args:
        img: tensor of shape (N, C, H, W); H and W must be divisible by scale.
        scale: integer downscaling factor.

    Returns:
        Tensor of shape (N, C * scale**2, H // scale, W // scale) on the same
        device as ``img``.
    """
    batch, channels, height, width = img.size()
    # Allocate on the input's device. The original hard-coded .cuda(), which
    # broke CPU-only use and ignored the input's actual device.
    reduced_img = torch.zeros(batch, channels * scale * scale,
                              height // scale, width // scale,
                              device=img.device)
    for x in range(scale):
        for y in range(scale):
            for c in range(channels):
                reduced_img[:, c + channels * (y + scale * x), :, :] = img[:, c, x::scale, y::scale]
    return reduced_img
def reconstruct_image(features, scale):
    """Depth-to-space: inverse of ``reduce_image`` — spread channel groups of
    ``features`` back into (scale x scale) spatial blocks.

    Args:
        features: tensor of shape (N, C * scale**2, H, W).
        scale: integer upscaling factor.

    Returns:
        Tensor of shape (N, C, H * scale, W * scale) on the same device as
        ``features``.
    """
    batch, channels, height, width = features.size()
    img_channels = channels // (scale**2)
    # Allocate on the input's device instead of the original hard-coded
    # .cuda(), which broke CPU-only use.
    reconstructed_img = torch.zeros(batch, img_channels, height * scale, width * scale,
                                    device=features.device)
    for x in range(scale):
        for y in range(scale):
            for c in range(img_channels):
                f_channel = c + img_channels * (y + scale * x)
                reconstructed_img[:, c, x::scale, y::scale] = features[:, f_channel, :, :]
    return reconstructed_img
def patchify_tensor(features, patch_size, overlap=10):
    """Split a (N, C, H, W) tensor into overlapping square patches.

    Patches are extracted on a grid with stride ``patch_size - overlap``,
    row-major per batch element, and stacked along dim 0 of the result.
    Patches that would run past the right/bottom edge are shifted inwards so
    every patch is exactly ``patch_size`` on a side.

    Returns:
        (N * n_patches, C, patch_size, patch_size) tensor.
    """
    batch_size, channels, height, width = features.size()
    stride = patch_size - overlap
    # Ceiling division: one extra row/column of patches covers any remainder.
    patch_rows = (height + stride - 1) // stride
    patch_cols = (width + stride - 1) // stride
    patches = []
    for b in range(batch_size):
        for row in range(patch_rows):
            for col in range(patch_cols):
                top = min(row * stride, height - patch_size)
                left = min(col * stride, width - patch_size)
                patches.append(features[b:b + 1, :,
                                        top:top + patch_size,
                                        left:left + patch_size])
    return torch.cat(patches, 0)
def recompose_tensor(patches, full_height, full_width, overlap=10):
    """Reassemble overlapping patches (as produced by ``patchify_tensor``)
    into full-size images, blending overlap regions with linear ramps.

    Args:
        patches: (final_batch * n_patches, C, patch_size, patch_size) tensor,
            patches ordered row-major per image.
        full_height: output image height in pixels.
        full_width: output image width in pixels.
        overlap: pixel overlap between adjacent patches; must match the value
            used when the patches were extracted.

    Returns:
        (final_batch, C, full_height, full_width) tensor.
    """
    batch_size, channels, patch_size, _ = patches.size()
    effective_patch_size = patch_size - overlap
    # Same grid arithmetic as patchify_tensor: add an extra row/column of
    # patches when the stride does not divide the image size exactly.
    n_patches_height = (full_height // effective_patch_size)
    n_patches_width = (full_width // effective_patch_size)
    if n_patches_height * effective_patch_size < full_height:
        n_patches_height += 1
    if n_patches_width * effective_patch_size < full_width:
        n_patches_width += 1
    n_patches = n_patches_height * n_patches_width
    if batch_size % n_patches != 0:
        print("Error: The number of patches provided to the recompose function does not match the number of patches in each image.")
    final_batch_size = batch_size // n_patches
    # 1-D blending profile: ramp 0.1 -> 1.0 over the first `overlap` pixels,
    # flat at 1.0 in the middle, ramp back down over the last `overlap`.
    blending_in = torch.linspace(0.1, 1.0, overlap)
    blending_out = torch.linspace(1.0, 0.1, overlap)
    middle_part = torch.ones(patch_size - 2 * overlap)
    blending_profile = torch.cat([blending_in, middle_part, blending_out], 0)
    # Outer product of the profile with itself gives the 2-D patch weights.
    horizontal_blending = blending_profile[None].repeat(patch_size, 1)
    vertical_blending = blending_profile[:, None].repeat(1, patch_size)
    blending_patch = horizontal_blending * vertical_blending
    # Accumulate the total blending weight applied at every output pixel so
    # the weighted patch sum can be normalized at the end.
    blending_image = torch.zeros(1, channels, full_height, full_width)
    for h in range(n_patches_height):
        for w in range(n_patches_width):
            patch_start_height = min(h * effective_patch_size, full_height - patch_size)
            patch_start_width = min(w * effective_patch_size, full_width - patch_size)
            blending_image[0, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += blending_patch[None]
    recomposed_tensor = torch.zeros(final_batch_size, channels, full_height, full_width)
    if patches.is_cuda:
        blending_patch = blending_patch.cuda()
        blending_image = blending_image.cuda()
        recomposed_tensor = recomposed_tensor.cuda()
    # Weighted sum of the patches in the same row-major order they were
    # extracted, then divide by the accumulated weights.
    patch_index = 0
    for b in range(final_batch_size):
        for h in range(n_patches_height):
            for w in range(n_patches_width):
                patch_start_height = min(h * effective_patch_size, full_height - patch_size)
                patch_start_width = min(w * effective_patch_size, full_width - patch_size)
                recomposed_tensor[b, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += patches[patch_index] * blending_patch
                patch_index += 1
    recomposed_tensor /= blending_image
    return recomposed_tensor
def modcrop(img, modulo):
    """Crop a PIL image so both dimensions are divisible by ``modulo``.

    ``Image.size`` is (width, height); the crop keeps the top-left corner.
    (The original unpacked the sizes into misleadingly-named ``ih``/``iw``
    locals — the behavior here is identical.)
    """
    width, height = img.size
    width = width - (width % modulo)
    height = height - (height % modulo)
    return img.crop((0, 0, width, height))
def rescale_img(img_in, scale):
    """Resize a PIL image by the factor ``scale`` using bicubic resampling."""
    width, height = img_in.size
    target_size = (int(scale * width), int(scale * height))
    return img_in.resize(target_size, resample=Image.BICUBIC)
def rgb2ycbcr(img, only_y=True):
    '''same as matlab rgb2ycbcr
    only_y: only return Y channel
    Input:
        float32, [0, 255]
    Output:
        float32/float64, [0, 255], rounded to the nearest integer
    '''
    # The original called img.astype(np.float32) without assigning the
    # result, which is a no-op on numpy arrays; keep the converted array.
    img = img.astype(np.float32)
    # convert
    if only_y:
        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
    rlt = rlt.round()
    return rlt
def PSNR(pred, gt, shave_border):
    """Peak signal-to-noise ratio (in dB) between two images in [0, 255].

    A border of ``shave_border`` pixels is removed from every edge before
    the comparison; identical images return 100.
    """
    inner = slice(shave_border, -shave_border)
    pred = pred[inner, inner]
    gt = gt[inner, inner]
    rmse = math.sqrt(np.mean((pred - gt) ** 2))
    if rmse == 0:
        return 100
    return 20 * math.log10(255.0 / rmse)
def calculate_ssim(img1, img2):
    """SSIM between two images in [0, 255], following the original MATLAB
    formulation: 11x11 Gaussian window (sigma 1.5), valid region only (the
    5-pixel border affected by the window is excluded)."""
    stab1 = (0.01 * 255) ** 2
    stab2 = (0.03 * 255) ** 2
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    gauss = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(gauss, gauss.transpose())
    # Local means over the window, cropped to the valid region.
    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1 ** 2
    mu2_sq = mu2 ** 2
    mu1_mu2 = mu1 * mu2
    # Local (co)variances via E[x^2] - E[x]^2.
    sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
    numerator = (2 * mu1_mu2 + stab1) * (2 * sigma12 + stab2)
    denominator = (mu1_sq + mu2_sq + stab1) * (sigma1_sq + sigma2_sq + stab2)
    return (numerator / denominator).mean()
def SSIM(img1, img2, shave_border):
    '''calculate SSIM
    the same outputs as MATLAB's
    img1, img2: [0, 255]

    A border of ``shave_border`` pixels is removed from every edge before
    the comparison. 3-channel images are scored per channel and averaged.
    '''
    img1 = img1[shave_border:-shave_border, shave_border:-shave_border]
    img2 = img2[shave_border:-shave_border, shave_border:-shave_border]
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    if img1.ndim == 2:
        return calculate_ssim(img1, img2)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            # Average SSIM over the three channels. The original passed the
            # full 3-channel arrays on every loop iteration, computing the
            # same value three times instead of once per channel.
            ssims = [calculate_ssim(img1[:, :, i], img2[:, :, i]) for i in range(3)]
            return np.array(ssims).mean()
        elif img1.shape[2] == 1:
            return calculate_ssim(np.squeeze(img1), np.squeeze(img2))
    else:
        raise ValueError('Wrong input image dimensions.')
RefVAE | RefVAE-main/network.py | import torch
import torch.nn as nn
class encoder3(nn.Module):
    """VGG-style encoder up to relu3_1 with reflection padding.

    Maps a (N, 3, H, W) image to a (N, 256, H/4, W/4) feature map.
    """

    def __init__(self):
        super(encoder3, self).__init__()
        # Stage 1: 3 -> 64 channels at full resolution.
        self.conv1 = nn.Conv2d(3, 3, 1, 1, 0)
        self.reflecPad1 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv2 = nn.Conv2d(3, 64, 3, 1, 0)
        self.relu2 = nn.ReLU(inplace=True)
        self.reflecPad3 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
        self.relu3 = nn.ReLU(inplace=True)
        # Pooling indices are returned but never used by forward().
        self.maxPool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
        # Stage 2: 64 -> 128 channels at half resolution.
        self.reflecPad4 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv4 = nn.Conv2d(64, 128, 3, 1, 0)
        self.relu4 = nn.ReLU(inplace=True)
        self.reflecPad5 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv5 = nn.Conv2d(128, 128, 3, 1, 0)
        self.relu5 = nn.ReLU(inplace=True)
        self.maxPool2 = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
        # Stage 3: 128 -> 256 channels at quarter resolution.
        self.reflecPad6 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv6 = nn.Conv2d(128, 256, 3, 1, 0)
        self.relu6 = nn.ReLU(inplace=True)

    def forward(self, x):
        """Encode ``x`` and return the relu3_1 feature map."""
        h = self.relu2(self.conv2(self.reflecPad1(self.conv1(x))))
        h = self.relu3(self.conv3(self.reflecPad3(h)))
        h, _ = self.maxPool(h)
        h = self.relu4(self.conv4(self.reflecPad4(h)))
        h = self.relu5(self.conv5(self.reflecPad5(h)))
        h, _ = self.maxPool2(h)
        return self.relu6(self.conv6(self.reflecPad6(h)))
class decoder3(nn.Module):
    """Mirror of ``encoder3``: maps a (N, 256, H, W) relu3_1 feature map back
    to a (N, 3, 4H, 4W) RGB image via nearest-neighbour upsampling."""

    def __init__(self):
        super(decoder3,self).__init__()
        # decoder
        self.reflecPad7 = nn.ReflectionPad2d((1,1,1,1))
        self.conv7 = nn.Conv2d(256,128,3,1,0)
        self.relu7 = nn.ReLU(inplace=True)
        # 56 x 56
        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        # 112 x 112
        self.reflecPad8 = nn.ReflectionPad2d((1,1,1,1))
        self.conv8 = nn.Conv2d(128,128,3,1,0)
        self.relu8 = nn.ReLU(inplace=True)
        # 112 x 112
        self.reflecPad9 = nn.ReflectionPad2d((1,1,1,1))
        self.conv9 = nn.Conv2d(128,64,3,1,0)
        self.relu9 = nn.ReLU(inplace=True)
        self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
        # 224 x 224
        self.reflecPad10 = nn.ReflectionPad2d((1,1,1,1))
        self.conv10 = nn.Conv2d(64,64,3,1,0)
        self.relu10 = nn.ReLU(inplace=True)
        self.reflecPad11 = nn.ReflectionPad2d((1,1,1,1))
        self.conv11 = nn.Conv2d(64,3,3,1,0)

    def forward(self, x):
        """Decode feature map ``x`` to an image (no final activation)."""
        # The original created an unused `output = {}` dict here; removed.
        out = self.reflecPad7(x)
        out = self.conv7(out)
        out = self.relu7(out)
        out = self.unpool(out)
        out = self.reflecPad8(out)
        out = self.conv8(out)
        out = self.relu8(out)
        out = self.reflecPad9(out)
        out = self.conv9(out)
        out_relu9 = self.relu9(out)
        out = self.unpool2(out_relu9)
        out = self.reflecPad10(out)
        out = self.conv10(out)
        out = self.relu10(out)
        out = self.reflecPad11(out)
        out = self.conv11(out)
        return out
class encoder4(nn.Module):
    """VGG-style encoder up to relu4_1.

    ``forward`` returns a dict of intermediate activations keyed 'r11',
    'r12', 'p1', ..., 'r41'. An optional feature-transform hook
    (``matrix31`` with style features ``sF``) can be applied after relu3_1.
    """
    def __init__(self):
        super(encoder4,self).__init__()
        # vgg
        # 224 x 224
        self.conv1 = nn.Conv2d(3,3,1,1,0)
        self.reflecPad1 = nn.ReflectionPad2d((1,1,1,1))
        # 226 x 226
        self.conv2 = nn.Conv2d(3,64,3,1,0)
        self.relu2 = nn.ReLU(inplace=True)
        # 224 x 224
        self.reflecPad3 = nn.ReflectionPad2d((1,1,1,1))
        self.conv3 = nn.Conv2d(64,64,3,1,0)
        self.relu3 = nn.ReLU(inplace=True)
        # 224 x 224
        self.maxPool = nn.MaxPool2d(kernel_size=2,stride=2)
        # 112 x 112
        self.reflecPad4 = nn.ReflectionPad2d((1,1,1,1))
        self.conv4 = nn.Conv2d(64,128,3,1,0)
        self.relu4 = nn.ReLU(inplace=True)
        # 112 x 112
        self.reflecPad5 = nn.ReflectionPad2d((1,1,1,1))
        self.conv5 = nn.Conv2d(128,128,3,1,0)
        self.relu5 = nn.ReLU(inplace=True)
        # 112 x 112
        self.maxPool2 = nn.MaxPool2d(kernel_size=2,stride=2)
        # 56 x 56
        self.reflecPad6 = nn.ReflectionPad2d((1,1,1,1))
        self.conv6 = nn.Conv2d(128,256,3,1,0)
        self.relu6 = nn.ReLU(inplace=True)
        # 56 x 56
        self.reflecPad7 = nn.ReflectionPad2d((1,1,1,1))
        self.conv7 = nn.Conv2d(256,256,3,1,0)
        self.relu7 = nn.ReLU(inplace=True)
        # 56 x 56
        self.reflecPad8 = nn.ReflectionPad2d((1,1,1,1))
        self.conv8 = nn.Conv2d(256,256,3,1,0)
        self.relu8 = nn.ReLU(inplace=True)
        # 56 x 56
        self.reflecPad9 = nn.ReflectionPad2d((1,1,1,1))
        self.conv9 = nn.Conv2d(256,256,3,1,0)
        self.relu9 = nn.ReLU(inplace=True)
        # 56 x 56
        self.maxPool3 = nn.MaxPool2d(kernel_size=2,stride=2)
        # 28 x 28
        self.reflecPad10 = nn.ReflectionPad2d((1,1,1,1))
        self.conv10 = nn.Conv2d(256,512,3,1,0)
        self.relu10 = nn.ReLU(inplace=True)
        # 28 x 28
    def forward(self,x,sF=None,matrix11=None,matrix21=None,matrix31=None):
        """Return a dict of intermediate VGG activations up to relu4_1.

        matrix11/matrix21 are accepted but unused; only matrix31 (paired
        with sF['r31']) transforms the relu3_1 features before conv7.
        """
        output = {}
        out = self.conv1(x)
        out = self.reflecPad1(out)
        out = self.conv2(out)
        output['r11'] = self.relu2(out)
        # NOTE: reflecPad7 is reused here (and before conv5) instead of
        # reflecPad3/reflecPad5; all pads are (1,1,1,1), so output is the same.
        out = self.reflecPad7(output['r11'])
        out = self.conv3(out)
        output['r12'] = self.relu3(out)
        output['p1'] = self.maxPool(output['r12'])
        out = self.reflecPad4(output['p1'])
        out = self.conv4(out)
        output['r21'] = self.relu4(out)
        out = self.reflecPad7(output['r21'])
        out = self.conv5(out)
        output['r22'] = self.relu5(out)
        output['p2'] = self.maxPool2(output['r22'])
        out = self.reflecPad6(output['p2'])
        out = self.conv6(out)
        output['r31'] = self.relu6(out)
        if(matrix31 is not None):
            # Optional style transform on the relu3_1 features.
            feature3,transmatrix3 = matrix31(output['r31'],sF['r31'])
            out = self.reflecPad7(feature3)
        else:
            out = self.reflecPad7(output['r31'])
        out = self.conv7(out)
        output['r32'] = self.relu7(out)
        out = self.reflecPad8(output['r32'])
        out = self.conv8(out)
        output['r33'] = self.relu8(out)
        out = self.reflecPad9(output['r33'])
        out = self.conv9(out)
        output['r34'] = self.relu9(out)
        output['p3'] = self.maxPool3(output['r34'])
        out = self.reflecPad10(output['p3'])
        out = self.conv10(out)
        output['r41'] = self.relu10(out)
        return output
class decoder4(nn.Module):
    """Mirror of ``encoder4``: maps a (N, 512, H, W) relu4_1 feature map to a
    (N, 3, 8H, 8W) RGB image via three nearest-neighbour upsampling stages."""

    def __init__(self):
        super(decoder4, self).__init__()
        # Stage 512 -> 256 at input resolution.
        self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv11 = nn.Conv2d(512, 256, 3, 1, 0)
        self.relu11 = nn.ReLU(inplace=True)
        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        # Four 256-channel convs, then 256 -> 128.
        self.reflecPad12 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv12 = nn.Conv2d(256, 256, 3, 1, 0)
        self.relu12 = nn.ReLU(inplace=True)
        self.reflecPad13 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv13 = nn.Conv2d(256, 256, 3, 1, 0)
        self.relu13 = nn.ReLU(inplace=True)
        self.reflecPad14 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv14 = nn.Conv2d(256, 256, 3, 1, 0)
        self.relu14 = nn.ReLU(inplace=True)
        self.reflecPad15 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv15 = nn.Conv2d(256, 128, 3, 1, 0)
        self.relu15 = nn.ReLU(inplace=True)
        self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
        # 128 -> 64.
        self.reflecPad16 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv16 = nn.Conv2d(128, 128, 3, 1, 0)
        self.relu16 = nn.ReLU(inplace=True)
        self.reflecPad17 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv17 = nn.Conv2d(128, 64, 3, 1, 0)
        self.relu17 = nn.ReLU(inplace=True)
        self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2)
        # 64 -> 3 output image (no final activation).
        self.reflecPad18 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv18 = nn.Conv2d(64, 64, 3, 1, 0)
        self.relu18 = nn.ReLU(inplace=True)
        self.reflecPad19 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv19 = nn.Conv2d(64, 3, 3, 1, 0)

    def forward(self, x):
        """Decode a relu4_1 feature map to an RGB image (8x upsampling)."""
        h = self.relu11(self.conv11(self.reflecPad11(x)))
        h = self.unpool(h)
        h = self.relu12(self.conv12(self.reflecPad12(h)))
        h = self.relu13(self.conv13(self.reflecPad13(h)))
        h = self.relu14(self.conv14(self.reflecPad14(h)))
        h = self.relu15(self.conv15(self.reflecPad15(h)))
        h = self.unpool2(h)
        h = self.relu16(self.conv16(self.reflecPad16(h)))
        h = self.relu17(self.conv17(self.reflecPad17(h)))
        h = self.unpool3(h)
        h = self.relu18(self.conv18(self.reflecPad18(h)))
        return self.conv19(self.reflecPad19(h))
class encoder5(nn.Module):
    """VGG-style encoder up to relu5_1.

    ``forward`` returns a dict of intermediate activations keyed 'r11'
    through 'r51'. An optional feature transform (``matrix31`` with style
    features ``sF`` and the 256-d vectors ``contentV256``/``styleV256``)
    can be applied after relu3_1.
    """
    def __init__(self):
        super(encoder5,self).__init__()
        # vgg
        # 224 x 224
        self.conv1 = nn.Conv2d(3,3,1,1,0)
        self.reflecPad1 = nn.ReflectionPad2d((1,1,1,1))
        # 226 x 226
        self.conv2 = nn.Conv2d(3,64,3,1,0)
        self.relu2 = nn.ReLU(inplace=True)
        # 224 x 224
        self.reflecPad3 = nn.ReflectionPad2d((1,1,1,1))
        self.conv3 = nn.Conv2d(64,64,3,1,0)
        self.relu3 = nn.ReLU(inplace=True)
        # 224 x 224
        self.maxPool = nn.MaxPool2d(kernel_size=2,stride=2)
        # 112 x 112
        self.reflecPad4 = nn.ReflectionPad2d((1,1,1,1))
        self.conv4 = nn.Conv2d(64,128,3,1,0)
        self.relu4 = nn.ReLU(inplace=True)
        # 112 x 112
        self.reflecPad5 = nn.ReflectionPad2d((1,1,1,1))
        self.conv5 = nn.Conv2d(128,128,3,1,0)
        self.relu5 = nn.ReLU(inplace=True)
        # 112 x 112
        self.maxPool2 = nn.MaxPool2d(kernel_size=2,stride=2)
        # 56 x 56
        self.reflecPad6 = nn.ReflectionPad2d((1,1,1,1))
        self.conv6 = nn.Conv2d(128,256,3,1,0)
        self.relu6 = nn.ReLU(inplace=True)
        # 56 x 56
        self.reflecPad7 = nn.ReflectionPad2d((1,1,1,1))
        self.conv7 = nn.Conv2d(256,256,3,1,0)
        self.relu7 = nn.ReLU(inplace=True)
        # 56 x 56
        self.reflecPad8 = nn.ReflectionPad2d((1,1,1,1))
        self.conv8 = nn.Conv2d(256,256,3,1,0)
        self.relu8 = nn.ReLU(inplace=True)
        # 56 x 56
        self.reflecPad9 = nn.ReflectionPad2d((1,1,1,1))
        self.conv9 = nn.Conv2d(256,256,3,1,0)
        self.relu9 = nn.ReLU(inplace=True)
        # 56 x 56
        self.maxPool3 = nn.MaxPool2d(kernel_size=2,stride=2)
        # 28 x 28
        self.reflecPad10 = nn.ReflectionPad2d((1,1,1,1))
        self.conv10 = nn.Conv2d(256,512,3,1,0)
        self.relu10 = nn.ReLU(inplace=True)
        self.reflecPad11 = nn.ReflectionPad2d((1,1,1,1))
        self.conv11 = nn.Conv2d(512,512,3,1,0)
        self.relu11 = nn.ReLU(inplace=True)
        self.reflecPad12 = nn.ReflectionPad2d((1,1,1,1))
        self.conv12 = nn.Conv2d(512,512,3,1,0)
        self.relu12 = nn.ReLU(inplace=True)
        self.reflecPad13 = nn.ReflectionPad2d((1,1,1,1))
        self.conv13 = nn.Conv2d(512,512,3,1,0)
        self.relu13 = nn.ReLU(inplace=True)
        self.maxPool4 = nn.MaxPool2d(kernel_size=2,stride=2)
        self.reflecPad14 = nn.ReflectionPad2d((1,1,1,1))
        self.conv14 = nn.Conv2d(512,512,3,1,0)
        self.relu14 = nn.ReLU(inplace=True)
    def forward(self,x,sF=None,contentV256=None,styleV256=None,matrix11=None,matrix21=None,matrix31=None):
        """Return a dict of intermediate VGG activations up to relu5_1.

        matrix11/matrix21 are accepted but unused; the transform branch
        triggers on styleV256 being provided (not on matrix31 itself).
        """
        output = {}
        out = self.conv1(x)
        out = self.reflecPad1(out)
        out = self.conv2(out)
        output['r11'] = self.relu2(out)
        # NOTE: reflecPad7 is reused in place of reflecPad3/reflecPad5
        # (see disabled lines); all pads are (1,1,1,1), so output is the same.
        out = self.reflecPad7(output['r11'])
        #out = self.reflecPad3(output['r11'])
        out = self.conv3(out)
        output['r12'] = self.relu3(out)
        output['p1'] = self.maxPool(output['r12'])
        out = self.reflecPad4(output['p1'])
        out = self.conv4(out)
        output['r21'] = self.relu4(out)
        out = self.reflecPad7(output['r21'])
        #out = self.reflecPad5(output['r21'])
        out = self.conv5(out)
        output['r22'] = self.relu5(out)
        output['p2'] = self.maxPool2(output['r22'])
        out = self.reflecPad6(output['p2'])
        out = self.conv6(out)
        output['r31'] = self.relu6(out)
        if(styleV256 is not None):
            # Optional style transform on the relu3_1 features.
            feature = matrix31(output['r31'],sF['r31'],contentV256,styleV256)
            out = self.reflecPad7(feature)
        else:
            out = self.reflecPad7(output['r31'])
        out = self.conv7(out)
        output['r32'] = self.relu7(out)
        out = self.reflecPad8(output['r32'])
        out = self.conv8(out)
        output['r33'] = self.relu8(out)
        out = self.reflecPad9(output['r33'])
        out = self.conv9(out)
        output['r34'] = self.relu9(out)
        output['p3'] = self.maxPool3(output['r34'])
        out = self.reflecPad10(output['p3'])
        out = self.conv10(out)
        output['r41'] = self.relu10(out)
        out = self.reflecPad11(output['r41'])
        out = self.conv11(out)
        output['r42'] = self.relu11(out)
        out = self.reflecPad12(output['r42'])
        out = self.conv12(out)
        output['r43'] = self.relu12(out)
        out = self.reflecPad13(output['r43'])
        out = self.conv13(out)
        output['r44'] = self.relu13(out)
        output['p4'] = self.maxPool4(output['r44'])
        out = self.reflecPad14(output['p4'])
        out = self.conv14(out)
        output['r51'] = self.relu14(out)
        return output
class decoder5(nn.Module):
    """Mirror of ``encoder5``: maps a (N, 512, H, W) relu5_1 feature map to a
    (N, 3, 16H, 16W) RGB image via four nearest-neighbour upsampling stages."""

    def __init__(self):
        super(decoder5, self).__init__()
        # 512-channel stage.
        self.reflecPad15 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv15 = nn.Conv2d(512, 512, 3, 1, 0)
        self.relu15 = nn.ReLU(inplace=True)
        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        self.reflecPad16 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv16 = nn.Conv2d(512, 512, 3, 1, 0)
        self.relu16 = nn.ReLU(inplace=True)
        self.reflecPad17 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv17 = nn.Conv2d(512, 512, 3, 1, 0)
        self.relu17 = nn.ReLU(inplace=True)
        self.reflecPad18 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv18 = nn.Conv2d(512, 512, 3, 1, 0)
        self.relu18 = nn.ReLU(inplace=True)
        self.reflecPad19 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv19 = nn.Conv2d(512, 256, 3, 1, 0)
        self.relu19 = nn.ReLU(inplace=True)
        self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
        # 256-channel stage.
        self.reflecPad20 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv20 = nn.Conv2d(256, 256, 3, 1, 0)
        self.relu20 = nn.ReLU(inplace=True)
        self.reflecPad21 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv21 = nn.Conv2d(256, 256, 3, 1, 0)
        self.relu21 = nn.ReLU(inplace=True)
        self.reflecPad22 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv22 = nn.Conv2d(256, 256, 3, 1, 0)
        self.relu22 = nn.ReLU(inplace=True)
        self.reflecPad23 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv23 = nn.Conv2d(256, 128, 3, 1, 0)
        self.relu23 = nn.ReLU(inplace=True)
        self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2)
        # 128-channel stage.
        self.reflecPad24 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv24 = nn.Conv2d(128, 128, 3, 1, 0)
        self.relu24 = nn.ReLU(inplace=True)
        self.reflecPad25 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv25 = nn.Conv2d(128, 64, 3, 1, 0)
        self.relu25 = nn.ReLU(inplace=True)
        self.unpool4 = nn.UpsamplingNearest2d(scale_factor=2)
        # 64 -> 3 output image (no final activation).
        self.reflecPad26 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv26 = nn.Conv2d(64, 64, 3, 1, 0)
        self.relu26 = nn.ReLU(inplace=True)
        self.reflecPad27 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv27 = nn.Conv2d(64, 3, 3, 1, 0)

    def forward(self, x):
        """Decode a relu5_1 feature map to an RGB image (16x upsampling)."""
        h = self.relu15(self.conv15(self.reflecPad15(x)))
        h = self.unpool(h)
        h = self.relu16(self.conv16(self.reflecPad16(h)))
        h = self.relu17(self.conv17(self.reflecPad17(h)))
        h = self.relu18(self.conv18(self.reflecPad18(h)))
        h = self.relu19(self.conv19(self.reflecPad19(h)))
        h = self.unpool2(h)
        h = self.relu20(self.conv20(self.reflecPad20(h)))
        h = self.relu21(self.conv21(self.reflecPad21(h)))
        h = self.relu22(self.conv22(self.reflecPad22(h)))
        h = self.relu23(self.conv23(self.reflecPad23(h)))
        h = self.unpool3(h)
        h = self.relu24(self.conv24(self.reflecPad24(h)))
        h = self.relu25(self.conv25(self.reflecPad25(h)))
        h = self.unpool4(h)
        h = self.relu26(self.conv26(self.reflecPad26(h)))
        return self.conv27(self.reflecPad27(h))
class ConvBlock(torch.nn.Module):
    """Conv2d -> InstanceNorm2d -> PReLU building block."""

    def __init__(self, input_size, output_size, kernel_size, stride, padding, bias=True):
        super(ConvBlock, self).__init__()
        self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
        self.act = torch.nn.PReLU()
        self.bn = nn.InstanceNorm2d(output_size)

    def forward(self, x):
        """Convolve, normalize, then apply the PReLU activation."""
        return self.act(self.bn(self.conv(x)))
class discriminator(nn.Module):
def __init__(self, num_channels, base_filter):
super(discriminator, self).__init__()
self.input_conv = ConvBlock(num_channels, base_filter, 3, 1, 1)#512
self.conv1 = ConvBlock(base_filter, base_filter, 3, 1, 1)
self.max1 = nn.MaxPool2d(2, 2, 0)
self.conv2 = ConvBlock(base_filter, 2*base_filter, 3, 1, 1)
self.conv3 = ConvBlock(2*base_filter, 2*base_filter, 3, 1, 1)
self.max2 = nn.MaxPool2d(2, 2, 0)
self.conv4 = ConvBlock(2*base_filter, 4*base_filter, 3, 1, 1)
self.conv5 = ConvBlock(4*base_filter, 4*base_filter, 3, 1, 1)
self.max3 = nn.MaxPool2d(2, 2, 0)
self.conv6 = ConvBlock(4*base_filter, 8*base_filter, 3, 1, 1)
self.conv7 = ConvBlock(8*base_filter, 8*base_filter, 3, 1, 1)
self.weight1 = nn.Conv2d(2*base_filter, 1, 3, 1, 1)
self.weight2 = nn.Conv2d(4*base_filter, 1, 3, 1, 1)
self.weight3 = nn.Conv2d(8*base_filter, 1, 3, 1, 1)
for m in self.modules():
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
torch.nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
m.bias.data.zero_()
elif classname.find('ConvTranspose2d') != -1:
torch.nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
out = self.input_conv(x)
out = self.max1(self.conv1(out))
out = self.conv3(self.conv2(out))
feat1 = self.weight1(out)
out = self.conv5(self.conv4(self.max2(out)))
feat2 = self.weight2(out)
out = self.conv7(self.conv6(self.max3(out)))
feat3 = self.weight3(out)
b = x.shape[0]
out = torch.cat((feat1.view(b, -1), feat2.view(b, -1), feat3.view(b, -1)), 1)
return out
class discriminator_v2(nn.Module):
def __init__(self, num_channels, base_filter):
super(discriminator_v2, self).__init__()
self.input_conv = nn.Conv2d(num_channels*2, base_filter, 4, 2, 1)#512*256
self.conv1 = nn.Conv2d(base_filter, base_filter * 2, 4, 2, 1)
self.norm1 = nn.InstanceNorm2d(base_filter * 2)
self.conv2 = nn.Conv2d(base_filter * 2, base_filter * 4, 4, 2, 1)
self.norm2 = nn.InstanceNorm2d(base_filter * 4)
self.conv3 = nn.Conv2d(base_filter * 4, base_filter * 8, 4, 2, 1)
self.norm3 = nn.InstanceNorm2d(base_filter * 8)
self.act = nn.LeakyReLU(negative_slope=0.2, inplace=True)
self.weight = nn.Conv2d(base_filter * 8, 1, 3, 1, 1)
self.down = nn.UpsamplingBilinear2d(scale_factor=0.5)
for m in self.modules():
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
elif classname.find('ConvTranspose2d') != -1:
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def encode(self, x):
feat1 = self.act(self.input_conv(x))
feat1 = self.act(self.norm1(self.conv1(feat1)))
feat2 = self.act(self.norm2(self.conv2(feat1)))
feat3 = self.act(self.norm3(self.conv3(feat2)))
out3 = self.weight(feat3).view(feat3.shape[0], -1)
feat = torch.cat((feat1.view(feat1.shape[0], -1), feat2.view(feat2.shape[0], -1), feat3.view(feat3.shape[0], -1)), 1)
return feat, out3
def forward(self, x, y):
x = torch.cat((x, y), 1)
feat1, prob1 = self.encode(x)
x = self.down(x)
feat2, prob2 = self.encode(x)
x = self.down(x)
feat3, prob3 = self.encode(x)
feat_out = torch.cat((feat1, feat2, feat3), 1)
prob_out = torch.cat((prob1, prob2, prob3), 1)
return feat_out, prob_out
class encoder6(nn.Module):
    """VGG-style encoder up to relu4_1 (same architecture as ``encoder4``).

    ``forward`` returns a dict of intermediate activations keyed 'r11',
    'r12', 'p1', ..., 'r41'. An optional feature-transform hook
    (``matrix31`` with style features ``sF``) can be applied after relu3_1.
    """
    def __init__(self):
        super(encoder6,self).__init__()
        # vgg
        # 224 x 224
        self.conv1 = nn.Conv2d(3,3,1,1,0)
        self.reflecPad1 = nn.ReflectionPad2d((1,1,1,1))
        # 226 x 226
        self.conv2 = nn.Conv2d(3,64,3,1,0)
        self.relu2 = nn.ReLU(inplace=True)
        # 224 x 224
        self.reflecPad3 = nn.ReflectionPad2d((1,1,1,1))
        self.conv3 = nn.Conv2d(64,64,3,1,0)
        self.relu3 = nn.ReLU(inplace=True)
        # 224 x 224
        self.maxPool = nn.MaxPool2d(kernel_size=2,stride=2)
        # 112 x 112
        self.reflecPad4 = nn.ReflectionPad2d((1,1,1,1))
        self.conv4 = nn.Conv2d(64,128,3,1,0)
        self.relu4 = nn.ReLU(inplace=True)
        # 112 x 112
        self.reflecPad5 = nn.ReflectionPad2d((1,1,1,1))
        self.conv5 = nn.Conv2d(128,128,3,1,0)
        self.relu5 = nn.ReLU(inplace=True)
        # 112 x 112
        self.maxPool2 = nn.MaxPool2d(kernel_size=2,stride=2)
        # 56 x 56
        self.reflecPad6 = nn.ReflectionPad2d((1,1,1,1))
        self.conv6 = nn.Conv2d(128,256,3,1,0)
        self.relu6 = nn.ReLU(inplace=True)
        # 56 x 56
        self.reflecPad7 = nn.ReflectionPad2d((1,1,1,1))
        self.conv7 = nn.Conv2d(256,256,3,1,0)
        self.relu7 = nn.ReLU(inplace=True)
        # 56 x 56
        self.reflecPad8 = nn.ReflectionPad2d((1,1,1,1))
        self.conv8 = nn.Conv2d(256,256,3,1,0)
        self.relu8 = nn.ReLU(inplace=True)
        # 56 x 56
        self.reflecPad9 = nn.ReflectionPad2d((1,1,1,1))
        self.conv9 = nn.Conv2d(256,256,3,1,0)
        self.relu9 = nn.ReLU(inplace=True)
        # 56 x 56
        self.maxPool3 = nn.MaxPool2d(kernel_size=2,stride=2)
        # 28 x 28
        self.reflecPad10 = nn.ReflectionPad2d((1,1,1,1))
        self.conv10 = nn.Conv2d(256,512,3,1,0)
        self.relu10 = nn.ReLU(inplace=True)
        # 28 x 28
    def forward(self,x,sF=None,matrix11=None,matrix21=None,matrix31=None):
        """Return a dict of intermediate VGG activations up to relu4_1.

        matrix11/matrix21 are accepted but unused; only matrix31 (paired
        with sF['r31']) transforms the relu3_1 features before conv7.
        """
        output = {}
        out = self.conv1(x)
        out = self.reflecPad1(out)
        out = self.conv2(out)
        output['r11'] = self.relu2(out)
        # NOTE: reflecPad7 is reused here (and before conv5) instead of
        # reflecPad3/reflecPad5; all pads are (1,1,1,1), so output is the same.
        out = self.reflecPad7(output['r11'])
        out = self.conv3(out)
        output['r12'] = self.relu3(out)
        output['p1'] = self.maxPool(output['r12'])
        out = self.reflecPad4(output['p1'])
        out = self.conv4(out)
        output['r21'] = self.relu4(out)
        out = self.reflecPad7(output['r21'])
        out = self.conv5(out)
        output['r22'] = self.relu5(out)
        output['p2'] = self.maxPool2(output['r22'])
        out = self.reflecPad6(output['p2'])
        out = self.conv6(out)
        output['r31'] = self.relu6(out)
        if(matrix31 is not None):
            # Optional style transform on the relu3_1 features.
            feature3,transmatrix3 = matrix31(output['r31'],sF['r31'])
            out = self.reflecPad7(feature3)
        else:
            out = self.reflecPad7(output['r31'])
        out = self.conv7(out)
        output['r32'] = self.relu7(out)
        out = self.reflecPad8(output['r32'])
        out = self.conv8(out)
        output['r33'] = self.relu8(out)
        out = self.reflecPad9(output['r33'])
        out = self.conv9(out)
        output['r34'] = self.relu9(out)
        output['p3'] = self.maxPool3(output['r34'])
        out = self.reflecPad10(output['p3'])
        out = self.conv10(out)
        output['r41'] = self.relu10(out)
        return output
class decoder6(nn.Module):
    """Mirror of ``encoder6``: maps a (N, 512, H, W) relu4_1 feature map to a
    (N, 3, 8H, 8W) RGB image. Unlike ``decoder4``, upsampling is learned
    (stride-2 transposed convolutions followed by ReLU)."""

    def __init__(self):
        super(decoder6, self).__init__()
        # 512 -> 256 stage.
        self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv11 = nn.Conv2d(512, 256, 3, 1, 0)
        self.relu11 = nn.ReLU(inplace=True)
        self.unpool1 = nn.ConvTranspose2d(256, 256, 4, 2, 1)
        self.act1 = nn.ReLU()
        # Four 256-channel convs, the last reducing to 128.
        self.reflecPad12 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv12 = nn.Conv2d(256, 256, 3, 1, 0)
        self.relu12 = nn.ReLU(inplace=True)
        self.reflecPad13 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv13 = nn.Conv2d(256, 256, 3, 1, 0)
        self.relu13 = nn.ReLU(inplace=True)
        self.reflecPad14 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv14 = nn.Conv2d(256, 256, 3, 1, 0)
        self.relu14 = nn.ReLU(inplace=True)
        self.reflecPad15 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv15 = nn.Conv2d(256, 128, 3, 1, 0)
        self.relu15 = nn.ReLU(inplace=True)
        self.unpool2 = nn.ConvTranspose2d(128, 128, 4, 2, 1)
        self.act2 = nn.ReLU()
        # 128 -> 64 stage.
        self.reflecPad16 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv16 = nn.Conv2d(128, 128, 3, 1, 0)
        self.relu16 = nn.ReLU(inplace=True)
        self.reflecPad17 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv17 = nn.Conv2d(128, 64, 3, 1, 0)
        self.relu17 = nn.ReLU(inplace=True)
        self.unpool3 = nn.ConvTranspose2d(64, 64, 4, 2, 1)
        self.act3 = nn.ReLU()
        # 64 -> 3 output image (no final activation).
        self.reflecPad18 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv18 = nn.Conv2d(64, 64, 3, 1, 0)
        self.relu18 = nn.ReLU(inplace=True)
        self.reflecPad19 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv19 = nn.Conv2d(64, 3, 3, 1, 0)

    def forward(self, x):
        """Decode a relu4_1 feature map to an RGB image (8x upsampling)."""
        h = self.relu11(self.conv11(self.reflecPad11(x)))
        h = self.act1(self.unpool1(h))
        h = self.relu12(self.conv12(self.reflecPad12(h)))
        h = self.relu13(self.conv13(self.reflecPad13(h)))
        h = self.relu14(self.conv14(self.reflecPad14(h)))
        h = self.relu15(self.conv15(self.reflecPad15(h)))
        h = self.act2(self.unpool2(h))
        h = self.relu16(self.conv16(self.reflecPad16(h)))
        h = self.relu17(self.conv17(self.reflecPad17(h)))
        h = self.act3(self.unpool3(h))
        h = self.relu18(self.conv18(self.reflecPad18(h)))
        return self.conv19(self.reflecPad19(h))
# class decoder6(nn.Module):
# def __init__(self):
# super(decoder6,self).__init__()
# # decoder
# self.reflecPad11 = nn.ReflectionPad2d((1,1,1,1))
# self.conv11 = nn.Conv2d(512,256,3,1,0)
# self.relu11 = nn.ReLU(inplace=True)
# # 28 x 28
#
# self.unpool = nn.UpsamplingBilinear2d(scale_factor=2)
# # 56 x 56
#
# self.reflecPad12 = nn.ReflectionPad2d((1,1,1,1))
# self.conv12 = nn.Conv2d(256,256,3,1,0)
# self.relu12 = nn.ReLU(inplace=True)
# # 56 x 56
#
# self.reflecPad13 = nn.ReflectionPad2d((1,1,1,1))
# self.conv13 = nn.Conv2d(256,256,3,1,0)
# self.relu13 = nn.ReLU(inplace=True)
# # 56 x 56
#
# self.reflecPad14 = nn.ReflectionPad2d((1,1,1,1))
# self.conv14 = nn.Conv2d(256,256,3,1,0)
# self.relu14 = nn.ReLU(inplace=True)
# # 56 x 56
#
# self.reflecPad15 = nn.ReflectionPad2d((1,1,1,1))
# self.conv15 = nn.Conv2d(256,128,3,1,0)
# self.relu15 = nn.ReLU(inplace=True)
# # 56 x 56
#
# self.unpool2 = nn.UpsamplingBilinear2d(scale_factor=2)
# # 112 x 112
#
# self.reflecPad16 = nn.ReflectionPad2d((1,1,1,1))
# self.conv16 = nn.Conv2d(128,128,3,1,0)
# self.relu16 = nn.ReLU(inplace=True)
# # 112 x 112
#
# self.reflecPad17 = nn.ReflectionPad2d((1,1,1,1))
# self.conv17 = nn.Conv2d(128,64,3,1,0)
# self.relu17 = nn.ReLU(inplace=True)
# # 112 x 112
#
# self.unpool3 = nn.UpsamplingBilinear2d(scale_factor=2)
# # 224 x 224
#
# self.reflecPad18 = nn.ReflectionPad2d((1,1,1,1))
# self.conv18 = nn.Conv2d(64,64,3,1,0)
# self.relu18 = nn.ReLU(inplace=True)
# # 224 x 224
#
# self.reflecPad19 = nn.ReflectionPad2d((1,1,1,1))
# self.conv19 = nn.Conv2d(64,3,3,1,0)
#
# def forward(self,x):
# # decoder
# out = self.reflecPad11(x)
# out = self.conv11(out)
# out = self.relu11(out)
# out = self.unpool(out)
# out = self.reflecPad12(out)
# out = self.conv12(out)
#
# out = self.relu12(out)
# out = self.reflecPad13(out)
# out = self.conv13(out)
# out = self.relu13(out)
# out = self.reflecPad14(out)
# out = self.conv14(out)
# out = self.relu14(out)
# out = self.reflecPad15(out)
# out = self.conv15(out)
# out = self.relu15(out)
# out = self.unpool2(out)
# out = self.reflecPad16(out)
# out = self.conv16(out)
# out = self.relu16(out)
# out = self.reflecPad17(out)
# out = self.conv17(out)
# out = self.relu17(out)
# out = self.unpool3(out)
# out = self.reflecPad18(out)
# out = self.conv18(out)
# out = self.relu18(out)
# out = self.reflecPad19(out)
# out = self.conv19(out)
# return out | 31,991 | 31.611621 | 125 | py |
RefVAE | RefVAE-main/model.py | import torch
import torch.nn as nn
from torch.nn import functional as F
import math
from torchvision import models
class ncc_test(nn.Module):
    """Augment an image with its best-matching reference patches via NCC.

    The input ``x`` and reference ``ref`` are each reduced to a
    single-channel map, split into overlapping 3x3 patches (stride 2),
    and a normalized cross-correlation (NCC) matrix between every
    input/reference patch pair is computed.  For each of the top-``k``
    matches, the selected reference patches are folded back into an
    image and concatenated to ``x`` along the channel dimension, so the
    output has ``k`` extra single-channel maps.

    Args:
        k (int): number of top NCC matches to transfer per input patch.
        patch_size (int): spatial size of the (square) ``nn.Fold`` output;
            must match the spatial size of ``x``.
    """
    def __init__(self,
                 k,
                 patch_size,):
        super(ncc_test, self).__init__()
        self.k = k
        self.patch_size = patch_size
        # 3x3 overlapping patches with stride 2; fold inverts the unfold.
        self.unfold = nn.Unfold(kernel_size=(3, 3), stride=2)
        self.fold = nn.Fold(output_size=(self.patch_size, self.patch_size), kernel_size=(3, 3), stride=2)
        self.max = nn.MaxPool2d(3, stride=1, padding=1)
    def forward(self, x, ref):
        # x_gray = torch.min(x, dim=1, keepdim=True)[0]
        # Input is reduced to a local (max - min) contrast map, the
        # reference to a plain channel mean.
        x_gray = torch.max(self.max(x), dim=1, keepdim=True)[0] - torch.min(x, dim=1, keepdim=True)[0]
        ref_gray = torch.mean(ref, dim=1, keepdim=True)
        # unfold -> (N, 9, L): one 3x3 patch per column
        input_patch = self.unfold(x_gray)
        ref_patch = self.unfold(ref_gray)
        # NOTE(review): the means below are taken over dim=2 (across patch
        # locations) while the norms are over dim=1 (within a patch);
        # textbook NCC centres each patch over dim=1 — confirm the
        # asymmetry is intentional.
        input_mu = torch.mean(input_patch, dim=2, keepdim=True)
        ref_mu = torch.mean(ref_patch, dim=2, keepdim=True)
        input_norm = input_patch - input_mu
        input_len = input_patch.norm(dim=1, keepdim=True)
        input_norm = input_norm / input_len
        input_norm_t = input_norm.permute(0, 2, 1)
        ref_norm = ref_patch - ref_mu
        ref_len = ref_patch.norm(dim=1, keepdim=True)
        ref_norm = ref_norm / ref_len
        # (N, L_in, L_ref) pairwise correlation matrix
        ncc = torch.bmm(input_norm_t, ref_norm)
        # idx = torch.argmax(ncc, dim=2)
        # indices of the k best reference patches for every input patch
        idx = torch.topk(ncc, k=self.k, dim=2)[1]
        x_rec = torch.zeros(input_patch.shape[0], input_patch.shape[1], input_patch.shape[2]).type_as(input_patch)
        for i in range(self.k):
            # gather the i-th best reference patch for every location,
            # one sample at a time
            for j in range(input_patch.shape[0]):
                t = idx[j:j+1, :, i:i+1].squeeze(2)
                x_rec[j:j+1, :, :] = ref_patch[j:j+1, :, t].squeeze(2)
            # fold the matched patches into an image and stack onto x
            x = torch.cat((x, self.fold(x_rec)), dim=1)
        return x
def norm_mean_std(feat, eps=1e-5):
    """Normalize a 4-D feature map to zero mean / unit std per (N, C).

    ``eps`` is added to the variance before the square root to avoid
    division by zero.  Statistics are computed over the spatial
    dimensions of each (sample, channel) slice.
    """
    assert feat.dim() == 4
    n, c = feat.shape[0], feat.shape[1]
    flat = feat.view(n, c, -1)
    std = (flat.var(dim=2) + eps).sqrt().view(n, c, 1, 1)
    mean = flat.mean(dim=2).view(n, c, 1, 1)
    return (feat - mean.expand(feat.size())) / std.expand(feat.size())
class VAE_v1(nn.Module):
    """Conditional VAE for super-resolution operating in pixel space.

    ``encode`` compresses the HR residual (HR minus bicubic-upsampled LR)
    into a 256-d Gaussian latent (the 512-d encoder output is chunked
    into mu / logvar halves).  ``decode`` expands a latent sample into a
    16x32x32 seed map and upsamples it 8x, modulating the features at
    four scales with affine (mu, var) masks predicted from
    progressively-resized copies of the LR image.

    Args:
        up_factor (int): LR->HR scale used for the bicubic baseline in
            ``forward``.
    """
    def __init__(self, up_factor):
        super(VAE_v1, self).__init__()
        self.up_factor = up_factor
        # residual encoder: six stride-2 stages, 3 -> 512 channels
        # (input spatial size is reduced by 64x overall)
        self.init_feat = nn.Sequential(
            nn.Conv2d(3, 64, 3, 1, 1),
            nn.InstanceNorm2d(64),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2, 0),
            nn.Conv2d(64, 128, 3, 1, 1),
            nn.InstanceNorm2d(128),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2, 0),
            nn.Conv2d(128, 256, 3, 1, 1),
            nn.InstanceNorm2d(256),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2, 0),
            nn.Conv2d(256, 512, 3, 1, 1),
            nn.InstanceNorm2d(512),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2, 0),
            nn.Conv2d(512, 512, 3, 1, 1),
            nn.InstanceNorm2d(512),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2, 0),
            nn.Conv2d(512, 512, 3, 1, 1),
            nn.InstanceNorm2d(512),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2, 0),
        )
        # flattened encoder features (8192) -> 512 = concat(mu, logvar)
        self.VAE_encoder = nn.Sequential(
            nn.Linear(8192, 512),
            # nn.ReLU(),
            # nn.Linear(2048, 512),
        )
        # 256-d latent -> 16384 = 16 x 32 x 32 seed map
        self.VAE_decoder = nn.Sequential(
            nn.Linear(256, 16384),
        )
        # four decoder stages; each maskN predicts per-scale (mu, var)
        # modulation from a resized LR image (its output is chunked in 2)
        self.up1 = nn.Sequential(
            nn.Conv2d(16, 256, 3, 1, 1),
            nn.LeakyReLU(),
            ResnetBlock(256, 3, 1, 1),
            nn.InstanceNorm2d(256)
        )
        self.mask1 = nn.Sequential(
            nn.Conv2d(3, 256, 3, 1, 1),
            # nn.InstanceNorm2d(256),
            nn.LeakyReLU(),
            nn.Conv2d(256, 512, 1, 1, 0)
        )
        self.up2 = nn.Sequential(
            nn.Conv2d(256, 128, 3, 1, 1),
            nn.LeakyReLU(),
            ResnetBlock(128, 3, 1, 1),
            nn.InstanceNorm2d(128)
        )
        self.mask2 = nn.Sequential(
            nn.Conv2d(3, 128, 3, 1, 1),
            # nn.InstanceNorm2d(128),
            nn.LeakyReLU(),
            nn.Conv2d(128, 256, 1, 1, 0)
        )
        self.up3 = nn.Sequential(
            nn.Conv2d(128, 64, 3, 1, 1),
            nn.LeakyReLU(),
            ResnetBlock(64, 3, 1, 1),
            nn.InstanceNorm2d(64)
        )
        self.mask3 = nn.Sequential(
            nn.Conv2d(3, 64, 3, 1, 1),
            # nn.InstanceNorm2d(64),
            nn.LeakyReLU(),
            nn.Conv2d(64, 128, 1, 1, 0)
        )
        self.up4 = nn.Sequential(
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.LeakyReLU(),
            ResnetBlock(64, 3, 1, 1),
            nn.InstanceNorm2d(64)
        )
        self.mask4 = nn.Sequential(
            nn.Conv2d(3, 64, 3, 1, 1),
            # nn.InstanceNorm2d(64),
            nn.LeakyReLU(),
            nn.Conv2d(64, 128, 1, 1, 0)
        )
        # final RGB head; tanh bounds the output to [-1, 1]
        self.recon = nn.Sequential(
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.LeakyReLU(),
            nn.Conv2d(64, 3, 3, 1, 1),
            nn.Tanh()
        )
        self.pix_up = nn.Upsample(scale_factor=2, mode='bilinear')
        # NOTE(review): mu_act / var_act are defined but not used in any
        # method visible here.
        self.mu_act = nn.Sigmoid()
        self.var_act = nn.Hardtanh(min_val=-4.5, max_val=0)
    def reparameterize(self, mu, logvar):
        """Sample z = mu + eps * std with eps ~ N(0, I)."""
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps.mul(std) + mu
    def encode(self, HR):
        """Encode the HR residual into (mu, logvar) of the latent."""
        # initial
        HR_feat = self.init_feat(HR)
        # encoder
        z_q_mu, z_q_logvar = self.VAE_encoder(HR_feat.view(HR_feat.size(0), -1)).chunk(2, dim=1)
        return z_q_mu, z_q_logvar
    def decode(self, LR, z_q):
        """Decode latent ``z_q`` into an SR image conditioned on ``LR``."""
        dec_feat = self.VAE_decoder(z_q).view(LR.size(0), 16, 32, 32)
        # reconstruction: each stage applies feature * (1 + var) + mu with
        # (mu, var) predicted from the LR image at the matching scale.
        up1 = self.up1(dec_feat)
        # up1 = norm_mean_std(up1)
        up1 = F.interpolate(up1, size=[LR.shape[2], LR.shape[3]], mode='bicubic')
        mu1, var1 = self.mask1(LR).chunk(2, dim=1)
        up1 = up1 * (1 + var1) + mu1
        up1 = self.pix_up(up1)
        up2 = self.up2(up1)
        # up2 = norm_mean_std(up2)
        mu2, var2 = self.mask2(F.interpolate(LR, scale_factor=2, mode='bicubic')).chunk(2, dim=1)
        up2 = up2 * (1 + var2) + mu2
        up2 = self.pix_up(up2)
        up3 = self.up3(up2)
        # up3 = norm_mean_std(up3)
        mu3, var3 = self.mask3(F.interpolate(LR, scale_factor=4, mode='bicubic')).chunk(2, dim=1)
        up3 = up3 * (1 + var3) + mu3
        up3 = self.pix_up(up3)
        up4 = self.up4(up3)
        # up4 = norm_mean_std(up4)
        mu4, var4 = self.mask4(F.interpolate(LR, scale_factor=8, mode='bicubic')).chunk(2, dim=1)
        up4 = up4 * (1 + var4) + mu4
        SR = self.recon(up4)
        return SR
    def forward(self, LR, HR=None, z_q=None):
        """Training path (z_q is None): returns (SR, KL); otherwise
        decodes the given latent and returns SR only."""
        # encode
        if z_q is None:
            bic = F.interpolate(LR, scale_factor=self.up_factor, mode='bicubic')
            z_q_mu, z_q_logvar= self.encode(HR - bic)
            z_q = self.reparameterize(z_q_mu, z_q_logvar)
            # standard Gaussian KL divergence (summed, not averaged)
            KL = -0.5 * torch.sum(1 + z_q_logvar - z_q_mu.pow(2) - z_q_logvar.exp())
            # decode
            SR = self.decode(LR, z_q)
            return SR, KL
        else:
            SR= self.decode(LR, z_q)
            return SR
class VAE_v2(nn.Module):
    """Conditional VAE operating on 512-channel feature maps.

    Unlike ``VAE_v1`` this variant encodes/decodes features rather than
    pixels: ``encode`` reduces an HR feature map to a 256-d Gaussian
    latent; ``decode`` expands a latent sample back to feature
    resolution and modulates it with an affine (mu, var) mask predicted
    from the LR feature map.  ``forward`` returns a feature map, not an
    image.

    Args:
        up_factor (int): stored but not used in any method visible here.
    """
    def __init__(self, up_factor):
        super(VAE_v2, self).__init__()
        self.up_factor = up_factor
        # 512 -> 64 channel reduction before flattening
        self.init_feat = nn.Sequential(
            nn.Conv2d(512, 64, 1, 1, 0),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(),
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(),
        )
        # 64 x 16 x 16 = 16384 flattened -> 512 = concat(mu, logvar)
        self.VAE_encoder = nn.Sequential(
            nn.Linear(16384, 512),
            # nn.ReLU(),
            # nn.Linear(2048, 512),
        )
        # 256-d latent -> 64 x 16 x 16 seed map
        self.VAE_decoder = nn.Sequential(
            nn.Linear(256, 16384),
        )
        self.dec_feat = nn.Sequential(
            nn.Conv2d(64, 512, 1, 1, 0),
            # nn.LeakyReLU(),
            ResnetBlock(512, 3, 1, 1),
        )
        # predicts (mu, var) modulation from the LR features
        # (output is chunked into two 512-channel halves)
        self.mask = nn.Sequential(
            nn.Conv2d(512, 512, 1, 1, 0),
            # nn.Dropout2d(p=0.5),
            ResnetBlock(512, 3, 1, 1),
            ResnetBlock(512, 3, 1, 1),
            ResnetBlock(512, 3, 1, 1),
            nn.Conv2d(512, 1024, 1, 1, 0),
        )
        # NOTE(review): mu_act / var_act are defined but not used in any
        # method visible here; self.bn is only referenced in a
        # commented-out line in encode().
        self.mu_act = nn.Sigmoid()
        self.var_act = nn.Hardtanh(min_val=-4.5, max_val=0)
        self.bn = nn.BatchNorm1d(256)
    def reparameterize(self, mu, logvar):
        """Sample z = mu + eps * std with eps ~ N(0, I)."""
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps.mul(std) + mu
    def encode(self, HR_feat):
        """Encode an HR feature map into (mu, logvar) of the latent."""
        # initial
        HR_feat = self.init_feat(HR_feat)
        # encoder: fix spatial size so the Linear's input is always 16384
        HR_feat = F.interpolate(HR_feat, size=[16, 16], mode='bilinear')
        z_q_mu, z_q_logvar = self.VAE_encoder(HR_feat.view(HR_feat.size(0), -1)).chunk(2, dim=1)
        # z_q_mu = self.bn(z_q_mu)
        return z_q_mu, z_q_logvar
    def decode(self, LR_feat, z_q):
        """Decode latent ``z_q`` into a feature map conditioned on LR features."""
        dec_feat = self.VAE_decoder(z_q).view(LR_feat.size(0), 64, 16, 16)
        # reconstruction: resize the seed map to the LR feature resolution
        dec_feat = F.interpolate(dec_feat, size=[LR_feat.shape[2], LR_feat.shape[3]], mode='bilinear')
        feat = self.dec_feat(dec_feat)
        mu, var = self.mask(LR_feat).chunk(2, dim=1)
        feat = feat * (1 + var) + mu
        return feat
    def forward(self, LR_feat, HR_feat=None, z_q=None):
        """Training path (z_q is None): returns (features, KL); otherwise
        decodes the given latent and returns features only."""
        # encode
        if z_q is None:
            z_q_mu, z_q_logvar= self.encode(HR_feat)
            z_q = self.reparameterize(z_q_mu, z_q_logvar)
            # standard Gaussian KL divergence (summed, not averaged)
            KL = -0.5 * torch.sum(1 + z_q_logvar - z_q_mu.pow(2) - z_q_logvar.exp())
            # decode
            SR = self.decode(LR_feat, z_q)
            return SR, KL
        else:
            SR= self.decode(LR_feat, z_q)
            return SR
class VAE_v3_8x(nn.Module):
    """Conditional VAE decoding features to an 8x super-resolved image.

    Extends the ``VAE_v2`` design with a pixel decoder: after the latent
    is decoded and affine-modulated by the LR features, a three-stage
    bilinear-upsampling decoder (8x total) produces an RGB residual that
    is added to the bicubic 8x upsample of the LR image.

    Args:
        up_factor (int): stored but not used in any method visible here
            (the bicubic skip hard-codes scale_factor=8).
    """
    def __init__(self, up_factor):
        super(VAE_v3_8x, self).__init__()
        self.up_factor = up_factor
        # 512 -> 64 channel reduction before flattening
        self.init_feat = nn.Sequential(
            nn.Conv2d(512, 64, 1, 1, 0),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(),
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(),
        )
        # 64 x 16 x 16 = 16384 flattened -> 512 = concat(mu, logvar)
        self.VAE_encoder = nn.Sequential(
            nn.Linear(16384, 512),
            # nn.ReLU(),
            # nn.Linear(2048, 512),
        )
        # 256-d latent -> 64 x 16 x 16 seed map
        self.VAE_decoder = nn.Sequential(
            nn.Linear(256, 16384),
        )
        self.dec_feat = nn.Sequential(
            nn.Conv2d(64, 512, 1, 1, 0),
            # nn.LeakyReLU(),
            ResnetBlock(512, 3, 1, 1),
        )
        # predicts (mu, var) modulation from the LR features
        self.mask = nn.Sequential(
            nn.Conv2d(512, 512, 1, 1, 0),
            # nn.Dropout2d(p=0.5),
            ResnetBlock(512, 3, 1, 1),
            ResnetBlock(512, 3, 1, 1),
            ResnetBlock(512, 3, 1, 1),
            nn.Conv2d(512, 1024, 1, 1, 0),
        )
        # pixel decoder: three 2x bilinear upsamples (8x), 512 -> 3 ch
        self.decoder = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear'),
            nn.Conv2d(512, 256, 3, 1, 1),
            nn.LeakyReLU(),
            nn.Upsample(scale_factor=2, mode='bilinear'),
            nn.Conv2d(256, 128, 3, 1, 1),
            nn.LeakyReLU(),
            nn.Upsample(scale_factor=2, mode='bilinear'),
            nn.Conv2d(128, 64, 3, 1, 1),
            nn.LeakyReLU(),
            nn.Conv2d(64, 64, 1, 1, 0),
            nn.LeakyReLU(),
            nn.Conv2d(64, 3, 3, 1, 1),
        )
        # NOTE(review): mu_act / var_act / bn are defined but not used in
        # any method visible here (bn appears only in a commented line).
        self.mu_act = nn.Sigmoid()
        self.var_act = nn.Hardtanh(min_val=-4.5, max_val=0)
        self.bn = nn.BatchNorm1d(256)
    def reparameterize(self, mu, logvar):
        """Sample z = mu + eps * std with eps ~ N(0, I)."""
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps.mul(std) + mu
    def encode(self, HR_feat):
        """Encode an HR feature map into (mu, logvar) of the latent."""
        # initial
        HR_feat = self.init_feat(HR_feat)
        # encoder: fix spatial size so the Linear's input is always 16384
        HR_feat = F.interpolate(HR_feat, size=[16, 16], mode='bilinear')
        z_q_mu, z_q_logvar = self.VAE_encoder(HR_feat.view(HR_feat.size(0), -1)).chunk(2, dim=1)
        # z_q_mu = self.bn(z_q_mu)
        return z_q_mu, z_q_logvar
    def decode(self, LR, LR_feat, z_q):
        """Decode latent ``z_q`` into an SR image: learned residual plus
        bicubic 8x upsample of ``LR``."""
        dec_feat = self.VAE_decoder(z_q).view(LR_feat.size(0), 64, 16, 16)
        # reconstruction: resize the seed map to the LR feature resolution
        dec_feat = F.interpolate(dec_feat, size=[LR_feat.shape[2], LR_feat.shape[3]], mode='bilinear')
        feat = self.dec_feat(dec_feat)
        mu, var = self.mask(LR_feat).chunk(2, dim=1)
        feat = feat * (1 + var) + mu
        SR = self.decoder(feat) + F.interpolate(LR, scale_factor=8, mode='bicubic')
        return SR
    def forward(self, LR, LR_feat, HR_feat=None, z_q=None):
        """Training path (z_q is None): returns (SR, KL); otherwise
        decodes the given latent and returns SR only."""
        # encode
        if z_q is None:
            z_q_mu, z_q_logvar= self.encode(HR_feat)
            z_q = self.reparameterize(z_q_mu, z_q_logvar)
            # standard Gaussian KL divergence (summed, not averaged)
            KL = -0.5 * torch.sum(1 + z_q_logvar - z_q_mu.pow(2) - z_q_logvar.exp())
            # decode
            SR = self.decode(LR, LR_feat, z_q)
            return SR, KL
        else:
            SR= self.decode(LR, LR_feat, z_q)
            return SR
class VAE_v3_4x(nn.Module):
    """Conditional VAE decoding features to a 4x super-resolved image.

    NOTE(review): this class duplicates ``VAE_v3_8x`` line-for-line
    except for the bicubic skip scale (4 instead of 8) in ``decode``;
    the two could share a single class parameterized by the scale.
    Note the internal ``decoder`` still performs three 2x upsamples
    (8x) while the skip is 4x — confirm the intended input sizes.

    Args:
        up_factor (int): stored but not used in any method visible here.
    """
    def __init__(self, up_factor):
        super(VAE_v3_4x, self).__init__()
        self.up_factor = up_factor
        # 512 -> 64 channel reduction before flattening
        self.init_feat = nn.Sequential(
            nn.Conv2d(512, 64, 1, 1, 0),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(),
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(),
        )
        # 64 x 16 x 16 = 16384 flattened -> 512 = concat(mu, logvar)
        self.VAE_encoder = nn.Sequential(
            nn.Linear(16384, 512),
            # nn.ReLU(),
            # nn.Linear(2048, 512),
        )
        # 256-d latent -> 64 x 16 x 16 seed map
        self.VAE_decoder = nn.Sequential(
            nn.Linear(256, 16384),
        )
        self.dec_feat = nn.Sequential(
            nn.Conv2d(64, 512, 1, 1, 0),
            # nn.LeakyReLU(),
            ResnetBlock(512, 3, 1, 1),
        )
        # predicts (mu, var) modulation from the LR features
        self.mask = nn.Sequential(
            nn.Conv2d(512, 512, 1, 1, 0),
            # nn.Dropout2d(p=0.5),
            ResnetBlock(512, 3, 1, 1),
            ResnetBlock(512, 3, 1, 1),
            ResnetBlock(512, 3, 1, 1),
            nn.Conv2d(512, 1024, 1, 1, 0),
        )
        # pixel decoder: three 2x bilinear upsamples, 512 -> 3 channels
        self.decoder = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear'),
            nn.Conv2d(512, 256, 3, 1, 1),
            nn.LeakyReLU(),
            nn.Upsample(scale_factor=2, mode='bilinear'),
            nn.Conv2d(256, 128, 3, 1, 1),
            nn.LeakyReLU(),
            nn.Upsample(scale_factor=2, mode='bilinear'),
            nn.Conv2d(128, 64, 3, 1, 1),
            nn.LeakyReLU(),
            nn.Conv2d(64, 64, 1, 1, 0),
            nn.LeakyReLU(),
            nn.Conv2d(64, 3, 3, 1, 1),
        )
        # NOTE(review): mu_act / var_act / bn are defined but not used in
        # any method visible here (bn appears only in a commented line).
        self.mu_act = nn.Sigmoid()
        self.var_act = nn.Hardtanh(min_val=-4.5, max_val=0)
        self.bn = nn.BatchNorm1d(256)
    def reparameterize(self, mu, logvar):
        """Sample z = mu + eps * std with eps ~ N(0, I)."""
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps.mul(std) + mu
    def encode(self, HR_feat):
        """Encode an HR feature map into (mu, logvar) of the latent."""
        # initial
        HR_feat = self.init_feat(HR_feat)
        # encoder: fix spatial size so the Linear's input is always 16384
        HR_feat = F.interpolate(HR_feat, size=[16, 16], mode='bilinear')
        z_q_mu, z_q_logvar = self.VAE_encoder(HR_feat.view(HR_feat.size(0), -1)).chunk(2, dim=1)
        # z_q_mu = self.bn(z_q_mu)
        return z_q_mu, z_q_logvar
    def decode(self, LR, LR_feat, z_q):
        """Decode latent ``z_q`` into an SR image: learned residual plus
        bicubic 4x upsample of ``LR``."""
        dec_feat = self.VAE_decoder(z_q).view(LR_feat.size(0), 64, 16, 16)
        # reconstruction: resize the seed map to the LR feature resolution
        dec_feat = F.interpolate(dec_feat, size=[LR_feat.shape[2], LR_feat.shape[3]], mode='bilinear')
        feat = self.dec_feat(dec_feat)
        mu, var = self.mask(LR_feat).chunk(2, dim=1)
        feat = feat * (1 + var) + mu
        SR = self.decoder(feat) + F.interpolate(LR, scale_factor=4, mode='bicubic')
        return SR
    def forward(self, LR, LR_feat, HR_feat=None, z_q=None):
        """Training path (z_q is None): returns (SR, KL); otherwise
        decodes the given latent and returns SR only."""
        # encode
        if z_q is None:
            z_q_mu, z_q_logvar= self.encode(HR_feat)
            z_q = self.reparameterize(z_q_mu, z_q_logvar)
            # standard Gaussian KL divergence (summed, not averaged)
            KL = -0.5 * torch.sum(1 + z_q_logvar - z_q_mu.pow(2) - z_q_logvar.exp())
            # decode
            SR = self.decode(LR, LR_feat, z_q)
            return SR, KL
        else:
            SR= self.decode(LR, LR_feat, z_q)
            return SR
class generator(nn.Module):
    """Generator network (appears unfinished/broken as written).

    NOTE(review): this class cannot run as-is — ``encode`` references
    ``self.weight`` and ``forward`` references ``self.down``, neither of
    which is defined in ``__init__`` (AttributeError at call time).  The
    ``encode``/``forward`` bodies are identical to those of
    ``discriminator_v2`` below and look copy-pasted.  Additionally,
    ``input_conv`` is an ``nn.Linear`` whose 2-D output is fed straight
    into ``conv1`` (a Conv2d expecting a 4-D tensor); a reshape to
    (B, 16, 32, 32) — matching 16384 = 16*32*32 and conv1's 16 input
    channels — is presumably missing.  Confirm intent before use.
    """
    def __init__(self, input_num, base_filter):
        super(generator, self).__init__()
        # backbone
        self.input_conv = nn.Linear(256, 16384)
        self.conv1 = nn.Conv2d(16, base_filter * 4, 3, 1, 1)
        self.norm1 = nn.InstanceNorm2d(base_filter * 4)
        self.res1 = ResnetBlock(base_filter * 4, 3, 1, 1)
        self.conv2 = nn.Conv2d(base_filter * 4, base_filter * 2, 3, 1, 1)
        self.norm2 = nn.InstanceNorm2d(base_filter * 2)
        self.res2 = ResnetBlock(base_filter * 2, 3, 1, 1)
        self.conv3 = nn.Conv2d(base_filter * 2, base_filter, 3, 1, 1)
        self.norm3 = nn.InstanceNorm2d(base_filter)
        self.res3 = ResnetBlock(base_filter, 3, 1, 1)
        self.act = nn.LeakyReLU()
        self.up = nn.Upsample(scale_factor=2, mode='bilinear')
        # condition
        # NOTE(review): self.feat, self.res1-3 and self.up are defined but
        # never used in the methods visible here.
        self.feat = nn.Conv2d(input_num, base_filter * 4, 3, 1, 1)
        # DCGAN-style initialization: N(0, 0.02) weights, zero biases
        for m in self.modules():
            classname = m.__class__.__name__
            if classname.find('Conv2d') != -1:
                m.weight.data.normal_(0.0, 0.02)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif classname.find('ConvTranspose2d') != -1:
                m.weight.data.normal_(0.0, 0.02)
                if m.bias is not None:
                    m.bias.data.zero_()
    def encode(self, x):
        """Single-scale pass (copied from discriminator_v2; see class note)."""
        feat = self.act(self.input_conv(x))
        out1 = self.act(self.norm1(self.conv1(feat)))
        out2 = self.act(self.norm2(self.conv2(out1)))
        out3 = self.act(self.norm3(self.conv3(out2)))
        # NOTE(review): self.weight is undefined -> AttributeError here.
        prob = self.weight(out3)
        b = feat.shape[0]
        prob = prob.view(b, -1)
        feat = feat.view(b, -1)
        out1 = out1.view(b, -1)
        out2 = out2.view(b, -1)
        out3 = out3.view(b, -1)
        out = torch.cat((feat, out1, out2, out3), 1)
        return out, prob
    def forward(self, x):
        """Three-scale pass (copied from discriminator_v2; see class note)."""
        # x = torch.cat((x, y), 1)
        feat1, prob1 = self.encode(x)
        # NOTE(review): self.down is undefined -> AttributeError here.
        x = self.down(x)
        feat2, prob2 = self.encode(x)
        x = self.down(x)
        feat3, prob3 = self.encode(x)
        feat_out = torch.cat((feat1, feat2, feat3), 1)
        prob_out = torch.cat((prob1, prob2, prob3), 1)
        return feat_out, prob_out
class discriminator_v2(nn.Module):
def __init__(self, num_channels, base_filter):
super(discriminator_v2, self).__init__()
self.input_conv = nn.Conv2d(num_channels, base_filter, 3, 1, 1)#512*256
self.conv1 = nn.Conv2d(base_filter, base_filter * 2, 4, 2, 1)
self.norm1 = nn.InstanceNorm2d(base_filter * 2)
self.conv2 = nn.Conv2d(base_filter * 2, base_filter * 4, 4, 2, 1)
self.norm2 = nn.InstanceNorm2d(base_filter * 4)
self.conv3 = nn.Conv2d(base_filter * 4, base_filter * 8, 4, 2, 1)
self.norm3 = nn.InstanceNorm2d(base_filter * 8)
self.act = nn.LeakyReLU(0.2, False)
self.weight = nn.Conv2d(base_filter * 8, 1, 3, 1, 1)
self.down = nn.MaxPool2d(3, stride=2, padding=[1, 1])
for m in self.modules():
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
elif classname.find('ConvTranspose2d') != -1:
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def encode(self, x):
feat = self.act(self.input_conv(x))
out1 = self.act(self.norm1(self.conv1(feat)))
out2 = self.act(self.norm2(self.conv2(out1)))
out3 = self.act(self.norm3(self.conv3(out2)))
prob = self.weight(out3)
b = feat.shape[0]
prob = prob.view(b, -1)
feat = feat.view(b, -1)
out1 = out1.view(b, -1)
out2 = out2.view(b, -1)
out3 = out3.view(b, -1)
out = torch.cat((feat, out1, out2, out3), 1)
return out, prob
def forward(self, x):
# x = torch.cat((x, y), 1)
feat1, prob1 = self.encode(x)
x = self.down(x)
feat2, prob2 = self.encode(x)
x = self.down(x)
feat3, prob3 = self.encode(x)
feat_out = torch.cat((feat1, feat2, feat3), 1)
prob_out = torch.cat((prob1, prob2, prob3), 1)
return feat_out, prob_out
class discriminator_v3(nn.Module):
    """Single-branch patch discriminator on a space-to-depth input.

    The input is pixel-unshuffled by ``down_factor`` (trading spatial
    resolution for channels) before a fixed-width conv stack.  Returns
    the flattened activations of the first three stages (for
    feature-matching losses) and per-patch logits from the fourth.
    """

    def __init__(self, num_channels, base_filter, down_factor):
        super(discriminator_v3, self).__init__()
        self.down_factor = down_factor
        # channels multiply by down_factor^2 after pixel-unshuffle
        self.input_conv = nn.Conv2d(num_channels * down_factor * down_factor, base_filter, 1, 1, 0)
        self.bn = nn.InstanceNorm2d(base_filter)
        self.conv1 = nn.Conv2d(base_filter, base_filter, 3, 1, 1)
        self.norm1 = nn.InstanceNorm2d(base_filter)
        self.conv2 = nn.Conv2d(base_filter, base_filter, 4, 2, 1)
        self.norm2 = nn.InstanceNorm2d(base_filter)
        self.conv3 = nn.Conv2d(base_filter, base_filter, 4, 2, 1)
        self.norm3 = nn.InstanceNorm2d(base_filter)
        self.conv4 = nn.Conv2d(base_filter, base_filter, 4, 2, 1)
        self.norm4 = nn.InstanceNorm2d(base_filter)
        self.act = nn.LeakyReLU(0.2, False)
        self.weight = nn.Conv2d(base_filter, base_filter, 3, 1, 1)
        # DCGAN-style initialization: N(0, 0.02) weights, zero biases
        for m in self.modules():
            classname = m.__class__.__name__
            if classname.find('Conv2d') != -1:
                m.weight.data.normal_(0.0, 0.02)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif classname.find('ConvTranspose2d') != -1:
                m.weight.data.normal_(0.0, 0.02)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, x):
        # x = torch.cat((x, y), 1)
        # space-to-depth, then stem conv + instance norm (no activation)
        x = pixel_unshuffle(x, downscale_factor=self.down_factor)
        feat = self.bn(self.input_conv(x))
        outs = []
        prev = feat
        for conv, norm in ((self.conv1, self.norm1),
                           (self.conv2, self.norm2),
                           (self.conv3, self.norm3),
                           (self.conv4, self.norm4)):
            prev = self.act(norm(conv(prev)))
            outs.append(prev)
        batch = feat.shape[0]
        prob = self.weight(outs[-1]).reshape(batch, -1)
        # only the first three stage activations feed the feature stack
        feat_out = torch.cat([o.reshape(batch, -1) for o in outs[:3]], 1)
        return feat_out, prob
### Define Vgg19 for projected distribution loss
class Vgg19_feat(nn.Module):
    """Frozen VGG-19 feature extractor for content + style losses.

    The four slices end at relu1_1, relu2_1, relu3_1 and relu4_1 of the
    torchvision VGG-19 ``features`` stack.  ``forward`` returns an L1
    content loss on the deepest features and a style loss that
    L1-matches per-channel feature means/stds at all four depths.
    """

    def __init__(self, device, requires_grad=False):
        super(Vgg19_feat, self).__init__()
        vgg_pretrained_features = models.vgg19(pretrained=True).features
        self.slice1 = nn.Sequential()
        self.slice2 = nn.Sequential()
        self.slice3 = nn.Sequential()
        self.slice4 = nn.Sequential()
        # layer index spans for relu1_1 / relu2_1 / relu3_1 / relu4_1
        spans = ((self.slice1, 0, 2), (self.slice2, 2, 7),
                 (self.slice3, 7, 12), (self.slice4, 12, 21))
        for dst, lo, hi in spans:
            for idx in range(lo, hi):
                dst.add_module(str(idx), vgg_pretrained_features[idx])
        # keep the pretrained backbone frozen unless requested otherwise
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False
        self.L1_loss = nn.L1Loss(size_average=False).to(device)
        # ImageNet normalization constants, broadcastable over NCHW
        mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
        std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
        self.register_buffer('mean', mean)
        self.register_buffer('std', std)

    def feat_extract(self, x):
        """Return the four slice outputs for an ImageNet-normalized input."""
        h = (x - self.mean) / self.std
        feats = []
        for sl in (self.slice1, self.slice2, self.slice3, self.slice4):
            h = sl(h)
            feats.append(h)
        return feats[0], feats[1], feats[2], feats[3]

    def forward(self, x, y):
        x_feats = self.feat_extract(x)
        y_feats = self.feat_extract(y)
        # content loss uses the deepest features only
        ContentLoss = self.L1_loss(x_feats[3], y_feats[3])
        # style loss matches channel-wise mean/std at every depth
        StyleLoss = 0
        for xf, yf in zip(x_feats, y_feats):
            mean_x, var_x = calc_mean_std(xf)
            mean_style, var_style = calc_mean_std(yf)
            StyleLoss = StyleLoss + self.L1_loss(mean_x, mean_style)
            StyleLoss = StyleLoss + self.L1_loss(var_x, var_style)
        return ContentLoss, StyleLoss
class Vgg19Conv2(nn.Module):
    """First 23 layers of pretrained VGG-19 (through relu4_2), frozen by default."""

    def __init__(self, requires_grad=False):
        super(Vgg19Conv2, self).__init__()
        # self.slice1 = nn.Sequential()
        features = models.vgg19(pretrained=True).features
        self.slice2 = nn.Sequential()
        for idx in range(23):
            self.slice2.add_module(str(idx), features[idx])
        # freeze the pretrained weights unless requested otherwise
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, x):
        return self.slice2(x)
def calc_mean_std(feat, eps=1e-5):
    """Per-(sample, channel) mean and std of a 4-D feature map.

    ``eps`` is added to the variance before the square root to avoid
    divide-by-zero; both outputs have shape (N, C, 1, 1) so they
    broadcast against the input.
    """
    assert feat.dim() == 4
    n, c = feat.shape[0], feat.shape[1]
    flat = feat.view(n, c, -1)
    std = (flat.var(dim=2) + eps).sqrt().view(n, c, 1, 1)
    mean = flat.mean(dim=2).view(n, c, 1, 1)
    return mean, std
class PDLoss(nn.Module):
    """Projected distribution loss on VGG-19 relu4_2-level features.

    ``w_distance`` treats each channel's spatial activations as a 1-D
    distribution (normalized to sum to 1), and accumulates the L1
    distance between the cumulative distributions (a 1-D
    Wasserstein-style term), plus an L1 distance between channel-wise
    means and stds.

    Args:
        device: device for the VGG backbone and normalization buffers.
        l1_lambda (float): weight for a pixel L1 term — NOTE(review):
            currently unused (the L1 line in ``forward`` is commented out),
            as is ``self.criterionL1``.
        w_lambda (float): weight applied to the CDF distance term.
        average (bool): passed to nn.L1Loss(size_average=...) — deprecated
            API, kept for behavioral compatibility.
    """
    def __init__(self, device, l1_lambda=1.5, w_lambda=0.01, average=True):
        super(PDLoss, self).__init__()
        self.vgg = Vgg19Conv2().to(device)
        self.criterionL1 = nn.L1Loss(size_average=average)
        self.w_lambda = w_lambda
        self.l1_lambda = l1_lambda
        # ImageNet normalization constants, broadcastable over NCHW
        mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
        std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
        self.register_buffer('mean', mean)
        self.register_buffer('std', std)
    def w_distance(self, xvgg, yvgg):
        """Return (CDF distance, mean/std distance) between feature maps."""
        x_mean, x_var = calc_mean_std(xvgg)
        y_mean, y_var = calc_mean_std(yvgg)
        # normalize each (N, C) slice into a spatial probability mass
        xvgg = xvgg / (torch.sum(xvgg, dim=(2, 3), keepdim=True) + 1e-14)
        yvgg = yvgg / (torch.sum(yvgg, dim=(2, 3), keepdim=True) + 1e-14)
        xvgg = xvgg.view(xvgg.size()[0], xvgg.size()[1], -1)
        yvgg = yvgg.view(yvgg.size()[0], yvgg.size()[1], -1)
        # cumulative distributions along the flattened spatial axis
        cdf_xvgg = torch.cumsum(xvgg, dim=-1)
        cdf_yvgg = torch.cumsum(yvgg, dim=-1)
        cdf_distance = torch.sum(torch.abs(cdf_xvgg - cdf_yvgg), dim=-1)
        # cdf_loss = cdf_distance.mean()
        cdf_loss = cdf_distance.sum()
        mean_distance = torch.sum(torch.abs(x_mean - y_mean), dim=-1)
        var_distance = torch.sum(torch.abs(x_var - y_var), dim=-1)
        mean_var_loss = mean_distance.sum() + var_distance.sum()
        return cdf_loss, mean_var_loss
    def forward(self, x, y):
        """Return (weighted CDF loss, mean/std loss) between x and y."""
        # L1loss = self.criterionL1(x, y) * self.l1_lambda
        # L1loss = 0
        x = (x - self.mean) / self.std
        y = (y - self.mean) / self.std
        x_vgg1 = self.vgg(x)
        y_vgg1 = self.vgg(y)
        WdLoss, mean_var_loss = self.w_distance(x_vgg1, y_vgg1)
        WdLoss = WdLoss * self.w_lambda
        # WdLoss_img = self.w_distance(x, y)
        return WdLoss, mean_var_loss
############################################################################################
# Base models
############################################################################################
def rgb2ycbcr(img):
    """Return the luma (Y) channel of an NCHW RGB batch, shape (N, 1, H, W).

    Uses the BT.601 weights 0.299 / 0.587 / 0.114 on the R / G / B channels.
    """
    r, g, b = img[:, 0], img[:, 1], img[:, 2]
    luma = 0.299 * r + 0.587 * g + 0.114 * b
    return luma.unsqueeze(1)
class GatedDense(nn.Module):
    """Gated linear unit: ``activation(h(x)) * sigmoid(g(x))``.

    Args:
        input_size (int): dimensionality of the input features.
        output_size (int): dimensionality of the output features.
        activation (callable or None): optional non-linearity applied to
            the content branch ``h``; the gate branch is always a sigmoid.
    """
    def __init__(self, input_size, output_size, activation=None):
        super(GatedDense, self).__init__()
        self.activation = activation
        self.sigmoid = nn.Sigmoid()
        self.h = nn.Linear(input_size, output_size)
        self.g = nn.Linear(input_size, output_size)
    def forward(self, x):
        # Compute the content branch once; the original re-evaluated
        # self.h(x) a second time whenever an activation was set.
        h = self.h(x)
        if self.activation is not None:
            h = self.activation(h)
        g = self.sigmoid(self.g(x))
        return h * g
class ConvBlock(torch.nn.Module):
    """Conv2d followed by a LeakyReLU activation."""

    def __init__(self, input_size, output_size, kernel_size, stride, padding, bias=True):
        super(ConvBlock, self).__init__()
        self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
        self.act = torch.nn.LeakyReLU()

    def forward(self, x):
        return self.act(self.conv(x))
class DeconvBlock(torch.nn.Module):
    """ConvTranspose2d followed by a LeakyReLU activation."""

    def __init__(self, input_size, output_size, kernel_size, stride, padding, bias=True):
        super(DeconvBlock, self).__init__()
        self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
        self.act = torch.nn.LeakyReLU()

    def forward(self, x):
        return self.act(self.deconv(x))
class UpBlock(torch.nn.Module):
    """Up-projection block with learned affine (mean, var) modulation.

    Upsamples with a deconv, re-downsamples to form a back-projection
    residual, and modulates intermediate features with 1x1-predicted
    (mean, var) pairs — ``feature * (1 + var) + mean`` — before adding
    the projected residual to produce the high-resolution output.
    """

    def __init__(self, input_size, output_size, kernel_size, stride, padding):
        super(UpBlock, self).__init__()
        self.conv1 = DeconvBlock(input_size, output_size, kernel_size, stride, padding, bias=False)
        self.conv2 = ConvBlock(output_size, output_size, kernel_size, stride, padding, bias=False)
        self.conv3 = DeconvBlock(output_size, output_size, kernel_size, stride, padding, bias=False)
        # each predicts a concatenated (mean, var) pair, chunked in forward
        self.local_weight1 = nn.Conv2d(input_size, 2 * output_size, kernel_size=1, stride=1, padding=0, bias=False)
        self.local_weight2 = nn.Conv2d(output_size, 2 * output_size, kernel_size=1, stride=1, padding=0, bias=False)

    def forward(self, x):
        hr = self.conv1(x)
        lr = self.conv2(hr)
        mean_lo, var_lo = self.local_weight1(x).chunk(2, dim=1)
        back_proj = self.conv3(mean_lo + lr * (1 + var_lo))
        mean_hi, var_hi = self.local_weight2(hr).chunk(2, dim=1)
        return mean_hi + back_proj * (1 + var_hi)
class DownBlock(torch.nn.Module):
    """Down-projection block with learned affine (mean, var) modulation.

    Mirror of ``UpBlock``: downsamples with a conv, re-upsamples to form
    a back-projection residual, and modulates intermediate features with
    1x1-predicted (mean, var) pairs — ``feature * (1 + var) + mean`` —
    before producing the low-resolution output.
    """

    def __init__(self, input_size, output_size, kernel_size, stride, padding):
        super(DownBlock, self).__init__()
        self.conv1 = ConvBlock(input_size, output_size, kernel_size, stride, padding, bias=False)
        self.conv2 = DeconvBlock(output_size, output_size, kernel_size, stride, padding, bias=False)
        self.conv3 = ConvBlock(output_size, output_size, kernel_size, stride, padding, bias=False)
        # each predicts a concatenated (mean, var) pair, chunked in forward
        self.local_weight1 = nn.Conv2d(input_size, 2 * output_size, kernel_size=1, stride=1, padding=0, bias=False)
        self.local_weight2 = nn.Conv2d(output_size, 2 * output_size, kernel_size=1, stride=1, padding=0, bias=False)

    def forward(self, x):
        lr = self.conv1(x)
        hr = self.conv2(lr)
        mean_hi, var_hi = self.local_weight1(x).chunk(2, dim=1)
        back_proj = self.conv3(mean_hi + hr * (1 + var_hi))
        mean_lo, var_lo = self.local_weight2(lr).chunk(2, dim=1)
        return mean_lo + back_proj * (1 + var_lo)
class ResnetBlock(torch.nn.Module):
    """Two-conv residual block: ``act2(conv2(act1(conv1(x))) + x)``."""

    def __init__(self, num_filter, kernel_size=3, stride=1, padding=1, bias=True):
        super(ResnetBlock, self).__init__()
        self.conv1 = torch.nn.Conv2d(num_filter, num_filter, kernel_size, stride, padding, bias=bias)
        self.conv2 = torch.nn.Conv2d(num_filter, num_filter, kernel_size, stride, padding, bias=bias)
        self.act1 = torch.nn.LeakyReLU()
        self.act2 = torch.nn.LeakyReLU()

    def forward(self, x):
        residual = self.conv2(self.act1(self.conv1(x)))
        return self.act2(residual + x)
class Self_attention(torch.nn.Module):
    """Non-local self-attention block with a residual connection.

    Computes ``softmax(K^T Q)`` attention over the spatial positions of
    a single feature map, applies it to V, re-projects the result to the
    input channel count and adds it back onto the input.  Query and
    value maps are max-pooled when ``stride`` > 1 to reduce cost.
    """

    def __init__(self, input_size, output_size, kernel_size, stride, padding, scale):
        super(Self_attention, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.scale = scale
        self.K = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=True)
        self.Q = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=True)
        self.V = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=True)
        self.pool = nn.MaxPool2d(kernel_size=self.scale + 2, stride=self.scale, padding=1)
        # re-projection back to the input channel count; a transposed conv
        # undoes the strided projection when kernel_size > 1
        if kernel_size == 1:
            self.local_weight = torch.nn.Conv2d(output_size, input_size, kernel_size, stride, padding,
                                                bias=True)
        else:
            self.local_weight = torch.nn.ConvTranspose2d(output_size, input_size, kernel_size, stride, padding,
                                                         bias=True)

    def forward(self, x):
        batch = x.size(0)
        key = self.K(x)
        query = self.Q(x)
        value = self.V(x)
        if self.stride > 1:  # shrink query/value to cut attention cost
            query = self.pool(query)
            value = self.pool(value)
        v_flat = value.view(batch, self.output_size, -1).permute(0, 2, 1)
        q_flat = query.view(batch, self.output_size, -1)
        k_flat = key.view(batch, self.output_size, -1).permute(0, 2, 1)
        attention = F.softmax(torch.matmul(k_flat, q_flat), dim=-1)
        context = torch.matmul(attention, v_flat).permute(0, 2, 1).contiguous()
        context = context.view(batch, self.output_size,
                               x.size(2) // self.stride, x.size(3) // self.stride)
        return x + self.local_weight(context)
class Space_attention(torch.nn.Module):
    """Cross-map attention: queries/keys from ``x`` modulate values from ``y``.

    Computes ``softmax(K^T Q)`` attention from feature map ``x``, applies
    it to a value projection of ``y``, re-projects the result to the input
    channel count and adds it onto ``y``.  When ``scale`` > 1 the query is
    max-pooled and the value is bicubically downscaled by ``1/scale``.
    """

    def __init__(self, input_size, output_size, kernel_size, stride, padding, scale):
        super(Space_attention, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.scale = scale
        # K/Q read from x (output_size channels), V reads from y (input_size)
        self.K = torch.nn.Conv2d(output_size, output_size, kernel_size, stride, padding, bias=True)
        self.Q = torch.nn.Conv2d(output_size, output_size, kernel_size, stride, padding, bias=True)
        self.V = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=True)
        self.pool = nn.MaxPool2d(kernel_size=self.scale + 2, stride=self.scale, padding=1)
        # re-projection back to the input channel count; a transposed conv
        # undoes the strided projection when kernel_size > 1
        if kernel_size == 1:
            self.local_weight = torch.nn.Conv2d(output_size, input_size, kernel_size, stride, padding,
                                                bias=True)
        else:
            self.local_weight = torch.nn.ConvTranspose2d(output_size, input_size, kernel_size, stride, padding,
                                                         bias=True)

    def forward(self, x, y):
        batch = x.size(0)
        key = self.K(x)
        query = self.Q(x)
        if self.scale > 1:
            query = self.pool(query)
        value = self.V(y)
        if self.scale > 1:
            value = F.interpolate(value, scale_factor=1 / self.scale, mode='bicubic')
        v_flat = value.view(batch, self.output_size, -1).permute(0, 2, 1)
        q_flat = query.view(batch, self.output_size, -1)
        k_flat = key.view(batch, self.output_size, -1).permute(0, 2, 1)
        attention = F.softmax(torch.bmm(k_flat, q_flat), dim=-1)
        context = torch.bmm(attention, v_flat).permute(0, 2, 1).contiguous()
        context = context.view(batch, self.output_size,
                               x.size(2) // self.stride, x.size(3) // self.stride)
        return y + self.local_weight(context)
class Cross_attention(torch.nn.Module):
    """Cross attention: keys/queries from x, values from y, residual onto y.

    `t` selects which stream is spatially reduced when scale > 1:
    t == 0 pools the key/query maps; t == 1 pools the value map and
    re-expands the attended output with nearest-neighbour interpolation.
    """
    def __init__(self, input_size, output_size, kernel_size, stride, padding, scale, t=0):
        super(Cross_attention, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.scale = scale
        self.t = t
        self.num = output_size // input_size
        # NOTE: submodule creation order preserved (seeded init parity).
        self.K = torch.nn.Conv2d(output_size, output_size, kernel_size, stride, padding, bias=True)
        self.Q = torch.nn.Conv2d(output_size, output_size, kernel_size, stride, padding, bias=True)
        self.V = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=True)
        self.pool = nn.MaxPool2d(kernel_size=self.scale + 2, stride=self.scale, padding=1)
        if kernel_size == 1:
            self.local_weight = torch.nn.Conv2d(output_size, input_size, kernel_size, stride, padding,
                                                bias=True)
        else:
            self.local_weight = torch.nn.ConvTranspose2d(output_size, input_size, kernel_size, stride, padding,
                                                         bias=True)
    def forward(self, x, y):
        b = y.size(0)
        keys = self.K(x)
        queries = self.Q(x)
        values = self.V(y)
        if self.scale > 1:
            if self.t == 0:
                keys = self.pool(keys)
                queries = self.pool(queries)
            elif self.t == 1:
                values = self.pool(values)
        flat_v = values.view(b, self.output_size, -1).permute(0, 2, 1)
        flat_q = queries.view(b, self.output_size, -1)
        flat_k = keys.view(b, self.output_size, -1).permute(0, 2, 1)
        attention = F.softmax(torch.bmm(flat_k, flat_q), dim=-1)
        mixed = torch.bmm(attention, flat_v).permute(0, 2, 1).contiguous()
        if self.t == 1:
            # attended map lives on x's grid; expand back by `scale`
            O = mixed.view(b, self.output_size, x.size(2), x.size(3))
            O = F.interpolate(O, scale_factor=self.scale, mode='nearest')
        else:
            O = mixed.view(b, self.output_size, y.size(2), y.size(3))
        return y + self.local_weight(O)
######################################################################################
def pixel_unshuffle(input, downscale_factor):
    """Inverse of pixel shuffle.

    Rearranges (B, C, k*H, k*W) into (B, k*k*C, H, W) where
    k = downscale_factor: each k x k block becomes k*k channels,
    ordered row-major within the block, grouped per input channel.
    """
    d = downscale_factor
    c = input.shape[1]
    # Identity kernel: output channel p (within each channel group) picks
    # block offset (p // d, p % d) — same row-major order as the loop version.
    kernel = torch.eye(d * d, device=input.device).view(d * d, 1, d, d).repeat(c, 1, 1, 1)
    return F.conv2d(input, kernel, stride=d, groups=c)
class Upsample(nn.Sequential):
    """Upsample module built from Conv2d + PixelShuffle stages.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.

    Raises:
        ValueError: for any scale that is neither a power of two nor 3.
    """
    def __init__(self, scale, num_feat):
        layers = []
        if (scale & (scale - 1)) == 0:  # power of two: repeated x2 shuffles
            for _ in range(int(math.log(scale, 2))):
                layers.extend([nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1),
                               nn.PixelShuffle(2)])
        elif scale == 3:
            layers.extend([nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1),
                           nn.PixelShuffle(3)])
        else:
            raise ValueError(f'scale {scale} is not supported. '
                             'Supported scales: 2^n and 3.')
        super(Upsample, self).__init__(*layers)
def make_layer(basic_block, num_basic_block, **kwarg):
    """Stack `num_basic_block` instances of `basic_block` into a Sequential.

    Args:
        basic_block (nn.Module): class for the basic block.
        num_basic_block (int): number of blocks.
        **kwarg: forwarded to every block's constructor.

    Returns:
        nn.Sequential: the stacked blocks.
    """
    return nn.Sequential(*[basic_block(**kwarg) for _ in range(num_basic_block)])
class ChannelAttention(nn.Module):
    """Channel attention used in RCAN.

    Squeeze (global average pool) then excite (two 1x1 convs + sigmoid),
    producing a per-channel gate in (0, 1) that rescales the input.

    Args:
        num_feat (int): Channel number of intermediate features.
        squeeze_factor (int): Channel squeeze factor. Default: 16.
    """
    def __init__(self, num_feat, squeeze_factor=16):
        super(ChannelAttention, self).__init__()
        self.attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(num_feat, num_feat // squeeze_factor, 1, padding=0),
            nn.ReLU(inplace=True),
            nn.Conv2d(num_feat // squeeze_factor, num_feat, 1, padding=0),
            nn.Sigmoid())
    def forward(self, x):
        return x * self.attention(x)
class RCAB(nn.Module):
    """Residual Channel Attention Block (RCAB) used in RCAN.

    conv-ReLU-conv followed by channel attention, scaled by `res_scale`
    and added back onto the input.

    Args:
        num_feat (int): Channel number of intermediate features.
        squeeze_factor (int): Channel squeeze factor. Default: 16.
        res_scale (float): Scale the residual. Default: 1.
    """
    def __init__(self, num_feat, squeeze_factor=16, res_scale=1):
        super(RCAB, self).__init__()
        self.res_scale = res_scale
        self.rcab = nn.Sequential(
            nn.Conv2d(num_feat, num_feat, 3, 1, 1), nn.ReLU(True),
            nn.Conv2d(num_feat, num_feat, 3, 1, 1),
            ChannelAttention(num_feat, squeeze_factor))
    def forward(self, x):
        return x + self.rcab(x) * self.res_scale
class ResidualGroup(nn.Module):
    """Residual Group of RCABs: stacked blocks, a trailing conv, and a skip.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_block (int): Block number in the body network.
        squeeze_factor (int): Channel squeeze factor. Default: 16.
        res_scale (float): Scale the residual. Default: 1.
    """
    def __init__(self, num_feat, num_block, squeeze_factor=16, res_scale=1):
        super(ResidualGroup, self).__init__()
        self.residual_group = make_layer(
            RCAB,
            num_block,
            num_feat=num_feat,
            squeeze_factor=squeeze_factor,
            res_scale=res_scale)
        self.conv = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
    def forward(self, x):
        return x + self.conv(self.residual_group(x))
class one_conv(nn.Module):
    """3x3 conv + LeakyReLU whose output is concatenated onto its input.

    Used as the dense-growth unit inside RDB: input G0 channels, output
    G0 + G channels (original input first, new features after it).
    """
    def __init__(self, G0, G):
        super(one_conv, self).__init__()
        self.conv = nn.Conv2d(G0, G, kernel_size=3, stride=1, padding=1, bias=True)
        self.relu = nn.LeakyReLU(0.1, inplace=True)
    def forward(self, x):
        return torch.cat((x, self.relu(self.conv(x))), dim=1)
class RDB(nn.Module):
    """Residual Dense Block: C densely connected convs, 1x1 fusion, skip.

    Args:
        G0: base channel count (input/output of the block).
        C: number of dense `one_conv` units.
        G: growth rate per unit.
    """
    def __init__(self, G0, C, G):
        super(RDB, self).__init__()
        self.conv = nn.Sequential(*[one_conv(G0 + i * G, G) for i in range(C)])
        # local feature fusion back down to G0 channels
        self.LFF = nn.Conv2d(G0 + C * G, G0, kernel_size=1, stride=1, padding=0, bias=True)
    def forward(self, x):
        return x + self.LFF(self.conv(x))
class RDG(nn.Module):
    """Residual Dense Group: runs n_RDB RDBs, concatenates every block's
    output along channels, and fuses them with a 1x1 conv back to G0."""
    def __init__(self, G0, C, G, n_RDB):
        super(RDG, self).__init__()
        self.n_RDB = n_RDB
        self.RDB = nn.Sequential(*[RDB(G0, C, G) for _ in range(n_RDB)])
        self.conv = nn.Conv2d(G0 * n_RDB, G0, kernel_size=1, stride=1, padding=0, bias=True)
    def forward(self, x):
        feats = []
        buffer = x
        for block in self.RDB:
            buffer = block(buffer)
            feats.append(buffer)
        return self.conv(torch.cat(feats, dim=1))
class CALayer(nn.Module):
    """Channel attention layer: global average pool, bottleneck 1x1 convs
    (reduction 16) with LeakyReLU, sigmoid gate multiplied onto the input.

    Note: `channel` must be >= 16 for the bottleneck to be non-empty.
    """
    def __init__(self, channel):
        super(CALayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_du = nn.Sequential(
            nn.Conv2d(channel, channel // 16, 1, padding=0, bias=True),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(channel // 16, channel, 1, padding=0, bias=True),
            nn.Sigmoid())
    def forward(self, x):
        return x * self.conv_du(self.avg_pool(x))
class ResB(nn.Module):
    """Grouped-conv residual block: 3x3 conv (groups=4), LeakyReLU,
    3x3 conv (groups=4), plus identity skip.

    Note: `channels` must be divisible by 4 (groups=4).
    """
    def __init__(self, channels):
        super(ResB, self).__init__()
        self.body = nn.Sequential(
            nn.Conv2d(channels, channels, 3, 1, 1, groups=4, bias=True),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(channels, channels, 3, 1, 1, groups=4, bias=True),
        )
    def forward(self, x):
        # BUG FIX: this was `__call__`, which shadowed nn.Module.__call__ and
        # bypassed the standard dispatch (forward hooks, etc.). Defining
        # `forward` keeps `module(x)` call sites working unchanged.
        out = self.body(x)
        return out + x
| 47,234 | 33.129335 | 116 | py |
RefVAE | RefVAE-main/dataset.py | import torch.utils.data as data
import torch
import numpy as np
import os
from os import listdir
from os.path import join
from PIL import Image, ImageOps, ImageEnhance
import random
from torchvision import transforms
from glob import glob
from imresize import imresize
def is_image_file(filename):
    """True iff filename ends in .png/.jpg/.jpeg (case-sensitive)."""
    return filename.endswith((".png", ".jpg", ".jpeg"))
def load_img(filepath):
    """Open the image at `filepath` and force RGB mode."""
    return Image.open(filepath).convert('RGB')
# def rescale_img(img_in, scale):
# size_in = img_in.size
# new_size_in = tuple([int(x * scale) for x in size_in])
# img_in = img_in.resize(new_size_in, resample=Image.BICUBIC)
# return img_in
def rescale_img(img_in, size):
    """Resize a PIL image to a square `size` x `size` with bicubic resampling."""
    return img_in.resize((size, size), resample=Image.BICUBIC)
def get_patch(img_in, img_tar, patch_size, scale, ix=-1, iy=-1):
    """Crop an aligned random patch pair from an LR/HR PIL image pair.

    The HR crop is `scale` times larger and taken at `scale` times the LR
    offset so both patches cover the same content.

    NOTE(review): PIL's Image.size is (width, height), so the unpacking
    below names them (ih, iw) in swapped order; the code is internally
    consistent, but confirm behavior for non-square inputs.

    Args:
        img_in: LR PIL image.
        img_tar: HR PIL image (scale x the LR size).
        patch_size: LR patch side length.
        scale: upscaling factor between the two images.
        ix, iy: optional fixed crop offsets; -1 means pick randomly.

    Returns:
        (lr_patch, hr_patch) tuple of PIL images.
    """
    (ih, iw) = img_in.size
    #(th, tw) = (scale * ih, scale * iw)
    patch_mult = scale  # if len(scale) > 1 else 1
    tp = patch_mult * patch_size  # HR patch side
    ip = tp // scale  # LR patch side (== patch_size)
    if ix == -1:
        ix = random.randrange(0, iw - ip + 1)
        # ix = torch.randint(0, iw - ip + 1, (1,)).item()
    if iy == -1:
        iy = random.randrange(0, ih - ip + 1)
        # iy = torch.randint(0, ih - ip + 1, (1,)).item()
    (tx, ty) = (scale * ix, scale * iy)
    img_in = img_in.crop((iy, ix, iy + ip, ix + ip))
    img_tar = img_tar.crop((ty, tx, ty + tp, tx + tp))
    # img_ref = img_ref.crop((ty, tx, ty + tp, tx + tp))
    return img_in, img_tar
def augment(img_in, img_tar, img_ref, flip_h=True, flip_v=True, rot=True):
    """Randomly flip/rotate the (input, target, ref) image triple identically.

    Each transform fires with probability 0.5:
      * ImageOps.flip (top-bottom) when flip_h is True,
      * ImageOps.mirror (left-right) when flip_v is True,
      * rotation by 90/180/270 degrees when rot is True.

    Returns the (possibly transformed) (img_in, img_tar, img_ref) triple.
    """
    if torch.rand(1).item() < 0.5 and flip_h:
        img_in = ImageOps.flip(img_in)
        img_tar = ImageOps.flip(img_tar)
        img_ref = ImageOps.flip(img_ref)
    if torch.rand(1).item() < 0.5 and flip_v:
        img_in = ImageOps.mirror(img_in)
        img_tar = ImageOps.mirror(img_tar)
        img_ref = ImageOps.mirror(img_ref)
    if torch.rand(1).item() < 0.5 and rot:
        # BUG FIX: torch.randint's `high` bound is exclusive, so randint(1, 3)
        # could only pick 90 or 180 degrees; randint(1, 4) restores the 270
        # degree rotation. Also use a fresh name instead of shadowing `rot`.
        angle = torch.randint(1, 4, (1,)).item() * 90
        img_in = img_in.rotate(angle)
        img_tar = img_tar.rotate(angle)
        img_ref = img_ref.rotate(angle)
    return img_in, img_tar, img_ref
def rgb_permute(im1, im2):
    """Apply one shared random channel permutation to both PIL images."""
    perm = np.random.permutation(3)
    arr1 = np.array(im1)[:, :, perm]
    arr2 = np.array(im2)[:, :, perm]
    return Image.fromarray(arr1), Image.fromarray(arr2)
def color_shift(img_in, img_tar):
    """Randomly jitter color, contrast, and brightness of both images identically.

    Each enhancement fires independently with probability 0.5 and uses a
    factor drawn uniformly from [1, 1.5]. The same factor is applied to
    both images so the pair stays consistent.
    """
    # Draw all three factors first (same RNG consumption order as before).
    factors = [random.uniform(1, 1.5) for _ in range(3)]
    enhancers = (ImageEnhance.Color, ImageEnhance.Contrast, ImageEnhance.Brightness)
    for enhancer, factor in zip(enhancers, factors):
        if torch.rand(1).item() < 0.5:
            img_tar = enhancer(img_tar).enhance(factor)
            img_in = enhancer(img_in).enhance(factor)
    return img_in, img_tar
class DatasetFromFolder(data.Dataset):
    """Paired LR/HR training dataset with a random HR image as reference.

    Expects `<data_dir1>/HR` and `<data_dir1>/LR` to hold matching,
    identically-named image files. `data_dir2` is currently unused (the
    Flickr2K merge is commented out). __getitem__ returns
    (lr_patch, hr_patch, ref) where `ref` is a randomly chosen HR image
    resized to 256x256.
    """
    def __init__(self, data_dir1, data_dir2, patch_size, up_factor, data_augmentation, transform=None):
        super(DatasetFromFolder, self).__init__()
        GT_dir = join(data_dir1, 'HR')
        input_dir = join(data_dir1, 'LR')
        self.gt_image_filenames = [join(GT_dir, x) for x in listdir(GT_dir) if is_image_file(x)]
        self.input_image_filenames = [join(input_dir, x) for x in listdir(input_dir) if is_image_file(x)]
        # (Optional second dataset merge was here; left disabled.)
        # Sorting keeps LR/HR lists aligned — assumes matching filenames; TODO confirm.
        self.gt_image_filenames = sorted(self.gt_image_filenames)
        self.input_image_filenames = sorted(self.input_image_filenames)
        self.patch_size = patch_size
        self.up_factor = up_factor
        self.transform = transform
        self.data_augmentation = data_augmentation
    def __getitem__(self, index):
        target = load_img(self.gt_image_filenames[index])
        input = load_img(self.input_image_filenames[index])
        # Random HR image serves as the style/texture reference.
        rand_no = torch.randint(0, len(self.gt_image_filenames), (1,)).item()
        ref = load_img(self.gt_image_filenames[rand_no])
        ref = rescale_img(ref, 256)
        # HR is normalized to 288x288, LR to 288/up_factor, before cropping.
        target = rescale_img(target, 288)
        input = rescale_img(input, 288//self.up_factor)
        input, target = get_patch(input, target, self.patch_size, scale=self.up_factor)
        if self.data_augmentation:
            input, target, ref = augment(input, target, ref)
            # input, target = color_shift(input, target)
        if self.transform:
            input = self.transform(input)
            target = self.transform(target)
            ref = self.transform(ref)
        return input, target, ref
    def __len__(self):
        return len(self.gt_image_filenames)
class DatasetFromFolder_new(data.Dataset):
    """Training dataset that synthesizes the LR input on the fly.

    Unlike `DatasetFromFolder`, the LR image is generated by downscaling
    the HR image with `imresize(target, 0.125)` instead of being read from
    disk. The reference is a randomly chosen HR image resized to 256x256.
    `data_dir2` is currently unused (kept for interface compatibility).
    """
    def __init__(self, data_dir1, data_dir2, patch_size, up_factor, data_augmentation, transform=None):
        super(DatasetFromFolder_new, self).__init__()
        GT_dir = join(data_dir1, 'HR')
        input_dir = join(data_dir1, 'LR')
        self.gt_image_filenames = [join(GT_dir, x) for x in listdir(GT_dir) if is_image_file(x)]
        self.input_image_filenames = [join(input_dir, x) for x in listdir(input_dir) if is_image_file(x)]
        # Sorting keeps the lists deterministic across runs.
        self.gt_image_filenames = sorted(self.gt_image_filenames)
        self.input_image_filenames = sorted(self.input_image_filenames)
        self.patch_size = patch_size
        self.up_factor = up_factor
        self.transform = transform
        self.data_augmentation = data_augmentation
    def __getitem__(self, index):
        target = load_img(self.gt_image_filenames[index])
        input = imresize(np.array(target), 0.125)
        # BUG FIX: was `np.unit8`, which raises AttributeError at runtime;
        # np.uint8 is the intended dtype conversion.
        input = Image.fromarray(np.uint8(input))
        rand_no = torch.randint(0, len(self.gt_image_filenames), (1,)).item()
        ref = load_img(self.gt_image_filenames[rand_no])
        ref = rescale_img(ref, 256)
        target = rescale_img(target, 288)
        input = rescale_img(input, 36)
        input, target = get_patch(input, target, self.patch_size, scale=self.up_factor)
        if self.data_augmentation:
            input, target, ref = augment(input, target, ref)
            # input, target = color_shift(input, target)
        if self.transform:
            input = self.transform(input)
            target = self.transform(target)
            ref = self.transform(ref)
        return input, target, ref
    def __len__(self):
        return len(self.gt_image_filenames)
class DatasetFromFolderEval(data.Dataset):
    """Evaluation dataset: loads every image under `<data_dir>hazy`.

    NOTE(review): the directory is built by plain string concatenation
    (`data_dir + 'hazy'`), so `data_dir` must end with a path separator;
    os.path.join would be safer. The 'hazy' name looks inherited from a
    dehazing project — confirm it is the intended eval folder here.
    """
    def __init__(self, data_dir, transform=None):
        super(DatasetFromFolderEval, self).__init__()
        data_dir = data_dir + 'hazy'
        self.image_filenames = [join(data_dir, x) for x in listdir(data_dir) if is_image_file(x)]
        self.transform = transform
    def __getitem__(self, index):
        input = load_img(self.image_filenames[index])
        if self.transform:
            input = self.transform(input)
        return input
    def __len__(self):
        return len(self.image_filenames)
| 8,504 | 32.093385 | 108 | py |
RefVAE | RefVAE-main/data.py | from os.path import join
from torchvision import transforms
from dataset import DatasetFromFolderEval, DatasetFromFolder
def transform():
    """Default transform: PIL image -> float tensor with values in [0, 1]."""
    return transforms.Compose([transforms.ToTensor()])
# def transform(fineSize):
# return transforms.Compose([
# transforms.Scale(2*fineSize),
# transforms.RandomCrop(fineSize),
# transforms.RandomHorizontalFlip(),
# transforms.RandomVerticalFlip(),
# transforms.ToTensor()])
def get_training_set(data_dir, patch_size, up_factor, data_augmentation):
    """Build the training dataset rooted at `<data_dir>DIV2K` / `<data_dir>Flickr2K`."""
    div2k_dir = data_dir + 'DIV2K'
    flickr_dir = data_dir + 'Flickr2K'
    return DatasetFromFolder(div2k_dir, flickr_dir, patch_size, up_factor,
                             data_augmentation, transform=transform())
def get_eval_set(data_dir):
    """Build the evaluation dataset rooted at `data_dir`."""
    return DatasetFromFolderEval(data_dir, transform=transform())
| 887 | 26.75 | 84 | py |
RefVAE | RefVAE-main/laploss.py | import numpy as np
from PIL import Image
import torch
from torch import nn
import torch.nn.functional as fnn
from torch.autograd import Variable
def build_gauss_kernel(size=5, sigma=1.0, n_channels=1, cuda=False):
    """Build a depthwise smoothing kernel of shape (n_channels, 1, size, size),
    normalized to sum to 1, for use with grouped conv2d.

    NOTE(review): two oddities to confirm against the reference LapLoss
    implementation this appears derived from:
      * the lambda squares the exponential (`** 2`), which simply halves
        the effective sigma^2;
      * `np.sum(gaussian(grid), axis=2)` ADDS the per-axis terms
        (g(dx) + g(dy)) instead of multiplying them, so the result is a
        normalized plus-shaped blur rather than a true 2-D Gaussian.
    Both still yield a valid low-pass kernel after normalization.
    """
    if size % 2 != 1:
        raise ValueError("kernel size must be uneven")
    grid = np.float32(np.mgrid[0:size, 0:size].T)
    gaussian = lambda x: np.exp((x - size // 2) ** 2 / (-2 * sigma ** 2)) ** 2
    kernel = np.sum(gaussian(grid), axis=2)
    kernel /= np.sum(kernel)
    # repeat same kernel across depth dimension
    kernel = np.tile(kernel, (n_channels, 1, 1))
    # conv weight should be (out_channels, groups/in_channels, h, w),
    # and since we have depth-separable convolution we want the groups dimension to be 1
    kernel = torch.FloatTensor(kernel[:, None, :, :])
    if cuda:
        kernel = kernel.cuda()
    # Variable is a no-op wrapper in modern PyTorch; kept for parity.
    return Variable(kernel, requires_grad=False)
def conv_gauss(img, kernel):
    """Depthwise-convolve `img` with a kernel from build_gauss_kernel.

    Replicate padding keeps the spatial size unchanged; `groups` equals
    the channel count so each channel is filtered independently.
    """
    channels, _, k1, k2 = kernel.shape
    padded = fnn.pad(img, (k1 // 2, k2 // 2, k1 // 2, k2 // 2), mode='replicate')
    return fnn.conv2d(padded, kernel, groups=channels)
def laplacian_pyramid(img, kernel, max_levels=5):
    """Build a Laplacian pyramid: max_levels band-pass (difference) images
    followed by the final low-pass residual."""
    pyr = []
    current = img
    for _ in range(max_levels):
        smoothed = conv_gauss(current, kernel)
        pyr.append(current - smoothed)
        current = fnn.avg_pool2d(smoothed, 2)
    pyr.append(current)
    return pyr
class LapLoss(nn.Module):
    """Sum of L1 distances between the Laplacian pyramids of input and target.

    Args:
        max_levels: number of difference levels in each pyramid.
        k_size / sigma: size and width of the smoothing kernel.
    """
    def __init__(self, max_levels=5, k_size=5, sigma=2.0):
        super(LapLoss, self).__init__()
        self.max_levels = max_levels
        self.k_size = k_size
        self.sigma = sigma
        self._gauss_kernel = None  # built lazily to match input channels
        # `size_average=False` is deprecated; reduction='sum' is equivalent.
        self.L1_loss = nn.L1Loss(reduction='sum')
    def forward(self, input, target):
        # Rebuild the kernel only when the channel count changes.
        # BUG FIX: the cached kernel has shape (n_channels, 1, k, k), so the
        # channel count is shape[0]; the original compared shape[1] (always 1)
        # and rebuilt the kernel on every call for multi-channel inputs.
        if self._gauss_kernel is None or self._gauss_kernel.shape[0] != input.shape[1]:
            self._gauss_kernel = build_gauss_kernel(
                size=self.k_size, sigma=self.sigma,
                n_channels=input.shape[1], cuda=input.is_cuda
            )
        pyr_input = laplacian_pyramid(input, self._gauss_kernel, self.max_levels)
        pyr_target = laplacian_pyramid(target, self._gauss_kernel, self.max_levels)
        return sum(self.L1_loss(a, b) for a, b in zip(pyr_input, pyr_target))
class LapMap(nn.Module):
    """Compute and return the Laplacian pyramid of the input (no loss).

    Args:
        max_levels: number of difference levels in the pyramid.
        k_size / sigma: size and width of the smoothing kernel.
    """
    def __init__(self, max_levels=5, k_size=5, sigma=2.0):
        super(LapMap, self).__init__()
        self.max_levels = max_levels
        self.k_size = k_size
        self.sigma = sigma
        self._gauss_kernel = None  # built lazily to match input channels
    def forward(self, input):
        # BUG FIX: channel count of the cached (n_channels, 1, k, k) kernel is
        # shape[0]; comparing shape[1] (always 1) forced a rebuild every call
        # for multi-channel inputs.
        if self._gauss_kernel is None or self._gauss_kernel.shape[0] != input.shape[1]:
            self._gauss_kernel = build_gauss_kernel(
                size=self.k_size, sigma=self.sigma,
                n_channels=input.shape[1], cuda=input.is_cuda
            )
        return laplacian_pyramid(input, self._gauss_kernel, self.max_levels)
RefVAE | RefVAE-main/eval_4x.py | from __future__ import print_function
import argparse
import os
import torch
import cv2
from model import *
import torchvision.transforms as transforms
from collections import OrderedDict
import numpy as np
from os.path import join
import time
from network import encoder4, decoder4
import numpy
from dataset import is_image_file, rescale_img
from image_utils import *
from PIL import Image, ImageOps
from os import listdir
import torch.utils.data as utils
import os
# Inference settings (parsed from the command line; this script only runs evaluation).
parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
parser.add_argument('--testBatchSize', type=int, default=8, help='testing batch size')
parser.add_argument('--up_factor', type=int, default=4, help="super resolution upscale factor")
parser.add_argument('--gpu_mode', type=bool, default=True)
parser.add_argument('--chop_forward', type=bool, default=True)
parser.add_argument('--use_img_self', action='store_true', help='using LR image itself or not')
parser.add_argument('--use_ref', action='store_true', help='using external reference images or not')
parser.add_argument('--num_sample', type=int, default=10, help='number of SR images')
parser.add_argument('--threads', type=int, default=6, help='number of threads for data loader to use')
parser.add_argument('--input_dataset', type=str, default='input')
parser.add_argument('--output_dataset', type=str, default='result')
parser.add_argument('--model_type', type=str, default='ConVAE')
parser.add_argument('--model', default='models/ConVAE_4x.pth', help='sr pretrained base model')
parser.add_argument("--encoder_dir", default='models/vgg_r41.pth', help='pre-trained encoder path')
parser.add_argument("--decoder_dir", default='models/dec_r41.pth', help='pre-trained encoder path')
opt = parser.parse_args()
print(opt)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('===> Building model ', opt.model_type)
model = VAE_v3_4x(up_factor=opt.up_factor)
enc = encoder4()  # feature encoder (weights loaded below; presumably VGG relu4_1 — confirm)
dec = decoder4()  # matching feature decoder
if os.path.exists(opt.encoder_dir):
    enc.load_state_dict(torch.load(opt.encoder_dir))
    print('encoder model is loaded!')
if os.path.exists(opt.decoder_dir):
    dec.load_state_dict(torch.load(opt.decoder_dir))
    print('decoder model is loaded!')
# Encoder/decoder are fixed feature extractors at test time: freeze them.
for param in enc.parameters():
    param.requires_grad = False
for param in dec.parameters():
    param.requires_grad = False
model = model.to(device)
enc = enc.to(device)
dec = dec.to(device)
print('===> Loading datasets')
def eval():
    """Generate SR outputs for every LR image under `<opt.input_dataset>/LR`.

    Reference selection (three mutually exclusive modes):
      * --use_ref (and input/Ref non-empty): every reference image yields
        one SR sample per LR image;
      * --use_img_self: the LR image itself, resized to 256x256, is the
        reference (one sample per LR image);
      * otherwise: `opt.num_sample` random-noise references are drawn.

    Outputs are written as PNGs to `opt.output_dataset`.
    NOTE(review): the name shadows the builtin `eval`; `.data[0]` below is
    the deprecated Variable-era accessor (index 0 of the batch dim).
    """
    model.eval()
    enc.eval()
    dec.eval()
    if os.path.exists(opt.model):
        model.load_state_dict(torch.load(opt.model, map_location=lambda storage, loc: storage))
        print(opt.model)
    Ref_filename = os.path.join(opt.input_dataset, 'Ref')
    LR_filename = os.path.join(opt.input_dataset, 'LR')
    SR_filename = opt.output_dataset
    lr_image = [join(LR_filename, x) for x in listdir(LR_filename) if is_image_file(x)]
    lr_image = sorted(lr_image)
    ref_image = [join(Ref_filename, x) for x in listdir(Ref_filename) if is_image_file(x)]
    ref_image = sorted(ref_image)
    for i in range(len(lr_image)):
        LR = Image.open(lr_image[i]).convert('RGB')
        LR = modcrop(LR, opt.up_factor)  # crop so both sides divide by the scale
        if len(ref_image) != 0 and opt.use_ref:
            print("using ref images for SR")
            for j in range(len(ref_image)):
                Ref = Image.open(ref_image[j]).convert('RGB')
                with torch.no_grad():
                    prediction = chop_forward(Ref, LR)
                prediction = prediction.data[0].cpu().permute(1, 2, 0)
                prediction = prediction * 255.0
                prediction = prediction.clamp(0, 255)
                # assumes filenames end in a 4-char id before the extension — TODO confirm
                lr_name = lr_image[i][-8:-4]
                output_name = SR_filename + '/' + lr_name.zfill(6) + '_sample' + str(j).zfill(5) + '.png'
                Image.fromarray(np.uint8(prediction)).save(output_name)
        else:
            if opt.use_img_self:
                print("using LR images itself for SR")
                Ref = LR.resize((256, 256))
                with torch.no_grad():
                    prediction = chop_forward(Ref, LR)
                prediction = prediction.data[0].cpu().permute(1, 2, 0)
                prediction = prediction * 255.0
                prediction = prediction.clamp(0, 255)
                lr_name = lr_image[i][-8:-4]
                output_name = SR_filename + '/' + lr_name.zfill(6) + '_sample0.png'
                Image.fromarray(np.uint8(prediction)).save(output_name)
            else:
                print("using random noise for SR")
                for j in range(opt.num_sample):
                    # mid-gray uniform noise image as the reference
                    a = np.random.rand(256, 256, 3)
                    Ref = Image.fromarray(np.uint8(a * 128))
                    with torch.no_grad():
                        prediction = chop_forward(Ref, LR)
                    prediction = prediction.data[0].cpu().permute(1, 2, 0)
                    prediction = prediction * 255.0
                    prediction = prediction.clamp(0, 255)
                    lr_name = lr_image[i][-8:-4]
                    output_name = SR_filename + '/' + lr_name.zfill(6) + '_sample' + str(j).zfill(5) + '.png'
                    print("random SR: {}".format(j))
                    Image.fromarray(np.uint8(prediction)).save(output_name)
    # (A commented-out consistency check lived here: downscale the SR
    # prediction and compare PSNR/SSIM against the input LR image.)
transform = transforms.Compose([
    transforms.ToTensor(), # range [0, 255] -> [0.0,1.0]
    ]
)
# Reference images are always resized to 256x256 before encoding.
style_transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(), # range [0, 255] -> [0.0,1.0]
    ]
)
def chop_forward(ref, img):
    """Run one SR forward pass for a single (reference, LR) PIL-image pair.

    Despite the name there is no patch-wise chopping here: the pair is
    wrapped in a one-element DataLoader and the SR result of the first
    (only) batch is returned.
    """
    img = transform(img).unsqueeze(0)
    ref = style_transform(ref).unsqueeze(0)
    testset = utils.TensorDataset(ref, img)
    test_dataloader = utils.DataLoader(testset, num_workers=opt.threads,
                                       drop_last=False, batch_size=opt.testBatchSize, shuffle=False)
    for iteration, batch in enumerate(test_dataloader, 1):
        ref, input = batch[0].to(device), batch[1].to(device)
        # LR features come from the bicubically upsampled LR image.
        LR_feat = enc(F.interpolate(input, scale_factor=opt.up_factor, mode='bicubic'))
        ref_feat = enc(ref)
        SR, _ = model(input, LR_feat['r41'], ref_feat['r41'])
        return SR
# Run evaluation immediately (no `if __name__ == "__main__"` guard, so this also runs on import).
eval()
| 7,250 | 33.528571 | 109 | py |
RefVAE | RefVAE-main/eval_8x.py | from __future__ import print_function
import argparse
import os
import torch
import cv2
from model import *
import torchvision.transforms as transforms
from collections import OrderedDict
import numpy as np
from os.path import join
import time
from network import encoder4, decoder4
import numpy
from dataset import is_image_file, rescale_img
from image_utils import *
from PIL import Image, ImageOps
from os import listdir
import torch.utils.data as utils
import os
# Inference settings (parsed from the command line; this script only runs evaluation).
parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
parser.add_argument('--testBatchSize', type=int, default=8, help='testing batch size')
parser.add_argument('--up_factor', type=int, default=8, help="super resolution upscale factor")
parser.add_argument('--gpu_mode', type=bool, default=True)
parser.add_argument('--chop_forward', type=bool, default=True)
parser.add_argument('--use_img_self', action='store_true', help='using LR image itself or not')
parser.add_argument('--use_ref', action='store_true', help='using external reference images or not')
parser.add_argument('--num_sample', type=int, default=10, help='number of SR images')
parser.add_argument('--threads', type=int, default=6, help='number of threads for data loader to use')
parser.add_argument('--input_dataset', type=str, default='input')
parser.add_argument('--output_dataset', type=str, default='result')
parser.add_argument('--model_type', type=str, default='ConVAE')
parser.add_argument('--model', default='models/ConVAE_8x.pth', help='sr pretrained base model')
parser.add_argument("--encoder_dir", default='models/vgg_r41.pth', help='pre-trained encoder path')
parser.add_argument("--decoder_dir", default='models/dec_r41.pth', help='pre-trained encoder path')
opt = parser.parse_args()
print(opt)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('===> Building model ', opt.model_type)
model = VAE_v3_8x(up_factor=opt.up_factor)
enc = encoder4()  # feature encoder (weights loaded below; presumably VGG relu4_1 — confirm)
dec = decoder4()  # matching feature decoder
if os.path.exists(opt.encoder_dir):
    enc.load_state_dict(torch.load(opt.encoder_dir))
    print('encoder model is loaded!')
if os.path.exists(opt.decoder_dir):
    dec.load_state_dict(torch.load(opt.decoder_dir))
    print('decoder model is loaded!')
# Encoder/decoder are fixed feature extractors at test time: freeze them.
for param in enc.parameters():
    param.requires_grad = False
for param in dec.parameters():
    param.requires_grad = False
model = model.to(device)
enc = enc.to(device)
dec = dec.to(device)
print('===> Loading datasets')
def eval():
    """Generate SR outputs for every LR image under `<opt.input_dataset>/LR`.

    Reference selection (three mutually exclusive modes):
      * --use_ref (and input/Ref non-empty): every reference image yields
        one SR sample per LR image;
      * --use_img_self: the LR image itself, resized to 256x256, is the
        reference (one sample per LR image);
      * otherwise: `opt.num_sample` random-noise references are drawn.

    Outputs are written as PNGs to `opt.output_dataset`.
    NOTE(review): the name shadows the builtin `eval`; `.data[0]` below is
    the deprecated Variable-era accessor (index 0 of the batch dim).
    """
    model.eval()
    enc.eval()
    dec.eval()
    if os.path.exists(opt.model):
        model.load_state_dict(torch.load(opt.model, map_location=lambda storage, loc: storage))
        print(opt.model)
    Ref_filename = os.path.join(opt.input_dataset, 'Ref')
    LR_filename = os.path.join(opt.input_dataset, 'LR')
    SR_filename = opt.output_dataset
    lr_image = [join(LR_filename, x) for x in listdir(LR_filename) if is_image_file(x)]
    lr_image = sorted(lr_image)
    ref_image = [join(Ref_filename, x) for x in listdir(Ref_filename) if is_image_file(x)]
    ref_image = sorted(ref_image)
    for i in range(len(lr_image)):
        LR = Image.open(lr_image[i]).convert('RGB')
        LR = modcrop(LR, opt.up_factor)  # crop so both sides divide by the scale
        if len(ref_image) != 0 and opt.use_ref:
            print("using ref images for SR")
            for j in range(len(ref_image)):
                Ref = Image.open(ref_image[j]).convert('RGB')
                with torch.no_grad():
                    prediction = chop_forward(Ref, LR)
                prediction = prediction.data[0].cpu().permute(1, 2, 0)
                prediction = prediction * 255.0
                prediction = prediction.clamp(0, 255)
                # assumes filenames end in a 4-char id before the extension — TODO confirm
                lr_name = lr_image[i][-8:-4]
                output_name = SR_filename + '/' + lr_name.zfill(6) + '_sample' + str(j).zfill(5) + '.png'
                Image.fromarray(np.uint8(prediction)).save(output_name)
        else:
            if opt.use_img_self:
                print("using LR images itself for SR")
                Ref = LR.resize((256, 256))
                with torch.no_grad():
                    prediction = chop_forward(Ref, LR)
                prediction = prediction.data[0].cpu().permute(1, 2, 0)
                prediction = prediction * 255.0
                prediction = prediction.clamp(0, 255)
                lr_name = lr_image[i][-8:-4]
                output_name = SR_filename + '/' + lr_name.zfill(6) + '_sample0.png'
                Image.fromarray(np.uint8(prediction)).save(output_name)
            else:
                print("using random noise for SR")
                for j in range(opt.num_sample):
                    # mid-gray uniform noise image as the reference
                    a = np.random.rand(256, 256, 3)
                    Ref = Image.fromarray(np.uint8(a * 128))
                    with torch.no_grad():
                        prediction = chop_forward(Ref, LR)
                    prediction = prediction.data[0].cpu().permute(1, 2, 0)
                    prediction = prediction * 255.0
                    prediction = prediction.clamp(0, 255)
                    lr_name = lr_image[i][-8:-4]
                    output_name = SR_filename + '/' + lr_name.zfill(6) + '_sample' + str(j).zfill(5) + '.png'
                    print("random SR: {}".format(j))
                    Image.fromarray(np.uint8(prediction)).save(output_name)
    # (A commented-out consistency check lived here: downscale the SR
    # prediction and compare PSNR/SSIM against the input LR image.)
transform = transforms.Compose([
    transforms.ToTensor(), # range [0, 255] -> [0.0,1.0]
    ]
)
# Reference images are always resized to 256x256 before encoding.
style_transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(), # range [0, 255] -> [0.0,1.0]
    ]
)
def chop_forward(ref, img):
    """Run one SR forward pass for a single (reference, LR) PIL-image pair.

    Despite the name there is no patch-wise chopping here: the pair is
    wrapped in a one-element DataLoader and the SR result of the first
    (only) batch is returned.
    """
    img = transform(img).unsqueeze(0)
    ref = style_transform(ref).unsqueeze(0)
    testset = utils.TensorDataset(ref, img)
    test_dataloader = utils.DataLoader(testset, num_workers=opt.threads,
                                       drop_last=False, batch_size=opt.testBatchSize, shuffle=False)
    for iteration, batch in enumerate(test_dataloader, 1):
        ref, input = batch[0].to(device), batch[1].to(device)
        # LR features come from the bicubically upsampled LR image.
        LR_feat = enc(F.interpolate(input, scale_factor=opt.up_factor, mode='bicubic'))
        ref_feat = enc(ref)
        SR, _ = model(input, LR_feat['r41'], ref_feat['r41'])
        return SR
# Run evaluation immediately (no `if __name__ == "__main__"` guard, so this also runs on import).
eval()
| 7,250 | 33.528571 | 109 | py |
TrianFlow | TrianFlow-master/test.py | import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from core.dataset import KITTI_2012, KITTI_2015
from core.evaluation import eval_flow_avg, load_gt_flow_kitti
from core.evaluation import eval_depth
from core.visualize import Visualizer_debug
from core.networks import Model_depth_pose, Model_flow, Model_flowposenet
from core.evaluation import load_gt_flow_kitti, load_gt_mask
import torch
from tqdm import tqdm
import pdb
import cv2
import numpy as np
import yaml
def test_kitti_2012(cfg, model, gt_flows, noc_masks):
    """Evaluate optical flow on the KITTI 2012 split.

    Runs the model over every sample in ``cfg.gt_2012_dir`` (each sample
    stacks the two frames vertically), collects HxWx2 flow maps, and scores
    them against the ground truth. Returns the ``eval_flow_avg`` result.
    """
    data = KITTI_2012(cfg.gt_2012_dir)
    predictions = []
    for _, sample in enumerate(tqdm(data)):
        stacked, K, K_inv = sample
        # Add a batch dimension to every tensor.
        stacked = stacked[None, :, :, :]
        K, K_inv = K[None, :, :], K_inv[None, :, :]
        # The two frames are stacked along the height axis; split them apart.
        half_h = int(stacked.shape[2] / 2)
        im1 = stacked[:, :, :half_h, :].cuda()
        im2 = stacked[:, :, half_h:, :].cuda()
        K, K_inv = K.cuda(), K_inv.cuda()
        if cfg.mode in ('flow', 'flowposenet'):
            pred = model.inference_flow(im1, im2)
        else:
            pred, _, _, _, _, _ = model.inference(im1, im2, K, K_inv)
        predictions.append(pred[0].detach().cpu().numpy().transpose(1, 2, 0))
    eval_flow_res = eval_flow_avg(gt_flows, noc_masks, predictions, cfg, write_img=False)
    print('CONFIG: {0}, mode: {1}'.format(cfg.config_file, cfg.mode))
    print('[EVAL] [KITTI 2012]')
    print(eval_flow_res)
    return eval_flow_res
def test_kitti_2015(cfg, model, gt_flows, noc_masks, gt_masks, depth_save_dir=None):
    """Evaluate optical flow on the KITTI 2015 split.

    Each dataset sample stacks two frames vertically; they are split, moved
    to the GPU, and run through the model. Flow predictions are scored with
    ``eval_flow_avg`` using the moving-object masks; the evaluation dict is
    returned.

    NOTE(review): ``visualizer``, ``img_list`` and ``pred_disp_list`` are
    populated but never used after the loop — presumably leftovers of a
    removed depth evaluation (see the trailing comment).
    """
    dataset = KITTI_2015(cfg.gt_2015_dir)
    visualizer = Visualizer_debug(depth_save_dir)
    pred_flow_list = []
    pred_disp_list = []
    img_list = []
    for idx, inputs in enumerate(tqdm(dataset)):
        img, K, K_inv = inputs
        # Add a batch dimension.
        img = img[None,:,:,:]
        K = K[None,:,:]
        K_inv = K_inv[None,:,:]
        # The two frames are stacked along the height axis; split them.
        img_h = int(img.shape[2] / 2)
        img1, img2 = img[:,:,:img_h,:], img[:,:,img_h:,:]
        img_list.append(img1)
        img1, img2, K, K_inv = img1.cuda(), img2.cuda(), K.cuda(), K_inv.cuda()
        if cfg.mode == 'flow' or cfg.mode == 'flowposenet':
            flow = model.inference_flow(img1, img2)
        else:
            # Depth-capable models also return disparities; keep frame-1's.
            flow, disp1, disp2, Rt, _, _ = model.inference(img1, img2, K, K_inv)
            disp = disp1[0].detach().cpu().numpy()
            disp = disp.transpose(1,2,0)
            pred_disp_list.append(disp)
        flow = flow[0].detach().cpu().numpy()
        flow = flow.transpose(1,2,0)
        pred_flow_list.append(flow)
    #pdb.set_trace()
    eval_flow_res = eval_flow_avg(gt_flows, noc_masks, pred_flow_list, cfg, moving_masks=gt_masks, write_img=False)
    print('CONFIG: {0}, mode: {1}'.format(cfg.config_file, cfg.mode))
    print('[EVAL] [KITTI 2015]')
    print(eval_flow_res)
    ## depth evaluation
    return eval_flow_res
def disp2depth(disp, min_depth=0.001, max_depth=80.0):
    """Map a normalized disparity in [0, 1] to metric depth.

    Disparity is linearly rescaled into [1/max_depth, 1/min_depth] and then
    inverted, so depth lands in [min_depth, max_depth].
    Returns ``(scaled_disp, depth)``.
    """
    lo = 1 / max_depth
    hi = 1 / min_depth
    scaled_disp = lo + (hi - lo) * disp
    return scaled_disp, 1 / scaled_disp
def resize_depths(gt_depth_list, pred_disp_list):
    """Resize each predicted disparity to its ground-truth resolution and
    convert it to depth.

    Depth is the reciprocal of disparity, with a small epsilon to avoid
    division by zero. Returns ``(pred_depth_list, pred_disp_resized)``.
    """
    pred_depth_list = []
    pred_disp_resized = []
    for i, disp in enumerate(pred_disp_list):
        rows, cols = gt_depth_list[i].shape
        disp_up = cv2.resize(disp, (cols, rows))
        pred_depth_list.append(1.0 / (disp_up + 1e-4))
        pred_disp_resized.append(disp_up)
    return pred_depth_list, pred_disp_resized
def test_eigen_depth(cfg, model):
    """Evaluate monocular depth on the KITTI Eigen test split.

    Reads the file list from ``./data/eigen/test_files.txt``, runs the depth
    network on each resized image, rescales predictions back to ground-truth
    resolution, and prints/returns the standard depth metrics
    (abs_rel, sq_rel, rms, log_rms, a1, a2, a3).
    """
    print('Evaluate depth using eigen split. Using model in ' + cfg.model_dir)
    filenames = open('./data/eigen/test_files.txt').readlines()
    pred_disp_list = []
    for i in range(len(filenames)):
        path1, idx, _ = filenames[i].strip().split(' ')
        img = cv2.imread(os.path.join(os.path.join(cfg.raw_base_dir, path1), 'image_02/data/'+str(idx)+'.png'))
        #img_resize = cv2.resize(img, (832,256))
        # Resize to the network's training resolution (cv2 wants (w, h)).
        img_resize = cv2.resize(img, (cfg.img_hw[1], cfg.img_hw[0]))
        img_input = torch.from_numpy(img_resize / 255.0).float().cuda().unsqueeze(0).permute(0,3,1,2)
        disp = model.infer_depth(img_input)
        disp = disp[0].detach().cpu().numpy()
        disp = disp.transpose(1,2,0)
        pred_disp_list.append(disp)
        #print(i)
    gt_depths = np.load('./data/eigen/gt_depths.npz', allow_pickle=True)['data']
    pred_depths, pred_disp_resized = resize_depths(gt_depths, pred_disp_list)
    eval_depth_res = eval_depth(gt_depths, pred_depths)
    abs_rel, sq_rel, rms, log_rms, a1, a2, a3 = eval_depth_res
    sys.stderr.write(
        "{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10} \n".
        format('abs_rel', 'sq_rel', 'rms', 'log_rms',
                'a1', 'a2', 'a3'))
    sys.stderr.write(
        "{:10.4f}, {:10.4f}, {:10.3f}, {:10.3f}, {:10.3f}, {:10.3f}, {:10.3f} \n".
        format(abs_rel, sq_rel, rms, log_rms, a1, a2, a3))
    return eval_depth_res
def resize_disp(pred_disp_list, gt_depths):
    """Resize every predicted disparity map to the ground-truth resolution
    (taken from ``gt_depths[0]``) and invert it into depth.

    NOTE(review): unlike ``resize_depths`` no epsilon is added before the
    reciprocal, so a zero disparity yields infinite depth — assumes strictly
    positive disparities, TODO confirm.
    """
    target_h = gt_depths[0].shape[0]
    target_w = gt_depths[0].shape[1]
    return [1.0 / cv2.resize(d, (target_w, target_h)) for d in pred_disp_list]
import h5py
import scipy.io as sio
def load_nyu_test_data(data_dir):
    """Load the NYUv2 labeled test images and depth maps.

    Reads ``nyu_depth_v2_labeled.mat`` (HDF5 format) and the official
    ``splits.mat`` test indices from ``data_dir`` and returns
    ``(images, depths)`` restricted to the test split.
    """
    data = h5py.File(os.path.join(data_dir, 'nyu_depth_v2_labeled.mat'), 'r')
    splits = sio.loadmat(os.path.join(data_dir, 'splits.mat'))
    test = np.array(splits['testNdxs']).squeeze(1)
    # Transpose from the MATLAB (column-major) layout — assumed to yield
    # [N, C, H, W] images and [N, H, W] depths; TODO confirm.
    images = np.transpose(data['images'], [0,1,3,2])
    depths = np.transpose(data['depths'], [0,2,1])
    # splits.mat indices are 1-based (MATLAB); shift to 0-based.
    images = images[test-1]
    depths = depths[test-1]
    return images, depths
def test_nyu(cfg, model, test_images, test_gt_depths):
    """Evaluate monocular depth on the NYUv2 test set.

    Images and ground-truth depths are center-cropped (rows 45:472,
    cols 41:602), the crops are resized to the network's training
    resolution for inference, and predictions are rescaled back for
    evaluation. Prints and returns the standard depth metrics.
    """
    leng = test_images.shape[0]
    print('Test nyu depth on '+str(leng)+' images. Using depth model in '+cfg.model_dir)
    pred_disp_list = []
    crop_imgs = []
    crop_gt_depths = []
    for i in range(leng):
        img = test_images[i]
        # Standard NYUv2 evaluation crop (removes the white border).
        img_crop = img[:,45:472,41:602]
        crop_imgs.append(img_crop)
        gt_depth_crop = test_gt_depths[i][45:472,41:602]
        crop_gt_depths.append(gt_depth_crop)
        #img = np.transpose(cv2.resize(np.transpose(img_crop, [1,2,0]), (576,448)), [2,0,1])
        img = np.transpose(cv2.resize(np.transpose(img_crop, [1,2,0]), (cfg.img_hw[1],cfg.img_hw[0])), [2,0,1])
        img_t = torch.from_numpy(img).float().cuda().unsqueeze(0) / 255.0
        disp = model.infer_depth(img_t)
        disp = np.transpose(disp[0].cpu().detach().numpy(), [1,2,0])
        pred_disp_list.append(disp)
    pred_depths = resize_disp(pred_disp_list, crop_gt_depths)
    eval_depth_res = eval_depth(crop_gt_depths, pred_depths, nyu=True)
    abs_rel, sq_rel, rms, log_rms, a1, a2, a3 = eval_depth_res
    sys.stderr.write(
        "{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10} \n".
        format('abs_rel', 'sq_rel', 'rms', 'log10',
                'a1', 'a2', 'a3'))
    sys.stderr.write(
        "{:10.4f}, {:10.4f}, {:10.3f}, {:10.3f}, {:10.3f}, {:10.3f}, {:10.3f} \n".
        format(abs_rel, sq_rel, rms, log_rms, a1, a2, a3))
    return eval_depth_res
def test_single_image(img_path, model, training_hw, save_dir='./'):
    """Predict and save a colorized disparity map for one image.

    img_path: path of the input image.
    model: depth network exposing ``infer_depth``.
    training_hw: (height, width) the network was trained at.
    save_dir: directory where the visualization ('demo') is written.
    """
    img = cv2.imread(img_path)
    h, w = img.shape[0:2]
    # Run inference at the training resolution (cv2.resize wants (w, h))...
    img_resized = cv2.resize(img, (training_hw[1], training_hw[0]))
    img_t = torch.from_numpy(np.transpose(img_resized, [2,0,1])).float().cuda().unsqueeze(0) / 255.0
    disp = model.infer_depth(img_t)
    disp = np.transpose(disp[0].cpu().detach().numpy(), [1,2,0])
    # ...then map the disparity back to the original resolution.
    disp_resized = cv2.resize(disp, (w,h))
    # (The original also computed a depth map here that was never used.)
    visualizer = Visualizer_debug(dump_dir=save_dir)
    visualizer.save_disp_color_img(disp_resized, name='demo')
    print('Depth prediction saved in ' + save_dir)
if __name__ == '__main__':
    import argparse
    arg_parser = argparse.ArgumentParser(
        description="TrianFlow testing."
    )
    arg_parser.add_argument('-c', '--config_file', default=None, help='config file.')
    arg_parser.add_argument('-g', '--gpu', type=str, default=0, help='gpu id.')
    arg_parser.add_argument('--mode', type=str, default='depth', help='mode for testing.')
    arg_parser.add_argument('--task', type=str, default='kitti_depth', help='To test on which task, kitti_depth or kitti_flow or nyuv2 or demo')
    arg_parser.add_argument('--image_path', type=str, default=None, help='Set this only when task==demo. Depth demo for single image.')
    arg_parser.add_argument('--pretrained_model', type=str, default=None, help='directory for loading flow pretrained models')
    arg_parser.add_argument('--result_dir', type=str, default=None, help='directory for saving predictions')
    args = arg_parser.parse_args()
    # NOTE(review): if -c is omitted, os.path.exists(None) raises TypeError
    # rather than the intended ValueError below.
    if not os.path.exists(args.config_file):
        raise ValueError('config file not found.')
    with open(args.config_file, 'r') as f:
        cfg = yaml.safe_load(f)
    cfg['img_hw'] = (cfg['img_hw'][0], cfg['img_hw'][1])
    #cfg['log_dump_dir'] = os.path.join(args.model_dir, 'log.pkl')
    cfg['model_dir'] = args.result_dir
    # copy attr into cfg
    for attr in dir(args):
        if attr[:2] != '__':
            cfg[attr] = getattr(args, attr)
    # Lightweight namespace object so config entries are attribute-accessible.
    class pObject(object):
        def __init__(self):
            pass
    cfg_new = pObject()
    for attr in list(cfg.keys()):
        setattr(cfg_new, attr, cfg[attr])
    # Pick the network by mode; demo always uses the depth+pose model.
    # NOTE(review): an unrecognized --mode (other than demo) leaves `model`
    # undefined and crashes below.
    if args.mode == 'flow':
        model = Model_flow(cfg_new)
    elif args.mode == 'depth' or args.mode == 'flow_3stage':
        model = Model_depth_pose(cfg_new)
    elif args.mode == 'flowposenet':
        model = Model_flowposenet(cfg_new)
    if args.task == 'demo':
        model = Model_depth_pose(cfg_new)
    model.cuda()
    weights = torch.load(args.pretrained_model)
    model.load_state_dict(weights['model_state_dict'])
    model.eval()
    print('Model Loaded.')
    # Dispatch to the requested evaluation task.
    if args.task == 'kitti_depth':
        depth_res = test_eigen_depth(cfg_new, model)
    elif args.task == 'kitti_flow':
        gt_flows_2015, noc_masks_2015 = load_gt_flow_kitti(cfg_new.gt_2015_dir, 'kitti_2015')
        gt_masks_2015 = load_gt_mask(cfg_new.gt_2015_dir)
        flow_res = test_kitti_2015(cfg_new, model, gt_flows_2015, noc_masks_2015, gt_masks_2015)
    elif args.task == 'nyuv2':
        test_images, test_gt_depths = load_nyu_test_data(cfg_new.nyu_test_dir)
        depth_res = test_nyu(cfg_new, model, test_images, test_gt_depths)
    elif args.task == 'demo':
        test_single_image(args.image_path, model, training_hw=cfg['img_hw'], save_dir=args.result_dir)
TrianFlow | TrianFlow-master/train.py | import os, sys
import yaml
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from core.dataset import KITTI_RAW, KITTI_Prepared, NYU_Prepare, NYU_v2, KITTI_Odo
from core.networks import get_model
from core.config import generate_loss_weights_dict
from core.visualize import Visualizer
from core.evaluation import load_gt_flow_kitti, load_gt_mask
from test import test_kitti_2012, test_kitti_2015, test_eigen_depth, test_nyu, load_nyu_test_data
from collections import OrderedDict
import torch
import torch.utils.data
from tqdm import tqdm
import shutil
import pickle
import pdb
def save_model(iter_, model_dir, filename, model, optimizer):
    """Checkpoint the training state (iteration counter plus model and
    optimizer state dicts) to ``model_dir/filename``."""
    checkpoint = {
        "iteration": iter_,
        "model_state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
    }
    torch.save(checkpoint, os.path.join(model_dir, filename))
def load_model(model_dir, filename, model, optimizer):
    """Restore a checkpoint written by ``save_model``.

    Loads ``model_dir/filename``, applies the stored state dicts to
    ``model`` and ``optimizer`` in place, and returns
    ``(iteration, model, optimizer)``.
    """
    checkpoint = torch.load(os.path.join(model_dir, filename))
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    return checkpoint['iteration'], model, optimizer
def train(cfg):
    """Main training loop.

    Builds the model and optimizer from ``cfg``, optionally restores resumed
    or pretrained weights, prepares the dataset on first use, then iterates
    batches with periodic evaluation and checkpointing.
    """
    # load model and optimizer
    model = get_model(cfg.mode)(cfg)
    if cfg.multi_gpu:
        model = torch.nn.DataParallel(model)
    model = model.cuda()
    optimizer = torch.optim.Adam([{'params': filter(lambda p: p.requires_grad, model.parameters()), 'lr': cfg.lr}])

    # Load Pretrained Models
    if cfg.resume:
        # Resume either from a specific iteration checkpoint or the latest one.
        if cfg.iter_start > 0:
            cfg.iter_start, model, optimizer = load_model(cfg.model_dir, 'iter_{}.pth'.format(cfg.iter_start), model, optimizer)
        else:
            cfg.iter_start, model, optimizer = load_model(cfg.model_dir, 'last.pth', model, optimizer)
    elif cfg.flow_pretrained_model:
        # The flow net lives under a different attribute prefix depending on
        # the wrapper model / DataParallel; rename keys so they line up.
        data = torch.load(cfg.flow_pretrained_model)['model_state_dict']
        renamed_dict = OrderedDict()
        for k, v in data.items():
            if cfg.multi_gpu:
                name = 'module.model_flow.' + k
            elif cfg.mode == 'flowposenet':
                name = 'model_flow.' + k
            else:
                name = 'model_pose.model_flow.' + k
            renamed_dict[name] = v
        missing_keys, unexp_keys = model.load_state_dict(renamed_dict, strict=False)
        print(missing_keys)
        print(unexp_keys)
        print('Load Flow Pretrained Model from ' + cfg.flow_pretrained_model)

    if cfg.depth_pretrained_model and not cfg.resume:
        data = torch.load(cfg.depth_pretrained_model)['model_state_dict']
        if cfg.multi_gpu:
            # DataParallel prefixes every parameter with 'module.'.
            renamed_dict = OrderedDict()
            for k, v in data.items():
                name = 'module.' + k
                renamed_dict[name] = v
            missing_keys, unexp_keys = model.load_state_dict(renamed_dict, strict=False)
        else:
            missing_keys, unexp_keys = model.load_state_dict(data, strict=False)
        print(missing_keys)
        print('##############')
        print(unexp_keys)
        print('Load Depth Pretrained Model from ' + cfg.depth_pretrained_model)

    loss_weights_dict = generate_loss_weights_dict(cfg)
    visualizer = Visualizer(loss_weights_dict, cfg.log_dump_dir)

    # load dataset (prepare the processed data on first use)
    data_dir = os.path.join(cfg.prepared_base_dir, cfg.prepared_save_dir)
    if not os.path.exists(os.path.join(data_dir, 'train.txt')):
        if cfg.dataset == 'kitti_depth':
            kitti_raw_dataset = KITTI_RAW(cfg.raw_base_dir, cfg.static_frames_txt, cfg.test_scenes_txt)
            kitti_raw_dataset.prepare_data_mp(data_dir, stride=1)
        elif cfg.dataset == 'kitti_odo':
            kitti_raw_dataset = KITTI_Odo(cfg.raw_base_dir)
            kitti_raw_dataset.prepare_data_mp(data_dir, stride=1)
        elif cfg.dataset == 'nyuv2':
            nyu_raw_dataset = NYU_Prepare(cfg.raw_base_dir, cfg.nyu_test_dir)
            nyu_raw_dataset.prepare_data_mp(data_dir, stride=10)
        else:
            raise NotImplementedError

    if cfg.dataset == 'kitti_depth':
        dataset = KITTI_Prepared(data_dir, num_scales=cfg.num_scales, img_hw=cfg.img_hw, num_iterations=(cfg.num_iterations - cfg.iter_start) * cfg.batch_size)
    elif cfg.dataset == 'kitti_odo':
        dataset = KITTI_Prepared(data_dir, num_scales=cfg.num_scales, img_hw=cfg.img_hw, num_iterations=(cfg.num_iterations - cfg.iter_start) * cfg.batch_size)
    elif cfg.dataset == 'nyuv2':
        dataset = NYU_v2(data_dir, num_scales=cfg.num_scales, img_hw=cfg.img_hw, num_iterations=(cfg.num_iterations - cfg.iter_start) * cfg.batch_size)
    else:
        raise NotImplementedError
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=cfg.batch_size, shuffle=True, num_workers=cfg.num_workers, drop_last=False)

    # Ground truth used by the periodic evaluation below.
    if cfg.dataset == 'kitti_depth' or cfg.dataset == 'kitti_odo':
        gt_flows_2012, noc_masks_2012 = load_gt_flow_kitti(cfg.gt_2012_dir, 'kitti_2012')
        gt_flows_2015, noc_masks_2015 = load_gt_flow_kitti(cfg.gt_2015_dir, 'kitti_2015')
        gt_masks_2015 = load_gt_mask(cfg.gt_2015_dir)
    elif cfg.dataset == 'nyuv2':
        test_images, test_gt_depths = load_nyu_test_data(cfg.nyu_test_dir)

    # training
    print('starting iteration: {}.'.format(cfg.iter_start))
    for iter_, inputs in enumerate(tqdm(dataloader)):
        if (iter_ + 1) % cfg.test_interval == 0 and (not cfg.no_test):
            model.eval()
            # BUGFIX: this previously read the global `args` (only defined
            # when the file is run as a script); use the cfg that was passed in.
            if cfg.multi_gpu:
                model_eval = model.module
            else:
                model_eval = model
            if cfg.dataset == 'kitti_depth' or cfg.dataset == 'kitti_odo':
                if not (cfg.mode == 'depth' or cfg.mode == 'flowposenet'):
                    eval_2012_res = test_kitti_2012(cfg, model_eval, gt_flows_2012, noc_masks_2012)
                    eval_2015_res = test_kitti_2015(cfg, model_eval, gt_flows_2015, noc_masks_2015, gt_masks_2015, depth_save_dir=os.path.join(cfg.model_dir, 'results'))
                    visualizer.add_log_pack({'eval_2012_res': eval_2012_res, 'eval_2015_res': eval_2015_res})
            elif cfg.dataset == 'nyuv2':
                if not cfg.mode == 'flow':
                    eval_nyu_res = test_nyu(cfg, model_eval, test_images, test_gt_depths)
                    visualizer.add_log_pack({'eval_nyu_res': eval_nyu_res})
            visualizer.dump_log(os.path.join(cfg.model_dir, 'log.pkl'))
            model.train()
        # Shift the loop counter so logs/checkpoints continue from a resume.
        iter_ = iter_ + cfg.iter_start
        optimizer.zero_grad()
        inputs = [k.cuda() for k in inputs]
        loss_pack = model(inputs)
        if iter_ % cfg.log_interval == 0:
            visualizer.print_loss(loss_pack, iter_=iter_)
        # Weighted sum of all loss terms (mean over the batch/GPUs first).
        loss_list = []
        for key in list(loss_pack.keys()):
            loss_list.append((loss_weights_dict[key] * loss_pack[key].mean()).unsqueeze(0))
        loss = torch.cat(loss_list, 0).sum()
        loss.backward()
        optimizer.step()
        if (iter_ + 1) % cfg.save_interval == 0:
            save_model(iter_, cfg.model_dir, 'iter_{}.pth'.format(iter_), model, optimizer)
            # BUGFIX: 'last.pth' previously carried a no-op .format(iter_) call.
            save_model(iter_, cfg.model_dir, 'last.pth', model, optimizer)

    if cfg.dataset == 'kitti_depth':
        if cfg.mode == 'depth' or cfg.mode == 'depth_pose':
            # BUGFIX: `model_eval` was only bound inside the periodic-eval
            # branch and could be undefined here (e.g. with --no_test);
            # derive it explicitly for the final Eigen evaluation.
            model.eval()
            model_eval = model.module if cfg.multi_gpu else model
            eval_depth_res = test_eigen_depth(cfg, model_eval)
if __name__ == '__main__':
    import argparse
    arg_parser = argparse.ArgumentParser(
        description="TrianFlow training pipeline."
    )
    arg_parser.add_argument('-c', '--config_file', default=None, help='config file.')
    arg_parser.add_argument('-g', '--gpu', type=str, default=0, help='gpu id.')
    arg_parser.add_argument('--batch_size', type=int, default=8, help='batch size.')
    arg_parser.add_argument('--iter_start', type=int, default=0, help='starting iteration.')
    arg_parser.add_argument('--lr', type=float, default=0.0001, help='learning rate')
    arg_parser.add_argument('--num_workers', type=int, default=6, help='number of workers.')
    arg_parser.add_argument('--log_interval', type=int, default=100, help='interval for printing loss.')
    arg_parser.add_argument('--test_interval', type=int, default=2000, help='interval for evaluation.')
    arg_parser.add_argument('--save_interval', type=int, default=2000, help='interval for saving models.')
    arg_parser.add_argument('--mode', type=str, default='flow', help='training mode.')
    arg_parser.add_argument('--model_dir', type=str, default=None, help='directory for saving models')
    arg_parser.add_argument('--prepared_save_dir', type=str, default='data_s1', help='directory name for generated training dataset')
    arg_parser.add_argument('--flow_pretrained_model', type=str, default=None, help='directory for loading flow pretrained models')
    arg_parser.add_argument('--depth_pretrained_model', type=str, default=None, help='directory for loading depth pretrained models')
    arg_parser.add_argument('--resume', action='store_true', help='to resume training.')
    arg_parser.add_argument('--multi_gpu', action='store_true', help='to use multiple gpu for training.')
    arg_parser.add_argument('--no_test', action='store_true', help='without evaluation.')
    args = arg_parser.parse_args()
    #args.config_file = 'config/debug.yaml'
    if args.config_file is None:
        raise ValueError('config file needed. -c --config_file.')
    # set model
    # Default model dir: models/<config-name>/<mode> next to this file.
    if args.model_dir is None:
        args.model_dir = os.path.join('models', os.path.splitext(os.path.split(args.config_file)[1])[0])
    args.model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), args.model_dir, args.mode)
    if not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)
    if not os.path.exists(args.config_file):
        raise ValueError('config file not found.')
    with open(args.config_file, 'r') as f:
        cfg = yaml.safe_load(f)
    cfg['img_hw'] = (cfg['img_hw'][0], cfg['img_hw'][1])
    cfg['log_dump_dir'] = os.path.join(args.model_dir, 'log.pkl')
    # Keep a copy of the config next to the checkpoints for reproducibility.
    shutil.copy(args.config_file, args.model_dir)
    # copy attr into cfg
    for attr in dir(args):
        if attr[:2] != '__':
            cfg[attr] = getattr(args, attr)
    # set gpu
    num_gpus = len(args.gpu.split(','))
    if (args.multi_gpu and num_gpus <= 1) or ((not args.multi_gpu) and num_gpus > 1):
        raise ValueError('Error! the number of gpus used in the --gpu argument does not match the argument --multi_gpu.')
    # Scale the effective batch size (and shrink the iteration count) with
    # the number of GPUs so the total number of samples seen stays the same.
    if args.multi_gpu:
        cfg['batch_size'] = cfg['batch_size'] * num_gpus
        cfg['num_iterations'] = int(cfg['num_iterations'] / num_gpus)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    # Lightweight namespace object so config entries are attribute-accessible.
    class pObject(object):
        def __init__(self):
            pass
    cfg_new = pObject()
    for attr in list(cfg.keys()):
        setattr(cfg_new, attr, cfg[attr])
    with open(os.path.join(args.model_dir, 'config.pkl'), 'wb') as f:
        pickle.dump(cfg_new, f)
    # main function
    train(cfg_new)
| 10,881 | 49.37963 | 169 | py |
TrianFlow | TrianFlow-master/infer_vo.py | import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from core.networks.model_depth_pose import Model_depth_pose
from core.networks.model_flow import Model_flow
from visualizer import *
from profiler import Profiler
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pdb
from sklearn import linear_model
import yaml
import warnings
import copy
from collections import OrderedDict
warnings.filterwarnings("ignore")
def save_traj(path, poses):
    """
    Write a trajectory to ``path`` in KITTI pose format.

    path: file path of saved poses
    poses: list of global poses; each pose is flattened and its first 12
        values (the top 3x4 block of the 4x4 matrix) are written as one
        space-separated line.
    """
    # Use a context manager so the file is flushed and closed even on error
    # (the original left the handle open).
    with open(path, 'w') as f:
        for i in range(len(poses)):
            pose = poses[i].flatten()[:12]  # [3x4]
            line = " ".join([str(j) for j in pose])
            f.write(line + '\n')
    print('Trajectory Saved.')
def projection(xy, points, h_max, w_max):
    """Scatter the z-values of 3D ``points`` into an (h_max, w_max) depth map.

    xy: [N, 2] pixel coordinates (x, y); points: [3, N] 3D points matched to
    those pixels. Coordinates are rounded to the nearest pixel and any that
    fall outside the image are dropped; untouched pixels remain 0.
    """
    depth_map = np.zeros((h_max, w_max))
    pix = np.around(xy).astype('int')
    # Keep only correspondences that land inside the image bounds.
    inside = ((pix[:, 0] >= 0) & (pix[:, 0] < w_max)
              & (pix[:, 1] >= 0) & (pix[:, 1] < h_max))
    pix = pix[inside]
    depth_map[pix[:, 1], pix[:, 0]] = points[:, inside][2]
    return depth_map
def unprojection(xy, depth, K):
    """Back-project 2D pixels into 3D camera coordinates.

    xy: [N, 2] image coordinates of match points.
    depth: [N] depth value per point.
    K: [3, 3] camera intrinsics.
    Returns [N, 3] points computed as K^-1 @ [x, y, 1]^T scaled by depth.
    """
    n_points = xy.shape[0]
    # Homogeneous coordinates, transposed to [3, N] for the matrix product.
    homo = np.concatenate([xy, np.ones((n_points, 1))], axis=1).T
    cam_points = np.linalg.inv(K) @ homo * depth
    return cam_points.T  # [N, 3]
def cv_triangulation(matches, pose):
    """Triangulate matched (normalized) image points into 3D.

    Camera 1 sits at the identity; camera 2 at ``pose`` relative to it.
    Returns the triangulated points expressed in each camera frame,
    as two [3, N] arrays.
    """
    # matches: [N, 4], the correspondence xy coordinates
    # pose: [4, 4], the relative pose trans from 1 to 2
    xy1 = matches[:, :2].transpose()
    xy2 = matches[:, 2:].transpose() # [2, N]
    pose1 = np.eye(4)
    pose2 = pose1 @ pose
    points = cv2.triangulatePoints(pose1[:3], pose2[:3], xy1, xy2)
    # Dehomogenize the 4D output.
    points /= points[3]
    points1 = pose1[:3] @ points
    points2 = pose2[:3] @ points
    return points1, points2
class infer_vo():
    """Monocular visual odometry over a KITTI odometry sequence.

    Loads and rescales images and camera intrinsics, then chains per-frame
    relative poses into a global trajectory: rotation/direction come from the
    essential matrix over flow matches, translation scale is aligned to the
    depth network's predictions via RANSAC regression, and PnP is used as a
    fallback when the flow-based pose is degenerate.
    """
    def __init__(self, seq_id, sequences_root_dir):
        self.img_dir = sequences_root_dir
        #self.img_dir = '/home4/zhaow/data/kitti_odometry/sampled_s4_sequences/'
        self.seq_id = seq_id
        # Raw KITTI resolution and the resolution the networks run at.
        self.raw_img_h = 370.0#320
        self.raw_img_w = 1226.0#1024
        self.new_img_h = 256#320
        self.new_img_w = 832#1024
        self.max_depth = 50.0
        self.min_depth = 0.0
        self.cam_intrinsics = self.read_rescale_camera_intrinsics(os.path.join(self.img_dir, seq_id) + '/calib.txt')
        # Robust-estimation hyper-parameters.
        self.flow_pose_ransac_thre = 0.1 #0.2
        self.flow_pose_ransac_times = 10 #5
        self.flow_pose_min_flow = 5
        self.align_ransac_min_samples = 3
        self.align_ransac_max_trials = 100
        self.align_ransac_stop_prob = 0.99
        self.align_ransac_thre = 1.0
        self.PnP_ransac_iter = 1000
        self.PnP_ransac_thre = 1
        self.PnP_ransac_times = 5

    def read_rescale_camera_intrinsics(self, path):
        """Read the last calibration line of ``path`` and rescale the 3x3
        intrinsics from the raw image size to the network input size."""
        raw_img_h = self.raw_img_h
        raw_img_w = self.raw_img_w
        new_img_h = self.new_img_h
        new_img_w = self.new_img_w
        with open(path, 'r') as f:
            lines = f.readlines()
        data = lines[-1].strip('\n').split(' ')[1:]
        data = [float(k) for k in data]
        data = np.array(data).reshape(3,4)
        cam_intrinsics = data[:3,:3]
        # Focal lengths / principal point scale linearly with the resize.
        cam_intrinsics[0,:] = cam_intrinsics[0,:] * new_img_w / raw_img_w
        cam_intrinsics[1,:] = cam_intrinsics[1,:] * new_img_h / raw_img_h
        return cam_intrinsics

    def load_images(self):
        """Load and resize every frame of ``<img_dir>/<seq_id>/image_2``."""
        path = self.img_dir
        seq = self.seq_id
        new_img_h = self.new_img_h
        new_img_w = self.new_img_w
        seq_dir = os.path.join(path, seq)
        image_dir = os.path.join(seq_dir, 'image_2')
        num = len(os.listdir(image_dir))
        images = []
        for i in range(num):
            image = cv2.imread(os.path.join(image_dir, '%.6d'%i)+'.png')
            image = cv2.resize(image, (new_img_w, new_img_h))
            images.append(image)
        return images

    def get_prediction(self, img1, img2, model, K, K_inv, match_num):
        """Run the network on one image pair; returns (matches [N,4] as
        numpy, depth map of img1, depth map of img2)."""
        # img1: [3,H,W] K: [3,3]
        #visualizer = Visualizer_debug('/home3/zhaow/TrianFlow-pytorch/vis/')
        img1_t = torch.from_numpy(np.transpose(img1 / 255.0, [2,0,1])).cuda().float().unsqueeze(0)
        img2_t = torch.from_numpy(np.transpose(img2 / 255.0, [2,0,1])).cuda().float().unsqueeze(0)
        K = torch.from_numpy(K).cuda().float().unsqueeze(0)
        K_inv = torch.from_numpy(K_inv).cuda().float().unsqueeze(0)
        filt_depth_match, depth1, depth2 = model.infer_vo(img1_t, img2_t, K, K_inv, match_num)
        return filt_depth_match[0].transpose(0,1).cpu().detach().numpy(), depth1[0].squeeze(0).cpu().detach().numpy(), depth2[0].squeeze(0).cpu().detach().numpy()

    def process_video(self, images, model):
        '''Process a sequence to get scale consistent trajectory results.
        Register according to depth net predictions. Here we assume depth predictions have consistent scale.
        If not, pleas use process_video_tri which only use triangulated depth to get self-consistent scaled pose.
        '''
        poses = []
        global_pose = np.eye(4)
        # The first one global pose is origin.
        poses.append(copy.deepcopy(global_pose))
        seq_len = len(images)
        K = self.cam_intrinsics
        K_inv = np.linalg.inv(self.cam_intrinsics)
        for i in range(seq_len-1):
            img1, img2 = images[i], images[i+1]
            depth_match, depth1, depth2 = self.get_prediction(img1, img2, model, K, K_inv, match_num=5000)
            rel_pose = np.eye(4)
            # Rotation + translation direction from the essential matrix...
            flow_pose = self.solve_pose_flow(depth_match[:,:2], depth_match[:,2:])
            rel_pose[:3,:3] = copy.deepcopy(flow_pose[:3,:3])
            if np.linalg.norm(flow_pose[:3,3:]) != 0:
                # ...with translation scale aligned to the predicted depth.
                scale = self.align_to_depth(depth_match[:,:2], depth_match[:,2:], flow_pose, depth2)
                rel_pose[:3,3:] = flow_pose[:3,3:] * scale
            if np.linalg.norm(flow_pose[:3,3:]) == 0 or scale == -1:
                # Degenerate flow pose or failed scale alignment: fall back to PnP.
                print('PnP '+str(i))
                pnp_pose = self.solve_pose_pnp(depth_match[:,:2], depth_match[:,2:], depth1)
                rel_pose = pnp_pose
            # Compose the relative pose onto the running global pose.
            global_pose[:3,3:] = np.matmul(global_pose[:3,:3], rel_pose[:3,3:]) + global_pose[:3,3:]
            global_pose[:3,:3] = np.matmul(global_pose[:3,:3], rel_pose[:3,:3])
            poses.append(copy.deepcopy(global_pose))
            print(i)
        return poses

    def normalize_coord(self, xy, K):
        """Convert pixel coordinates [N, 2] into normalized camera
        coordinates using intrinsics ``K`` (input is not mutated)."""
        xy_norm = copy.deepcopy(xy)
        xy_norm[:,0] = (xy[:,0] - K[0,2]) / K[0,0]
        xy_norm[:,1] = (xy[:,1] - K[1,2]) / K[1,1]
        return xy_norm

    def align_to_depth(self, xy1, xy2, pose, depth2):
        """Estimate the scale relating triangulated depth to the network's
        predicted depth via RANSAC linear regression; returns -1 when too
        few valid correspondences exist."""
        # Align the translation scale according to triangulation depth
        # xy1, xy2: [N, 2] pose: [4, 4] depth2: [H, W]
        # Triangulation
        img_h, img_w = np.shape(depth2)[0], np.shape(depth2)[1]
        pose_inv = np.linalg.inv(pose)
        xy1_norm = self.normalize_coord(xy1, self.cam_intrinsics)
        xy2_norm = self.normalize_coord(xy2, self.cam_intrinsics)
        points1_tri, points2_tri = cv_triangulation(np.concatenate([xy1_norm, xy2_norm], axis=1), pose_inv)
        depth2_tri = projection(xy2, points2_tri, img_h, img_w)
        depth2_tri[depth2_tri < 0] = 0
        # Remove negative depths
        valid_mask = (depth2 > 0) * (depth2_tri > 0)
        depth_pred_valid = depth2[valid_mask]
        depth_tri_valid = depth2_tri[valid_mask]
        if np.sum(valid_mask) > 100:
            # Zero-intercept linear fit: pred_depth = scale * tri_depth.
            scale_reg = linear_model.RANSACRegressor(base_estimator=linear_model.LinearRegression(fit_intercept=False), min_samples=self.align_ransac_min_samples, \
                        max_trials=self.align_ransac_max_trials, stop_probability=self.align_ransac_stop_prob, residual_threshold=self.align_ransac_thre)
            scale_reg.fit(depth_tri_valid.reshape(-1, 1), depth_pred_valid.reshape(-1, 1))
            scale = scale_reg.estimator_.coef_[0, 0]
        else:
            scale = -1
        return scale

    def solve_pose_pnp(self, xy1, xy2, depth1):
        """Solve the relative pose via PnP-RANSAC on 3D points unprojected
        from ``depth1`` and their 2D matches ``xy2``."""
        # Use pnp to solve relative poses.
        # xy1, xy2: [N, 2] depth1: [H, W]
        img_h, img_w = np.shape(depth1)[0], np.shape(depth1)[1]
        # Ensure all the correspondences are inside the image.
        x_idx = (xy2[:, 0] >= 0) * (xy2[:, 0] < img_w)
        y_idx = (xy2[:, 1] >= 0) * (xy2[:, 1] < img_h)
        idx = y_idx * x_idx
        xy1 = xy1[idx]
        xy2 = xy2[idx]
        # BUGFIX: np.int was removed in NumPy 1.24; the builtin int is the
        # exact type the deprecated alias stood for.
        xy1_int = xy1.astype(int)
        sample_depth = depth1[xy1_int[:,1], xy1_int[:,0]]
        valid_depth_mask = (sample_depth < self.max_depth) * (sample_depth > self.min_depth)
        xy1 = xy1[valid_depth_mask]
        xy2 = xy2[valid_depth_mask]
        # Unproject to 3d space
        points1 = unprojection(xy1, sample_depth[valid_depth_mask], self.cam_intrinsics)
        # ransac
        best_rt = []
        max_inlier_num = 0
        max_ransac_iter = self.PnP_ransac_times
        for i in range(max_ransac_iter):
            if xy2.shape[0] > 4:
                flag, r, t, inlier = cv2.solvePnPRansac(objectPoints=points1, imagePoints=xy2, cameraMatrix=self.cam_intrinsics, distCoeffs=None, iterationsCount=self.PnP_ransac_iter, reprojectionError=self.PnP_ransac_thre)
                if flag and inlier.shape[0] > max_inlier_num:
                    best_rt = [r, t]
                    max_inlier_num = inlier.shape[0]
        pose = np.eye(4)
        if len(best_rt) != 0:
            r, t = best_rt
            pose[:3,:3] = cv2.Rodrigues(r)[0]
            pose[:3,3:] = t
        # PnP gives the world->camera transform; invert for the relative pose.
        pose = np.linalg.inv(pose)
        return pose

    def solve_pose_flow(self, xy1, xy2):
        """Solve the relative pose (unit-scale translation) from flow
        correspondences via the essential matrix; identity when the flow is
        too small or no candidate passes the cheirality check."""
        # Solve essential matrix to find relative pose from flow.
        # ransac
        best_rt = []
        max_inlier_num = 0
        max_ransac_iter = self.flow_pose_ransac_times
        best_inliers = np.ones((xy1.shape[0])) == 1
        pp = (self.cam_intrinsics[0,2], self.cam_intrinsics[1,2])
        # flow magnitude
        avg_flow = np.mean(np.linalg.norm(xy1 - xy2, axis=1))
        if avg_flow > self.flow_pose_min_flow:
            for i in range(max_ransac_iter):
                E, inliers = cv2.findEssentialMat(xy2, xy1, focal=self.cam_intrinsics[0,0], pp=pp, method=cv2.RANSAC, prob=0.99, threshold=self.flow_pose_ransac_thre)
                cheirality_cnt, R, t, _ = cv2.recoverPose(E, xy2, xy1, focal=self.cam_intrinsics[0,0], pp=pp)
                if inliers.sum() > max_inlier_num and cheirality_cnt > 50:
                    best_rt = [R, t]
                    max_inlier_num = inliers.sum()
                    best_inliers = inliers
            if len(best_rt) == 0:
                R = np.eye(3)
                t = np.zeros((3,1))
                best_rt = [R, t]
        else:
            # Near-zero flow (static frame): report identity motion.
            R = np.eye(3)
            t = np.zeros((3,1))
            best_rt = [R, t]
        R, t = best_rt
        pose = np.eye(4)
        pose[:3,:3] = R
        pose[:3,3:] = t
        return pose
if __name__ == '__main__':
    import argparse
    arg_parser = argparse.ArgumentParser(
        description="TrianFlow training pipeline."
    )
    arg_parser.add_argument('-c', '--config_file', default=None, help='config file.')
    arg_parser.add_argument('-g', '--gpu', type=str, default=0, help='gpu id.')
    arg_parser.add_argument('--mode', type=str, default='flow', help='training mode.')
    arg_parser.add_argument('--traj_save_dir_txt', type=str, default=None, help='directory for saving results')
    arg_parser.add_argument('--sequences_root_dir', type=str, default=None, help='directory for test sequences')
    arg_parser.add_argument('--sequence', type=str, default='09', help='Test sequence id.')
    arg_parser.add_argument('--pretrained_model', type=str, default=None, help='directory for loading pretrained models')
    args = arg_parser.parse_args()
    with open(args.config_file, 'r') as f:
        cfg = yaml.safe_load(f)
    cfg['dataset'] = 'kitti_odo'
    # copy attr into cfg
    for attr in dir(args):
        if attr[:2] != '__':
            cfg[attr] = getattr(args, attr)
    # Lightweight namespace object so config entries are attribute-accessible.
    class pObject(object):
        def __init__(self):
            pass
    cfg_new = pObject()
    for attr in list(cfg.keys()):
        setattr(cfg_new, attr, cfg[attr])
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    model = Model_depth_pose(cfg_new)
    model.cuda()
    weights = torch.load(args.pretrained_model)
    model.load_state_dict(weights['model_state_dict'])
    model.eval()
    print('Model Loaded.')
    print('Testing VO.')
    # Run visual odometry over the whole sequence and save the trajectory.
    vo_test = infer_vo(args.sequence, args.sequences_root_dir)
    images = vo_test.load_images()
    print('Images Loaded. Total ' + str(len(images)) + ' images found.')
    poses = vo_test.process_video(images, model)
    print('Test completed.')
    traj_txt = args.traj_save_dir_txt
    save_traj(traj_txt, poses)
| 13,467 | 38.964392 | 223 | py |
TrianFlow | TrianFlow-master/core/networks/model_flow.py | import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from structures import *
from pytorch_ssim import SSIM
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pdb
import cv2
def transformerFwd(U,
                   flo,
                   out_size,
                   name='SpatialTransformerFwd'):
    """Forward Warping Layer described in
    'Occlusion Aware Unsupervised Learning of Optical Flow by Yang Wang et al'

    Each source pixel is "splatted" into the four target cells surrounding its
    flow-displaced position via bilinear weights and `scatter_add`; target
    cells nobody maps to stay zero (this is what makes it usable as an
    occlusion detector when U is an all-ones mask).

    Parameters
    ----------
    U : float
        The output of a convolutional net should have the
        shape [num_batch, height, width, num_channels].
    flo: float
        The optical flow used for forward warping
        having the shape of [num_batch, height, width, 2].
    backprop: boolean
        Indicates whether to back-propagate through forward warping layer
    out_size: tuple of two ints
        The size of the output of the network (height, width)
    """

    def _repeat(x, n_repeats):
        # Repeat each element of x n_repeats times: [n] -> [n * n_repeats].
        rep = torch.ones(size=[n_repeats], dtype=torch.long).unsqueeze(1).transpose(1,0)
        x = x.view([-1,1]).mm(rep)
        return x.view([-1]).int()

    def _interpolate(im, x, y, out_size):
        # constants
        num_batch, height, width, channels = im.shape[0], im.shape[1], im.shape[2], im.shape[3]
        out_height = out_size[0]
        out_width = out_size[1]
        max_y = int(height - 1)
        max_x = int(width - 1)

        # scale indices from [-1, 1] to [0, width/height]
        x = (x + 1.0) * (width - 1.0) / 2.0
        y = (y + 1.0) * (height - 1.0) / 2.0

        # corners of the 2x2 neighbourhood around each (x, y)
        x0 = (torch.floor(x)).int()
        x1 = x0 + 1
        y0 = (torch.floor(y)).int()
        y1 = y0 + 1

        x0_c = torch.clamp(x0, 0, max_x)
        x1_c = torch.clamp(x1, 0, max_x)
        y0_c = torch.clamp(y0, 0, max_y)
        y1_c = torch.clamp(y1, 0, max_y)

        # flat-index bases into the [b*h*w, c] image
        dim2 = width
        dim1 = width * height
        base = _repeat(torch.arange(0, num_batch) * dim1, out_height * out_width).to(im.get_device())

        base_y0 = base + y0_c * dim2
        base_y1 = base + y1_c * dim2
        idx_a = base_y0 + x0_c
        idx_b = base_y1 + x0_c
        idx_c = base_y0 + x1_c
        idx_d = base_y1 + x1_c

        # use indices to lookup pixels in the flat image and restore
        # channels dim
        im_flat = im.view([-1, channels])
        im_flat = im_flat.float()

        # bilinear splatting weights for the four corners
        x0_f = x0.float()
        x1_f = x1.float()
        y0_f = y0.float()
        y1_f = y1.float()
        wa = ((x1_f - x) * (y1_f - y)).unsqueeze(1)
        wb = ((x1_f - x) * (y - y0_f)).unsqueeze(1)
        wc = ((x - x0_f) * (y1_f - y)).unsqueeze(1)
        wd = ((x - x0_f) * (y - y0_f)).unsqueeze(1)

        # zero the weight of any corner that was clamped (i.e. fell outside
        # the image), so out-of-bounds splats contribute nothing
        zerof = torch.zeros_like(wa)
        wa = torch.where(
            (torch.eq(x0_c, x0) & torch.eq(y0_c, y0)).unsqueeze(1), wa, zerof)
        wb = torch.where(
            (torch.eq(x0_c, x0) & torch.eq(y1_c, y1)).unsqueeze(1), wb, zerof)
        wc = torch.where(
            (torch.eq(x1_c, x1) & torch.eq(y0_c, y0)).unsqueeze(1), wc, zerof)
        wd = torch.where(
            (torch.eq(x1_c, x1) & torch.eq(y1_c, y1)).unsqueeze(1), wd, zerof)

        zeros = torch.zeros(
            size=[
                int(num_batch) * int(height) *
                int(width), int(channels)
            ],
            dtype=torch.float)
        output = zeros.to(im.get_device())
        # accumulate each source pixel into its four neighbouring target cells
        output = output.scatter_add(dim=0, index=idx_a.long().unsqueeze(1).repeat(1,channels), src=im_flat * wa)
        output = output.scatter_add(dim=0, index=idx_b.long().unsqueeze(1).repeat(1,channels), src=im_flat * wb)
        output = output.scatter_add(dim=0, index=idx_c.long().unsqueeze(1).repeat(1,channels), src=im_flat * wc)
        output = output.scatter_add(dim=0, index=idx_d.long().unsqueeze(1).repeat(1,channels), src=im_flat * wd)

        return output

    def _meshgrid(height, width):
        # Normalized [-1, 1] coordinate grids, as float tensors.
        # This should be equivalent to:
        x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
                               np.linspace(-1, 1, height))
        # ones = np.ones(np.prod(x_t.shape))
        # grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
        return torch.from_numpy(x_t).float(), torch.from_numpy(y_t).float()

    def _transform(flo, input_dim, out_size):
        num_batch, height, width, num_channels = input_dim.shape[0:4]

        # grid of (x_t, y_t, 1), eq (1) in ref [1]
        height_f = float(height)
        width_f = float(width)
        out_height = out_size[0]
        out_width = out_size[1]
        x_s, y_s = _meshgrid(out_height, out_width)
        x_s = x_s.to(flo.get_device()).unsqueeze(0)
        x_s = x_s.repeat([num_batch, 1, 1])
        y_s = y_s.to(flo.get_device()).unsqueeze(0)
        y_s =y_s.repeat([num_batch, 1, 1])

        # displace the grid by the (pixel-unit) flow, rescaled to [-1, 1]
        x_t = x_s + flo[:, :, :, 0] / ((out_width - 1.0) / 2.0)
        y_t = y_s + flo[:, :, :, 1] / ((out_height - 1.0) / 2.0)

        x_t_flat = x_t.view([-1])
        y_t_flat = y_t.view([-1])

        input_transformed = _interpolate(input_dim, x_t_flat, y_t_flat,
                                         out_size)

        output = input_transformed.view([num_batch, out_height, out_width, num_channels])
        return output

    #out_size = int(out_size)
    output = _transform(flo, U, out_size)
    return output
class Model_flow(nn.Module):
    """Unsupervised optical-flow model.

    Wraps a feature pyramid + PWC-style flow decoder and computes
    photometric / SSIM / smoothness losses between an image pair, using
    forward-warping occlusion masks and forward-backward consistency masks.
    Inputs to ``forward`` are two images stacked vertically in one tensor.
    """
    def __init__(self, cfg):
        super(Model_flow, self).__init__()
        self.fpyramid = FeaturePyramid()
        self.pwc_model = PWC_tf()
        if cfg.mode == 'depth' or cfg.mode == 'flowposenet':
            # Stage 2 training: the flow networks are frozen.
            for param in self.fpyramid.parameters():
                param.requires_grad = False
            for param in self.pwc_model.parameters():
                param.requires_grad = False
        # hyperparameters
        self.dataset = cfg.dataset
        self.num_scales = cfg.num_scales
        self.flow_consist_alpha = cfg.h_flow_consist_alpha
        self.flow_consist_beta = cfg.h_flow_consist_beta

    def get_occlusion_mask_from_flow(self, tensor_size, flow):
        """Forward-warp an all-ones mask; cells no pixel maps to become 0 (occluded)."""
        mask = torch.ones(tensor_size).to(flow.get_device())
        h, w = mask.shape[2], mask.shape[3]
        occ_mask = transformerFwd(mask.permute(0,2,3,1), flow.permute(0,2,3,1), out_size=[h,w]).permute(0,3,1,2)
        with torch.no_grad():
            occ_mask = torch.clamp(occ_mask, 0.0, 1.0)
        return occ_mask

    def get_flow_norm(self, flow, p=2):
        """Per-pixel p-norm of a [bs, 2, H, W] flow; returns [bs, 1, H, W]."""
        flow_norm = torch.norm(flow, p=p, dim=1).unsqueeze(1) + 1e-12
        return flow_norm

    def get_visible_masks(self, optical_flows, optical_flows_rev):
        """Occlusion (visibility) masks for every pyramid scale, both directions."""
        batch_size, _, img_h, img_w = optical_flows[0].shape
        img2_visible_masks, img1_visible_masks = [], []
        for s, (optical_flow, optical_flow_rev) in enumerate(zip(optical_flows, optical_flows_rev)):
            shape = [batch_size, 1, int(img_h / (2**s)), int(img_w / (2**s))]
            img2_visible_masks.append(self.get_occlusion_mask_from_flow(shape, optical_flow))
            img1_visible_masks.append(self.get_occlusion_mask_from_flow(shape, optical_flow_rev))
        return img2_visible_masks, img1_visible_masks

    def get_consistent_masks(self, optical_flows, optical_flows_rev):
        """Forward-backward consistency masks and flow-difference maps per scale."""
        batch_size, _, img_h, img_w = optical_flows[0].shape
        img2_consis_masks, img1_consis_masks, fwd_flow_diff_pyramid, bwd_flow_diff_pyramid = [], [], [], []
        for s, (optical_flow, optical_flow_rev) in enumerate(zip(optical_flows, optical_flows_rev)):
            # warp the opposite-direction flow and add: zero for consistent pixels
            bwd2fwd_flow = warp_flow(optical_flow_rev, optical_flow)
            fwd2bwd_flow = warp_flow(optical_flow, optical_flow_rev)

            fwd_flow_diff = torch.abs(bwd2fwd_flow + optical_flow)
            fwd_flow_diff_pyramid.append(fwd_flow_diff)
            bwd_flow_diff = torch.abs(fwd2bwd_flow + optical_flow_rev)
            bwd_flow_diff_pyramid.append(bwd_flow_diff)

            # flow consistency condition: |diff| < max(beta * |flow|, alpha)
            bwd_consist_bound = torch.max(self.flow_consist_beta * self.get_flow_norm(optical_flow_rev), torch.from_numpy(np.array([self.flow_consist_alpha])).float().to(optical_flow_rev.get_device()))
            fwd_consist_bound = torch.max(self.flow_consist_beta * self.get_flow_norm(optical_flow), torch.from_numpy(np.array([self.flow_consist_alpha])).float().to(optical_flow.get_device()))
            with torch.no_grad():
                noc_masks_img2 = (self.get_flow_norm(bwd_flow_diff) < bwd_consist_bound).float()
                noc_masks_img1 = (self.get_flow_norm(fwd_flow_diff) < fwd_consist_bound).float()
            img2_consis_masks.append(noc_masks_img2)
            img1_consis_masks.append(noc_masks_img1)
        return img2_consis_masks, img1_consis_masks, fwd_flow_diff_pyramid, bwd_flow_diff_pyramid

    def generate_img_pyramid(self, img, num_pyramid):
        """Downsample `img` by powers of two into `num_pyramid` levels."""
        img_h, img_w = img.shape[2], img.shape[3]
        img_pyramid = []
        for s in range(num_pyramid):
            img_new = F.adaptive_avg_pool2d(img, [int(img_h / (2**s)), int(img_w / (2**s))]).data
            img_pyramid.append(img_new)
        return img_pyramid

    def warp_flow_pyramid(self, img_pyramid, flow_pyramid):
        """Backward-warp each pyramid image by the matching-scale flow."""
        img_warped_pyramid = []
        for img, flow in zip(img_pyramid, flow_pyramid):
            img_warped_pyramid.append(warp_flow(img, flow))
        return img_warped_pyramid

    def compute_loss_pixel(self, img_pyramid, img_warped_pyramid, occ_mask_list):
        """Masked L1 photometric loss summed over scales; returns shape (B)."""
        loss_list = []
        for scale in range(self.num_scales):
            img, img_warped, occ_mask = img_pyramid[scale], img_warped_pyramid[scale], occ_mask_list[scale]
            divider = occ_mask.mean((1,2,3))
            img_diff = torch.abs((img - img_warped)) * occ_mask.repeat(1,3,1,1)
            loss_pixel = img_diff.mean((1,2,3)) / (divider + 1e-12) # (B)
            loss_list.append(loss_pixel[:,None])
        loss = torch.cat(loss_list, 1).sum(1) # (B)
        return loss

    def compute_loss_ssim(self, img_pyramid, img_warped_pyramid, occ_mask_list):
        """Masked SSIM dissimilarity loss summed over scales; returns shape (B)."""
        loss_list = []
        for scale in range(self.num_scales):
            img, img_warped, occ_mask = img_pyramid[scale], img_warped_pyramid[scale], occ_mask_list[scale]
            divider = occ_mask.mean((1,2,3))
            occ_mask_pad = occ_mask.repeat(1,3,1,1)
            ssim = SSIM(img * occ_mask_pad, img_warped * occ_mask_pad)
            loss_ssim = torch.clamp((1.0 - ssim) / 2.0, 0, 1).mean((1,2,3))
            loss_ssim = loss_ssim / (divider + 1e-12)
            loss_list.append(loss_ssim[:,None])
        loss = torch.cat(loss_list, 1).sum(1)
        return loss

    def gradients(self, img):
        """First-order finite differences along H (dy) and W (dx)."""
        dy = img[:,:,1:,:] - img[:,:,:-1,:]
        dx = img[:,:,:,1:] - img[:,:,:,:-1]
        return dx, dy

    def cal_grad2_error(self, flow, img):
        """Edge-aware second-order smoothness error of `flow`, weighted by image gradients."""
        img_grad_x, img_grad_y = self.gradients(img)
        w_x = torch.exp(-10.0 * torch.abs(img_grad_x).mean(1).unsqueeze(1))
        w_y = torch.exp(-10.0 * torch.abs(img_grad_y).mean(1).unsqueeze(1))

        dx, dy = self.gradients(flow)
        dx2, _ = self.gradients(dx)
        _, dy2 = self.gradients(dy)
        error = (w_x[:,:,:,1:] * torch.abs(dx2)).mean((1,2,3)) + (w_y[:,:,1:,:] * torch.abs(dy2)).mean((1,2,3))
        return error / 2.0

    def compute_loss_flow_smooth(self, optical_flows, img_pyramid):
        """Second-order flow smoothness loss summed over scales (flow scaled by 1/20)."""
        loss_list = []
        for scale in range(self.num_scales):
            flow, img = optical_flows[scale], img_pyramid[scale]
            error = self.cal_grad2_error(flow/20.0, img)
            loss_list.append(error[:,None])
        loss = torch.cat(loss_list, 1).sum(1)
        return loss

    def compute_loss_flow_consis(self, fwd_flow_diff_pyramid, occ_mask_list):
        """Masked mean of fwd/bwd flow differences summed over scales."""
        loss_list = []
        for scale in range(self.num_scales):
            fwd_flow_diff, occ_mask = fwd_flow_diff_pyramid[scale], occ_mask_list[scale]
            divider = occ_mask.mean((1,2,3))
            loss_consis = (fwd_flow_diff * occ_mask).mean((1,2,3))
            loss_consis = loss_consis / (divider + 1e-12)
            loss_list.append(loss_consis[:,None])
        loss = torch.cat(loss_list, 1).sum(1)
        return loss

    def inference_flow(self, img1, img2):
        """Full-resolution flow from img1 to img2 (highest pyramid level only)."""
        img_hw = [img1.shape[2], img1.shape[3]]
        feature_list_1, feature_list_2 = self.fpyramid(img1), self.fpyramid(img2)
        optical_flow = self.pwc_model(feature_list_1, feature_list_2, img_hw)[0]
        return optical_flow

    def inference_corres(self, img1, img2):
        """Bidirectional flow plus validity masks and flow-diff maps (top scale)."""
        batch_size, img_h, img_w = img1.shape[0], img1.shape[2], img1.shape[3]
        # get the optical flows and reverse optical flows for each pair of adjacent images
        feature_list_1, feature_list_2 = self.fpyramid(img1), self.fpyramid(img2)
        optical_flows = self.pwc_model(feature_list_1, feature_list_2, [img_h, img_w])
        optical_flows_rev = self.pwc_model(feature_list_2, feature_list_1, [img_h, img_w])
        # get occlusion masks
        img2_visible_masks, img1_visible_masks = self.get_visible_masks(optical_flows, optical_flows_rev)
        # get consistent masks
        img2_consis_masks, img1_consis_masks, fwd_flow_diff_pyramid, bwd_flow_diff_pyramid = self.get_consistent_masks(optical_flows, optical_flows_rev)
        # get final valid masks: visible AND consistent
        img2_valid_masks, img1_valid_masks = [], []
        for i, (img2_visible_mask, img1_visible_mask, img2_consis_mask, img1_consis_mask) in enumerate(zip(img2_visible_masks, img1_visible_masks, img2_consis_masks, img1_consis_masks)):
            img2_valid_masks.append(img2_visible_mask * img2_consis_mask)
            img1_valid_masks.append(img1_visible_mask * img1_consis_mask)
        return optical_flows[0], optical_flows_rev[0], img1_valid_masks[0], img2_valid_masks[0], fwd_flow_diff_pyramid[0], bwd_flow_diff_pyramid[0]

    def forward(self, inputs, output_flow=False, use_flow_loss=True):
        """Compute the flow losses for one vertically stacked image pair.

        inputs: (images, K_ms, K_inv_ms); `images` is [B, 3, 2*H, W] with the
        two frames stacked along the height axis. Returns a loss dict, plus
        (when `output_flow` or `use_flow_loss` is False) the top-scale flows,
        valid masks, and flow-diff maps.
        """
        images, K_ms, K_inv_ms = inputs
        assert (images.shape[1] == 3)
        img_h, img_w = int(images.shape[2] / 2), images.shape[3]
        img1, img2 = images[:,:,:img_h,:], images[:,:,img_h:,:]
        batch_size = img1.shape[0]

        #cv2.imwrite('./test1.png', np.transpose(255*img1[0].cpu().detach().numpy(), [1,2,0]).astype(np.uint8))
        #cv2.imwrite('./test2.png', np.transpose(255*img2[0].cpu().detach().numpy(), [1,2,0]).astype(np.uint8))
        #pdb.set_trace()

        # get the optical flows and reverse optical flows for each pair of adjacent images
        feature_list_1, feature_list_2 = self.fpyramid(img1), self.fpyramid(img2)
        optical_flows = self.pwc_model(feature_list_1, feature_list_2, [img_h, img_w])
        optical_flows_rev = self.pwc_model(feature_list_2, feature_list_1, [img_h, img_w])
        # get occlusion masks
        img2_visible_masks, img1_visible_masks = self.get_visible_masks(optical_flows, optical_flows_rev)
        # get consistent masks
        img2_consis_masks, img1_consis_masks, fwd_flow_diff_pyramid, bwd_flow_diff_pyramid = self.get_consistent_masks(optical_flows, optical_flows_rev)
        # get final valid masks (nyuv2 skips the consistency term)
        img2_valid_masks, img1_valid_masks = [], []
        for i, (img2_visible_mask, img1_visible_mask, img2_consis_mask, img1_consis_mask) in enumerate(zip(img2_visible_masks, img1_visible_masks, img2_consis_masks, img1_consis_masks)):
            if self.dataset == 'nyuv2':
                img2_valid_masks.append(img2_visible_mask)
                img1_valid_masks.append(img1_visible_mask)
            else:
                img2_valid_masks.append(img2_visible_mask * img2_consis_mask)
                img1_valid_masks.append(img1_visible_mask * img1_consis_mask)

        loss_pack = {}
        if not use_flow_loss:
            # Flow nets are frozen: return zero losses but still expose flows/masks.
            loss_pack['loss_pixel'] = torch.zeros([2]).to(img1.get_device()).requires_grad_()
            loss_pack['loss_ssim'] = torch.zeros([2]).to(img1.get_device()).requires_grad_()
            loss_pack['loss_flow_smooth'] = torch.zeros([2]).to(img1.get_device()).requires_grad_()
            loss_pack['loss_flow_consis'] = torch.zeros([2]).to(img1.get_device()).requires_grad_()
            return loss_pack, optical_flows[0], optical_flows_rev[0], img1_valid_masks[0], img2_valid_masks[0], fwd_flow_diff_pyramid[0], bwd_flow_diff_pyramid[0]

        # warp images
        img1_pyramid = self.generate_img_pyramid(img1, len(optical_flows_rev))
        img2_pyramid = self.generate_img_pyramid(img2, len(optical_flows))
        img1_warped_pyramid = self.warp_flow_pyramid(img2_pyramid, optical_flows)
        img2_warped_pyramid = self.warp_flow_pyramid(img1_pyramid, optical_flows_rev)

        # compute loss (symmetric: both warp directions)
        loss_pack['loss_pixel'] = self.compute_loss_pixel(img1_pyramid, img1_warped_pyramid, img1_valid_masks) + \
            self.compute_loss_pixel(img2_pyramid, img2_warped_pyramid, img2_valid_masks)
        loss_pack['loss_ssim'] = self.compute_loss_ssim(img1_pyramid, img1_warped_pyramid, img1_valid_masks) + \
            self.compute_loss_ssim(img2_pyramid, img2_warped_pyramid, img2_valid_masks)
        loss_pack['loss_flow_smooth'] = self.compute_loss_flow_smooth(optical_flows, img1_pyramid) + \
            self.compute_loss_flow_smooth(optical_flows_rev, img2_pyramid)
        #loss_pack['loss_flow_consis'] = self.compute_loss_flow_consis(fwd_flow_diff_pyramid, img1_valid_masks) + \
        #    self.compute_loss_flow_consis(bwd_flow_diff_pyramid, img2_valid_masks)
        loss_pack['loss_flow_consis'] = torch.zeros([2]).to(img1.get_device()).requires_grad_()

        if output_flow:
            return loss_pack, optical_flows[0], optical_flows_rev[0], img1_valid_masks[0], img2_valid_masks[0], fwd_flow_diff_pyramid[0], bwd_flow_diff_pyramid[0]
        else:
            return loss_pack
| 18,005 | 46.384211 | 201 | py |
TrianFlow | TrianFlow-master/core/networks/model_flowposenet.py | import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from structures import *
from pytorch_ssim import SSIM
from model_flow import Model_flow
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'visualize'))
from visualizer import *
from profiler import Profiler
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pdb
import cv2
def mean_on_mask(diff, valid_mask):
    """Average of `diff` restricted to the region where `valid_mask` is set.

    `valid_mask` is broadcast (expanded) to `diff`'s shape before weighting.
    """
    weights = valid_mask.expand_as(diff)
    return (diff * weights).sum() / weights.sum()
def edge_aware_smoothness_loss(pred_disp, img, max_scales):
    """Edge-aware smoothness over a pyramid of predicted disparity maps.

    Disparity gradients are penalized, but the penalty is down-weighted by
    exp(-|image gradient|) so depth discontinuities are allowed at image
    edges. Only the first `max_scales` pyramid levels contribute; each level
    is weighted 1/4 of the previous one.

    Args:
        pred_disp: list of [B, 1, h, w] disparity tensors (finest first).
        img: [B, 3, H, W] image; resized to each level's resolution.
        max_scales: number of pyramid levels to include.
    Returns:
        Scalar tensor loss.
    """
    def gradient_x(img):
        gx = img[:, :, :-1, :] - img[:, :, 1:, :]
        return gx

    def gradient_y(img):
        gy = img[:, :, :, :-1] - img[:, :, :, 1:]
        return gy

    def get_edge_smoothness(img, pred):
        pred_gradients_x = gradient_x(pred)
        pred_gradients_y = gradient_y(pred)

        image_gradients_x = gradient_x(img)
        image_gradients_y = gradient_y(img)

        # exp(-|dI|): ~1 in flat regions, small at image edges
        weights_x = torch.exp(-torch.mean(torch.abs(image_gradients_x),
                              1, keepdim=True))
        weights_y = torch.exp(-torch.mean(torch.abs(image_gradients_y),
                              1, keepdim=True))

        smoothness_x = torch.abs(pred_gradients_x) * weights_x
        smoothness_y = torch.abs(pred_gradients_y) * weights_y
        return torch.mean(smoothness_x) + torch.mean(smoothness_y)

    loss = 0
    weight = 1.
    s = 0
    for scaled_disp in pred_disp:
        s += 1
        if s > max_scales:
            break
        b, _, h, w = scaled_disp.size()
        # resize the image to this pyramid level's resolution
        scaled_img = nn.functional.adaptive_avg_pool2d(img, (h, w))
        loss += get_edge_smoothness(scaled_img, scaled_disp) * weight
        weight /= 4.0
    return loss


def compute_smooth_loss(tgt_depth, tgt_img, ref_depth, ref_img, max_scales=1):
    """Sum of edge-aware smoothness losses for the target and reference views.

    Bug fix: the original assigned the reference-view loss over the
    target-view loss (`loss = ...` twice), silently discarding the first
    term. The two contributions are now accumulated, matching the
    SC-SfMLearner reference implementation this code follows.
    """
    loss = edge_aware_smoothness_loss(tgt_depth, tgt_img, max_scales)
    loss += edge_aware_smoothness_loss(ref_depth, ref_img, max_scales)
    return loss
class Model_flowposenet(nn.Module):
    """Joint depth + pose model where pose is regressed directly from
    normalized optical flow (FlowPoseNet), trained with SC-SfMLearner-style
    photometric and geometry-consistency losses.
    """
    def __init__(self, cfg):
        super(Model_flowposenet, self).__init__()
        # This variant only supports a single depth scale.
        assert cfg.depth_scale == 1
        self.pose_net = FlowPoseNet()
        self.model_flow = Model_flow(cfg)
        self.depth_net = Depth_Model(cfg.depth_scale)

    def compute_pairwise_loss(self, tgt_img, ref_img, tgt_depth, ref_depth, pose, intrinsic):
        """Photometric (L1+SSIM mix) and depth-consistency losses for one warp direction."""
        ref_img_warped, valid_mask, projected_depth, computed_depth = inverse_warp2(ref_img, tgt_depth, ref_depth,
                                                                                   pose, intrinsic, 'zeros')

        diff_img = (tgt_img - ref_img_warped).abs()

        # relative depth inconsistency between warped and computed depth, in [0, 1]
        diff_depth = ((computed_depth - projected_depth).abs() /
                      (computed_depth + projected_depth).abs()).clamp(0, 1)

        ssim_map = (0.5*(1 - SSIM(tgt_img, ref_img_warped))).clamp(0, 1)
        diff_img = (0.15 * diff_img + 0.85 * ssim_map)

        # Modified in 01.19.2020
        #weight_mask = (1 - diff_depth)
        #diff_img = diff_img * weight_mask

        # compute loss
        reconstruction_loss = diff_img.mean()
        geometry_consistency_loss = diff_depth.mean()
        #reconstruction_loss = mean_on_mask(diff_img, valid_mask)
        #geometry_consistency_loss = mean_on_mask(diff_depth, valid_mask)
        return reconstruction_loss, geometry_consistency_loss

    def disp2depth(self, disp, min_depth=0.01, max_depth=80.0):
        """Map a [0, 1] disparity to (scaled_disp, depth) within [min_depth, max_depth]."""
        min_disp = 1 / max_depth
        max_disp = 1 / min_depth
        scaled_disp = min_disp + (max_disp - min_disp) * disp
        depth = 1 / scaled_disp
        return scaled_disp, depth

    def infer_depth(self, img):
        """Return the finest-scale raw disparity prediction for `img`."""
        b, img_h, img_w = img.shape[0], img.shape[2], img.shape[3]
        disp_list = self.depth_net(img)
        disp, depth = self.disp2depth(disp_list[0])
        return disp_list[0]

    def inference(self, img1, img2, K, K_inv):
        """Flow-only inference; placeholders keep the tuple arity of sibling models."""
        flow = self.model_flow.inference_flow(img1, img2)
        return flow, None, None, None, None, None

    def inference_flow(self, img1, img2):
        """Optical flow from img1 to img2 via the wrapped flow model."""
        flow = self.model_flow.inference_flow(img1, img2)
        return flow

    def infer_pose(self, img1, img2, K, K_inv):
        """Regress relative pose from flow normalized by image width/height."""
        img_h, img_w = img1.shape[2], img1.shape[3]
        flow = self.model_flow.inference_flow(img1, img2)
        flow[:,0,:,:] /= img_w
        flow[:,1,:,:] /= img_h
        pose = self.pose_net(flow)
        return pose

    def forward(self, inputs):
        """Compute the training loss dict for one stacked image pair."""
        # initialization
        images, K_ms, K_inv_ms = inputs
        K, K_inv = K_ms[:,0,:,:], K_inv_ms[:,0,:,:]
        assert (images.shape[1] == 3)
        img_h, img_w = int(images.shape[2] / 2), images.shape[3]
        img1, img2 = images[:,:,:img_h,:], images[:,:,img_h:,:]
        b = img1.shape[0]

        visualizer = Visualizer_debug('./vis/', img1=255*img1.permute([0,2,3,1]).detach().cpu().numpy(), \
            img2=255*img2.permute([0,2,3,1]).detach().cpu().numpy())

        # Flow Network (frozen here: use_flow_loss=False returns zero flow losses)
        loss_pack, fwd_flow, bwd_flow, img1_valid_mask, img2_valid_mask, img1_flow_diff_mask, img2_flow_diff_mask = self.model_flow(inputs, output_flow=True, use_flow_loss=False)
        # normalize flow to image-relative units before feeding the pose net
        fwd_flow[:,0,:,:] /= img_w
        fwd_flow[:,1,:,:] /= img_h
        bwd_flow[:,0,:,:] /= img_w
        bwd_flow[:,1,:,:] /= img_h

        # Pose Network
        pose = self.pose_net(fwd_flow)
        pose_inv = self.pose_net(bwd_flow)

        disp1_list = self.depth_net(img1) # Nscales * [B, 1, H, W]
        disp2_list = self.depth_net(img2)
        disp1, depth1 = self.disp2depth(disp1_list[0])
        disp2, depth2 = self.disp2depth(disp2_list[0])
        #pdb.set_trace()
        # symmetric pairwise losses in both warp directions
        loss_1, loss_3 = self.compute_pairwise_loss(img1, img2, depth1, depth2, pose, K)
        loss_1_2, loss_3_2 = self.compute_pairwise_loss(img2, img1, depth2, depth1, pose_inv, K)
        loss_ph = loss_1 + loss_1_2
        loss_pj = loss_3 + loss_3_2
        loss_2 = compute_smooth_loss([depth1], img1, [depth2], img2)

        loss_pack['pt_depth_loss'] = torch.zeros([2]).to(loss_2.get_device()).requires_grad_()
        loss_pack['pj_depth_loss'], loss_pack['flow_error'] = loss_pj, loss_ph
        loss_pack['depth_smooth_loss'] = loss_2
        #loss_pack['depth_smooth_loss'] = torch.zeros([2]).to(loss_2.get_device()).requires_grad_()
        loss_pack['geo_loss'] = torch.zeros([2]).to(loss_2.get_device()).requires_grad_()
        return loss_pack
| 6,517 | 35.824859 | 178 | py |
TrianFlow | TrianFlow-master/core/networks/model_triangulate_pose.py | import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import torch
import torch.nn as nn
import numpy as np
from structures import *
from model_flow import Model_flow
import pdb
import cv2
class Model_triangulate_pose(nn.Module):
    """Pose estimation via two-view geometry: dense flow correspondences are
    filtered with a differentiable RANSAC (`reduced_ransac`) to fit a
    fundamental matrix, and an epipolar-distance loss regularizes the flow.
    """
    def __init__(self, cfg):
        super(Model_triangulate_pose, self).__init__()
        self.model_flow = Model_flow(cfg)
        self.mode = cfg.mode
        # Epipolar-distance thresholds (pixels) for inlier / rigid-region masks.
        if cfg.dataset == 'nyuv2':
            self.inlier_thres = 0.1
            self.rigid_thres = 1.0
        else:
            self.inlier_thres = 0.1
            self.rigid_thres = 0.5
        self.filter = reduced_ransac(check_num=cfg.ransac_points, thres=self.inlier_thres, dataset=cfg.dataset)

    def meshgrid(self, h, w):
        """Pixel-coordinate grid [2, h, w] (row 0 = x, row 1 = y), as a torch tensor."""
        xx, yy = np.meshgrid(np.arange(0,w), np.arange(0,h))
        meshgrid = np.transpose(np.stack([xx,yy], axis=-1), [2,0,1]) # [2,h,w]
        meshgrid = torch.from_numpy(meshgrid)
        return meshgrid

    def compute_epipolar_loss(self, fmat, match, mask):
        """Mean point-to-epipolar-line distance under `mask`, plus the raw distance map."""
        # fmat: [b, 3, 3] match: [b, 4, h*w] mask: [b,1,h*w]
        num_batch = match.shape[0]
        match_num = match.shape[-1]

        points1 = match[:,:2,:]
        points2 = match[:,2:,:]
        ones = torch.ones(num_batch, 1, match_num).to(points1.get_device())
        points1 = torch.cat([points1, ones], 1) # [b,3,n]
        points2 = torch.cat([points2, ones], 1).transpose(1,2) # [b,n,3]

        # compute fundamental matrix loss
        fmat = fmat.unsqueeze(1)
        fmat_tiles = fmat.view([-1,3,3])
        epi_lines = fmat_tiles.bmm(points1) #[b,3,n]  [b*n, 3, 1]
        dist_p2l = torch.abs((epi_lines.permute([0, 2, 1]) * points2).sum(-1, keepdim=True)) # [b,n,1]
        # normalize by sqrt(a^2 + b^2) to get true point-to-line distance
        a = epi_lines[:,0,:].unsqueeze(1).transpose(1,2) # [b,n,1]
        b = epi_lines[:,1,:].unsqueeze(1).transpose(1,2) # [b,n,1]
        dist_div = torch.sqrt(a*a + b*b) + 1e-6
        dist_map = dist_p2l / dist_div # [B, n, 1]
        loss = (dist_map * mask.transpose(1,2)).mean([1,2]) / mask.mean([1,2])
        return loss, dist_map

    def get_rigid_mask(self, dist_map):
        """Threshold the epipolar-distance map into rigid / inlier masks and a soft score."""
        rigid_mask = (dist_map < self.rigid_thres).float()
        inlier_mask = (dist_map < self.inlier_thres).float()
        rigid_score = rigid_mask * 1.0 / (1.0 + dist_map)
        return rigid_mask, inlier_mask, rigid_score

    def inference(self, img1, img2, K, K_inv):
        """Fundamental matrix, masks, flow, and dense matches for an image pair."""
        batch_size, img_h, img_w = img1.shape[0], img1.shape[2], img1.shape[3]
        # get the optical flows and reverse optical flows for each pair of adjacent images
        fwd_flow, bwd_flow, img1_valid_mask, img2_valid_mask, img1_flow_diff_mask, img2_flow_diff_mask = self.model_flow.inference_corres(img1, img2)
        grid = self.meshgrid(img_h, img_w).float().to(img1.get_device()).unsqueeze(0).repeat(batch_size,1,1,1) #[b,2,h,w]
        # flow-displaced coordinates, clamped into the image
        corres = torch.cat([(grid[:,0,:,:] + fwd_flow[:,0,:,:]).clamp(0,img_w-1.0).unsqueeze(1), \
            (grid[:,1,:,:] + fwd_flow[:,1,:,:]).clamp(0,img_h-1.0).unsqueeze(1)], 1)
        match = torch.cat([grid, corres], 1) # [b,4,h,w]

        # score each match by validity / fwd-bwd inconsistency
        img1_score_mask = img1_valid_mask * 1.0 / (0.1 + img1_flow_diff_mask.mean(1).unsqueeze(1))
        F_final = self.filter(match, img1_score_mask)

        geo_loss, rigid_mask = self.compute_epipolar_loss(F_final, match.view([batch_size,4,-1]), img1_valid_mask.view([batch_size,1,-1]))
        img1_rigid_mask = (rigid_mask.view([batch_size,img_h,img_w,1]) < self.inlier_thres).float()

        return F_final, img1_valid_mask, img1_rigid_mask.permute(0,3,1,2), fwd_flow, match

    def forward(self, inputs, output_F=False, visualizer=None):
        """Flow losses plus the epipolar geometry loss for one stacked image pair."""
        images, K_ms, K_inv_ms = inputs
        K, K_inv = K_ms[:,0,:,:], K_inv_ms[:,0,:,:]
        assert (images.shape[1] == 3)
        img_h, img_w = int(images.shape[2] / 2), images.shape[3]
        img1, img2 = images[:,:,:img_h,:], images[:,:,img_h:,:]
        batch_size = img1.shape[0]

        # in 'depth' mode the flow nets are frozen, so skip flow losses
        if self.mode == 'depth':
            loss_pack, fwd_flow, bwd_flow, img1_valid_mask, img2_valid_mask, img1_flow_diff_mask, img2_flow_diff_mask = self.model_flow(inputs, output_flow=True, use_flow_loss=False)
        else:
            loss_pack, fwd_flow, bwd_flow, img1_valid_mask, img2_valid_mask, img1_flow_diff_mask, img2_flow_diff_mask = self.model_flow(inputs, output_flow=True)

        grid = self.meshgrid(img_h, img_w).float().to(img1.get_device()).unsqueeze(0).repeat(batch_size,1,1,1) #[b,2,h,w]
        fwd_corres = torch.cat([(grid[:,0,:,:] + fwd_flow[:,0,:,:]).unsqueeze(1), (grid[:,1,:,:] + fwd_flow[:,1,:,:]).unsqueeze(1)], 1)
        fwd_match = torch.cat([grid, fwd_corres], 1) # [b,4,h,w]

        bwd_corres = torch.cat([(grid[:,0,:,:] + bwd_flow[:,0,:,:]).unsqueeze(1), (grid[:,1,:,:] + bwd_flow[:,1,:,:]).unsqueeze(1)], 1)
        bwd_match = torch.cat([grid, bwd_corres], 1) # [b,4,h,w]

        # Use fwd-bwd consistency map for filter
        img1_score_mask = img1_valid_mask * 1.0 / (0.1+img1_flow_diff_mask.mean(1).unsqueeze(1))
        img2_score_mask = img2_valid_mask * 1.0 / (0.1+img2_flow_diff_mask.mean(1).unsqueeze(1))
        # img1_score_mask = img1_valid_mask

        F_final_1 = self.filter(fwd_match, img1_score_mask, visualizer=visualizer)
        _, dist_map_1 = self.compute_epipolar_loss(F_final_1, fwd_match.view([batch_size,4,-1]), img1_valid_mask.view([batch_size,1,-1]))
        dist_map_1 = dist_map_1.view([batch_size, img_h, img_w, 1])

        # Compute geo loss for regularize correspondence.
        rigid_mask_1, inlier_mask_1, rigid_score_1 = self.get_rigid_mask(dist_map_1)

        # We only use rigid mask to filter out the moving objects for computing geo loss.
        geo_loss = (dist_map_1 * (rigid_mask_1 - inlier_mask_1)).mean((1,2,3)) / \
            (rigid_mask_1 - inlier_mask_1).mean((1,2,3))
        loss_pack['geo_loss'] = geo_loss

        if output_F:
            return loss_pack, F_final_1, img1_score_mask, rigid_score_1.permute(0,3,1,2), fwd_flow, fwd_match
        else:
            return loss_pack
| 5,969 | 47.536585 | 182 | py |
TrianFlow | TrianFlow-master/core/networks/model_depth_pose.py | import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from structures import *
from model_triangulate_pose import Model_triangulate_pose
from pytorch_ssim import SSIM
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'visualize'))
from visualizer import *
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pdb
class Model_depth_pose(nn.Module):
    """Depth + pose model: recovers relative pose via the triangulation-based
    pose module and trains a depth network against triangulated points.
    """
    def __init__(self, cfg):
        super(Model_depth_pose, self).__init__()
        # number of flow correspondences sampled for triangulation
        self.depth_match_num = cfg.depth_match_num
        # fraction of top-scoring matches kept before random sampling
        self.depth_sample_ratio = cfg.depth_sample_ratio
        self.depth_scale = cfg.depth_scale
        self.w_flow_error = cfg.w_flow_error
        self.dataset = cfg.dataset

        self.depth_net = Depth_Model(cfg.depth_scale)
        self.model_pose = Model_triangulate_pose(cfg)
def meshgrid(self, h, w):
xx, yy = np.meshgrid(np.arange(0,w), np.arange(0,h))
meshgrid = np.transpose(np.stack([xx,yy], axis=-1), [2,0,1]) # [2,h,w]
meshgrid = torch.from_numpy(meshgrid)
return meshgrid
    def robust_rand_sample(self, match, mask, num):
        """Randomly sample `num` correspondences, preferring nonzero-score ones.

        match: [b, 4, n]  mask: [b, 1, n]
        Returns (selected matches [b, 4, num'], num') with num' <= num when
        some scores are zero. Sampling is with replacement.
        """
        b, n = match.shape[0], match.shape[2]
        # smallest per-batch count of nonzero-score matches
        nonzeros_num = torch.min(torch.sum(mask > 0, dim=-1)) # []
        if nonzeros_num.detach().cpu().numpy() == n:
            # every match has a positive score: plain uniform sampling
            rand_int = torch.randint(0, n, [num])
            select_match = match[:,:,rand_int]
        else:
            # If there is zero score in match, sample the non-zero matches.
            num = np.minimum(nonzeros_num.detach().cpu().numpy(), num)
            select_idxs = []
            for i in range(b):
                nonzero_idx = torch.nonzero(mask[i,0,:]) # [nonzero_num,1]
                rand_int = torch.randint(0, nonzero_idx.shape[0], [int(num)])
                select_idx = nonzero_idx[rand_int, :] # [num, 1]
                select_idxs.append(select_idx)
            select_idxs = torch.stack(select_idxs, 0) # [b,num,1]
            select_match = torch.gather(match.transpose(1,2), index=select_idxs.repeat(1,1,4), dim=1).transpose(1,2) # [b, 4, num]
        return select_match, num
def top_ratio_sample(self, match, mask, ratio):
# match: [b, 4, -1] mask: [b, 1, -1]
b, total_num = match.shape[0], match.shape[-1]
scores, indices = torch.topk(mask, int(ratio*total_num), dim=-1) # [B, 1, ratio*tnum]
select_match = torch.gather(match.transpose(1,2), index=indices.squeeze(1).unsqueeze(-1).repeat(1,1,4), dim=1).transpose(1,2) # [b, 4, ratio*tnum]
return select_match, scores
def rand_sample(self, match, num):
b, c, n = match.shape[0], match.shape[1], match.shape[2]
rand_int = torch.randint(0, match.shape[-1], size=[num])
select_pts = match[:,:,rand_int]
return select_pts
    def filt_negative_depth(self, point2d_1_depth, point2d_2_depth, point2d_1_coord, point2d_2_coord):
        """Replace points with non-positive projection depth by resampled valid ones.

        point2d_*_depth: [b, n, 1]  point2d_*_coord: [b, n, 2]
        Keeps the output size fixed at n by refilling invalid slots with
        randomly repeated valid indices. Returns the filtered tensors plus a
        flag that is 1 when fewer than 10% of points were valid for some
        batch element (in which case that element is left unfiltered).
        """
        # Filter out the negative projection depth.
        # point2d_1_depth: [b, n, 1]
        b, n = point2d_1_depth.shape[0], point2d_1_depth.shape[1]
        mask = (point2d_1_depth > 0.01).float() * (point2d_2_depth > 0.01).float()

        select_idxs = []
        flag = 0
        for i in range(b):
            if torch.sum(mask[i,:,0]) == n:
                # all points valid: keep them all
                idx = torch.arange(n).to(mask.get_device())
            else:
                nonzero_idx = torch.nonzero(mask[i,:,0]).squeeze(1) # [k]
                if nonzero_idx.shape[0] < 0.1*n:
                    # too few valid points: give up filtering for this sample
                    idx = torch.arange(n).to(mask.get_device())
                    flag = 1
                else:
                    # pad valid indices with random repeats back up to n
                    res = torch.randint(0, nonzero_idx.shape[0], size=[n-nonzero_idx.shape[0]]).to(mask.get_device()) # [n-nz]
                    idx = torch.cat([nonzero_idx, nonzero_idx[res]], 0)
            select_idxs.append(idx)
        select_idxs = torch.stack(select_idxs, dim=0) # [b,n]

        point2d_1_depth = torch.gather(point2d_1_depth, index=select_idxs.unsqueeze(-1), dim=1) # [b,n,1]
        point2d_2_depth = torch.gather(point2d_2_depth, index=select_idxs.unsqueeze(-1), dim=1) # [b,n,1]
        point2d_1_coord = torch.gather(point2d_1_coord, index=select_idxs.unsqueeze(-1).repeat(1,1,2), dim=1) # [b,n,2]
        point2d_2_coord = torch.gather(point2d_2_coord, index=select_idxs.unsqueeze(-1).repeat(1,1,2), dim=1) # [b,n,2]
        return point2d_1_depth, point2d_2_depth, point2d_1_coord, point2d_2_coord, flag
    def filt_invalid_coord(self, point2d_1_depth, point2d_2_depth, point2d_1_coord, point2d_2_coord, max_h, max_w):
        """Replace points whose coordinates fall outside [0, max_w) x [0, max_h).

        Same refilling strategy as `filt_negative_depth`: output size stays n,
        invalid slots are filled with randomly repeated valid indices, and a
        flag of 1 signals a batch element with fewer than 10% valid points
        (left unfiltered).
        """
        # point2d_1_depth: [b, n, 1]
        b, n = point2d_1_coord.shape[0], point2d_1_coord.shape[1]
        max_coord = torch.Tensor([max_w, max_h]).to(point2d_1_coord.get_device())
        mask = (point2d_1_coord > 0).all(dim=-1, keepdim=True).float() * (point2d_2_coord > 0).all(dim=-1, keepdim=True).float() * \
            (point2d_1_coord < max_coord).all(dim=-1, keepdim=True).float() * (point2d_2_coord < max_coord).all(dim=-1, keepdim=True).float()

        flag = 0
        if torch.sum(1.0-mask) == 0:
            # everything in bounds: nothing to do
            return point2d_1_depth, point2d_2_depth, point2d_1_coord, point2d_2_coord, flag

        select_idxs = []
        for i in range(b):
            if torch.sum(mask[i,:,0]) == n:
                idx = torch.arange(n).to(mask.get_device())
            else:
                nonzero_idx = torch.nonzero(mask[i,:,0]).squeeze(1) # [k]
                if nonzero_idx.shape[0] < 0.1*n:
                    # too few valid points: give up filtering for this sample
                    idx = torch.arange(n).to(mask.get_device())
                    flag = 1
                else:
                    # pad valid indices with random repeats back up to n
                    res = torch.randint(0, nonzero_idx.shape[0], size=[n-nonzero_idx.shape[0]]).to(mask.get_device())
                    idx = torch.cat([nonzero_idx, nonzero_idx[res]], 0)
            select_idxs.append(idx)
        select_idxs = torch.stack(select_idxs, dim=0) # [b,n]

        point2d_1_depth = torch.gather(point2d_1_depth, index=select_idxs.unsqueeze(-1), dim=1) # [b,n,1]
        point2d_2_depth = torch.gather(point2d_2_depth, index=select_idxs.unsqueeze(-1), dim=1) # [b,n,1]
        point2d_1_coord = torch.gather(point2d_1_coord, index=select_idxs.unsqueeze(-1).repeat(1,1,2), dim=1) # [b,n,2]
        point2d_2_coord = torch.gather(point2d_2_coord, index=select_idxs.unsqueeze(-1).repeat(1,1,2), dim=1) # [b,n,2]
        return point2d_1_depth, point2d_2_depth, point2d_1_coord, point2d_2_coord, flag
    def ray_angle_filter(self, match, P1, P2, return_angle=False):
        """Drop correspondences whose two viewing rays are nearly parallel.

        match: [b, 4, n]  P1/P2: [b, 3, 4] projection matrices (P1 has
        identity rotation and zero translation, so K = P1[:, :, :3]).
        Near-parallel rays triangulate unreliably, so matches with a tiny
        ray angle (cos of the tested angle <= 0.001) are removed. Returns
        (filtered matches [b, 4, num], flag) where flag = 1 means no match
        survived and the first 100 matches are returned as a fallback;
        optionally also returns the keep mask.
        """
        # match: [b, 4, n] P: [B, 3, 4]
        b, n = match.shape[0], match.shape[2]
        K = P1[:,:,:3] # P1 with identity rotation and zero translation
        K_inv = torch.inverse(K)
        RT1 = K_inv.bmm(P1) # [b, 3, 4]
        RT2 = K_inv.bmm(P2)
        ones = torch.ones([b,1,n]).to(match.get_device())
        pts1 = torch.cat([match[:,:2,:], ones], 1)
        pts2 = torch.cat([match[:,2:,:], ones], 1)

        # unit ray directions and camera centers for both views
        ray1_dir = (RT1[:,:,:3].transpose(1,2)).bmm(K_inv).bmm(pts1)# [b,3,n]
        ray1_dir = ray1_dir / (torch.norm(ray1_dir, dim=1, keepdim=True, p=2) + 1e-12)
        ray1_origin = (-1) * RT1[:,:,:3].transpose(1,2).bmm(RT1[:,:,3].unsqueeze(-1)) # [b, 3, 1]
        ray2_dir = (RT2[:,:,:3].transpose(1,2)).bmm(K_inv).bmm(pts2) # [b,3,n]
        ray2_dir = ray2_dir / (torch.norm(ray2_dir, dim=1, keepdim=True, p=2) + 1e-12)
        ray2_origin = (-1) * RT2[:,:,:3].transpose(1,2).bmm(RT2[:,:,3].unsqueeze(-1)) # [b, 3, 1]

        # We compute the angle between the vertical line from ray1 origin to ray2 and ray1.
        p1p2 = (ray1_origin - ray2_origin).repeat(1,1,n)
        verline = ray2_origin.repeat(1,1,n) + torch.sum(p1p2 * ray2_dir, dim=1, keepdim=True) * ray2_dir - ray1_origin.repeat(1,1,n) # [b,3,n]
        cosvalue = torch.sum(ray1_dir * verline, dim=1, keepdim=True) / \
            ((torch.norm(ray1_dir, dim=1, keepdim=True, p=2) + 1e-12) * (torch.norm(verline, dim=1, keepdim=True, p=2) + 1e-12))# [b,1,n]

        mask = (cosvalue > 0.001).float() # we drop out angles less than 1' [b,1,n]
        flag = 0
        # keep the same count for every batch element: the per-batch minimum
        num = torch.min(torch.sum(mask, -1)).int()
        if num.cpu().detach().numpy() == 0:
            # nothing survived: fall back to the first 100 matches
            flag = 1
            filt_match = match[:,:,:100]
            if return_angle:
                return filt_match, flag, torch.zeros_like(mask).to(filt_match.get_device())
            else:
                return filt_match, flag
        nonzero_idx = []
        for i in range(b):
            idx = torch.nonzero(mask[i,0,:])[:num] # [num,1]
            nonzero_idx.append(idx)
        nonzero_idx = torch.stack(nonzero_idx, 0) # [b,num,1]
        filt_match = torch.gather(match.transpose(1,2), index=nonzero_idx.repeat(1,1,4), dim=1).transpose(1,2) # [b,4,num]
        if return_angle:
            return filt_match, flag, mask
        else:
            return filt_match, flag
    def midpoint_triangulate(self, match, K_inv, P1, P2):
        """Midpoint triangulation: for each correspondence, find the closest
        points on the two back-projected rays and average them.

        Args:
            match: correspondences [b, 4, n] in image coordinates.
            K_inv: inverse intrinsics, [b, 3, 3].
            P1, P2: camera projection matrices, [b, 3, 4].

        Returns:
            Homogeneous 3D points, [b, n, 4] (last coordinate fixed to 1).
        """
        # match: [b, 4, num] P1: [b, 3, 4]
        # Match is in the image coordinates. P1, P2 is camera parameters. [B, 3, 4] match: [B, M, 4]
        b, n = match.shape[0], match.shape[2]
        RT1 = K_inv.bmm(P1) # [b, 3, 4]
        RT2 = K_inv.bmm(P2)
        ones = torch.ones([b,1,n]).to(match.get_device())
        pts1 = torch.cat([match[:,:2,:], ones], 1)
        pts2 = torch.cat([match[:,2:,:], ones], 1)
        # Unit ray directions and camera centers: d = R^T K^{-1} x, o = -R^T t.
        ray1_dir = (RT1[:,:,:3].transpose(1,2)).bmm(K_inv).bmm(pts1)# [b,3,n]
        ray1_dir = ray1_dir / (torch.norm(ray1_dir, dim=1, keepdim=True, p=2) + 1e-12)
        ray1_origin = (-1) * RT1[:,:,:3].transpose(1,2).bmm(RT1[:,:,3].unsqueeze(-1)) # [b, 3, 1]
        ray2_dir = (RT2[:,:,:3].transpose(1,2)).bmm(K_inv).bmm(pts2) # [b,3,n]
        ray2_dir = ray2_dir / (torch.norm(ray2_dir, dim=1, keepdim=True, p=2) + 1e-12)
        ray2_origin = (-1) * RT2[:,:,:3].transpose(1,2).bmm(RT2[:,:,3].unsqueeze(-1)) # [b, 3, 1]
        dir_cross = torch.cross(ray1_dir, ray2_dir, dim=1) # [b,3,n]
        denom = 1.0 / (torch.sum(dir_cross * dir_cross, dim=1, keepdim=True)+1e-12) # [b,1,n]
        origin_vec = (ray2_origin - ray1_origin).repeat(1,1,n) # [b,3,n]
        # a1, a2 are the scalar ray parameters of the closest-point pair.
        a1 = origin_vec.cross(ray2_dir, dim=1) # [b,3,n]
        a1 = torch.sum(a1 * dir_cross, dim=1, keepdim=True) * denom # [b,1,n]
        a2 = origin_vec.cross(ray1_dir, dim=1) # [b,3,n]
        a2 = torch.sum(a2 * dir_cross, dim=1, keepdim=True) * denom # [b,1,n]
        p1 = ray1_origin + a1 * ray1_dir
        p2 = ray2_origin + a2 * ray2_dir
        point = (p1 + p2) / 2.0 # [b,3,n]
        # Convert to homo coord to get consistent with other functions.
        point_homo = torch.cat([point, ones], dim=1).transpose(1,2) # [b,n,4]
        return point_homo
    def rt_from_fundamental_mat_nyu(self, fmat, K, depth_match):
        """Recover relative pose from a fundamental matrix via OpenCV's
        cheirality-checked ``recoverPose`` (NYUv2 path).

        Args:
            fmat: fundamental matrices, [b, 3, 3].
            K: camera intrinsics, [b, 3, 3].
            depth_match: correspondences [b, 4, n] used to disambiguate pose.

        Returns:
            (P1, P2, flags): P1 = K[I|0]; P2 = K[R|t] per batch element; and
            a Python list of 0/1 flags, 1 when more than n/7 matches pass
            the cheirality check.
        """
        # F: [b, 3, 3] K: [b, 3, 3] depth_match: [b ,4, n]
        #verify_match = self.rand_sample(depth_match, 5000) # [b,4,100]
        verify_match = depth_match.transpose(1,2).cpu().detach().numpy()
        K_inv = torch.inverse(K)
        b = fmat.shape[0]
        # Essential matrix E = K^T F K.
        fmat_ = K.transpose(1,2).bmm(fmat)
        essential_mat = fmat_.bmm(K)
        iden = torch.cat([torch.eye(3), torch.zeros([3,1])], -1).unsqueeze(0).repeat(b,1,1).to(K.get_device()) # [b,3,4]
        P1 = K.bmm(iden)
        flags = []
        number_inliers = []
        P2 = []
        for i in range(b):
            # recoverPose decomposes E and selects the (R, t) with the most
            # points in front of both cameras; cnum is that inlier count.
            cnum, R, t, _ = cv2.recoverPose(essential_mat[i].cpu().detach().numpy().astype('float64'), verify_match[i,:,:2].astype('float64'), \
                verify_match[i,:,2:].astype('float64'), cameraMatrix=K[i,:,:].cpu().detach().numpy().astype('float64'))
            p2 = torch.from_numpy(np.concatenate([R, t], axis=-1)).float().to(K.get_device())
            P2.append(p2)
            if cnum > depth_match.shape[-1] / 7.0:
                flags.append(1)
            else:
                flags.append(0)
            number_inliers.append(cnum)
        P2 = K.bmm(torch.stack(P2, axis=0))
        #pdb.set_trace()
        return P1, P2, flags
def verifyRT(self, match, K_inv, P1, P2):
# match: [b, 4, n] P1: [b,3,4] P2: [b,3,4]
b, n = match.shape[0], match.shape[2]
point3d = self.midpoint_triangulate(match, K_inv, P1, P2).reshape([-1,4]).unsqueeze(-1) # [b*n, 4, 1]
P1_ = P1.repeat(n,1,1)
P2_ = P2.repeat(n,1,1)
depth1 = P1_.bmm(point3d)[:,-1,:] / point3d[:,-1,:] # [b*n, 1]
depth2 = P2_.bmm(point3d)[:,-1,:] / point3d[:,-1,:]
inlier_num = torch.sum((depth1.view([b,n]) > 0).float() * (depth2.view([b,n]) > 0).float(), 1) # [b]
return inlier_num
    def rt_from_fundamental_mat(self, fmat, K, depth_match):
        """Recover relative pose from a fundamental matrix by SVD of the
        essential matrix and cheirality-based candidate selection.

        The classic four-fold ambiguity {R1, R2} x {t, -t} is resolved by
        triangulating a verification sample and keeping the candidate with
        the most points in front of both cameras (see ``verifyRT``).

        Args:
            fmat: fundamental matrices, [b, 3, 3].
            K: camera intrinsics, [b, 3, 3].
            depth_match: correspondences [b, 4, n]; 200 are sampled for
                verification.

        Returns:
            (P1, P2): P1 = K[I|0] and the selected P2 = K[R|t], each [b, 3, 4].
        """
        # F: [b, 3, 3] K: [b, 3, 3] depth_match: [b ,4, n]
        verify_match = self.rand_sample(depth_match, 200) # [b,4,100]
        K_inv = torch.inverse(K)
        b = fmat.shape[0]
        # Essential matrix E = K^T F K.
        fmat_ = K.transpose(1,2).bmm(fmat)
        essential_mat = fmat_.bmm(K)
        # SVD on CPU, then move factors back to the source device.
        essential_mat_cpu = essential_mat.cpu()
        U, S, V = torch.svd(essential_mat_cpu)
        U, S, V = U.to(K.get_device()), S.to(K.get_device()), V.to(K.get_device())
        W = torch.from_numpy(np.array([[[0., -1., 0.],[1., 0., 0.],[0., 0., 1.]]])).float().repeat(b,1,1).to(K.get_device())
        # R = UWV^t or UW^tV^t t = U[:,2] the third column of U
        R1 = U.bmm(W).bmm(V.transpose(1,2)) # Do we need matrix determinant sign?
        # Force det(R) = +1 so the result is a proper rotation.
        R1 = torch.sign(torch.det(R1)).unsqueeze(-1).unsqueeze(-1) * R1
        R2 = U.bmm(W.transpose(1,2)).bmm(V.transpose(1,2))
        R2 = torch.sign(torch.det(R2)).unsqueeze(-1).unsqueeze(-1) * R2
        t1 = U[:,:,2].unsqueeze(-1) # The third column
        t2 = -U[:,:,2].unsqueeze(-1) # Inverse direction
        iden = torch.cat([torch.eye(3), torch.zeros([3,1])], -1).unsqueeze(0).repeat(b,1,1).to(K.get_device()) # [b,3,4]
        P1 = K.bmm(iden)
        # The four pose candidates.
        P2_1 = K.bmm(torch.cat([R1, t1], -1))
        P2_2 = K.bmm(torch.cat([R2, t1], -1))
        P2_3 = K.bmm(torch.cat([R1, t2], -1))
        P2_4 = K.bmm(torch.cat([R2, t2], -1))
        P2_c = [P2_1, P2_2, P2_3, P2_4]
        flags = []
        for i in range(4):
            with torch.no_grad():
                inlier_num = self.verifyRT(verify_match, K_inv, P1, P2_c[i])
            flags.append(inlier_num)
        P2_c = torch.stack(P2_c, dim=1) # [B, 4, 3, 4]
        flags = torch.stack(flags, dim=1) # [B, 4]
        # Keep, per batch element, the candidate with the most inliers.
        idx = torch.argmax(flags, dim=-1, keepdim=True) # [b,1]
        P2 = torch.gather(P2_c, index=idx.unsqueeze(-1).unsqueeze(-1).repeat(1,1,3,4), dim=1).squeeze(1) # [b,3,4]
        #pdb.set_trace()
        return P1, P2
def reproject(self, P, point3d):
# P: [b,3,4] point3d: [b,n,4]
point2d = P.bmm(point3d.transpose(1,2)) # [b,4,n]
point2d_coord = (point2d[:,:2,:] / (point2d[:,2,:].unsqueeze(1) + 1e-12)).transpose(1,2) # [b,n,2]
point2d_depth = point2d[:,2,:].unsqueeze(1).transpose(1,2) # [b,n,1]
return point2d_coord, point2d_depth
def scale_adapt(self, depth1, depth2, eps=1e-12):
with torch.no_grad():
A = torch.sum((depth1 ** 2) / (depth2 ** 2 + eps), dim=1) # [b,1]
C = torch.sum(depth1 / (depth2 + eps), dim=1) # [b,1]
a = C / (A + eps)
return a
    def affine_adapt(self, depth1, depth2, use_translation=True, eps=1e-12):
        """Fit ``depth2`` to ``depth1`` with an affine map a * depth2 + b.

        Falls back to the pure-scale fit from ``scale_adapt`` when
        ``use_translation`` is False, or per batch element when the normal
        equations are ill-conditioned. Runs under ``no_grad``.

        Args:
            depth1, depth2: depth samples; the fit reduces over dim 1.
            use_translation: also fit the offset ``b`` when True.
            eps: numerical guard against division by zero.

        Returns:
            (a, b): per-batch scale and offset tensors.
        """
        a_scale = self.scale_adapt(depth1, depth2, eps=eps)
        if not use_translation: # only fit the scale parameter
            return a_scale, torch.zeros_like(a_scale)
        else:
            with torch.no_grad():
                # Moments of the (1/depth2^2)-weighted least-squares
                # normal equations.
                A = torch.sum((depth1 ** 2) / (depth2 ** 2 + eps), dim=1) # [b,1]
                B = torch.sum(depth1 / (depth2 ** 2 + eps), dim=1) # [b,1]
                C = torch.sum(depth1 / (depth2 + eps), dim=1) # [b,1]
                D = torch.sum(1.0 / (depth2 ** 2 + eps), dim=1) # [b,1]
                E = torch.sum(1.0 / (depth2 + eps), dim=1) # [b,1]
                a = (B*E - D*C) / (B*B - A*D + 1e-12)
                b = (B*C - A*E) / (B*B - A*D + 1e-12)
                # check ill condition
                cond = (B*B - A*D)
                valid = (torch.abs(cond) > 1e-4).float()
                # Where ill-conditioned, fall back to the scale-only fit.
                a = a * valid + a_scale * (1 - valid)
                b = b * valid
            return a, b
    def register_depth(self, depth_pred, coord_tri, depth_tri):
        """Align a predicted dense depth map to sparse triangulated depths.

        Samples the prediction at the triangulated pixel locations, then
        applies a detached median-ratio scale followed by a least-squares
        scale fit (translation disabled) via ``affine_adapt``.

        Args:
            depth_pred: dense predicted depth, [b, 1, h, w].
            coord_tri: pixel coordinates of triangulated points, [b, n, 2].
            depth_tri: triangulated depths at those points, [b, n, 1].

        Returns:
            (aligned dense depth [b, 1, h, w],
             aligned sampled depth at the sparse points [b, n, 1]).
        """
        # depth_pred: [b, 1, h, w] coord_tri: [b,n,2] depth_tri: [b,n,1]
        batch, _, h, w = depth_pred.shape[0], depth_pred.shape[1], depth_pred.shape[2], depth_pred.shape[3]
        n = depth_tri.shape[1]
        # Normalize pixel coordinates to [-1, 1] as required by grid_sample.
        coord_tri_nor = torch.stack([2.0*coord_tri[:,:,0] / (w-1.0) - 1.0, 2.0*coord_tri[:,:,1] / (h-1.0) - 1.0], -1)
        depth_inter = F.grid_sample(depth_pred, coord_tri_nor.view([batch,n,1,2]), padding_mode='reflection').squeeze(-1).transpose(1,2) # [b,n,1]
        # Normalize
        # Coarse alignment by ratio of medians; detached so the fit itself
        # is not back-propagated through.
        scale = torch.median(depth_inter, 1)[0] / (torch.median(depth_tri, 1)[0] + 1e-12)
        scale = scale.detach() # [b,1]
        scale_depth_inter = depth_inter / (scale.unsqueeze(-1) + 1e-12)
        scale_depth_pred = depth_pred / (scale.unsqueeze(-1).unsqueeze(-1) + 1e-12)
        # affine adapt
        a, b = self.affine_adapt(scale_depth_inter, depth_tri, use_translation=False)
        affine_depth_inter = a.unsqueeze(1) * scale_depth_inter + b.unsqueeze(1) # [b,n,1]
        affine_depth_pred = a.unsqueeze(-1).unsqueeze(-1) * scale_depth_pred + b.unsqueeze(-1).unsqueeze(-1) # [b,1,h,w]
        return affine_depth_pred, affine_depth_inter
def get_trian_loss(self, tri_depth, pred_tri_depth):
# depth: [b,n,1]
loss = torch.pow(1.0 - pred_tri_depth / (tri_depth + 1e-12), 2).mean((1,2))
return loss
    def get_reproj_fdp_loss(self, pred1, pred2, P2, K, K_inv, valid_mask, rigid_mask, flow, visualizer=None):
        """Depth reprojection-consistency loss plus a flow-consistency loss.

        Every pixel of view 1 is lifted to 3D with its predicted depth,
        reprojected into view 2, and the reprojected depth is compared with
        view 2's prediction sampled at the reprojected location. The rigid
        flow implied by the reprojection also supervises the optical flow.

        Args:
            pred1, pred2: predicted depth maps, [b, 1, h, w].
            P2: projection matrix of view 2, [b, 3, 4].
            K, K_inv: intrinsics and inverse, [b, 3, 3].
            valid_mask, rigid_mask: weighting masks, [b, 1, h, w].
            flow: forward optical flow, [b, 2, h, w].
            visualizer: unused here; kept for interface compatibility.

        Returns:
            (pj_loss, flow_loss): per-batch depth-reprojection and flow
            consistency losses, each shape [b].
        """
        # pred: [b,1,h,w] Rt: [b,3,4] K: [b,3,3] mask: [b,1,h,w] flow: [b,2,h,w]
        b, h, w = pred1.shape[0], pred1.shape[2], pred1.shape[3]
        xy = self.meshgrid(h,w).unsqueeze(0).repeat(b,1,1,1).float().to(flow.get_device()) # [b,2,h,w]
        ones = torch.ones([b,1,h,w]).float().to(flow.get_device())
        # Lift every pixel to a 3D point using predicted depth: X = D * K^-1 x.
        pts1_3d = K_inv.bmm(torch.cat([xy, ones], 1).view([b,3,-1])) * pred1.view([b,1,-1]) # [b,3,h*w]
        pts2_coord, pts2_depth = self.reproject(P2, torch.cat([pts1_3d, ones.view([b,1,-1])], 1).transpose(1,2)) # [b,h*w, 2]
        # TODO Here some of the reprojection coordinates are invalid. (<0 or >max)
        reproj_valid_mask = (pts2_coord > torch.Tensor([0,0]).to(pred1.get_device())).all(-1, True).float() * \
                    (pts2_coord < torch.Tensor([w-1,h-1]).to(pred1.get_device())).all(-1, True).float() # [b,h*w, 1]
        reproj_valid_mask = (valid_mask * reproj_valid_mask.view([b,h,w,1]).permute([0,3,1,2])).detach()
        rigid_mask = rigid_mask.detach()
        pts2_depth = pts2_depth.transpose(1,2).view([b,1,h,w])
        # Get the interpolated depth prediction2
        pts2_coord_nor = torch.cat([2.0 * pts2_coord[:,:,0].unsqueeze(-1) / (w - 1.0) - 1.0, 2.0 * pts2_coord[:,:,1].unsqueeze(-1) / (h - 1.0) - 1.0], -1)
        inter_depth2 = F.grid_sample(pred2, pts2_coord_nor.view([b, h, w, 2]), padding_mode='reflection') # [b,1,h,w]
        # Relative depth discrepancy, restricted to rigid & valid pixels and
        # normalized by the active mask area.
        pj_loss_map = (torch.abs(1.0 - pts2_depth / (inter_depth2 + 1e-12)) * rigid_mask * reproj_valid_mask)
        pj_loss = pj_loss_map.mean((1,2,3)) / ((reproj_valid_mask * rigid_mask).mean((1,2,3))+1e-12)
        #pj_loss = (valid_mask * mask * torch.abs(pts2_depth - inter_depth2) / (torch.abs(pts2_depth + inter_depth2)+1e-12)).mean((1,2,3)) / ((valid_mask * mask).mean((1,2,3))+1e-12) # [b]
        # Supervise flow with the rigid flow implied by the (detached)
        # reprojection: rigid_flow = pts2_coord - xy.
        flow_loss = (rigid_mask * torch.abs(flow + xy - pts2_coord.detach().permute(0,2,1).view([b,2,h,w]))).mean((1,2,3)) / (rigid_mask.mean((1,2,3)) + 1e-12)
        return pj_loss, flow_loss
def disp2depth(self, disp, min_depth=0.1, max_depth=100.0):
min_disp = 1 / max_depth
max_disp = 1 / min_depth
scaled_disp = min_disp + (max_disp - min_disp) * disp
depth = 1 / scaled_disp
return scaled_disp, depth
def get_smooth_loss(self, img, disp):
# img: [b,3,h,w] depth: [b,1,h,w]
"""Computes the smoothness loss for a disparity image
The color image is used for edge-aware smoothness
"""
grad_disp_x = torch.abs(disp[:, :, :, :-1] - disp[:, :, :, 1:])
grad_disp_y = torch.abs(disp[:, :, :-1, :] - disp[:, :, 1:, :])
grad_img_x = torch.mean(torch.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True)
grad_img_y = torch.mean(torch.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True)
grad_disp_x *= torch.exp(-grad_img_x)
grad_disp_y *= torch.exp(-grad_img_y)
return grad_disp_x.mean((1,2,3)) + grad_disp_y.mean((1,2,3))
def infer_depth(self, img):
disp_list = self.depth_net(img)
disp, depth = self.disp2depth(disp_list[0])
return disp_list[0]
    def infer_vo(self, img1, img2, K, K_inv, match_num=6000):
        """Inference path for visual odometry.

        Runs the pose/flow model, samples up to ``match_num`` high-score
        correspondences from the rigid-and-valid region, and predicts a
        depth map for each frame.

        Args:
            img1, img2: image pair, [b, 3, h, w] each.
            K, K_inv: intrinsics and inverse, [b, 3, 3].
            match_num: maximum number of sampled correspondences.

        Returns:
            (depth_match [b, 4, m], depth1 [b, 1, h, w], depth2 [b, 1, h, w]).
        """
        b, img_h, img_w = img1.shape[0], img1.shape[2], img1.shape[3]
        F_final, img1_valid_mask, img1_rigid_mask, fwd_flow, fwd_match = self.model_pose.inference(img1, img2, K, K_inv)
        # infer depth
        disp1_list = self.depth_net(img1) # Nscales * [B, 1, H, W]
        disp2_list = self.depth_net(img2)
        disp1, depth1 = self.disp2depth(disp1_list[0])
        disp2, depth2 = self.disp2depth(disp2_list[0])
        # Restrict sampling to pixels that are both valid and rigid.
        img1_depth_mask = img1_rigid_mask * img1_valid_mask
        # [b, 4, match_num]
        top_ratio_match, top_ratio_mask = self.top_ratio_sample(fwd_match.view([b,4,-1]), img1_depth_mask.view([b,1,-1]), ratio=0.30) # [b, 4, ratio*h*w]
        depth_match, depth_match_num = self.robust_rand_sample(top_ratio_match, top_ratio_mask, num=match_num)
        return depth_match, depth1, depth2
    def check_rt(self, img1, img2, K, K_inv):
        """Check whether a reliable relative pose can be recovered for a pair.

        Runs the pose model, samples high-score matches, recovers pose with
        the NYU (OpenCV recoverPose) path and returns its per-batch
        success flags as a float tensor (1 = enough cheirality inliers).

        Args:
            img1, img2: image pair, [b, 3, h, w] each.
            K, K_inv: intrinsics and inverse, [b, 3, 3].

        Returns:
            Float tensor of 0/1 flags, shape [b].
        """
        # initialization
        b = img1.shape[0]
        flag1, flag2, flag3 = 0, 0, 0
        # The pose network consumes the two frames stacked along height.
        images = torch.cat([img1, img2], dim=2)
        inputs = [images, K.unsqueeze(1), K_inv.unsqueeze(1)]
        # Pose Network
        #self.profiler.reset()
        loss_pack, F_final, img1_valid_mask, img1_rigid_score, img1_inlier_mask, fwd_flow, fwd_match = self.model_pose(inputs, output_F=True)
        # Get masks
        img1_depth_mask = img1_rigid_score * img1_valid_mask
        # Select top score matches to triangulate depth.
        top_ratio_match, top_ratio_mask = self.top_ratio_sample(fwd_match.view([b,4,-1]), img1_depth_mask.view([b,1,-1]), ratio=0.20) # [b, 4, ratio*h*w]
        depth_match, depth_match_num = self.robust_rand_sample(top_ratio_match, top_ratio_mask, num=self.depth_match_num)
        P1, P2, flags = self.rt_from_fundamental_mat_nyu(F_final.detach(), K, depth_match)
        P1 = P1.detach()
        P2 = P2.detach()
        flags = torch.from_numpy(np.stack(flags, axis=0)).float().to(K.get_device())
        return flags
    def inference(self, img1, img2, K, K_inv):
        """Full inference pass: flow, disparity, relative pose, and filtered
        triangulated points for an image pair.

        Args:
            img1, img2: image pair, [b, 3, h, w] each.
            K, K_inv: intrinsics and inverse, [b, 3, 3].

        Returns:
            (fwd_flow, disp1, disp2, Rt [b, 3, 4],
             point2d_1_coord [b, n, 2], point2d_1_depth [b, n, 1]).
        """
        b, img_h, img_w = img1.shape[0], img1.shape[2], img1.shape[3]
        visualizer = Visualizer_debug('./vis/', np.transpose(255*img1.detach().cpu().numpy(), [0,2,3,1]), \
                        np.transpose(255*img2.detach().cpu().numpy(), [0,2,3,1]))
        F_final, img1_valid_mask, img1_rigid_mask, fwd_flow, fwd_match = self.model_pose.inference(img1, img2, K, K_inv)
        # infer depth
        disp1_list = self.depth_net(img1) # Nscales * [B, 1, H, W]
        disp2_list = self.depth_net(img2)
        disp1, _ = self.disp2depth(disp1_list[0])
        disp2, _ = self.disp2depth(disp2_list[0])
        # Get Camera Matrix
        img1_depth_mask = img1_rigid_mask * img1_valid_mask
        # [b, 4, match_num]
        top_ratio_match, top_ratio_mask = self.top_ratio_sample(fwd_match.view([b,4,-1]), img1_depth_mask.view([b,1,-1]), ratio=0.20) # [b, 4, ratio*h*w]
        depth_match, depth_match_num = self.robust_rand_sample(top_ratio_match, top_ratio_mask, num=self.depth_match_num)
        # NYUv2 uses the OpenCV recoverPose path; otherwise pose comes from
        # SVD of the essential matrix with cheirality selection.
        if self.dataset == 'nyuv2':
            P1, P2, _ = self.rt_from_fundamental_mat_nyu(F_final, K, depth_match)
        else:
            P1, P2 = self.rt_from_fundamental_mat(F_final, K, depth_match)
        # Rt = K^-1 P2 = [R|t] of the second camera.
        Rt = K_inv.bmm(P2)
        filt_depth_match, flag1 = self.ray_angle_filter(depth_match, P1, P2) # [b, 4, filt_num]
        point3d_1 = self.midpoint_triangulate(filt_depth_match, K_inv, P1, P2)
        point2d_1_coord, point2d_1_depth = self.reproject(P1, point3d_1) # [b,n,2], [b,n,1]
        point2d_2_coord, point2d_2_depth = self.reproject(P2, point3d_1)
        # Filter out some invalid triangulation results to stablize training.
        point2d_1_depth, point2d_2_depth, point2d_1_coord, point2d_2_coord, flag2 = self.filt_negative_depth(point2d_1_depth, \
            point2d_2_depth, point2d_1_coord, point2d_2_coord)
        point2d_1_depth, point2d_2_depth, point2d_1_coord, point2d_2_coord, flag3 = self.filt_invalid_coord(point2d_1_depth, \
            point2d_2_depth, point2d_1_coord, point2d_2_coord, max_h=img_h, max_w=img_w)
        return fwd_flow, disp1, disp2, Rt, point2d_1_coord, point2d_1_depth
    def forward(self, inputs):
        """Training forward pass: pose/flow losses plus triangulation-based
        depth supervision over all depth scales.

        Args:
            inputs: [images, K_ms, K_inv_ms] where images is [b, 3, 2h, w]
                (the two frames stacked along height) and K_ms / K_inv_ms
                carry per-scale intrinsics (scale 0 is used here).

        Returns:
            loss_pack: dict of per-batch loss tensors; the depth-related
            entries are zeroed (but kept differentiable) when triangulation
            degenerates (flag1/2/3), and gated by the pose-recovery flags on
            NYUv2.
        """
        # initialization
        images, K_ms, K_inv_ms = inputs
        K, K_inv = K_ms[:,0,:,:], K_inv_ms[:,0,:,:]
        assert (images.shape[1] == 3)
        # The pair is stacked along height; split it back into two frames.
        img_h, img_w = int(images.shape[2] / 2), images.shape[3]
        img1, img2 = images[:,:,:img_h,:], images[:,:,img_h:,:]
        b = img1.shape[0]
        flag1, flag2, flag3 = 0, 0, 0
        visualizer = Visualizer_debug('./vis/', img1=255*img1.permute([0,2,3,1]).detach().cpu().numpy(), \
                        img2=255*img2.permute([0,2,3,1]).detach().cpu().numpy())
        # Pose Network
        loss_pack, F_final, img1_valid_mask, img1_rigid_mask, fwd_flow, fwd_match = self.model_pose(inputs, output_F=True, visualizer=visualizer)
        # infer depth
        disp1_list = self.depth_net(img1) # Nscales * [B, 1, H, W]
        disp2_list = self.depth_net(img2)
        # Get masks
        img1_depth_mask = img1_rigid_mask * img1_valid_mask
        # Select top score matches to triangulate depth.
        top_ratio_match, top_ratio_mask = self.top_ratio_sample(fwd_match.view([b,4,-1]), img1_depth_mask.view([b,1,-1]), ratio=self.depth_sample_ratio) # [b, 4, ratio*h*w]
        depth_match, depth_match_num = self.robust_rand_sample(top_ratio_match, top_ratio_mask, num=self.depth_match_num)
        # Pose is recovered from the (detached) fundamental matrix so depth
        # losses do not back-propagate into the pose branch through it.
        if self.dataset == 'nyuv2':
            P1, P2, flags = self.rt_from_fundamental_mat_nyu(F_final.detach(), K, depth_match)
            flags = torch.from_numpy(np.stack(flags, axis=0)).float().to(K.get_device())
        else:
            P1, P2 = self.rt_from_fundamental_mat(F_final.detach(), K, depth_match)
        P1 = P1.detach()
        P2 = P2.detach()
        # Get triangulated points
        filt_depth_match, flag1 = self.ray_angle_filter(depth_match, P1, P2, return_angle=False) # [b, 4, filt_num]
        point3d_1 = self.midpoint_triangulate(filt_depth_match, K_inv, P1, P2)
        point2d_1_coord, point2d_1_depth = self.reproject(P1, point3d_1) # [b,n,2], [b,n,1]
        point2d_2_coord, point2d_2_depth = self.reproject(P2, point3d_1)
        # Filter out some invalid triangulation results to stablize training.
        point2d_1_depth, point2d_2_depth, point2d_1_coord, point2d_2_coord, flag2 = self.filt_negative_depth(point2d_1_depth, \
            point2d_2_depth, point2d_1_coord, point2d_2_coord)
        point2d_1_depth, point2d_2_depth, point2d_1_coord, point2d_2_coord, flag3 = self.filt_invalid_coord(point2d_1_depth, \
            point2d_2_depth, point2d_1_coord, point2d_2_coord, max_h=img_h, max_w=img_w)
        # Any degeneracy upstream: emit zero (but grad-capable) depth losses.
        if flag1 + flag2 + flag3 > 0:
            loss_pack['pt_depth_loss'] = torch.zeros([2]).to(point3d_1.get_device()).requires_grad_()
            loss_pack['pj_depth_loss'] = torch.zeros([2]).to(point3d_1.get_device()).requires_grad_()
            loss_pack['flow_error'] = torch.zeros([2]).to(point3d_1.get_device()).requires_grad_()
            loss_pack['depth_smooth_loss'] = torch.zeros([2]).to(point3d_1.get_device()).requires_grad_()
            return loss_pack
        pt_depth_loss = 0
        pj_depth_loss = 0
        flow_error = 0
        depth_smooth_loss = 0
        # Accumulate depth losses over all decoder scales (upsampled to
        # full resolution and registered against the triangulated depths).
        for s in range(self.depth_scale):
            disp_pred1 = F.interpolate(disp1_list[s], size=(img_h, img_w), mode='bilinear') # [b,1,h,w]
            disp_pred2 = F.interpolate(disp2_list[s], size=(img_h, img_w), mode='bilinear')
            scaled_disp1, depth_pred1 = self.disp2depth(disp_pred1)
            scaled_disp2, depth_pred2 = self.disp2depth(disp_pred2)
            # Rescale predicted depth according to triangulated depth
            # [b,1,h,w], [b,n,1]
            rescaled_pred1, inter_pred1 = self.register_depth(depth_pred1, point2d_1_coord, point2d_1_depth)
            rescaled_pred2, inter_pred2 = self.register_depth(depth_pred2, point2d_2_coord, point2d_2_depth)
            # Get Losses
            pt_depth_loss += self.get_trian_loss(point2d_1_depth, inter_pred1) + self.get_trian_loss(point2d_2_depth, inter_pred2)
            pj_depth, flow_loss = self.get_reproj_fdp_loss(rescaled_pred1, rescaled_pred2, P2, K, K_inv, img1_valid_mask, img1_rigid_mask, fwd_flow, visualizer=visualizer)
            depth_smooth_loss += self.get_smooth_loss(img1, disp_pred1 / (disp_pred1.mean((2,3), True) + 1e-12)) + \
                self.get_smooth_loss(img2, disp_pred2 / (disp_pred2.mean((2,3), True) + 1e-12))
            pj_depth_loss += pj_depth
            flow_error += flow_loss
        # On NYUv2, gate the depth losses by the pose-recovery flags so
        # failed pairs contribute nothing.
        if self.dataset == 'nyuv2':
            loss_pack['pt_depth_loss'] = pt_depth_loss * flags
            loss_pack['pj_depth_loss'], loss_pack['flow_error'] = pj_depth_loss * flags, flow_error * flags
            loss_pack['depth_smooth_loss'] = depth_smooth_loss * flags
        else:
            loss_pack['pt_depth_loss'] = pt_depth_loss
            loss_pack['pj_depth_loss'], loss_pack['flow_error'] = pj_depth_loss, flow_error
            loss_pack['depth_smooth_loss'] = depth_smooth_loss
        return loss_pack
| 30,237 | 53.978182 | 188 | py |
TrianFlow | TrianFlow-master/core/networks/pytorch_ssim/ssim.py | import torch
import torch.nn as nn
def SSIM(x, y):
    """Compute the per-pixel SSIM map between two image batches.

    Local means, variances, and covariance are estimated with a 3x3
    average-pooling window (stride 1, zero padding 1). Returns the raw
    SSIM map with the same shape as the inputs; values near 1 indicate
    high structural similarity.
    """
    c1 = 0.01 ** 2
    c2 = 0.03 ** 2
    pool = nn.AvgPool2d(3, 1, padding=1)
    mu_x = pool(x)
    mu_y = pool(y)
    var_x = pool(x * x) - mu_x * mu_x
    var_y = pool(y * y) - mu_y * mu_y
    cov_xy = pool(x * y) - mu_x * mu_y
    numerator = (2 * mu_x * mu_y + c1) * (2 * cov_xy + c2)
    denominator = (mu_x * mu_x + mu_y * mu_y + c1) * (var_x + var_y + c2)
    return numerator / denominator
| 535 | 24.52381 | 65 | py |
TrianFlow | TrianFlow-master/core/networks/structures/ransac.py | import torch
import numpy as np
import os, sys
import torch.nn as nn
import pdb
import cv2
class reduced_ransac(nn.Module):
    """Estimate a fundamental matrix per batch element from dense matches.

    Top-scoring matches are subsampled and fed to OpenCV's
    ``findFundamentalMat`` (LMedS on NYUv2, RANSAC otherwise).
    """
    def __init__(self, check_num, thres, dataset):
        """
        Args:
            check_num: number of matches sampled for F estimation.
            thres: RANSAC threshold (stored; the forward pass below uses
                hard-coded OpenCV parameters).
            dataset: dataset name; 'nyuv2' selects the LMedS estimator.
        """
        super(reduced_ransac, self).__init__()
        self.check_num = check_num
        self.thres = thres
        self.dataset = dataset

    def robust_rand_sample(self, match, mask, num, robust=True):
        """Randomly sample ``num`` matches; when zero-score entries exist,
        sample only from the nonzero ones (shrinking ``num`` if needed).

        Args:
            match: matches [b, 4, n]. mask: scores [b, 1, n].

        Returns:
            (selected matches [b, 4, num], actual sample count).
        """
        # match: [b, 4, -1] mask: [b, 1, -1]
        b, n = match.shape[0], match.shape[2]
        nonzeros_num = torch.min(torch.sum(mask > 0, dim=-1)) # []
        if nonzeros_num.detach().cpu().numpy() == n:
            rand_int = torch.randint(0, n, [num])
            select_match = match[:,:,rand_int]
        else:
            # If there is zero score in match, sample the non-zero matches.
            select_idxs = []
            if robust:
                num = np.minimum(nonzeros_num.detach().cpu().numpy(), num)
            for i in range(b):
                nonzero_idx = torch.nonzero(mask[i,0,:]) # [nonzero_num,1]
                rand_int = torch.randint(0, nonzero_idx.shape[0], [int(num)])
                select_idx = nonzero_idx[rand_int, :] # [num, 1]
                select_idxs.append(select_idx)
            select_idxs = torch.stack(select_idxs, 0) # [b,num,1]
            select_match = torch.gather(match.transpose(1,2), index=select_idxs.repeat(1,1,4), dim=1).transpose(1,2) # [b, 4, num]

        return select_match, num

    def top_ratio_sample(self, match, mask, ratio):
        """Keep the top ``ratio`` fraction of matches by mask score.

        Args:
            match: matches [b, 4, n]. mask: scores [b, 1, n].

        Returns:
            (top matches [b, 4, ratio*n], their scores [b, 1, ratio*n]).
        """
        # match: [b, 4, -1] mask: [b, 1, -1]
        b, total_num = match.shape[0], match.shape[-1]
        scores, indices = torch.topk(mask, int(ratio*total_num), dim=-1) # [B, 1, ratio*tnum]
        select_match = torch.gather(match.transpose(1,2), index=indices.squeeze(1).unsqueeze(-1).repeat(1,1,4), dim=1).transpose(1,2) # [b, 4, ratio*tnum]
        return select_match, scores

    def forward(self, match, mask, visualizer=None):
        """Estimate per-batch fundamental matrices from dense matches.

        Args:
            match: dense matches, [B, 4, H, W].
            mask: match scores, [B, 1, H, W].
            visualizer: unused; kept for interface compatibility.

        Returns:
            Fundamental matrices as a float tensor on match's device.
        """
        # match: [B, 4, H, W] mask: [B, 1, H, W]
        b, h, w = match.shape[0], match.shape[2], match.shape[3]
        match = match.view([b, 4, -1]).contiguous()
        mask = mask.view([b, 1, -1]).contiguous()
        # Sample matches for RANSAC 8-point and best F selection
        top_ratio_match, top_ratio_mask = self.top_ratio_sample(match, mask, ratio=0.20) # [b, 4, ratio*H*W]
        check_match, check_num = self.robust_rand_sample(top_ratio_match, top_ratio_mask, num=self.check_num) # [b, 4, check_num]
        check_match = check_match.contiguous()
        cv_f = []
        for i in range(b):
            # LMedS for NYUv2, RANSAC (threshold 0.1, confidence 0.99)
            # elsewhere.
            if self.dataset == 'nyuv2':
                f, m = cv2.findFundamentalMat(check_match[i,:2,:].transpose(0,1).detach().cpu().numpy(), check_match[i,2:,:].transpose(0,1).detach().cpu().numpy(), cv2.FM_LMEDS, 0.99)
            else:
                f, m = cv2.findFundamentalMat(check_match[i,:2,:].transpose(0,1).detach().cpu().numpy(), check_match[i,2:,:].transpose(0,1).detach().cpu().numpy(), cv2.FM_RANSAC, 0.1, 0.99)
            cv_f.append(f)
        cv_f = np.stack(cv_f, axis=0)
        cv_f = torch.from_numpy(cv_f).float().to(match.get_device())
        return cv_f
| 3,145 | 45.955224 | 189 | py |
TrianFlow | TrianFlow-master/core/networks/structures/depth_model.py | '''
This code was ported from existing repos
[LINK] https://github.com/nianticlabs/monodepth2
'''
from __future__ import absolute_import, division, print_function
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
import pdb
class ResNetMultiImageInput(models.ResNet):
    """Constructs a resnet model with varying number of input images.
    Adapted from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
    """
    def __init__(self, block, layers, num_classes=1000, num_input_images=1):
        """
        Args:
            block: residual block class (BasicBlock or Bottleneck).
            layers: blocks per stage, e.g. [2, 2, 2, 2] for resnet18.
            num_classes: passed through to the base class interface.
            num_input_images: number of RGB frames stacked channel-wise.
        """
        super(ResNetMultiImageInput, self).__init__(block, layers)
        self.inplanes = 64
        # First conv widened to accept num_input_images stacked RGB frames.
        self.conv1 = nn.Conv2d(
            num_input_images * 3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        # Re-initialize weights (Kaiming for convs, unit/zero for BN) since
        # the widened conv1 invalidated the base-class initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
def resnet_multiimage_input(num_layers, pretrained=False, num_input_images=1):
    """Constructs a ResNet model.
    Args:
        num_layers (int): Number of resnet layers. Must be 18 or 50
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        num_input_images (int): Number of frames stacked as input
    """
    assert num_layers in [18, 50], "Can only run with 18 or 50 layer resnet"
    blocks = {18: [2, 2, 2, 2], 50: [3, 4, 6, 3]}[num_layers]
    block_type = {18: models.resnet.BasicBlock, 50: models.resnet.Bottleneck}[num_layers]
    model = ResNetMultiImageInput(block_type, blocks, num_input_images=num_input_images)

    if pretrained:
        loaded = model_zoo.load_url(models.resnet.model_urls['resnet{}'.format(num_layers)])
        # Duplicate the pretrained conv1 weights across the stacked frames
        # and rescale so the initial activations keep the same magnitude.
        loaded['conv1.weight'] = torch.cat(
            [loaded['conv1.weight']] * num_input_images, 1) / num_input_images
        model.load_state_dict(loaded)
    return model
class ResnetEncoder(nn.Module):
    """Pytorch module for a resnet encoder
    """
    def __init__(self, num_layers, pretrained, num_input_images=1):
        """
        Args:
            num_layers: resnet depth (18/34/50/101/152).
            pretrained: load torchvision's ImageNet weights when True.
            num_input_images: frames stacked channel-wise at the input.
        """
        super(ResnetEncoder, self).__init__()

        # Channel counts of the five feature levels returned by forward().
        self.num_ch_enc = np.array([64, 64, 128, 256, 512])

        resnets = {18: models.resnet18,
                   34: models.resnet34,
                   50: models.resnet50,
                   101: models.resnet101,
                   152: models.resnet152}

        if num_layers not in resnets:
            raise ValueError("{} is not a valid number of resnet layers".format(num_layers))

        if num_input_images > 1:
            self.encoder = resnet_multiimage_input(num_layers, pretrained, num_input_images)
        else:
            self.encoder = resnets[num_layers](pretrained)

        # Bottleneck-based resnets (>34 layers) quadruple the deeper widths.
        if num_layers > 34:
            self.num_ch_enc[1:] *= 4

    def forward(self, input_image):
        """Return the five intermediate feature maps (finest to coarsest).

        The input is normalized with fixed mean 0.45 / std 0.225 —
        presumably an ImageNet-style normalization (TODO confirm the
        expected input range is [0, 1]).
        """
        self.features = []
        x = (input_image - 0.45) / 0.225
        x = self.encoder.conv1(x)
        x = self.encoder.bn1(x)
        self.features.append(self.encoder.relu(x))
        self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[-1])))
        self.features.append(self.encoder.layer2(self.features[-1]))
        self.features.append(self.encoder.layer3(self.features[-1]))
        self.features.append(self.encoder.layer4(self.features[-1]))

        return self.features
class ConvBlock(nn.Module):
    """Padded 3x3 convolution followed by an ELU nonlinearity."""

    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        # Attribute names kept so existing checkpoints load unchanged.
        self.conv = Conv3x3(in_channels, out_channels)
        self.nonlin = nn.ELU(inplace=True)

    def forward(self, x):
        """Apply conv then ELU to ``x``."""
        return self.nonlin(self.conv(x))
class Conv3x3(nn.Module):
    """3x3 convolution preceded by 1-pixel padding.

    Reflection padding (the default) avoids border artifacts; zero padding
    is the plain alternative.
    """

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        # Attribute names kept so existing checkpoints load unchanged.
        self.pad = nn.ReflectionPad2d(1) if use_refl else nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, x):
        """Pad then convolve; output spatial size equals input size."""
        return self.conv(self.pad(x))
def upsample(x):
    """Upsample ``x`` by a factor of 2 along H and W.

    Bilinear interpolation (align_corners=False) is the active choice; an
    earlier nearest-neighbour variant existed per the original TODO.
    """
    return F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
class DepthDecoder(nn.Module):
    """U-Net-style decoder producing sigmoid disparity maps at several scales.

    Each decoder level upsamples by 2 and (optionally) concatenates the
    matching encoder skip feature before a second conv block.
    """
    def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True):
        """
        Args:
            num_ch_enc: channel counts of the encoder features (fine→coarse).
            scales: decoder levels at which a disparity output is emitted;
                must support .index() (range or list).
            num_output_channels: channels of each disparity output.
            use_skips: concatenate encoder skip connections when True.
        """
        super(DepthDecoder, self).__init__()

        self.num_output_channels = num_output_channels
        self.use_skips = use_skips
        self.scales = scales

        self.num_ch_enc = num_ch_enc
        self.num_ch_dec = np.array([16, 32, 64, 128, 256])

        # decoder
        self.init_decoder()

    def init_decoder(self):
        # upconvs[idx] holds the two conv blocks of decoder level 4-idx:
        # [0] before upsampling, [1] after skip concatenation.
        self.upconvs = nn.ModuleList()
        for i in range(4, -1, -1):
            upconvs_now = nn.ModuleList()
            # upconv_0
            num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]
            num_ch_out = self.num_ch_dec[i]
            upconvs_now.append(ConvBlock(num_ch_in, num_ch_out))
            # upconv_1
            num_ch_in = self.num_ch_dec[i]
            if self.use_skips and i > 0:
                num_ch_in += self.num_ch_enc[i - 1]
            num_ch_out = self.num_ch_dec[i]
            upconvs_now.append(ConvBlock(num_ch_in, num_ch_out))
            self.upconvs.append(upconvs_now)

        # One 3x3 output head per requested scale.
        self.dispconvs = nn.ModuleList()
        for s in self.scales:
            self.dispconvs.append(Conv3x3(self.num_ch_dec[s], self.num_output_channels))

        # self.decoder = nn.ModuleList(list(self.convs.values()))
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_features):
        """Decode encoder features into {("disp", scale): tensor} outputs."""
        self.outputs = {}
        # decoder
        x = input_features[-1]
        for scale in range(4, -1, -1): # [4, 3, 2, 1, 0]
            idx = 4 - scale
            x = self.upconvs[idx][0](x)
            x = [upsample(x)]
            if self.use_skips and scale > 0:
                x += [input_features[scale - 1]]
            x = torch.cat(x, 1)
            x = self.upconvs[idx][1](x)
            # get disp
            if scale in self.scales:
                scale_idx = self.scales.index(scale)
                self.outputs[("disp", scale)] = self.sigmoid(self.dispconvs[scale_idx](x))
        return self.outputs
class Depth_Model(nn.Module):
    """ResNet encoder + U-Net decoder emitting multi-scale sigmoid disparity."""
    def __init__(self, depth_scale, num_layers=18):
        """
        Args:
            depth_scale: number of output scales (finest first).
            num_layers: resnet depth for the encoder (default 18).
        """
        super(Depth_Model, self).__init__()
        self.depth_scale = depth_scale
        self.encoder = ResnetEncoder(num_layers=num_layers, pretrained=False)
        self.decoder = DepthDecoder(self.encoder.num_ch_enc, scales=range(depth_scale))

    def forward(self, img):
        """Return a list of disparity maps, one per scale (finest first)."""
        features = self.encoder(img)
        outputs = self.decoder(features)
        depth_list = []
        disp_list = []
        for i in range(self.depth_scale):
            disp = outputs['disp', i]
            #s_disp, depth = self.disp2depth(disp, self.min_depth, self.max_depth)
            #depth_list.append(depth)
            #disp_list.append(s_disp)
            # Raw sigmoid disparity; conversion to depth is done by callers.
            disp_list.append(disp)
        return disp_list
| 7,964 | 36.394366 | 92 | py |
TrianFlow | TrianFlow-master/core/networks/structures/flowposenet.py | import torch
import torch.nn as nn
from torch import sigmoid
from torch.nn.init import xavier_uniform_, zeros_
def conv(in_planes, out_planes, kernel_size=3):
    """Strided (stride 2) Conv2d with 'same'-style padding, then ReLU."""
    padding = (kernel_size - 1) // 2
    layers = [
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                  padding=padding, stride=2),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def upconv(in_planes, out_planes):
    """Transposed-conv 2x upsampling followed by ReLU."""
    deconv = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4,
                                stride=2, padding=1)
    return nn.Sequential(deconv, nn.ReLU(inplace=True))
class FlowPoseNet(nn.Module):
    """Regress a 6-DoF pose vector from a 2-channel optical-flow map via a
    7-layer strided conv encoder, global average pooling, and a 0.01 scale
    on the output (keeps initial pose predictions near zero).
    """
    def __init__(self):
        super(FlowPoseNet, self).__init__()

        conv_planes = [16, 32, 64, 128, 256, 256, 256]
        # Each conv halves the spatial resolution (stride 2).
        self.conv1 = conv(2, conv_planes[0], kernel_size=7)
        self.conv2 = conv(conv_planes[0], conv_planes[1], kernel_size=5)
        self.conv3 = conv(conv_planes[1], conv_planes[2])
        self.conv4 = conv(conv_planes[2], conv_planes[3])
        self.conv5 = conv(conv_planes[3], conv_planes[4])
        self.conv6 = conv(conv_planes[4], conv_planes[5])
        self.conv7 = conv(conv_planes[5], conv_planes[6])

        # 1x1 head producing the 6 pose parameters per spatial location.
        self.pose_pred = nn.Conv2d(conv_planes[6], 6, kernel_size=1, padding=0)

    def init_weights(self):
        """Xavier-initialize all conv weights and zero the biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                xavier_uniform_(m.weight.data)
                if m.bias is not None:
                    zeros_(m.bias)

    def forward(self, flow):
        """Return a [B, 6] pose vector from a [B, 2, H, W] flow map."""
        # [B, 2, H, W]
        input = flow
        out_conv1 = self.conv1(input)
        out_conv2 = self.conv2(out_conv1)
        out_conv3 = self.conv3(out_conv2)
        out_conv4 = self.conv4(out_conv3)
        out_conv5 = self.conv5(out_conv4)
        out_conv6 = self.conv6(out_conv5)
        out_conv7 = self.conv7(out_conv6)

        pose = self.pose_pred(out_conv7)
        # Global average over the remaining spatial extent, then scale down.
        pose = pose.mean(3).mean(2)
        pose = 0.01 * pose.view(pose.size(0), 6)

        return pose
| 1,951 | 30.483871 | 104 | py |
TrianFlow | TrianFlow-master/core/networks/structures/feature_pyramid.py | import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from net_utils import conv
import torch
import torch.nn as nn
class FeaturePyramid(nn.Module):
    """Six-level feature pyramid: each level is a stride-2 conv followed by a
    stride-1 conv, so level i has 1/2^i of the input resolution.

    Channel widths per level: 16, 32, 64, 96, 128, 196.
    """
    def __init__(self):
        super(FeaturePyramid, self).__init__()
        self.conv1 = conv(3, 16, kernel_size=3, stride=2)
        self.conv2 = conv(16, 16, kernel_size=3, stride=1)
        self.conv3 = conv(16, 32, kernel_size=3, stride=2)
        self.conv4 = conv(32, 32, kernel_size=3, stride=1)
        self.conv5 = conv(32, 64, kernel_size=3, stride=2)
        self.conv6 = conv(64, 64, kernel_size=3, stride=1)
        self.conv7 = conv(64, 96, kernel_size=3, stride=2)
        self.conv8 = conv(96, 96, kernel_size=3, stride=1)
        self.conv9 = conv(96, 128, kernel_size=3, stride=2)
        self.conv10 = conv(128, 128, kernel_size=3, stride=1)
        self.conv11 = conv(128, 196, kernel_size=3, stride=2)
        self.conv12 = conv(196, 196, kernel_size=3, stride=1)

        '''
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                nn.init.constant_(m.weight.data, 0.0)
                if m.bias is not None:
                    m.bias.data.zero_()
        '''

    def forward(self, img):
        """Return the six pyramid levels (finest to coarsest) for ``img``."""
        cnv2 = self.conv2(self.conv1(img))
        cnv4 = self.conv4(self.conv3(cnv2))
        cnv6 = self.conv6(self.conv5(cnv4))
        cnv8 = self.conv8(self.conv7(cnv6))
        cnv10 = self.conv10(self.conv9(cnv8))
        cnv12 = self.conv12(self.conv11(cnv10))
        return cnv2, cnv4, cnv6, cnv8, cnv10, cnv12
| 1,586 | 40.763158 | 77 | py |
TrianFlow | TrianFlow-master/core/networks/structures/inverse_warp.py | from __future__ import division
import torch
import torch.nn.functional as F
pixel_coords = None
def set_id_grid(depth):
    """(Re)build the module-level homogeneous pixel-coordinate cache.

    Populates ``pixel_coords`` with a [1, 3, H, W] grid whose channel rows
    are (x, y, 1), matching the dtype/device of ``depth`` ([B, H, W]).
    """
    global pixel_coords
    b, h, w = depth.size()
    # Row indices (y) broadcast over width.
    i_range = torch.arange(0, h).view(1, h, 1).expand(
        1, h, w).type_as(depth)  # [1, H, W]
    # Column indices (x) broadcast over height.
    j_range = torch.arange(0, w).view(1, 1, w).expand(
        1, h, w).type_as(depth)  # [1, H, W]
    ones = torch.ones(1, h, w).type_as(depth)

    pixel_coords = torch.stack((j_range, i_range, ones), dim=1)  # [1, 3, H, W]
def check_sizes(input, input_name, expected):
    """Assert that ``input`` matches an expected shape specification.

    ``expected`` is a sequence of per-dimension tokens: a digit string pins
    that dimension to a fixed size, while any other token (e.g. 'B', 'H')
    only requires the dimension to exist.

    Raises:
        AssertionError: when the rank or any pinned dimension mismatches.
    """
    checks = [input.ndimension() == len(expected)]
    checks.extend(
        input.size(axis) == int(token)
        for axis, token in enumerate(expected)
        if token.isdigit()
    )
    assert all(checks), "wrong size for {}, expected {}, got {}".format(
        input_name, 'x'.join(expected), list(input.size()))
def pixel2cam(depth, intrinsics_inv):
    """Back-project per-pixel depths into camera-frame 3-D points.

    Args:
        depth: depth maps -- [B, H, W]
        intrinsics_inv: inverse intrinsics per batch element -- [B, 3, 3]
    Returns:
        camera coordinates scaled by depth -- [B, 3, H, W]
    """
    global pixel_coords
    b, h, w = depth.size()
    # Lazily (re)build the cached homogeneous pixel grid when it is missing
    # or too small for the current resolution.
    if (pixel_coords is None) or pixel_coords.size(2) < h:
        set_id_grid(depth)
    grid = pixel_coords[:, :, :h, :w].expand(b, 3, h, w).reshape(b, 3, -1)  # [B, 3, H*W]
    rays = (intrinsics_inv @ grid).reshape(b, 3, h, w)
    return rays * depth.unsqueeze(1)
def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):
    """Project camera-frame 3-D points to normalized sampling coordinates.

    Args:
        cam_coords: points in the first camera's frame -- [B, 3, H, W]
        proj_c2p_rot: rotation matrices, or None for identity -- [B, 3, 3]
        proj_c2p_tr: translation vectors, or None for zero -- [B, 3, 1]
        padding_mode: unused here; kept for interface parity with cam2pixel2
    Returns:
        grid of [-1, 1] coordinates for grid_sample -- [B, H, W, 2]
    """
    b, _, h, w = cam_coords.size()
    points = cam_coords.reshape(b, 3, -1)  # [B, 3, H*W]
    if proj_c2p_rot is not None:
        points = proj_c2p_rot @ points
    if proj_c2p_tr is not None:
        points = points + proj_c2p_tr  # [B, 3, H*W]
    x = points[:, 0]
    y = points[:, 1]
    # Clamp depth to keep the perspective division finite.
    z = points[:, 2].clamp(min=1e-3)
    # Map to [-1, 1]: -1 at the left/top edge, +1 at x = w-1 / y = h-1.
    x_norm = 2 * (x / z) / (w - 1) - 1
    y_norm = 2 * (y / z) / (h - 1) - 1
    grid = torch.stack([x_norm, y_norm], dim=2)  # [B, H*W, 2]
    return grid.reshape(b, h, w, 2)
def euler2mat(angle):
    """Convert Euler angles (radians, x/y/z order) to rotation matrices.

    Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174
    Args:
        angle: per-batch rotation about the 3 axes -- [B, 3]
    Returns:
        Rotation matrices R = Rx @ Ry @ Rz -- [B, 3, 3]
    """
    batch = angle.size(0)
    rx, ry, rz = angle[:, 0], angle[:, 1], angle[:, 2]
    # Detached constants so the zeros/ones carry no gradient history.
    zero = rz.detach() * 0
    one = zero.detach() + 1

    cz, sz = torch.cos(rz), torch.sin(rz)
    rot_z = torch.stack([cz, -sz, zero,
                         sz, cz, zero,
                         zero, zero, one], dim=1).reshape(batch, 3, 3)

    cy, sy = torch.cos(ry), torch.sin(ry)
    rot_y = torch.stack([cy, zero, sy,
                         zero, one, zero,
                         -sy, zero, cy], dim=1).reshape(batch, 3, 3)

    cx, sx = torch.cos(rx), torch.sin(rx)
    rot_x = torch.stack([one, zero, zero,
                         zero, cx, -sx,
                         zero, sx, cx], dim=1).reshape(batch, 3, 3)

    return rot_x @ rot_y @ rot_z
def quat2mat(quat):
    """Convert quaternion coefficients to rotation matrices.

    Args:
        quat: the three vector coefficients; the scalar part is fixed to 1
            before normalization so the quaternion has unit norm -- [B, 3]
    Returns:
        Rotation matrices -- [B, 3, 3]
    """
    batch = quat.size(0)
    # Prepend w = 1 (detached constant), then normalize the 4-vector.
    full = torch.cat([quat[:, :1].detach() * 0 + 1, quat], dim=1)
    full = full / full.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = full[:, 0], full[:, 1], full[:, 2], full[:, 3]

    ww, xx, yy, zz = w * w, x * x, y * y, z * z
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z

    return torch.stack([
        ww + xx - yy - zz, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
        2 * wz + 2 * xy, ww - xx + yy - zz, 2 * yz - 2 * wx,
        2 * xz - 2 * wy, 2 * wx + 2 * yz, ww - xx - yy + zz,
    ], dim=1).reshape(batch, 3, 3)
def pose_vec2mat(vec, rotation_mode='euler'):
    """Convert 6-DoF pose vectors into [R | t] transformation matrices.

    Args:
        vec: 6-DoF parameters ordered tx, ty, tz, rx, ry, rz -- [B, 6]
        rotation_mode: 'euler' or 'quat', selecting how the last three
            components are interpreted
    Returns:
        Transformation matrices -- [B, 3, 4]
    """
    translation = vec[:, :3].unsqueeze(-1)  # [B, 3, 1]
    rotation = vec[:, 3:]
    if rotation_mode == 'euler':
        rot_mat = euler2mat(rotation)  # [B, 3, 3]
    elif rotation_mode == 'quat':
        rot_mat = quat2mat(rotation)  # [B, 3, 3]
    return torch.cat([rot_mat, translation], dim=2)  # [B, 3, 4]
def inverse_warp(img, depth, pose, intrinsics, rotation_mode='euler', padding_mode='zeros'):
    """Inverse-warp a source image onto the target image plane.

    Args:
        img: source image to sample pixels from -- [B, 3, H, W]
        depth: depth map of the target image -- [B, H, W]
        pose: 6-DoF pose from target to source -- [B, 6]
        intrinsics: camera intrinsics -- [B, 3, 3]
        rotation_mode: rotation parametrization passed to pose_vec2mat
        padding_mode: grid_sample padding behaviour
    Returns:
        projected_img: source image warped to the target view
        valid_points: boolean mask of pixels that sampled inside the source
    """
    check_sizes(img, 'img', 'B3HW')
    check_sizes(depth, 'depth', 'BHW')
    check_sizes(pose, 'pose', 'B6')
    check_sizes(intrinsics, 'intrinsics', 'B33')

    cam_coords = pixel2cam(depth, intrinsics.inverse())  # [B, 3, H, W]
    pose_mat = pose_vec2mat(pose, rotation_mode)  # [B, 3, 4]
    # Compose the full projection from target camera frame to source pixels.
    proj = intrinsics @ pose_mat  # [B, 3, 4]
    rot, tr = proj[:, :, :3], proj[:, :, -1:]
    src_pixel_coords = cam2pixel(cam_coords, rot, tr, padding_mode)  # [B, H, W, 2]
    projected_img = F.grid_sample(img, src_pixel_coords, padding_mode=padding_mode)
    valid_points = src_pixel_coords.abs().max(dim=-1)[0] <= 1
    return projected_img, valid_points
def cam2pixel2(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):
    """Project camera-frame 3-D points to pixel coords, also returning depth.

    Args:
        cam_coords: points in the first camera's frame -- [B, 3, H, W]
        proj_c2p_rot: rotation matrices, or None for identity -- [B, 3, 3]
        proj_c2p_tr: translation vectors, or None for zero -- [B, 3, 1]
        padding_mode: 'zeros' pushes out-of-range coordinates to 2 so that
            grid_sample never blends valid pixels with padded ones
    Returns:
        grid of [-1, 1] coordinates for grid_sample -- [B, H, W, 2]
        projected depth (z after transform, clamped) -- [B, 1, H, W]
    """
    b, _, h, w = cam_coords.size()
    points = cam_coords.reshape(b, 3, -1)  # [B, 3, H*W]
    if proj_c2p_rot is not None:
        points = proj_c2p_rot @ points
    if proj_c2p_tr is not None:
        points = points + proj_c2p_tr  # [B, 3, H*W]
    x = points[:, 0]
    y = points[:, 1]
    z = points[:, 2].clamp(min=1e-3)
    # Map to [-1, 1]: -1 at the left/top edge, +1 at x = w-1 / y = h-1.
    x_norm = 2 * (x / z) / (w - 1) - 1
    y_norm = 2 * (y / z) / (h - 1) - 1
    if padding_mode == 'zeros':
        out_x = ((x_norm > 1) + (x_norm < -1)).detach()
        # make sure that no point in warped image is a combinaison of im and gray
        x_norm[out_x] = 2
        out_y = ((y_norm > 1) + (y_norm < -1)).detach()
        y_norm[out_y] = 2
    grid = torch.stack([x_norm, y_norm], dim=2)  # [B, H*W, 2]
    return grid.reshape(b, h, w, 2), z.reshape(b, 1, h, w)
def inverse_warp2(img, depth, ref_depth, pose, intrinsics, padding_mode='zeros'):
    """Inverse-warp a source image (and its depth map) onto the target plane.

    Args:
        img: source image to sample pixels from -- [B, 3, H, W]
        depth: depth map of the target image -- [B, 1, H, W]
        ref_depth: source depth map to sample depth from -- [B, 1, H, W]
        pose: 6-DoF pose from target to source -- [B, 6]
        intrinsics: camera intrinsics -- [B, 3, 3]
        padding_mode: grid_sample padding behaviour
    Returns:
        projected_img: source image warped to the target view
        valid_mask: float mask, 1 where the sample fell inside the source
        projected_depth: source depth sampled at the warped locations
        computed_depth: depth of target points as seen from the source camera
    """
    check_sizes(img, 'img', 'B3HW')
    check_sizes(depth, 'depth', 'B1HW')
    check_sizes(ref_depth, 'ref_depth', 'B1HW')
    check_sizes(pose, 'pose', 'B6')
    check_sizes(intrinsics, 'intrinsics', 'B33')

    cam_coords = pixel2cam(depth.squeeze(1), intrinsics.inverse())  # [B, 3, H, W]
    pose_mat = pose_vec2mat(pose)  # [B, 3, 4]
    # Compose the full projection from target camera frame to source pixels.
    proj = intrinsics @ pose_mat  # [B, 3, 4]
    rot, tr = proj[:, :, :3], proj[:, :, -1:]
    src_pixel_coords, computed_depth = cam2pixel2(cam_coords, rot, tr, padding_mode)  # [B, H, W, 2]
    projected_img = F.grid_sample(img, src_pixel_coords, padding_mode=padding_mode)
    valid_mask = (src_pixel_coords.abs().max(dim=-1)[0] <= 1).unsqueeze(1).float()
    projected_depth = F.grid_sample(
        ref_depth, src_pixel_coords, padding_mode=padding_mode).clamp(min=1e-3)
    return projected_img, valid_mask, projected_depth, computed_depth
TrianFlow | TrianFlow-master/core/networks/structures/pwc_tf.py | import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from net_utils import conv, deconv, warp_flow
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'external'))
# from correlation_package.correlation import Correlation
# from spatial_correlation_sampler import SpatialCorrelationSampler as Correlation
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import pdb
import torch.nn.functional as F
#from spatial_correlation_sampler import spatial_correlation_sample
class PWC_tf(nn.Module):
    """PWC-Net style coarse-to-fine optical-flow decoder.

    Consumes two 6-level feature pyramids and predicts flow at levels 6..2:
    at each level the second image's features are warped by the upsampled
    coarser flow, correlated with the first image's features, and a residual
    flow is predicted by a densely connected conv stack. The finest flow is
    refined by a dilated-conv context network, and all outputs are resized
    relative to the full image size.
    """
    def __init__(self, md=4):
        # md: maximum displacement of the correlation; the cost volume has
        # (2*md+1)**2 channels.
        super(PWC_tf, self).__init__()
        self.corr = self.corr_naive
        # self.corr = self.correlate
        self.leakyRELU = nn.LeakyReLU(0.1)
        nd = (2*md+1)**2
        #dd = np.cumsum([128,128,96,64,32])
        # Channel widths of the five convs in each level's decoder stack.
        dd = np.array([128,128,96,64,32])
        od = nd
        # Level 6 decoder: operates on the cost volume alone.
        self.conv6_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv6_1 = conv(dd[0], 128, kernel_size=3, stride=1)
        self.conv6_2 = conv(dd[0]+dd[1],96, kernel_size=3, stride=1)
        self.conv6_3 = conv(dd[1]+dd[2],64, kernel_size=3, stride=1)
        self.conv6_4 = conv(dd[2]+dd[3],32, kernel_size=3, stride=1)
        self.predict_flow6 = self.predict_flow(dd[3]+dd[4])
        #self.deconv6 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        #self.upfeat6 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
        # Levels 5..2 additionally take the level's own features and the
        # 2-channel upsampled flow, hence od = cost volume + feat + 2.
        od = nd+128+2
        self.conv5_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv5_1 = conv(dd[0], 128, kernel_size=3, stride=1)
        self.conv5_2 = conv(dd[0]+dd[1],96, kernel_size=3, stride=1)
        self.conv5_3 = conv(dd[1]+dd[2],64, kernel_size=3, stride=1)
        self.conv5_4 = conv(dd[2]+dd[3],32, kernel_size=3, stride=1)
        self.predict_flow5 = self.predict_flow(dd[3]+dd[4])
        #self.deconv5 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        #self.upfeat5 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
        od = nd+96+2
        self.conv4_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv4_1 = conv(dd[0], 128, kernel_size=3, stride=1)
        self.conv4_2 = conv(dd[0]+dd[1],96, kernel_size=3, stride=1)
        self.conv4_3 = conv(dd[1]+dd[2],64, kernel_size=3, stride=1)
        self.conv4_4 = conv(dd[2]+dd[3],32, kernel_size=3, stride=1)
        self.predict_flow4 = self.predict_flow(dd[3]+dd[4])
        #self.deconv4 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        #self.upfeat4 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
        od = nd+64+2
        self.conv3_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv3_1 = conv(dd[0], 128, kernel_size=3, stride=1)
        self.conv3_2 = conv(dd[0]+dd[1],96, kernel_size=3, stride=1)
        self.conv3_3 = conv(dd[1]+dd[2],64, kernel_size=3, stride=1)
        self.conv3_4 = conv(dd[2]+dd[3],32, kernel_size=3, stride=1)
        self.predict_flow3 = self.predict_flow(dd[3]+dd[4])
        #self.deconv3 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        #self.upfeat3 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
        od = nd+32+2
        self.conv2_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv2_1 = conv(dd[0], 128, kernel_size=3, stride=1)
        self.conv2_2 = conv(dd[0]+dd[1],96, kernel_size=3, stride=1)
        self.conv2_3 = conv(dd[1]+dd[2],64, kernel_size=3, stride=1)
        self.conv2_4 = conv(dd[2]+dd[3],32, kernel_size=3, stride=1)
        self.predict_flow2 = self.predict_flow(dd[3]+dd[4])
        #self.deconv2 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        # Context network: a chain of dilated convs that refines the finest
        # (level 2) flow from [flow2, x4].
        self.dc_conv1 = conv(dd[4]+2, 128, kernel_size=3, stride=1, padding=1, dilation=1)
        self.dc_conv2 = conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2)
        self.dc_conv3 = conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4)
        self.dc_conv4 = conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8)
        self.dc_conv5 = conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16)
        self.dc_conv6 = conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1)
        self.dc_conv7 = self.predict_flow(32)
        '''
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_zeros_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
        '''
    def predict_flow(self, in_planes):
        # 2-channel flow prediction head: plain conv, no activation.
        return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=True)
    def warp(self, x, flow):
        # Backward-warp x by flow without validity masking.
        return warp_flow(x, flow, use_mask=False)
    def corr_naive(self, input1, input2, d=4):
        # naive pytorch implementation of the correlation layer.
        # For each of the (2d+1)**2 displacements, the channel-mean of the
        # elementwise product of input1 and the shifted (zero-padded) input2
        # forms one channel of the returned cost volume.
        assert (input1.shape == input2.shape)
        batch_size, feature_num, H, W = input1.shape[0:4]
        input2 = F.pad(input2, (d,d,d,d), value=0)
        cv = []
        for i in range(2 * d + 1):
            for j in range(2 * d + 1):
                cv.append((input1 * input2[:, :, i:(i + H), j:(j + W)]).mean(1).unsqueeze(1))
        return torch.cat(cv, 1)
    def forward(self, feature_list_1, feature_list_2, img_hw):
        """Predict multi-scale flow from image 1 to image 2.

        Args:
            feature_list_1: 6-level feature pyramid of image 1 (fine to coarse)
            feature_list_2: 6-level feature pyramid of image 2
            img_hw: (H, W) of the full-resolution input image
        Returns:
            [flow2, flow3, flow4, flow5]: flows (each multiplied by 4.0)
            resized to (H, W), (H/2, W/2), (H/4, W/4) and (H/8, W/8).
        """
        c11, c12, c13, c14, c15, c16 = feature_list_1
        c21, c22, c23, c24, c25, c26 = feature_list_2
        # Level 6 (coarsest): correlation only, no warping.
        corr6 = self.corr(c16, c26)
        x0 = self.conv6_0(corr6)
        x1 = self.conv6_1(x0)
        x2 = self.conv6_2(torch.cat((x0,x1),1))
        x3 = self.conv6_3(torch.cat((x1,x2),1))
        x4 = self.conv6_4(torch.cat((x2,x3),1))
        flow6 = self.predict_flow6(torch.cat((x3,x4),1))
        # Upsample 2x; values doubled so flow stays in pixel units.
        up_flow6 = F.interpolate(flow6, scale_factor=2.0, mode='bilinear')*2.0
        # Level 5: warp image-2 features by the coarser flow, correlate, and
        # predict a residual that is added to the upsampled flow.
        warp5 = self.warp(c25, up_flow6)
        corr5 = self.corr(c15, warp5)
        x = torch.cat((corr5, c15, up_flow6), 1)
        x0 = self.conv5_0(x)
        x1 = self.conv5_1(x0)
        x2 = self.conv5_2(torch.cat((x0,x1),1))
        x3 = self.conv5_3(torch.cat((x1,x2),1))
        x4 = self.conv5_4(torch.cat((x2,x3),1))
        flow5 = self.predict_flow5(torch.cat((x3,x4),1))
        flow5 = flow5 + up_flow6
        up_flow5 = F.interpolate(flow5, scale_factor=2.0, mode='bilinear')*2.0
        # Level 4.
        warp4 = self.warp(c24, up_flow5)
        corr4 = self.corr(c14, warp4)
        x = torch.cat((corr4, c14, up_flow5), 1)
        x0 = self.conv4_0(x)
        x1 = self.conv4_1(x0)
        x2 = self.conv4_2(torch.cat((x0,x1),1))
        x3 = self.conv4_3(torch.cat((x1,x2),1))
        x4 = self.conv4_4(torch.cat((x2,x3),1))
        flow4 = self.predict_flow4(torch.cat((x3,x4),1))
        flow4 = flow4 + up_flow5
        up_flow4 = F.interpolate(flow4, scale_factor=2.0, mode='bilinear')*2.0
        # Level 3.
        warp3 = self.warp(c23, up_flow4)
        corr3 = self.corr(c13, warp3)
        x = torch.cat((corr3, c13, up_flow4), 1)
        x0 = self.conv3_0(x)
        x1 = self.conv3_1(x0)
        x2 = self.conv3_2(torch.cat((x0,x1),1))
        x3 = self.conv3_3(torch.cat((x1,x2),1))
        x4 = self.conv3_4(torch.cat((x2,x3),1))
        flow3 = self.predict_flow3(torch.cat((x3,x4),1))
        flow3 = flow3 + up_flow4
        up_flow3 = F.interpolate(flow3, scale_factor=2.0, mode='bilinear')*2.0
        # Level 2 (finest predicted level).
        warp2 = self.warp(c22, up_flow3)
        corr2 = self.corr(c12, warp2)
        x = torch.cat((corr2, c12, up_flow3), 1)
        x0 = self.conv2_0(x)
        x1 = self.conv2_1(x0)
        x2 = self.conv2_2(torch.cat((x0,x1),1))
        x3 = self.conv2_3(torch.cat((x1,x2),1))
        x4 = self.conv2_4(torch.cat((x2,x3),1))
        flow2 = self.predict_flow2(torch.cat((x3,x4),1))
        flow2 = flow2 + up_flow3
        # Context-network refinement of the finest flow.
        x = self.dc_conv4(self.dc_conv3(self.dc_conv2(self.dc_conv1(torch.cat([flow2, x4], 1)))))
        flow2 = flow2 + self.dc_conv7(self.dc_conv6(self.dc_conv5(x)))
        # Scale flow values by 4 and resize each level's output relative to
        # the full image resolution.
        img_h, img_w = img_hw[0], img_hw[1]
        flow2 = F.interpolate(flow2 * 4.0, [img_h, img_w], mode='bilinear')
        flow3 = F.interpolate(flow3 * 4.0, [img_h // 2, img_w // 2], mode='bilinear')
        flow4 = F.interpolate(flow4 * 4.0, [img_h // 4, img_w // 4], mode='bilinear')
        flow5 = F.interpolate(flow5 * 4.0, [img_h // 8, img_w // 8], mode='bilinear')
        return [flow2, flow3, flow4, flow5]
| 8,423 | 45.541436 | 97 | py |
TrianFlow | TrianFlow-master/core/networks/structures/net_utils.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import pdb
import numpy as np
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Conv2d + LeakyReLU(0.1) building block used throughout the networks."""
    layers = [
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                  padding=padding, dilation=dilation, bias=True),
        nn.LeakyReLU(0.1),
    ]
    return nn.Sequential(*layers)
def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
    """Plain ConvTranspose2d upsampling layer (no activation)."""
    return nn.ConvTranspose2d(in_planes, out_planes, kernel_size, stride,
                              padding, bias=True)
def warp_flow(x, flow, use_mask=False):
    """
    Warp an image/tensor (im2) back to im1 according to the optical flow.

    Args:
        x: [B, C, H, W] source tensor to sample from (im2)
        flow: [B, 2, H, W] flow from im1 to im2, in pixels (channel 0 = x)
    Returns:
        output: [B, C, H, W] warped tensor; if use_mask is True, samples that
            fell outside x are zeroed.
    Raises:
        ValueError: if the sampling grid and flow shapes differ.
    """
    B, C, H, W = x.size()
    # Build the identity sampling grid of absolute pixel coordinates.
    xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
    yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
    xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
    yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
    grid = torch.cat((xx, yy), 1).float()
    if grid.shape != flow.shape:
        raise ValueError('the shape of grid {0} is not equal to the shape of flow {1}.'.format(grid.shape, flow.shape))
    grid = grid.to(x.device)  # no-op on CPU, moves to the right GPU otherwise
    vgrid = grid + flow
    # Scale absolute coordinates to [-1, 1]; 0 -> -1 and (W-1) -> 1, which is
    # the align_corners=True convention.
    vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
    vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
    vgrid = vgrid.permute(0, 2, 3, 1)
    # align_corners=True must be passed explicitly: the grid_sample default
    # flipped to False in torch 1.3, which made zero flow no longer an
    # identity warp under this normalization.
    output = nn.functional.grid_sample(x, vgrid, align_corners=True)
    if use_mask:
        # Resample an all-ones tensor: interior samples stay ~1, samples that
        # fell (partially) outside drop below 1 and get zeroed.
        # (ones_like also fixes a crash on CPU tensors, where the old
        # x.get_device() call was invalid.)
        mask = nn.functional.grid_sample(torch.ones_like(x), vgrid,
                                         align_corners=True)
        mask[mask < 0.9999] = 0
        mask[mask > 0] = 1
        return output * mask
    else:
        return output
if __name__ == '__main__':
    # Smoke test: warp a constant image by a uniform 3-px horizontal flow.
    # Requires a CUDA device (tensors are moved with .cuda()).
    x = np.ones([1,1,10,10])
    flow = np.stack([np.ones([1,10,10])*3.0, np.zeros([1,10,10])], axis=1)
    y = warp_flow(torch.from_numpy(x).cuda().float(),torch.from_numpy(flow).cuda().float()).cpu().detach().numpy()
    print(y)
| 2,088 | 32.693548 | 119 | py |
TrianFlow | TrianFlow-master/core/dataset/kitti_raw.py | import os, sys
import numpy as np
import imageio
from tqdm import tqdm
import torch.multiprocessing as mp
import pdb
def process_folder(q, static_frames, test_scenes, data_dir, output_dir, stride=1):
    """Worker: consume <date>/<drive> folders from q and dump stacked pairs.

    For each non-static frame pair (n, n+stride) the two RGB images are
    concatenated along height and written under output_dir, and an index
    line '<image> <calib>' is appended to the folder-local train.txt.
    """
    while True:
        # Drain the shared work queue; exit when no folders are left.
        if q.empty():
            break
        folder = q.get()
        if folder in static_frames.keys():
            static_ids = static_frames[folder]
        else:
            static_ids = []
        scene = folder.split('/')[1]
        # scene[:-5] strips the trailing '_sync' before the test-scene check.
        # NOTE(review): KITTI_RAW.prepare_data compares the full scene name
        # instead -- confirm which form the test-scene list actually uses.
        if scene[:-5] in test_scenes:
            continue
        image_path = os.path.join(data_dir, folder, 'image_02/data')
        dump_image_path = os.path.join(output_dir, folder)
        if not os.path.isdir(dump_image_path):
            os.makedirs(dump_image_path)
        f = open(os.path.join(dump_image_path, 'train.txt'), 'w')
        # Note. the os.listdir method returns arbitary order of list. We need correct order.
        numbers = len(os.listdir(image_path))
        for n in range(numbers - stride):
            s_idx = n
            e_idx = s_idx + stride
            # Skip pairs that touch a known static frame.
            if '%.10d'%s_idx in static_ids or '%.10d'%e_idx in static_ids:
                #print('%.10d'%s_idx)
                continue
            curr_image = imageio.imread(os.path.join(image_path, '%.10d'%s_idx)+'.png')
            next_image = imageio.imread(os.path.join(image_path, '%.10d'%e_idx)+'.png')
            # Stack the pair vertically into one training image.
            seq_images = np.concatenate([curr_image, next_image], axis=0)
            imageio.imsave(os.path.join(dump_image_path, '%.10d'%s_idx)+'.png', seq_images.astype('uint8'))
            # Write training files
            date = folder.split('/')[0]
            f.write('%s %s\n' % (os.path.join(folder, '%.10d'%s_idx)+'.png', os.path.join(date, 'calib_cam_to_cam.txt')))
        print(folder)
class KITTI_RAW(object):
    """Preprocessor for the raw KITTI dataset.

    Stacks consecutive image pairs vertically into single training images,
    writes a train.txt index, and copies per-date calibration files --
    skipping static frames and test scenes.
    """

    def __init__(self, data_dir, static_frames_txt, test_scenes_txt):
        """
        Args:
            data_dir: KITTI root laid out as <date>/<drive>/image_02/data/*.png
            static_frames_txt: file with '<date> <drive> <frame_id>' lines
            test_scenes_txt: file with one test scene name per line
        """
        self.data_dir = data_dir
        self.static_frames_txt = static_frames_txt
        self.test_scenes_txt = test_scenes_txt

    def __len__(self):
        raise NotImplementedError

    def collect_static_frame(self):
        """Parse the static-frames list into {'<date>/<drive>': [frame ids]}.

        Frame ids are zero-padded to 10 digits to match the image filenames.
        """
        static_frames = {}
        with open(self.static_frames_txt) as f:
            for line in f:
                date, drive, frame_id = line.strip().split(' ')
                # Plain int() instead of the np.int alias removed in NumPy 1.24.
                curr_fid = '%.10d' % int(frame_id)
                static_frames.setdefault(os.path.join(date, drive), []).append(curr_fid)
        return static_frames

    def collect_test_scenes(self):
        """Read the test-scene names, one per line, stripped of whitespace."""
        with open(self.test_scenes_txt) as f:
            return [line.strip() for line in f.readlines()]

    def prepare_data_mp(self, output_dir, stride=1):
        """Generate paired training data with multiple worker processes.

        Args:
            output_dir: destination root for stacked images and train.txt
            stride: frame distance between the two images of a pair
        """
        num_processes = 16
        processes = []
        q = mp.Queue()
        static_frames = self.collect_static_frame()
        test_scenes = self.collect_test_scenes()
        # exist_ok avoids the FileExistsError the old guard hit when the
        # directory existed but train.txt did not.
        os.makedirs(output_dir, exist_ok=True)
        print('Preparing sequence data....')
        if not os.path.isdir(self.data_dir):
            # Explicit error instead of the old bare `raise` (which produced
            # RuntimeError: No active exception to re-raise).
            raise FileNotFoundError(self.data_dir)
        dirlist = os.listdir(self.data_dir)
        total_dirlist = []
        # Queue every <date>/<drive> folder for the workers.
        for d in dirlist:
            seclist = os.listdir(os.path.join(self.data_dir, d))
            for s in seclist:
                if os.path.isdir(os.path.join(self.data_dir, d, s)):
                    total_dirlist.append(os.path.join(d, s))
                    q.put(os.path.join(d, s))
        # Process every folder
        for rank in range(num_processes):
            p = mp.Process(target=process_folder, args=(q, static_frames, test_scenes, self.data_dir, output_dir, stride))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
        # Merge the per-folder train.txt files written by the workers.
        with open(os.path.join(output_dir, 'train.txt'), 'w') as f:
            for date in os.listdir(output_dir):
                if os.path.isdir(os.path.join(output_dir, date)):
                    for drive in os.listdir(os.path.join(output_dir, date)):
                        with open(os.path.join(output_dir, date, drive, 'train.txt'), 'r') as train_file:
                            f.writelines(train_file.readlines())
        # Copy the per-date calibration files next to the generated data.
        for date in os.listdir(self.data_dir):
            command = 'cp ' + os.path.join(self.data_dir, date, 'calib_cam_to_cam.txt') + ' ' + os.path.join(output_dir, date, 'calib_cam_to_cam.txt')
            os.system(command)
        print('Data Preparation Finished.')

    def prepare_data(self, output_dir):
        """Single-process variant of prepare_data_mp (stride fixed to 1).

        Returns:
            Path of the generated train.txt.
        """
        static_frames = self.collect_static_frame()
        test_scenes = self.collect_test_scenes()
        os.makedirs(output_dir, exist_ok=True)
        # Always (re)create the index; previously `f` could stay undefined
        # when train.txt already existed, crashing later writes.
        f = open(os.path.join(output_dir, 'train.txt'), 'w')
        print('Preparing sequence data....')
        if not os.path.isdir(self.data_dir):
            raise FileNotFoundError(self.data_dir)
        dirlist = os.listdir(self.data_dir)
        total_dirlist = []
        # Collect the different <date>/<drive> folders of images.
        for d in dirlist:
            seclist = os.listdir(os.path.join(self.data_dir, d))
            for s in seclist:
                if os.path.isdir(os.path.join(self.data_dir, d, s)):
                    total_dirlist.append(os.path.join(d, s))
        # Process every folder
        for folder in tqdm(total_dirlist):
            static_ids = static_frames.get(folder, [])
            scene = folder.split('/')[1]
            # NOTE(review): process_folder strips the trailing '_sync'
            # (scene[:-5]) before this check -- confirm which form the
            # test-scene list uses.
            if scene in test_scenes:
                continue
            image_path = os.path.join(self.data_dir, folder, 'image_02/data')
            dump_image_path = os.path.join(output_dir, folder)
            os.makedirs(dump_image_path, exist_ok=True)
            # os.listdir order is arbitrary; index frames by number instead.
            numbers = len(os.listdir(image_path))
            for n in range(numbers - 1):
                s_idx = n
                e_idx = s_idx + 1
                if '%.10d' % s_idx in static_ids or '%.10d' % e_idx in static_ids:
                    print('%.10d' % s_idx)
                    continue
                curr_image = imageio.imread(os.path.join(image_path, '%.10d' % s_idx) + '.png')
                next_image = imageio.imread(os.path.join(image_path, '%.10d' % e_idx) + '.png')
                # Stack the pair vertically into one training image.
                seq_images = np.concatenate([curr_image, next_image], axis=0)
                imageio.imsave(os.path.join(dump_image_path, '%.10d' % s_idx) + '.png', seq_images.astype('uint8'))
                # Write training files
                date = folder.split('/')[0]
                f.write('%s %s\n' % (os.path.join(folder, '%.10d' % s_idx) + '.png', os.path.join(date, 'calib_cam_to_cam.txt')))
            print(folder)
        f.close()  # previously leaked
        # Copy the per-date calibration files next to the generated data.
        for date in os.listdir(self.data_dir):
            command = 'cp ' + os.path.join(self.data_dir, date, 'calib_cam_to_cam.txt') + ' ' + os.path.join(output_dir, date, 'calib_cam_to_cam.txt')
            os.system(command)
        return os.path.join(output_dir, 'train.txt')

    def __getitem__(self, idx):
        raise NotImplementedError
if __name__ == '__main__':
    # Standalone merge step: concatenate the per-folder train.txt files
    # produced by the workers into one global index.
    # NOTE(review): paths are hard-coded to the author's machine.
    data_dir = '/home4/zhaow/data/kitti'
    dirlist = os.listdir('/home4/zhaow/data/kitti')
    output_dir = '/home4/zhaow/data/kitti_seq/data_generated_s2'
    total_dirlist = []
    # Get the different folders of images
    for d in dirlist:
        seclist = os.listdir(os.path.join(data_dir, d))
        for s in seclist:
            if os.path.isdir(os.path.join(data_dir, d, s)):
                total_dirlist.append(os.path.join(d, s))
    # Append every folder-local train.txt into the global one.
    F = open(os.path.join(output_dir, 'train.txt'), 'w')
    for p in total_dirlist:
        traintxt = os.path.join(os.path.join(output_dir, p), 'train.txt')
        f = open(traintxt, 'r')
        for line in f.readlines():
            F.write(line)
        print(traintxt)
| 8,568 | 40.8 | 150 | py |
TrianFlow | TrianFlow-master/core/dataset/nyu_v2.py | import os, sys
import numpy as np
import imageio
import cv2
import copy
import h5py
import scipy.io as sio
import torch
import torch.utils.data
import pdb
from tqdm import tqdm
import torch.multiprocessing as mp
def collect_image_list(path):
    """Return the alphabetically sorted .ppm filenames found in *path*."""
    return sorted(name for name in os.listdir(path)
                  if name.split('.')[-1] == 'ppm')
def process_folder(q, data_dir, output_dir, stride, train_scenes):
    """Worker: dump stacked (n, n+stride) frame pairs for NYU-v2 train scenes.

    Folders whose normalized scene name is not in train_scenes are skipped.
    Pairs are concatenated along height, saved as PNG, and indexed in the
    folder-local train.txt.
    """
    # Directly process the original nyu v2 depth dataset.
    while True:
        # Drain the shared work queue; exit when no folders are left.
        if q.empty():
            break
        folder = q.get()
        scene_name = folder.split('/')[-1]
        # Rebuild the scene name keeping only the first 4 characters of the
        # last '_'-separated token (e.g. 'bedroom_0001a' -> 'bedroom_0001').
        s1,s2 = scene_name.split('_')[:-1], scene_name.split('_')[-1]
        scene_name_full = ''
        for j in s1:
            scene_name_full = scene_name_full + j + '_'
        scene_name_full = scene_name_full + s2[:4]
        if scene_name_full not in train_scenes:
            continue
        image_path = os.path.join(data_dir, folder)
        dump_image_path = os.path.join(output_dir, folder)
        if not os.path.isdir(dump_image_path):
            os.makedirs(dump_image_path)
        f = open(os.path.join(dump_image_path, 'train.txt'), 'w')
        # Note. the os.listdir method returns arbitary order of list. We need correct order.
        image_list = collect_image_list(image_path)
        #image_list = open(os.path.join(image_path, 'index.txt')).readlines()
        numbers = len(image_list) - 1 # The last ppm file seems truncated.
        for n in range(numbers - stride):
            s_idx = n
            e_idx = s_idx + stride
            s_name = image_list[s_idx].strip()
            e_name = image_list[e_idx].strip()
            curr_image = imageio.imread(os.path.join(image_path, s_name))
            next_image = imageio.imread(os.path.join(image_path, e_name))
            #curr_image = cv2.imread(os.path.join(image_path, s_name))
            #next_image = cv2.imread(os.path.join(image_path, e_name))
            # Stack the pair vertically into one training image.
            seq_images = np.concatenate([curr_image, next_image], axis=0)
            imageio.imsave(os.path.join(dump_image_path, os.path.splitext(s_name)[0]+'.png'), seq_images.astype('uint8'))
            #cv2.imwrite(os.path.join(dump_image_path, os.path.splitext(s_name)[0]+'.png'), seq_images.astype('uint8'))
            # Write training files
            #date = folder.split('_')[2]
            f.write('%s %s\n' % (os.path.join(folder, os.path.splitext(s_name)[0]+'.png'), 'calib_cam_to_cam.txt'))
        print(folder)
class NYU_Prepare(object):
    """Builds NYU-v2 train/test scene lists and dumps paired training data.

    Scene membership is derived from the official labeled .mat file plus
    splits.mat; prepare_data_mp then feeds the training scenes to
    process_folder workers.
    """
    def __init__(self, data_dir, test_dir):
        # data_dir: raw NYU-v2 root; test_dir holds the official .mat files.
        self.data_dir = data_dir
        self.test_data = os.path.join(test_dir, 'nyu_depth_v2_labeled.mat')
        self.splits = os.path.join(test_dir, 'splits.mat')
        self.get_all_scenes()
        self.get_test_scenes()
        self.get_train_scenes()
    def __len__(self):
        raise NotImplementedError
    def get_all_scenes(self):
        """List every second-level folder under data_dir into self.all_scenes."""
        self.all_scenes = []
        paths = os.listdir(self.data_dir)
        for p in paths:
            if os.path.isdir(os.path.join(self.data_dir, p)):
                pp = os.listdir(os.path.join(self.data_dir, p))
                for path in pp:
                    self.all_scenes.append(path)
    def get_test_scenes(self):
        """Collect the unique test-scene names referenced by splits.mat."""
        self.test_scenes = []
        test_data = h5py.File(self.test_data, 'r')
        # testNdxs uses 1-based MATLAB indexing, hence the -1 below.
        test_split = sio.loadmat(self.splits)['testNdxs']
        test_split = np.array(test_split).squeeze(1)
        test_scenes = test_data['scenes'][0][test_split-1]
        for i in range(len(test_scenes)):
            # Each entry is an HDF5 reference to a char array; decode it
            # element-by-element into a Python string.
            obj = test_data[test_scenes[i]]
            name = "".join(chr(j) for j in obj[:])
            if name not in self.test_scenes:
                self.test_scenes.append(name)
        #pdb.set_trace()
    def get_train_scenes(self):
        """Collect the unique train-scene names referenced by splits.mat."""
        self.train_scenes = []
        train_data = h5py.File(self.test_data, 'r')
        # trainNdxs uses 1-based MATLAB indexing, hence the -1 below.
        train_split = sio.loadmat(self.splits)['trainNdxs']
        train_split = np.array(train_split).squeeze(1)
        train_scenes = train_data['scenes'][0][train_split-1]
        for i in range(len(train_scenes)):
            obj = train_data[train_scenes[i]]
            name = "".join(chr(j) for j in obj[:])
            if name not in self.train_scenes:
                self.train_scenes.append(name)
    def prepare_data_mp(self, output_dir, stride=1):
        """Dump paired training data for all train scenes with 32 workers."""
        num_processes = 32
        processes = []
        q = mp.Queue()
        if not os.path.isfile(os.path.join(output_dir, 'train.txt')):
            # NOTE(review): raises FileExistsError if output_dir already
            # exists without a train.txt; exist_ok=True would be safer.
            os.makedirs(output_dir)
        #f = open(os.path.join(output_dir, 'train.txt'), 'w')
        print('Preparing sequence data....')
        if not os.path.isdir(self.data_dir):
            # NOTE(review): bare `raise` with no active exception produces a
            # RuntimeError; an explicit FileNotFoundError would be clearer.
            raise
        dirlist = os.listdir(self.data_dir)
        total_dirlist = []
        # Get the different folders of images
        for d in dirlist:
            if not os.path.isdir(os.path.join(self.data_dir, d)):
                continue
            seclist = os.listdir(os.path.join(self.data_dir, d))
            for s in seclist:
                if os.path.isdir(os.path.join(self.data_dir, d, s)):
                    total_dirlist.append(os.path.join(d, s))
                    q.put(os.path.join(d, s))
        # Process every folder
        for rank in range(num_processes):
            p = mp.Process(target=process_folder, args=(q, self.data_dir, output_dir, stride, self.train_scenes))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
        # Collect the training frames.
        f = open(os.path.join(output_dir, 'train.txt'), 'w')
        for dirlist in os.listdir(output_dir):
            if os.path.isdir(os.path.join(output_dir, dirlist)):
                seclists = os.listdir(os.path.join(output_dir, dirlist))
                for s in seclists:
                    train_file = open(os.path.join(output_dir, dirlist, s, 'train.txt'), 'r')
                    for l in train_file.readlines():
                        f.write(l)
        f.close()
        # Write a single shared pinhole calibration file for all samples.
        f = open(os.path.join(output_dir, 'calib_cam_to_cam.txt'), 'w')
        f.write('P_rect: 5.1885790117450188e+02 0.0 3.2558244941119034e+02 0.0 0.0 5.1946961112127485e+02 2.5373616633400465e+02 0.0 0.0 0.0 1.0 0.0')
        f.close()
        print('Data Preparation Finished.')
    def __getitem__(self, idx):
        raise NotImplementedError
class NYU_v2(torch.utils.data.Dataset):
    """Paired-frame NYU-v2 dataset yielding stacked image pairs + intrinsics.

    Each sample is a vertically stacked (current, next) image pair produced
    by NYU_Prepare, undistorted and resized to ``img_hw``, together with
    per-scale camera intrinsics and their inverses.
    """

    def __init__(self, data_dir, num_scales=3, img_hw=(448, 576), num_iterations=None):
        """
        Args:
            data_dir: root containing train.txt, image pairs and calib file.
            num_scales: number of pyramid scales for the intrinsics.
            img_hw: (height, width) the images are resized to.
            num_iterations: if set, the dataset reports this length and draws
                samples pseudo-randomly (seeded by index); if None, the real
                file list is iterated.
        """
        super(NYU_v2, self).__init__()
        self.data_dir = data_dir
        self.num_scales = num_scales
        self.img_hw = img_hw
        self.num_iterations = num_iterations
        # Kinect lens distortion coefficients used for undistortion.
        self.undist_coeff = np.array([2.07966153e-01, -5.8613825e-01, 7.223136313e-04, 1.047962719e-03, 4.98569866e-01])
        # Rectification maps and ROI are built lazily on first use.
        self.mapx, self.mapy = None, None
        self.roi = None
        info_file = os.path.join(self.data_dir, 'train.txt')
        self.data_list = self.get_data_list(info_file)

    def get_data_list(self, info_file):
        """Parse train.txt into [{'image_file', 'cam_intrinsic_file'}, ...]."""
        with open(info_file, 'r') as f:
            lines = f.readlines()
        data_list = []
        for line in lines:
            k = line.strip('\n').split()
            data = {}
            data['image_file'] = os.path.join(self.data_dir, k[0])
            data['cam_intrinsic_file'] = os.path.join(self.data_dir, k[1])
            data_list.append(data)
        print('A total of {} image pairs found'.format(len(data_list)))
        return data_list

    def count(self):
        """Number of real image pairs listed on disk."""
        return len(self.data_list)

    def rand_num(self, idx):
        """Deterministic pseudo-random index in [0, count()) seeded by idx."""
        num_total = self.count()
        np.random.seed(idx)
        return np.random.randint(num_total)

    def __len__(self):
        return self.count() if self.num_iterations is None else self.num_iterations

    def resize_img(self, img, img_hw):
        """Resize a vertically stacked pair (2H, W, 3) to (2H', W', 3).

        The two halves are resized independently so each frame keeps its own
        content; img_hw is the per-frame (H', W').
        """
        half = int(img.shape[0] / 2)
        top, bottom = img[:half, :, :], img[half:, :, :]
        top = cv2.resize(top, (img_hw[1], img_hw[0]))
        bottom = cv2.resize(bottom, (img_hw[1], img_hw[0]))
        return np.concatenate([top, bottom], 0)

    def random_flip_img(self, img):
        """Horizontally flip the stacked pair with probability 0.5.

        Currently unused by preprocess_img (kept for experimentation).
        """
        if np.random.rand() > 0.5:
            img = cv2.flip(img, 1)
        return img

    def undistort_img(self, img, K):
        """Undistort both halves of a stacked pair and crop to the valid ROI.

        The rectification maps are computed once from the first image's size
        and intrinsics K, then cached on the instance and reused.
        """
        half = int(img.shape[0] / 2)
        img1, img2 = img[:half, :, :], img[half:, :, :]
        h, w = half, img.shape[1]
        if self.mapx is None:
            newcameramtx, self.roi = cv2.getOptimalNewCameraMatrix(K, self.undist_coeff, (w, h), 1, (w, h))
            self.mapx, self.mapy = cv2.initUndistortRectifyMap(K, self.undist_coeff, None, newcameramtx, (w, h), 5)
        img1_undist = cv2.remap(img1, self.mapx, self.mapy, cv2.INTER_LINEAR)
        img2_undist = cv2.remap(img2, self.mapx, self.mapy, cv2.INTER_LINEAR)
        x, y, w, h = self.roi
        img1_undist = img1_undist[y:y+h, x:x+w]
        img2_undist = img2_undist[y:y+h, x:x+w]
        return np.concatenate([img1_undist, img2_undist], 0)

    def preprocess_img(self, img, K, img_hw=None, is_test=False):
        """Undistort (training only), resize, and scale pixel values to [0, 1]."""
        if img_hw is None:
            img_hw = self.img_hw
        if not is_test:
            img = self.undistort_img(img, K)
        img = self.resize_img(img, img_hw)
        return img / 255.0

    def read_cam_intrinsic(self, fname):
        """Read the 3x3 intrinsics from the last 'P_rect: ...' line of fname."""
        with open(fname, 'r') as f:
            lines = f.readlines()
        data = lines[-1].strip('\n').split(' ')[1:]
        data = [float(k) for k in data]
        data = np.array(data).reshape(3, 4)
        return data[:3, :3]

    def rescale_intrinsics(self, K, img_hw_orig, img_hw_new):
        """Scale the intrinsics for a resize from img_hw_orig to img_hw_new.

        NOTE(review): row 0 (the x/width row) is scaled by the height ratio
        and row 1 by the width ratio -- this looks swapped, but is preserved
        as-is to keep training behaviour identical; confirm intent.
        """
        K_new = copy.deepcopy(K)
        K_new[0, :] = K_new[0, :] * img_hw_new[0] / img_hw_orig[0]
        K_new[1, :] = K_new[1, :] * img_hw_new[1] / img_hw_orig[1]
        return K_new

    def get_intrinsics_per_scale(self, K, scale):
        """Return (K, K^-1) with the focal/principal rows divided by 2**scale."""
        K_new = copy.deepcopy(K)
        K_new[0, :] = K_new[0, :] / (2**scale)
        K_new[1, :] = K_new[1, :] / (2**scale)
        return K_new, np.linalg.inv(K_new)

    def get_multiscale_intrinsics(self, K, num_scales):
        """Stack per-scale intrinsics/inverses into (num_scales, 3, 3) arrays."""
        K_ms, K_inv_ms = [], []
        for s in range(num_scales):
            K_new, K_new_inv = self.get_intrinsics_per_scale(K, s)
            K_ms.append(K_new[None, :, :])
            K_inv_ms.append(K_new_inv[None, :, :])
        return np.concatenate(K_ms, 0), np.concatenate(K_inv_ms, 0)

    def __getitem__(self, idx):
        """
        Returns:
            - img torch.Tensor (3, 2H, W), the pair stacked along height
            - K torch.Tensor (num_scales, 3, 3)
            - K_inv torch.Tensor (num_scales, 3, 3)
        """
        if self.num_iterations is not None:
            # Bug fix: the old code evaluated `idx >= self.num_iterations`
            # unconditionally, raising TypeError whenever num_iterations was
            # None instead of serving the sample.
            if idx >= self.num_iterations:
                raise IndexError
            idx = self.rand_num(idx)
        data = self.data_list[idx]
        # load img
        img = cv2.imread(data['image_file'])
        img_hw_orig = (int(img.shape[0] / 2), img.shape[1])
        # load intrinsic
        cam_intrinsic_orig = self.read_cam_intrinsic(data['cam_intrinsic_file'])
        cam_intrinsic = self.rescale_intrinsics(cam_intrinsic_orig, img_hw_orig, self.img_hw)
        K_ms, K_inv_ms = self.get_multiscale_intrinsics(cam_intrinsic, self.num_scales)  # (num_scales, 3, 3) each
        # image preprocessing
        img = self.preprocess_img(img, cam_intrinsic_orig, self.img_hw)  # (2H, W, 3)
        img = img.transpose(2, 0, 1)
        return torch.from_numpy(img).float(), torch.from_numpy(K_ms).float(), torch.from_numpy(K_inv_ms).float()
if __name__ == '__main__':
    # One-off helper: merge the per-sequence train.txt files produced by the
    # data-preparation step into a single top-level train.txt.  Paths are
    # hard-coded for the original author's machine.
    data_dir = '/home4/zhaow/data/kitti'
    dirlist = os.listdir('/home4/zhaow/data/kitti')
    output_dir = '/home4/zhaow/data/kitti_seq/data_generated_s2'
    total_dirlist = []
    # Get the different folders of images
    for d in dirlist:
        seclist = os.listdir(os.path.join(data_dir, d))
        for s in seclist:
            if os.path.isdir(os.path.join(data_dir, d, s)):
                total_dirlist.append(os.path.join(d, s))
    # Bug fix: the old code leaked every file handle it opened; use context
    # managers so they are flushed and closed even on error.
    with open(os.path.join(output_dir, 'train.txt'), 'w') as F:
        for p in total_dirlist:
            traintxt = os.path.join(os.path.join(output_dir, p), 'train.txt')
            with open(traintxt, 'r') as f:
                for line in f.readlines():
                    F.write(line)
            print(traintxt)
| 13,298 | 36.997143 | 150 | py |
TrianFlow | TrianFlow-master/core/dataset/kitti_2012.py | import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from kitti_prepared import KITTI_Prepared
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'evaluation'))
from evaluate_flow import get_scaled_intrinsic_matrix, eval_flow_avg
import numpy as np
import cv2
import copy
import torch
import pdb
class KITTI_2012(KITTI_Prepared):
    """KITTI 2012 benchmark wrapper (194 image pairs).

    Reuses the preprocessing helpers of KITTI_Prepared but enumerates the
    fixed benchmark layout and reads intrinsics through the flow-evaluation
    helper instead of the raw calib parser.
    """
    def __init__(self, data_dir, img_hw=(256, 832), init=True):
        self.data_dir = data_dir
        self.img_hw = img_hw
        self.num_total = 194
        if init:
            self.data_list = self.get_data_list()

    def get_data_list(self):
        """Enumerate the 194 (frame _10, frame _11, calib) triplets."""
        data_list = []
        for idx in range(self.num_total):
            stem = str(idx).zfill(6)
            data_list.append({
                'img1_dir': os.path.join(self.data_dir, 'image_2', stem + '_10.png'),
                'img2_dir': os.path.join(self.data_dir, 'image_2', stem + '_11.png'),
                'calib_file_dir': os.path.join(self.data_dir, 'calib_cam_to_cam', stem + '.txt'),
            })
        return data_list

    def __len__(self):
        return len(self.data_list)

    def read_cam_intrinsic(self, calib_file):
        """Read intrinsics via the evaluation helper (no zoom applied here)."""
        return get_scaled_intrinsic_matrix(calib_file, zoom_x=1.0, zoom_y=1.0)

    def __getitem__(self, idx):
        '''
        Returns:
            - img torch.Tensor (3, 2 * H, W)
            - K torch.Tensor (3, 3)
            - K_inv torch.Tensor (3, 3)
        '''
        sample = self.data_list[idx]
        # Load and vertically stack the two frames of the pair.
        img1 = cv2.imread(sample['img1_dir'])
        img2 = cv2.imread(sample['img2_dir'])
        img_hw_orig = (img1.shape[0], img1.shape[1])
        stacked = np.concatenate([img1, img2], 0)
        stacked = self.preprocess_img(stacked, self.img_hw, is_test=True)
        stacked = stacked.transpose(2, 0, 1)
        # Intrinsics rescaled to the network input resolution, scale 0 only.
        cam_intrinsic = self.read_cam_intrinsic(sample['calib_file_dir'])
        cam_intrinsic = self.rescale_intrinsics(cam_intrinsic, img_hw_orig, self.img_hw)
        K, K_inv = self.get_intrinsics_per_scale(cam_intrinsic, scale=0)  # (3, 3), (3, 3)
        return torch.from_numpy(stacked).float(), torch.from_numpy(K).float(), torch.from_numpy(K_inv).float()
if __name__ == '__main__':
    # Module is import-only; no standalone behaviour.
    pass
| 2,288 | 35.333333 | 110 | py |
TrianFlow | TrianFlow-master/core/dataset/kitti_odo.py | import os, sys
import numpy as np
import imageio
from tqdm import tqdm
import torch.multiprocessing as mp
def process_folder(q, data_dir, output_dir, stride=1):
    """Worker: pull sequence names from queue ``q`` and dump stacked frame pairs.

    For each sequence folder, frames s and s+stride from image_2/ are
    vertically concatenated into one PNG under ``output_dir``/folder, and a
    per-folder train.txt listing (image, calib) pairs is written.

    NOTE(review): the empty()/get() pattern is racy in general; it is
    acceptable here only because the queue is fully populated before the
    workers start (see KITTI_Odo.prepare_data_mp).
    """
    while not q.empty():
        folder = q.get()
        image_path = os.path.join(data_dir, folder, 'image_2/')
        dump_image_path = os.path.join(output_dir, folder)
        if not os.path.isdir(dump_image_path):
            os.makedirs(dump_image_path)
        # Bug fix: the index file was opened without ever being closed; use a
        # context manager so it is flushed/closed even if a read fails.
        with open(os.path.join(dump_image_path, 'train.txt'), 'w') as f:
            # Note. os.listdir returns arbitrary order; the numeric indices
            # provide the correct frame order.
            numbers = len(os.listdir(image_path))
            for s_idx in range(numbers - stride):
                e_idx = s_idx + stride
                curr_image = imageio.imread(os.path.join(image_path, '%.6d' % s_idx) + '.png')
                next_image = imageio.imread(os.path.join(image_path, '%.6d' % e_idx) + '.png')
                seq_images = np.concatenate([curr_image, next_image], axis=0)
                imageio.imsave(os.path.join(dump_image_path, '%.6d' % s_idx) + '.png', seq_images.astype('uint8'))
                # Write training files
                f.write('%s %s\n' % (os.path.join(folder, '%.6d' % s_idx) + '.png', os.path.join(folder, 'calib.txt')))
        print(folder)
class KITTI_Odo(object):
    """KITTI odometry data preparer.

    Builds stacked two-frame training images for sequences 00-08 by fanning
    per-sequence work out to worker processes (process_folder), then merges
    the per-sequence train.txt files and copies each calib.txt.
    """
    def __init__(self, data_dir):
        self.data_dir = data_dir
        self.train_seqs = ['00', '01', '02', '03', '04', '05', '06', '07', '08']

    def __len__(self):
        raise NotImplementedError

    def prepare_data_mp(self, output_dir, stride=1):
        """Generate the stacked-pair dataset in ``output_dir``.

        Generation is skipped when output_dir/train.txt already exists; the
        merge/copy step below always runs.
        """
        num_processes = 16
        processes = []
        q = mp.Queue()
        if not os.path.isfile(os.path.join(output_dir, 'train.txt')):
            # Bug fix: exist_ok so a pre-existing directory that merely lacks
            # train.txt no longer crashes with FileExistsError.
            os.makedirs(output_dir, exist_ok=True)
            print('Preparing sequence data....')
            if not os.path.isdir(self.data_dir):
                # Bug fix: a bare `raise` with no active exception is itself a
                # RuntimeError; raise a meaningful exception instead.
                raise FileNotFoundError('data_dir does not exist: %s' % self.data_dir)
            # Queue every training sequence present under data_dir.
            for d in os.listdir(self.data_dir):
                if d in self.train_seqs:
                    q.put(d)
            # Process every folder
            for _ in range(num_processes):
                p = mp.Process(target=process_folder, args=(q, self.data_dir, output_dir, stride))
                p.start()
                processes.append(p)
            for p in processes:
                p.join()
        # Merge the per-sequence index files and copy calibration files.
        # Bug fix: file handles are now closed via context managers.
        with open(os.path.join(output_dir, 'train.txt'), 'w') as f:
            for d in self.train_seqs:
                with open(os.path.join(output_dir, d, 'train.txt'), 'r') as train_file:
                    for l in train_file.readlines():
                        f.write(l)
                command = 'cp ' + os.path.join(self.data_dir, d, 'calib.txt') + ' ' + os.path.join(output_dir, d, 'calib.txt')
                os.system(command)
        print('Data Preparation Finished.')

    def __getitem__(self, idx):
        raise NotImplementedError
if __name__ == '__main__':
    # One-off helper: merge the per-sequence train.txt files produced by the
    # preparation step into a single top-level train.txt.  Paths are
    # hard-coded for the original author's machine.
    data_dir = '/home4/zhaow/data/kitti'
    dirlist = os.listdir('/home4/zhaow/data/kitti')
    output_dir = '/home4/zhaow/data/kitti_seq/data_generated_s2'
    total_dirlist = []
    # Get the different folders of images
    for d in dirlist:
        seclist = os.listdir(os.path.join(data_dir, d))
        for s in seclist:
            if os.path.isdir(os.path.join(data_dir, d, s)):
                total_dirlist.append(os.path.join(d, s))
    # Bug fix: the old code leaked every file handle it opened; use context
    # managers so they are flushed and closed even on error.
    with open(os.path.join(output_dir, 'train.txt'), 'w') as F:
        for p in total_dirlist:
            traintxt = os.path.join(os.path.join(output_dir, p), 'train.txt')
            with open(traintxt, 'r') as f:
                for line in f.readlines():
                    F.write(line)
            print(traintxt)
TrianFlow | TrianFlow-master/core/dataset/kitti_prepared.py | import os, sys
import numpy as np
import cv2
import copy
import torch
import torch.utils.data
import pdb
class KITTI_Prepared(torch.utils.data.Dataset):
    """Dataset over pre-stacked KITTI image pairs.

    Each sample file holds two frames stacked vertically (2*H, W, 3); the
    matching line of <data_dir>/train.txt points at the image file and at
    the calib file whose last line carries a 3x4 projection matrix.

    Args:
        data_dir: root directory containing train.txt and the data files.
        num_scales: number of pyramid scales for the returned intrinsics.
        img_hw: output (height, width) for each frame.
        num_iterations: when set, overrides the reported length and maps
            indices to (seeded) random samples — an epoch-size override.
    """
    def __init__(self, data_dir, num_scales=3, img_hw=(256, 832), num_iterations=None):
        super(KITTI_Prepared, self).__init__()
        self.data_dir = data_dir
        self.num_scales = num_scales
        self.img_hw = img_hw
        self.num_iterations = num_iterations
        info_file = os.path.join(self.data_dir, 'train.txt')
        self.data_list = self.get_data_list(info_file)

    def get_data_list(self, info_file):
        """Parse train.txt lines of the form '<image> <calib>' into dicts of absolute paths."""
        with open(info_file, 'r') as f:
            lines = f.readlines()
        data_list = []
        for line in lines:
            k = line.strip('\n').split()
            data = {}
            data['image_file'] = os.path.join(self.data_dir, k[0])
            data['cam_intrinsic_file'] = os.path.join(self.data_dir, k[1])
            data_list.append(data)
        print('A total of {} image pairs found'.format(len(data_list)))
        return data_list

    def count(self):
        """Number of physical samples on disk (independent of num_iterations)."""
        return len(self.data_list)

    def rand_num(self, idx):
        """Deterministically map an iteration index to a random sample index."""
        np.random.seed(idx)  # seeded per idx so the mapping is reproducible
        return np.random.randint(self.count())

    def __len__(self):
        return self.count() if self.num_iterations is None else self.num_iterations

    def resize_img(self, img, img_hw):
        '''
        Input size (N*H, W, 3)
        Output size (N*H', W', 3), where (H', W') == img_hw
        Each stacked half is resized independently, then re-stacked.
        '''
        img_h, img_w = img.shape[0], img.shape[1]
        half_h = int(img_h / 2)
        img1, img2 = img[:half_h, :, :], img[half_h:, :, :]
        # cv2.resize takes (width, height)
        img1_new = cv2.resize(img1, (img_hw[1], img_hw[0]))
        img2_new = cv2.resize(img2, (img_hw[1], img_hw[0]))
        return np.concatenate([img1_new, img2_new], 0)

    def random_flip_img(self, img):
        """With probability 0.5, flip the stacked image horizontally (augmentation)."""
        if np.random.rand() > 0.5:
            img = cv2.flip(img, 1)  # flipCode=1 -> horizontal flip
        return img

    def preprocess_img(self, img, img_hw=None, is_test=False):
        """Resize, optionally random-flip (train mode), and normalize to [0, 1]."""
        if img_hw is None:
            img_hw = self.img_hw
        img = self.resize_img(img, img_hw)
        if not is_test:
            img = self.random_flip_img(img)
        return img / 255.0

    def read_cam_intrinsic(self, fname):
        """Parse the last line of a calib file ('Px: v0 ... v11') into a 3x3 intrinsic matrix."""
        with open(fname, 'r') as f:
            lines = f.readlines()
        data = lines[-1].strip('\n').split(' ')[1:]
        data = [float(k) for k in data]
        proj = np.array(data).reshape(3, 4)
        return proj[:3, :3]

    def rescale_intrinsics(self, K, img_hw_orig, img_hw_new):
        """Return a rescaled copy of K; sizes are (height, width).

        Bug fix: K was previously modified in place, mutating the caller's
        array as a side effect; a copy is now returned, consistent with
        get_intrinsics_per_scale.
        """
        K_new = copy.deepcopy(K)
        K_new[0, :] = K_new[0, :] * img_hw_new[1] / img_hw_orig[1]  # x row: width ratio
        K_new[1, :] = K_new[1, :] * img_hw_new[0] / img_hw_orig[0]  # y row: height ratio
        return K_new

    def get_intrinsics_per_scale(self, K, scale):
        """Return (K_s, K_s^-1) with the focal/principal rows divided by 2**scale."""
        K_new = copy.deepcopy(K)
        K_new[0, :] = K_new[0, :] / (2 ** scale)
        K_new[1, :] = K_new[1, :] / (2 ** scale)
        return K_new, np.linalg.inv(K_new)

    def get_multiscale_intrinsics(self, K, num_scales):
        """Stack per-scale intrinsics and inverses into two (num_scales, 3, 3) arrays."""
        K_ms, K_inv_ms = [], []
        for s in range(num_scales):
            K_new, K_new_inv = self.get_intrinsics_per_scale(K, s)
            K_ms.append(K_new[None, :, :])
            K_inv_ms.append(K_new_inv[None, :, :])
        return np.concatenate(K_ms, 0), np.concatenate(K_inv_ms, 0)

    def __getitem__(self, idx):
        '''
        Returns:
            - img torch.Tensor (3, 2 * H, W): normalized stacked image pair
            - K torch.Tensor (num_scales, 3, 3)
            - K_inv torch.Tensor (num_scales, 3, 3)
        '''
        if self.num_iterations is not None:
            # Consistency with the raw-data variant of this dataset: reject
            # out-of-range indices explicitly before the random re-mapping.
            if idx >= self.num_iterations:
                raise IndexError
            idx = self.rand_num(idx)
        data = self.data_list[idx]
        # load img
        img = cv2.imread(data['image_file'])
        img_hw_orig = (int(img.shape[0] / 2), img.shape[1])
        img = self.preprocess_img(img, self.img_hw)  # (img_h * 2, img_w, 3)
        img = img.transpose(2, 0, 1)
        # load intrinsic
        cam_intrinsic = self.read_cam_intrinsic(data['cam_intrinsic_file'])
        cam_intrinsic = self.rescale_intrinsics(cam_intrinsic, img_hw_orig, self.img_hw)
        K_ms, K_inv_ms = self.get_multiscale_intrinsics(cam_intrinsic, self.num_scales)  # (num_scales, 3, 3) x2
        return torch.from_numpy(img).float(), torch.from_numpy(K_ms).float(), torch.from_numpy(K_inv_ms).float()
if __name__ == '__main__':
    # Module is import-only; no standalone behaviour.
    pass
| 4,630 | 33.559701 | 128 | py |
TrianFlow | TrianFlow-master/core/visualize/profiler.py | import os
import time
import torch
import pdb
class Profiler(object):
    """Lightweight wall-clock profiler for (optionally CUDA) workloads.

    Synchronizes the GPU before each timestamp so reported intervals
    include kernel execution time, not just launch time.
    """
    @staticmethod
    def _sync():
        # Robustness fix: only synchronize when CUDA is actually available,
        # so the profiler also works on CPU-only machines instead of raising.
        if torch.cuda.is_available():
            torch.cuda.synchronize()

    def __init__(self, silent=False):
        self.silent = silent
        self._sync()
        self.start = time.time()
        self.cache_time = self.start

    def reset(self, silent=None):
        """Restart the clock; keep the current verbosity unless overridden."""
        if silent is None:
            silent = self.silent
        self.__init__(silent=silent)

    def report_process(self, process_name):
        """Print time elapsed since the previous report (or start) and advance the mark."""
        if self.silent:
            return None
        self._sync()
        now = time.time()
        print('{0}\t: {1:.4f}'.format(process_name, now - self.cache_time))
        self.cache_time = now

    def report_all(self, whole_process_name):
        """Print total time elapsed since construction/reset.

        Bug fix: removed a leftover pdb.set_trace() that froze the program
        at a debugger prompt on every call.
        """
        if self.silent:
            return None
        self._sync()
        now = time.time()
        print('{0}\t: {1:.4f}'.format(whole_process_name, now - self.start))
| 887 | 25.117647 | 76 | py |
AlignShift | AlignShift-master/setup.py | import os
import platform
import subprocess
import time
from setuptools import Extension, dist, find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
dist.Distribution().fetch_build_eggs(['Cython', 'numpy>=1.11.1'])
import numpy as np # noqa: E402, isort:skip
from Cython.Build import cythonize # noqa: E402, isort:skip
def readme():
    """Return the contents of README.md for use as the long description."""
    with open('README.md', encoding='utf-8') as f:
        return f.read()
# Version components for the generated mmdet/version.py; SUFFIX marks a
# pre-release (release candidate) build.
MAJOR = 1
MINOR = 0
PATCH = ''
SUFFIX = 'rc1'
if PATCH:
    SHORT_VERSION = '{}.{}.{}{}'.format(MAJOR, MINOR, PATCH, SUFFIX)
else:
    # No patch component: version is MAJOR.MINOR + suffix (e.g. '1.0rc1').
    SHORT_VERSION = '{}.{}{}'.format(MAJOR, MINOR, SUFFIX)
# Path (relative to the repo root) of the auto-generated version module.
version_file = 'mmdet/version.py'
def get_git_hash():
    """Return the current git commit sha, or 'unknown' when git is unavailable."""

    def _minimal_ext_cmd(cmd):
        # Run with a stripped-down environment so git output is not
        # affected by the user's locale (forces the C locale).
        env = {}
        for key in ('SYSTEMROOT', 'PATH', 'HOME'):
            value = os.environ.get(key)
            if value is not None:
                env[key] = value
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
        return proc.communicate()[0]

    try:
        sha = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']).strip().decode('ascii')
    except OSError:
        sha = 'unknown'
    return sha
def get_hash():
    """Best-effort short sha: from git, else from the installed mmdet version, else 'unknown'."""
    if os.path.exists('.git'):
        return get_git_hash()[:7]
    if os.path.exists(version_file):
        try:
            # The generated version string looks like '1.0rc1+<sha>'.
            from mmdet.version import __version__
            return __version__.split('+')[-1]
        except ImportError:
            raise ImportError('Unable to get git version')
    return 'unknown'
def write_version_py():
    """Regenerate mmdet/version.py with the full version (short version + git sha)."""
    # Template is deliberately left-flush so the generated file has no
    # leading indentation.
    template = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
short_version = '{}'
"""
    full_version = SHORT_VERSION + '+' + get_hash()
    with open(version_file, 'w') as f:
        f.write(template.format(time.asctime(), full_version, SHORT_VERSION))
def get_version():
    """Execute the generated version module and return its __version__ string."""
    namespace = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
def make_cuda_ext(name, module, sources):
    """Build a CUDAExtension named '<module>.<name>' from sources relative to module's directory."""
    source_paths = [os.path.join(*module.split('.'), src) for src in sources]
    # Disable half-precision operator overloads in nvcc-compiled code.
    nvcc_flags = [
        '-D__CUDA_NO_HALF_OPERATORS__',
        '-D__CUDA_NO_HALF_CONVERSIONS__',
        '-D__CUDA_NO_HALF2_OPERATORS__',
    ]
    return CUDAExtension(
        name='{}.{}'.format(module, name),
        sources=source_paths,
        extra_compile_args={'cxx': [], 'nvcc': nvcc_flags})
def make_cython_ext(name, module, sources):
    """Cythonize a C++ extension named '<module>.<name>' from sources in module's directory."""
    compile_args = None
    if platform.system() != 'Windows':
        compile_args = {'cxx': ['-Wno-unused-function', '-Wno-write-strings']}
    ext = Extension(
        '{}.{}'.format(module, name),
        [os.path.join(*module.split('.'), src) for src in sources],
        include_dirs=[np.get_include()],
        language='c++',
        extra_compile_args=compile_args)
    # cythonize returns a one-element list; unpack it.
    ext, = cythonize(ext)
    return ext
def get_requirements(filename='requirements.txt'):
    """Read dependency specifiers (one per line) from *filename* next to this file."""
    here = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(here, filename), 'r') as f:
        return [line.replace('\n', '') for line in f.readlines()]
if __name__ == '__main__':
    # Regenerate mmdet/version.py first so the built package embeds the
    # current short version plus git sha.
    write_version_py()
    setup(
        name='mmdet',
        version=get_version(),
        description='Open MMLab Detection Toolbox and Benchmark',
        long_description=readme(),
        author='OpenMMLab',
        author_email='chenkaidev@gmail.com',
        keywords='computer vision, object detection',
        url='https://github.com/open-mmlab/mmdetection',
        packages=find_packages(exclude=('configs', 'tools', 'demo')),
        # Ship the prebuilt shared objects with the ops package.
        package_data={'mmdet.ops': ['*/*.so']},
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
        ],
        license='Apache License 2.0',
        setup_requires=['pytest-runner', 'cython', 'numpy'],
        tests_require=['pytest', 'xdoctest'],
        install_requires=get_requirements(),
        # Compiled extensions: NMS (Cython CPU + CUDA), RoI align/pool,
        # deformable conv/pool, sigmoid focal loss and masked conv kernels.
        ext_modules=[
            make_cython_ext(
                name='soft_nms_cpu',
                module='mmdet.ops.nms',
                sources=['src/soft_nms_cpu.pyx']),
            make_cuda_ext(
                name='nms_cpu',
                module='mmdet.ops.nms',
                sources=['src/nms_cpu.cpp']),
            make_cuda_ext(
                name='nms_cuda',
                module='mmdet.ops.nms',
                sources=['src/nms_cuda.cpp', 'src/nms_kernel.cu']),
            make_cuda_ext(
                name='roi_align_cuda',
                module='mmdet.ops.roi_align',
                sources=['src/roi_align_cuda.cpp', 'src/roi_align_kernel.cu']),
            make_cuda_ext(
                name='roi_pool_cuda',
                module='mmdet.ops.roi_pool',
                sources=['src/roi_pool_cuda.cpp', 'src/roi_pool_kernel.cu']),
            make_cuda_ext(
                name='deform_conv_cuda',
                module='mmdet.ops.dcn',
                sources=[
                    'src/deform_conv_cuda.cpp',
                    'src/deform_conv_cuda_kernel.cu'
                ]),
            make_cuda_ext(
                name='deform_pool_cuda',
                module='mmdet.ops.dcn',
                sources=[
                    'src/deform_pool_cuda.cpp',
                    'src/deform_pool_cuda_kernel.cu'
                ]),
            make_cuda_ext(
                name='sigmoid_focal_loss_cuda',
                module='mmdet.ops.sigmoid_focal_loss',
                sources=[
                    'src/sigmoid_focal_loss.cpp',
                    'src/sigmoid_focal_loss_cuda.cu'
                ]),
            make_cuda_ext(
                name='masked_conv2d_cuda',
                module='mmdet.ops.masked_conv',
                sources=[
                    'src/masked_conv2d_cuda.cpp', 'src/masked_conv2d_kernel.cu'
                ]),
        ],
        # PyTorch's BuildExtension handles mixed cxx/nvcc compilation flags.
        cmdclass={'build_ext': BuildExtension},
        zip_safe=False)
AlignShift | AlignShift-master/nn/utiles.py | from torch._six import container_abcs
import collections.abc
from itertools import repeat
def as_triple(x, d_value=1):
    """Coerce ``x`` into a length-3 list, prepending ``d_value`` as the leading (depth) entry.

    - scalar            -> [d_value, x, x]
    - 2-element iterable -> [d_value, x0, x1]
    - other iterables    -> list(x) unchanged (assumed already a triple)

    Fix: use collections.abc (as the other helpers in this module already
    do) instead of the deprecated ``torch._six.container_abcs``, which was
    removed in torch >= 1.9.
    """
    if isinstance(x, collections.abc.Iterable):
        x = list(x)
        if len(x) == 2:
            x = [d_value] + x
        return x
    return [d_value] + [x] * 2
def _ntuple_same(n):
def parse(x):
if isinstance(x, int):
return tuple(repeat(x, n))
elif isinstance(x, collections.abc.Iterable):
assert len(set(x))==1, 'the size of kernel must be the same for each side'
return tuple(repeat(x[0], n))
return parse
def _to_ntuple(n):
def parse(x):
if isinstance(x, int):
return tuple(repeat(x, n))
elif isinstance(x, collections.abc.Iterable):
if len(set(x))==1:
return tuple(repeat(x[0], n))
else:
assert len(x)==n , 'wrong format'
return x
return parse
# Module-level shortcut parsers for the common 2-D and 3-D cases.
# *_same variants require every entry to be equal; _to_* variants also
# accept an already-correct-length sequence.
_pair_same = _ntuple_same(2)
_triple_same = _ntuple_same(3)
_to_pair = _to_ntuple(2)
_to_triple = _to_ntuple(3)
| 1,076 | 25.268293 | 86 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.